'before-ckrm_E15-rbce-vserver-support-merge'.
--- /dev/null
+deps_config := \
+ lib/Kconfig \
+ crypto/Kconfig \
+ security/selinux/Kconfig \
+ security/Kconfig \
+ kernel/vserver/Kconfig \
+ arch/i386/oprofile/Kconfig \
+ fs/nls/Kconfig \
+ fs/partitions/Kconfig \
+ fs/ncpfs/Kconfig \
+ fs/Kconfig \
+ drivers/usb/gadget/Kconfig \
+ drivers/usb/misc/Kconfig \
+ drivers/usb/serial/Kconfig \
+ drivers/usb/net/Kconfig \
+ drivers/usb/media/Kconfig \
+ drivers/usb/image/Kconfig \
+ drivers/usb/input/Kconfig \
+ drivers/usb/storage/Kconfig \
+ drivers/usb/class/Kconfig \
+ drivers/usb/host/Kconfig \
+ drivers/usb/core/Kconfig \
+ drivers/usb/Kconfig \
+ sound/oss/Kconfig \
+ sound/parisc/Kconfig \
+ sound/sparc/Kconfig \
+ sound/pcmcia/Kconfig \
+ sound/usb/Kconfig \
+ sound/arm/Kconfig \
+ sound/ppc/Kconfig \
+ sound/pci/Kconfig \
+ sound/isa/Kconfig \
+ sound/drivers/Kconfig \
+ sound/core/Kconfig \
+ sound/oss/dmasound/Kconfig \
+ sound/Kconfig \
+ drivers/video/logo/Kconfig \
+ drivers/video/console/Kconfig \
+ drivers/video/Kconfig \
+ drivers/media/common/Kconfig \
+ drivers/media/dvb/bt8xx/Kconfig \
+ drivers/media/dvb/b2c2/Kconfig \
+ drivers/media/dvb/ttusb-dec/Kconfig \
+ drivers/media/dvb/ttusb-budget/Kconfig \
+ drivers/media/dvb/ttpci/Kconfig \
+ drivers/media/dvb/frontends/Kconfig \
+ drivers/media/dvb/dvb-core/Kconfig \
+ drivers/media/dvb/Kconfig \
+ drivers/media/radio/Kconfig \
+ drivers/media/video/Kconfig \
+ drivers/media/Kconfig \
+ drivers/misc/Kconfig \
+ drivers/w1/Kconfig \
+ drivers/i2c/chips/Kconfig \
+ drivers/i2c/busses/Kconfig \
+ drivers/i2c/algos/Kconfig \
+ drivers/i2c/Kconfig \
+ drivers/char/pcmcia/Kconfig \
+ drivers/char/drm/Kconfig \
+ drivers/char/agp/Kconfig \
+ drivers/char/ftape/Kconfig \
+ drivers/char/watchdog/Kconfig \
+ drivers/char/ipmi/Kconfig \
+ drivers/serial/Kconfig \
+ drivers/char/Kconfig \
+ drivers/input/misc/Kconfig \
+ drivers/input/touchscreen/Kconfig \
+ drivers/input/joystick/iforce/Kconfig \
+ drivers/input/joystick/Kconfig \
+ drivers/input/mouse/Kconfig \
+ drivers/input/keyboard/Kconfig \
+ drivers/input/serio/Kconfig \
+ drivers/input/gameport/Kconfig \
+ drivers/input/Kconfig \
+ drivers/telephony/Kconfig \
+ drivers/isdn/hardware/eicon/Kconfig \
+ drivers/isdn/hardware/avm/Kconfig \
+ drivers/isdn/hardware/Kconfig \
+ drivers/isdn/capi/Kconfig \
+ drivers/isdn/hysdn/Kconfig \
+ drivers/isdn/tpam/Kconfig \
+ drivers/isdn/act2000/Kconfig \
+ drivers/isdn/sc/Kconfig \
+ drivers/isdn/pcbit/Kconfig \
+ drivers/isdn/icn/Kconfig \
+ drivers/isdn/hisax/Kconfig \
+ drivers/isdn/i4l/Kconfig \
+ drivers/isdn/Kconfig \
+ drivers/s390/net/Kconfig \
+ drivers/atm/Kconfig \
+ drivers/net/wan/Kconfig \
+ drivers/net/pcmcia/Kconfig \
+ drivers/net/wireless/Kconfig \
+ drivers/net/tokenring/Kconfig \
+ drivers/net/fec_8xx/Kconfig \
+ drivers/net/tulip/Kconfig \
+ drivers/net/arm/Kconfig \
+ drivers/net/arcnet/Kconfig \
+ drivers/net/Kconfig \
+ drivers/bluetooth/Kconfig \
+ net/bluetooth/hidp/Kconfig \
+ net/bluetooth/cmtp/Kconfig \
+ net/bluetooth/bnep/Kconfig \
+ net/bluetooth/rfcomm/Kconfig \
+ net/bluetooth/Kconfig \
+ drivers/net/irda/Kconfig \
+ net/irda/ircomm/Kconfig \
+ net/irda/irnet/Kconfig \
+ net/irda/irlan/Kconfig \
+ net/irda/Kconfig \
+ drivers/net/hamradio/Kconfig \
+ net/ax25/Kconfig \
+ net/sched/Kconfig \
+ drivers/net/appletalk/Kconfig \
+ net/ipx/Kconfig \
+ net/llc/Kconfig \
+ net/decnet/Kconfig \
+ net/sctp/Kconfig \
+ net/xfrm/Kconfig \
+ net/bridge/netfilter/Kconfig \
+ net/decnet/netfilter/Kconfig \
+ net/ipv6/netfilter/Kconfig \
+ net/ipv4/netfilter/Kconfig \
+ net/ipv6/Kconfig \
+ net/ipv4/ipvs/Kconfig \
+ net/ipv4/Kconfig \
+ net/Kconfig \
+ drivers/macintosh/Kconfig \
+ drivers/message/i2o/Kconfig \
+ drivers/ieee1394/Kconfig \
+ drivers/message/fusion/Kconfig \
+ drivers/md/Kconfig \
+ drivers/cdrom/Kconfig \
+ drivers/scsi/pcmcia/Kconfig \
+ drivers/scsi/arm/Kconfig \
+ drivers/scsi/qla2xxx/Kconfig \
+ drivers/scsi/aic7xxx/Kconfig.aic79xx \
+ drivers/scsi/aic7xxx/Kconfig.aic7xxx \
+ drivers/scsi/Kconfig \
+ drivers/ide/Kconfig \
+ drivers/s390/block/Kconfig \
+ drivers/block/paride/Kconfig \
+ drivers/block/Kconfig \
+ drivers/pnp/pnpbios/Kconfig \
+ drivers/pnp/isapnp/Kconfig \
+ drivers/pnp/Kconfig \
+ drivers/parport/Kconfig \
+ drivers/mtd/nand/Kconfig \
+ drivers/mtd/devices/Kconfig \
+ drivers/mtd/maps/Kconfig \
+ drivers/mtd/chips/Kconfig \
+ drivers/mtd/Kconfig \
+ drivers/base/Kconfig \
+ drivers/Kconfig \
+ fs/Kconfig.binfmt \
+ drivers/pci/hotplug/Kconfig \
+ drivers/pcmcia/Kconfig \
+ drivers/mca/Kconfig \
+ drivers/eisa/Kconfig \
+ drivers/pci/Kconfig \
+ drivers/cpufreq/Kconfig \
+ arch/i386/kernel/cpu/cpufreq/Kconfig \
+ drivers/acpi/Kconfig \
+ kernel/power/Kconfig \
+ drivers/firmware/Kconfig \
+ drivers/block/Kconfig.iosched \
+ init/Kconfig \
+ arch/i386/Kconfig
+
+.config include/linux/autoconf.h: $(deps_config)
+
+$(deps_config):
D: SRM environment driver (for Alpha systems)
P: 1024D/8399E1BB 250D 3BCF 7127 0D8C A444 A961 1DBD 5E75 8399 E1BB
-N: Thomas Gleixner
-E: tglx@linutronix.de
-D: NAND flash hardware support, JFFS2 on NAND flash
-
N: Richard E. Gooch
E: rgooch@atnf.csiro.au
D: parent process death signal to children
struct pci_dev *pdev;
...
- if (!pci_set_dma_mask(pdev, PLAYBACK_ADDRESS_BITS)) {
+ if (pci_set_dma_mask(pdev, PLAYBACK_ADDRESS_BITS)) {
card->playback_enabled = 1;
} else {
card->playback_enabled = 0;
printk(KERN_WARN "%s: Playback disabled due to DMA limitations.\n",
card->name);
}
- if (!pci_set_dma_mask(pdev, RECORD_ADDRESS_BITS)) {
+ if (pci_set_dma_mask(pdev, RECORD_ADDRESS_BITS)) {
card->record_enabled = 1;
} else {
card->record_enabled = 0;
modprobe ipmi_watchdog timeout=<t> pretimeout=<t> action=<action type>
preaction=<preaction type> preop=<preop type> start_now=x
- nowayout=x
The timeout is the number of seconds to the action, and the pretimeout
is the amount of seconds before the reset that the pre-timeout panic will
If start_now is set to 1, the watchdog timer will start running as
soon as the driver is loaded.
-If nowayout is set to 1, the watchdog timer will not stop when the
-watchdog device is closed. The default value of nowayout is true
-if the CONFIG_WATCHDOG_NOWAYOUT option is enabled, or false if not.
-
When compiled into the kernel, the kernel command line is available
for configuring the watchdog:
ipmi_watchdog.preaction=<preaction type>
ipmi_watchdog.preop=<preop type>
ipmi_watchdog.start_now=x
- ipmi_watchdog.nowayout=x
The options are the same as the module parameter options.
10/03/2003
Revised Feb 12, 2004 by Martine Silbermann
email: Martine.Silbermann@hp.com
- Revised Jun 25, 2004 by Tom L Nguyen
1. About this guide
-This guide describes the basics of Message Signaled Interrupts (MSI),
-the advantages of using MSI over traditional interrupt mechanisms,
-and how to enable your driver to use MSI or MSI-X. Also included is
-a Frequently Asked Questions.
+This guide describes the basics of Message Signaled Interrupts(MSI), the
+advantages of using MSI over traditional interrupt mechanisms, and how
+to enable your driver to use MSI or MSI-X. Also included is a Frequently
+Asked Questions.
2. Copyright 2003 Intel Corporation
the MSI/MSI-X capability structure in its PCI capability list. The
device function may implement both the MSI capability structure and
the MSI-X capability structure; however, the bus driver should not
-enable both.
+enable both, but instead enable only the MSI-X capability structure.
The MSI capability structure contains Message Control register,
Message Address register and Message Data register. These registers
support for better interrupt performance.
Using MSI enables the device functions to support two or more
-vectors, which can be configured to target different CPU's to
+vectors, which can be configured to target different CPU's to
increase scalability.
5. Configuring a driver to use MSI/MSI-X
By default, the kernel will not enable MSI/MSI-X on all devices that
-support this capability. The CONFIG_PCI_MSI kernel option
+support this capability. The CONFIG_PCI_USE_VECTOR kernel option
must be selected to enable MSI/MSI-X support.
-5.1 Including MSI/MSI-X support into the kernel
+5.1 Including MSI support into the kernel
-To allow MSI/MSI-X capable device drivers to selectively enable
-MSI/MSI-X (using pci_enable_msi()/pci_enable_msix() as described
-below), the VECTOR based scheme needs to be enabled by setting
-CONFIG_PCI_MSI during kernel config.
+To allow MSI-Capable device drivers to selectively enable MSI (using
+pci_enable_msi as described below), the VECTOR based scheme needs to
+be enabled by setting CONFIG_PCI_USE_VECTOR.
Since the target of the inbound message is the local APIC, providing
-CONFIG_X86_LOCAL_APIC must be enabled as well as CONFIG_PCI_MSI.
+CONFIG_PCI_USE_VECTOR is dependent on whether CONFIG_X86_LOCAL_APIC
+is enabled or not.
-5.2 Configuring for MSI support
-
-Due to the non-contiguous fashion in vector assignment of the
-existing Linux kernel, this version does not support multiple
-messages regardless of a device function is capable of supporting
-more than one vector. To enable MSI on a device function's MSI
-capability structure requires a device driver to call the function
-pci_enable_msi() explicitly.
-
-5.2.1 API pci_enable_msi
-
-int pci_enable_msi(struct pci_dev *dev)
+int pci_enable_msi(struct pci_dev *)
With this new API, any existing device driver, which like to have
-MSI enabled on its device function, must call this API to enable MSI
-A successful call will initialize the MSI capability structure
-with ONE vector, regardless of whether a device function is
+MSI enabled on its device function, must call this explicitly. A
+successful call will initialize the MSI/MSI-X capability structure
+with ONE vector, regardless of whether the device function is
capable of supporting multiple messages. This vector replaces the
pre-assigned dev->irq with a new MSI vector. To avoid the conflict
of new assigned vector with existing pre-assigned vector requires
-a device driver to call this API before calling request_irq().
-
-5.2.2 API pci_disable_msi
-
-void pci_disable_msi(struct pci_dev *dev)
-
-This API should always be used to undo the effect of pci_enable_msi()
-when a device driver is unloading. This API restores dev->irq with
-the pre-assigned IOAPIC vector and switches a device's interrupt
-mode to PCI pin-irq assertion/INTx emulation mode.
-
-Note that a device driver should always call free_irq() on MSI vector
-it has done request_irq() on before calling this API. Failure to do
-so results a BUG_ON() and a device will be left with MSI enabled and
-leaks its vector.
-
-5.2.3 MSI mode vs. legacy mode diagram
+the device driver to call this API before calling request_irq(...).
The below diagram shows the events, which switches the interrupt
mode on the MSI-capable device function between MSI mode and
| | <=============== | |
| MSI MODE | | PIN-IRQ ASSERTION MODE |
| | ===============> | |
- ------------ pci_disable_msi ------------------------
-
-
-Figure 1.0 MSI Mode vs. Legacy Mode
+ ------------ free_irq ------------------------
-In Figure 1.0, a device operates by default in legacy mode. Legacy
-in this context means PCI pin-irq assertion or PCI-Express INTx
-emulation. A successful MSI request (using pci_enable_msi()) switches
-a device's interrupt mode to MSI mode. A pre-assigned IOAPIC vector
-stored in dev->irq will be saved by the PCI subsystem and a new
-assigned MSI vector will replace dev->irq.
-
-To return back to its default mode, a device driver should always call
-pci_disable_msi() to undo the effect of pci_enable_msi(). Note that a
-device driver should always call free_irq() on MSI vector it has done
-request_irq() on before calling pci_disable_msi(). Failure to do so
-results a BUG_ON() and a device will be left with MSI enabled and
-leaks its vector. Otherwise, the PCI subsystem restores a device's
-dev->irq with a pre-assigned IOAPIC vector and marks released
-MSI vector as unused.
-
-Once being marked as unused, there is no guarantee that the PCI
-subsystem will reserve this MSI vector for a device. Depending on
-the availability of current PCI vector resources and the number of
-MSI/MSI-X requests from other drivers, this MSI may be re-assigned.
+5.2 Configuring for MSI support
-For the case where the PCI subsystem re-assigned this MSI vector
-another driver, a request to switching back to MSI mode may result
-in being assigned a different MSI vector or a failure if no more
-vectors are available.
+Due to the non-contiguous fashion in vector assignment of the
+existing Linux kernel, this version does not support multiple
+messages regardless of whether the device function is capable of supporting
+more than one vector. The bus driver initializes only entry 0 of
+this capability if pci_enable_msi(...) is called successfully by
+the device driver.
5.3 Configuring for MSI-X support
-Due to the ability of the system software to configure each vector of
-the MSI-X capability structure with an independent message address
-and message data, the non-contiguous fashion in vector assignment of
-the existing Linux kernel has no impact on supporting multiple
-messages on an MSI-X capable device functions. To enable MSI-X on
-a device function's MSI-X capability structure requires its device
-driver to call the function pci_enable_msix() explicitly.
-
-The function pci_enable_msix(), once invoked, enables either
-all or nothing, depending on the current availability of PCI vector
-resources. If the PCI vector resources are available for the number
-of vectors requested by a device driver, this function will configure
-the MSI-X table of the MSI-X capability structure of a device with
-requested messages. To emphasize this reason, for example, a device
-may be capable for supporting the maximum of 32 vectors while its
-software driver usually may request 4 vectors. It is recommended
-that the device driver should call this function once during the
+Both the MSI capability structure and the MSI-X capability structure
+share the same above semantics; however, due to the ability of the
+system software to configure each vector of the MSI-X capability
+structure with an independent message address and message data, the
+non-contiguous fashion in vector assignment of the existing Linux
+kernel has no impact on supporting multiple messages on an MSI-X
+capable device functions. By default, as mentioned above, ONE vector
+should be always allocated to the MSI-X capability structure at
+entry 0. The bus driver does not initialize other entries of the
+MSI-X table.
+
+Note that the PCI subsystem should have full control of a MSI-X
+table that resides in Memory Space. The software device driver
+should not access this table.
+
+To request for additional vectors, the device software driver should
+call function msi_alloc_vectors(). It is recommended that the
+software driver should call this function once during the
initialization phase of the device driver.
-Unlike the function pci_enable_msi(), the function pci_enable_msix()
-does not replace the pre-assigned IOAPIC dev->irq with a new MSI
-vector because the PCI subsystem writes the 1:1 vector-to-entry mapping
-into the field vector of each element contained in a second argument.
-Note that the pre-assigned IO-APIC dev->irq is valid only if the device
-operates in PIN-IRQ assertion mode. In MSI-X mode, any attempt of
-using dev->irq by the device driver to request for interrupt service
-may result unpredictabe behavior.
-
-For each MSI-X vector granted, a device driver is responsible to call
-other functions like request_irq(), enable_irq(), etc. to enable
-this vector with its corresponding interrupt service handler. It is
-a device driver's choice to assign all vectors with the same
-interrupt service handler or each vector with a unique interrupt
-service handler.
-
-5.3.1 Handling MMIO address space of MSI-X Table
-
-The PCI 3.0 specification has implementation notes that MMIO address
-space for a device's MSI-X structure should be isolated so that the
-software system can set different page for controlling accesses to
-the MSI-X structure. The implementation of MSI patch requires the PCI
-subsystem, not a device driver, to maintain full control of the MSI-X
-table/MSI-X PBA and MMIO address space of the MSI-X table/MSI-X PBA.
-A device driver is prohibited from requesting the MMIO address space
-of the MSI-X table/MSI-X PBA. Otherwise, the PCI subsystem will fail
-enabling MSI-X on its hardware device when it calls the function
-pci_enable_msix().
-
-5.3.2 Handling MSI-X allocation
-
-Determining the number of MSI-X vectors allocated to a function is
-dependent on the number of MSI capable devices and MSI-X capable
-devices populated in the system. The policy of allocating MSI-X
-vectors to a function is defined as the following:
-
-#of MSI-X vectors allocated to a function = (x - y)/z where
-
-x = The number of available PCI vector resources by the time
- the device driver calls pci_enable_msix(). The PCI vector
- resources is the sum of the number of unassigned vectors
- (new) and the number of released vectors when any MSI/MSI-X
- device driver switches its hardware device back to a legacy
- mode or is hot-removed. The number of unassigned vectors
- may exclude some vectors reserved, as defined in parameter
- NR_HP_RESERVED_VECTORS, for the case where the system is
- capable of supporting hot-add/hot-remove operations. Users
- may change the value defined in NR_HR_RESERVED_VECTORS to
- meet their specific needs.
-
-y = The number of MSI capable devices populated in the system.
- This policy ensures that each MSI capable device has its
- vector reserved to avoid the case where some MSI-X capable
- drivers may attempt to claim all available vector resources.
-
-z = The number of MSI-X capable devices pupulated in the system.
- This policy ensures that maximum (x - y) is distributed
- evenly among MSI-X capable devices.
-
-Note that the PCI subsystem scans y and z during a bus enumeration.
-When the PCI subsystem completes configuring MSI/MSI-X capability
-structure of a device as requested by its device driver, y/z is
-decremented accordingly.
-
-5.3.3 Handling MSI-X shortages
-
-For the case where fewer MSI-X vectors are allocated to a function
-than requested, the function pci_enable_msix() will return the
-maximum number of MSI-X vectors available to the caller. A device
-driver may re-send its request with fewer or equal vectors indicated
-in a return. For example, if a device driver requests 5 vectors, but
-the number of available vectors is 3 vectors, a value of 3 will be a
-return as a result of pci_enable_msix() call. A function could be
-designed for its driver to use only 3 MSI-X table entries as
-different combinations as ABC--, A-B-C, A--CB, etc. Note that this
-patch does not support multiple entries with the same vector. Such
-attempt by a device driver to use 5 MSI-X table entries with 3 vectors
-as ABBCC, AABCC, BCCBA, etc will result as a failure by the function
-pci_enable_msix(). Below are the reasons why supporting multiple
-entries with the same vector is an undesirable solution.
-
- - The PCI subsystem can not determine which entry, which
- generated the message, to mask/unmask MSI while handling
- software driver ISR. Attempting to walk through all MSI-X
- table entries (2048 max) to mask/unmask any match vector
- is an undesirable solution.
-
- - Walk through all MSI-X table entries (2048 max) to handle
- SMP affinity of any match vector is an undesirable solution.
-
-5.3.4 API pci_enable_msix
-
-int pci_enable_msix(struct pci_dev *dev, u32 *entries, int nvec)
-
-This API enables a device driver to request the PCI subsystem
-for enabling MSI-X messages on its hardware device. Depending on
-the availability of PCI vectors resources, the PCI subsystem enables
-either all or nothing.
+The function msi_alloc_vectors(), once invoked, enables either
+all or nothing, depending on the current availability of vector
+resources. If no vector resources are available, the device function
+still works with ONE vector. If the vector resources are available
+for the number of vectors requested by the driver, this function
+will reconfigure the MSI-X capability structure of the device with
+additional messages, starting from entry 1. To emphasize this
+reason, for example, the device may be capable of supporting the
+maximum of 32 vectors while its software driver usually may request
+4 vectors.
+
+For each vector, after this successful call, the device driver is
+responsible to call other functions like request_irq(), enable_irq(),
+etc. to enable this vector with its corresponding interrupt service
+handler. It is the device driver's choice to have all vectors shared
+the same interrupt service handler or each vector with a unique
+interrupt service handler.
+
+In addition to the function msi_alloc_vectors(), another function
+msi_free_vectors() is provided to allow the software driver to
+release a number of vectors back to the vector resources. Once
+invoked, the PCI subsystem disables (masks) each vector released.
+These vectors are no longer valid for the hardware device and its
+software driver to use. Like free_irq, it is recommended that the
+device driver should also call msi_free_vectors to release all
+additional vectors previously requested.
+
+int msi_alloc_vectors(struct pci_dev *dev, int *vector, int nvec)
+
+This API enables the software driver to request the PCI subsystem
+for additional messages. Depending on the number of vectors
+available, the PCI subsystem enables either all or nothing.
Argument dev points to the device (pci_dev) structure.
-
-Argument entries is a pointer of unsigned integer type. The number of
-elements is indicated in argument nvec. The content of each element
-will be mapped to the following struct defined in /driver/pci/msi.h.
-
-struct msix_entry {
- u16 vector; /* kernel uses to write alloc vector */
- u16 entry; /* driver uses to specify entry */
-};
-
-A device driver is responsible for initializing the field entry of
-each element with unique entry supported by MSI-X table. Otherwise,
--EINVAL will be returned as a result. A successful return of zero
-indicates the PCI subsystem completes initializing each of requested
-entries of the MSI-X table with message address and message data.
-Last but not least, the PCI subsystem will write the 1:1
-vector-to-entry mapping into the field vector of each element. A
-device driver is responsible of keeping track of allocated MSI-X
-vectors in its internal data structure.
-
+Argument vector is a pointer of integer type. The number of
+elements is indicated in argument nvec.
Argument nvec is an integer indicating the number of messages
requested.
+A return of zero indicates that the requested number of vectors was
+successfully allocated. Otherwise, it indicates that resources are
+not available.
-A return of zero indicates that the number of MSI-X vectors is
-successfully allocated. A return of greater than zero indicates
-MSI-X vector shortage. Or a return of less than zero indicates
-a failure. This failure may be a result of duplicate entries
-specified in second argument, or a result of no available vector,
-or a result of failing to initialize MSI-X table entries.
-
-5.3.5 API pci_disable_msix
-
-void pci_disable_msix(struct pci_dev *dev)
+int msi_free_vectors(struct pci_dev* dev, int *vector, int nvec)
-This API should always be used to undo the effect of pci_enable_msix()
-when a device driver is unloading. Note that a device driver should
-always call free_irq() on all MSI-X vectors it has done request_irq()
-on before calling this API. Failure to do so results a BUG_ON() and
-a device will be left with MSI-X enabled and leaks its vectors.
+This API enables the software driver to inform the PCI subsystem
+that it is willing to release a number of vectors back to the
+MSI resource pool. Once invoked, the PCI subsystem disables each
+MSI-X entry associated with each vector stored in the argument 2.
+These vectors are no longer valid for the hardware device and
+its software driver to use.
-5.3.6 MSI-X mode vs. legacy mode diagram
+Argument dev points to the device (pci_dev) structure.
+Argument vector is a pointer of integer type. The number of
+elements is indicated in argument nvec.
+Argument nvec is an integer indicating the number of messages
+released.
+A return of zero indicates that the number of allocated vectors
+is successfully released. Otherwise, indicates a failure.
-The below diagram shows the events, which switches the interrupt
-mode on the MSI-X capable device function between MSI-X mode and
-PIN-IRQ assertion mode (legacy).
-
- ------------ pci_enable_msix(,,n) ------------------------
- | | <=============== | |
- | MSI-X MODE | | PIN-IRQ ASSERTION MODE |
- | | ===============> | |
- ------------ pci_disable_msix ------------------------
-
-Figure 2.0 MSI-X Mode vs. Legacy Mode
-
-In Figure 2.0, a device operates by default in legacy mode. A
-successful MSI-X request (using pci_enable_msix()) switches a
-device's interrupt mode to MSI-X mode. A pre-assigned IOAPIC vector
-stored in dev->irq will be saved by the PCI subsystem; however,
-unlike MSI mode, the PCI subsystem will not replace dev->irq with
-assigned MSI-X vector because the PCI subsystem already writes the 1:1
-vector-to-entry mapping into the field vector of each element
-specified in second argument.
-
-To return back to its default mode, a device driver should always call
-pci_disable_msix() to undo the effect of pci_enable_msix(). Note that
-a device driver should always call free_irq() on all MSI-X vectors it
-has done request_irq() on before calling pci_disable_msix(). Failure
-to do so results a BUG_ON() and a device will be left with MSI-X
-enabled and leaks its vectors. Otherwise, the PCI subsystem switches a
-device function's interrupt mode from MSI-X mode to legacy mode and
-marks all allocated MSI-X vectors as unused.
-
-Once being marked as unused, there is no guarantee that the PCI
-subsystem will reserve these MSI-X vectors for a device. Depending on
-the availability of current PCI vector resources and the number of
-MSI/MSI-X requests from other drivers, these MSI-X vectors may be
-re-assigned.
-
-For the case where the PCI subsystem re-assigned these MSI-X vectors
-to other driver, a request to switching back to MSI-X mode may result
-being assigned with another set of MSI-X vectors or a failure if no
-more vectors are available.
-
-5.4 Handling function implementng both MSI and MSI-X capabilities
-
-For the case where a function implements both MSI and MSI-X
-capabilities, the PCI subsystem enables a device to run either in MSI
-mode or MSI-X mode but not both. A device driver determines whether it
-wants MSI or MSI-X enabled on its hardware device. Once a device
-driver requests for MSI, for example, it is prohibited to request for
-MSI-X; in other words, a device driver is not permitted to ping-pong
-between MSI mod MSI-X mode during a run-time.
-
-5.5 Hardware requirements for MSI/MSI-X support
-MSI/MSI-X support requires support from both system hardware and
+5.4 Hardware requirements for MSI support
+MSI support requires support from both system hardware and
individual hardware device functions.
-5.5.1 System hardware support
+5.4.1 System hardware support
Since the target of MSI address is the local APIC CPU, enabling
-MSI/MSI-X support in Linux kernel is dependent on whether existing
+MSI support in Linux kernel is dependent on whether existing
system hardware supports local APIC. Users should verify their
system whether it runs when CONFIG_X86_LOCAL_APIC=y.
In SMP environment, CONFIG_X86_LOCAL_APIC is automatically set;
however, in UP environment, users must manually set
CONFIG_X86_LOCAL_APIC. Once CONFIG_X86_LOCAL_APIC=y, setting
-CONFIG_PCI_MSI enables the VECTOR based scheme and
+CONFIG_PCI_USE_VECTOR enables the VECTOR based scheme and
the option for MSI-capable device drivers to selectively enable
-MSI/MSI-X.
+MSI (using pci_enable_msi as described below).
-Note that CONFIG_X86_IO_APIC setting is irrelevant because MSI/MSI-X
-vector is allocated new during runtime and MSI/MSI-X support does not
-depend on BIOS support. This key independency enables MSI/MSI-X
-support on future IOxAPIC free platform.
+Note that CONFIG_X86_IO_APIC setting is irrelevant because MSI
+vector is allocated new during runtime and MSI support does not
+depend on BIOS support. This key independency enables MSI support
+on future IOxAPIC free platform.
-5.5.2 Device hardware support
+5.4.2 Device hardware support
The hardware device function supports MSI by indicating the
MSI/MSI-X capability structure on its PCI capability list. By
default, this capability structure will not be initialized by
MSI-capable hardware is responsible for whether calling
pci_enable_msi or not. A return of zero indicates the kernel
successfully initializes the MSI/MSI-X capability structure of the
-device funtion. The device function is now running on MSI/MSI-X mode.
+device function. The device function is now running in MSI mode.
-5.6 How to tell whether MSI/MSI-X is enabled on device function
+5.5 How to tell whether MSI is enabled on device function
-At the driver level, a return of zero from the function call of
-pci_enable_msi()/pci_enable_msix() indicates to a device driver that
-its device function is initialized successfully and ready to run in
-MSI/MSI-X mode.
+At the driver level, a return of zero from pci_enable_msi(...)
+indicates to the device driver that its device function is
+initialized successfully and ready to run in MSI mode.
At the user level, users can use command 'cat /proc/interrupts'
-to display the vector allocated for a device and its interrupt
-MSI/MSI-X mode ("PCI MSI"/"PCI MSIX"). Below shows below MSI mode is
-enabled on a SCSI Adaptec 39320D Ultra320.
+to display the vector allocated for the device and its interrupt
+mode, as shown below.
CPU0 CPU1
0: 324639 0 IO-APIC-edge timer
option to the kernel via the tagged lists specifying the port, and
serial format options as described in
- Documentation/kernel-parameters.txt.
+ linux/Documentation/kernel-parameters.txt.
3. Detect the machine type
$Id: README.aztcd,v 2.60 1997/11/29 09:51:25 root Exp root $
- Readme-File Documentation/cdrom/aztcd
+ Readme-File /usr/src/Documentation/cdrom/aztcd
for
AZTECH CD-ROM CDA268-01A, ORCHID CD-3110,
OKANO/WEARNES CDD110, CONRAD TXC, CyCDROM CR520, CR540
A reworked and improved version called 'cdtester.c', which has yet more
features for testing CDROM-drives can be found in
-Documentation/cdrom/sbpcd, written by E.Moenkeberg.
+/usr/src/linux/Documentation/cdrom/sbpcd, written by E.Moenkeberg.
Werner Zimmermann
Fachhochschule fuer Technik Esslingen
+++ /dev/null
-CKRM I/O controller
-
-Last updated: Sep 21, 2004
-
-
-Intro
------
-
-CKRM's I/O scheduler is developed as a delta over a modified version of
-the Complete Fair Queuing scheduler (CFQ) that implements I/O priorities.
-The latter's original posting can be found at:
- http://www.ussg.iu.edu/hypermail/linux/kernel/0311.1/0019.html
-
-Please note that this is not the CFQ version currently in the linus kernel
-(2.6.8.1 at time of writing) which provides equal, not prioritized,
-bandwidth allocation amongst processes. Since the CFQ in the kernel is likely
-to eventually move towards I/O priority implementation, CKRM has not renamed
-the underlying I/O scheduler and simply replaces drivers/block/cfq-iosched.c
-with the modified version.
-
-Installation
-------------
-
-1. Configure "Disk I/O Resource Controller" under CKRM (see
-Documentation/ckrm/installation)
-
-2. After booting into the new kernel, load ckrm-io
- # modprobe ckrm-io
-
-3. Verify that reading /rcfs/taskclass/shares displays values for the
-I/O controller (res=cki).
-
-4. Mount sysfs for monitoring bandwidth received (temporary solution till
-a userlevel tool is developed)
- # mount -t sysfs none /sys
-
-
-Usage
------
-
-For brevity, we assume we are in the /rcfs/taskclass directory for all the
-code snippets below.
-
-Initially, the systemwide default class gets 100% of the I/O bandwidth.
-
- $ cat stats
-
- <display from other controllers, snipped>
- 20 total ioprio
- 20 unused/default ioprio
-
-The first value is the share of a class, as a parent. The second is the share
-of its default subclass. Initially the two are equal. As named subclasses get
-created and assigned shares, the default subclass' share (which equals the
-"unused" portion of the parent's allocation) dwindles.
-
-
-CFQ assigns one of 20 I/O priorities to all I/O requests. Each priority level
-gets a fixed proportion of the total bandwidth in increments of 5%. e.g.
- ioprio=1 gets 5%,
- ioprio=2 gets 10%.....
- all the way through ioprio=19 getting 95%
-
-ioprio=0 gets bandwidth only if no other priority level submits I/O i.e. it can
-get starved.
-ioprio=20 is considered realtime I/O and always gets priority.
-
-CKRM's I/O scheduler distributes these 20 priority levels amongst the hierarchy
-of classes according to the relative share of each class. Thus, root starts out
-with the total allocation of 20 initially. As children get created and shares
-assigned to them, root's allocation reduces. At any time, the sum of absolute
-share values of all classes equals 20.
-
-
-
-Class creation
---------------
-
- $ mkdir a
-
-Its initial share is zero. The parent's share values will be unchanged. Note
-that even classes with zero share get unused bandwidth under CFQ.
-
-Setting a new class share
--------------------------
-
- $ echo "res=cki,guarantee=20" > /rcfs/taskclass/a/shares
- Set cki shares to 20 -1 -1 -1
-
- $ cat a/shares
-
- res=cki,guarantee=20,limit=100,total_guarantee=100,max_limit=100
-
-The limit and max_limit fields can be ignored as they are not implemented.
-The absolute share of a is 20% of parent's absolute total (20) and can be seen
-through
- $ cat a/stats
-
- <snip>
- 4 total ioprio
- 4 unused/default ioprio
-
-Since a gets 4, parent's default's share diminishes accordingly. Thus
-
- $ cat stats
-
- <snip>
- 20 total ioprio
- 16 unused/default ioprio
-
-
-Monitoring
-----------
-
-Each priority level's request service rate can be viewed through sysfs (mounted
-during installation). To view the servicing of priority 4's requests,
-
- $ while : ; do cat /sys/block/<device>/queue/iosched/p4 ; sleep 1 ; done
- rq (10,15) sec (20,30) q (40,50)
-
- <data above updated in a loop>
-
-where
- rq = cumulative I/O requests received (10) and serviced (15)
- sec = cumulative sectors requested (20) and served (30)
- q = cumulative number of times the queue was created(40)/destroyed (50)
-
-The rate at which requests or sectors are serviced should differ for different
-priority levels. The difference in received and serviced values indicates queue
-depth - with insufficient depth, differentiation between I/O priority levels
-will not be observed.
-
-The rate of q creation is not significant for CKRM.
-
-
-Caveats
--------
-
-CFQ's I/O differentiation is still being worked upon so its better to choose
-widely separated share values to observe differences in delivered I/O
-bandwidth.
-
-CFQ, and consequently CKRM, does not provide limits yet. So it is not possible
-to completely limit an I/O hog process by putting it in a class with a low I/O
-share. Only if the competing classes maintain sufficient queue depth (i.e a
-high I/O issue rate) will they get preferential treatment. However, they may
-still see latency degradation due to seeks caused by servicing of the low
-priority class.
-
-When limits are implemented, this behaviour will be rectified.
-
-Please post questions on the CKRM I/O scheduler on ckrm-tech@lists.sf.net.
-
-
+++ /dev/null
-CKRM Basics
--------------
-A brief review of CKRM concepts and terminology will help make installation
-and testing easier. For more details, please visit http://ckrm.sf.net.
-
-Currently there are two class types, taskclass and socketclass for grouping,
-regulating and monitoring tasks and sockets respectively.
-
-To avoid repeating instructions for each classtype, this document assumes a
-task to be the kernel object being grouped. By and large, one can replace task
-with socket and taskclass with socketclass.
-
-RCFS depicts a CKRM class as a directory. Hierarchy of classes can be
-created in which children of a class share resources allotted to
-the parent. Tasks can be classified to any class which is at any level.
-There is no correlation between parent-child relationship of tasks and
-the parent-child relationship of classes they belong to.
-
-Without a Classification Engine, class is inherited by a task. A privileged
-user can reassign a task to a class as described below, after which all
-the child tasks under that task will be assigned to that class, unless the
-user reassigns any of them.
-
-A Classification Engine, if one exists, will be used by CKRM to
-classify a task to a class. The Rule based classification engine uses some
-of the attributes of the task to classify a task. When a CE is present
-class is not inherited by a task.
-
-Characteristics of a class can be accessed/changed through the following magic
-files under the directory representing the class:
-
-shares: allows to change the shares of different resources managed by the
- class
-stats: allows to see the statistics associated with each resources managed
- by the class
-target: allows to assign a task to a class. If a CE is present, assigning
- a task to a class through this interface will prevent CE from
- reassigning the task to any class during reclassification.
-members: allows to see which tasks have been assigned to a class
-config: allows to view and modify configuration information of different
-        resources in a class.
-
-Resource allocations for a class is controlled by the parameters:
-
-guarantee: specifies how much of a resource is guaranteed to a class. A
-           special value DONT_CARE(-2) means that no specific guarantee
-           of a resource is specified; this class may not get any
-           resource if the system is running short of resources
-limit:     specifies the maximum amount of resource that is allowed to be
-           allocated by a class. A special value DONT_CARE(-2) means that
-           no specific limit is specified; this class can get all
-           the resources available.
-total_guarantee: total guarantee that is allowed among the children of this
-           class. In other words, the sum of "guarantee"s of all children
-           of this class cannot exceed this number.
-max_limit: Maximum "limit" allowed for any of this class's children. In
- other words, "limit" of any children of this class cannot exceed
- this value.
-
-None of these parameters are absolute or have any units associated with
-them. These are just numbers(that are relative to its parents') that are
-used to calculate the absolute number of resource available for a specific
-class.
-
-Note: The root class has an absolute number of resource units associated with it.
-
+++ /dev/null
-Usage of CKRM without a classification engine
------------------------------------------------
-
-1. Create a class
-
- # mkdir /rcfs/taskclass/c1
- creates a taskclass named c1 , while
- # mkdir /rcfs/socket_class/s1
- creates a socketclass named s1
-
-The newly created class directory is automatically populated by magic files
-shares, stats, members, target and config.
-
-2. View default shares
-
- # cat /rcfs/taskclass/c1/shares
-
- "guarantee=-2,limit=-2,total_guarantee=100,max_limit=100" is the default
- value set for resources that have controllers registered with CKRM.
-
-3. change shares of a <class>
-
- One or more of the following fields can/must be specified
- res=<res_name> #mandatory
- guarantee=<number>
- limit=<number>
- total_guarantee=<number>
- max_limit=<number>
- e.g.
-    # echo "res=numtasks,limit=20" > /rcfs/taskclass/c1/shares
-
- If any of these parameters are not specified, the current value will be
- retained.
-
-4. Reclassify a task (listening socket)
-
- write the pid of the process to the destination class' target file
- # echo 1004 > /rcfs/taskclass/c1/target
-
- write the "<ipaddress>\<port>" string to the destination class' target file
- # echo "0.0.0.0\32770" > /rcfs/taskclass/c1/target
-
-5. Get a list of tasks (sockets) assigned to a taskclass (socketclass)
-
- # cat /rcfs/taskclass/c1/members
- lists pids of tasks belonging to c1
-
- # cat /rcfs/socket_class/s1/members
- lists the ipaddress\port of all listening sockets in s1
-
-6. Get the statistics of different resources of a class
-
- # cat /rcfs/tasksclass/c1/stats
- shows c1's statistics for each resource with a registered resource
- controller.
-
- # cat /rcfs/socket_class/s1/stats
- show's s1's stats for the listenaq controller.
-
-7. View the configuration values of the resources associated with a class
-
- # cat /rcfs/taskclass/c1/config
- shows per-controller config values for c1.
-
-8. Change the configuration values of resources associated with a class
-   Configuration values are different for different resources. The common
-   field "res=<resname>" must always be specified.
-
- # echo "res=numtasks,parameter=value" > /rcfs/taskclass/c1/config
- to change (without any effect), the value associated with <parameter>.
-
-
+++ /dev/null
-CRBCE
-----------
-
-crbce is a superset of rbce. In addition to providing automatic
-classification, the crbce module
-- monitors per-process delay data that is collected by the delay
-accounting patch
-- collects data on significant kernel events where reclassification
-could occur e.g. fork/exec/setuid/setgid etc., and
-- uses relayfs to supply both these datapoints to userspace
-
-To illustrate the utility of the data gathered by crbce, we provide a
-userspace daemon called crbcedmn that prints the header info received
-from the records sent by the crbce module.
-
-0. Ensure that a CKRM-enabled kernel with following options configured
-   has been compiled. At a minimum, core, rcfs, at least one classtype,
- delay-accounting patch and relayfs. For testing, it is recommended
- all classtypes and resource controllers be compiled as modules.
-
-1. Ensure that the Makefile's BUILD_CRBCE=1 and KDIR points to the
- kernel of step 1 and call make.
- This also builds the userspace daemon, crbcedmn.
-
-2..9 Same as rbce installation and testing instructions,
- except replacing rbce.ko with crbce.ko
-
-10. Read the pseudo daemon help file
- # ./crbcedmn -h
-
-11. Run the crbcedmn to display all records being processed
- # ./crbcedmn
-
+++ /dev/null
-Kernel installation
-------------------------------
-
-<kernver> = version of mainline Linux kernel
-<ckrmver> = version of CKRM
-
-Note: It is expected that CKRM versions will change fairly rapidly. Hence once
-a CKRM version has been released for some <kernver>, it will only be made
-available for future <kernver>'s until the next CKRM version is released.
-
-1. Patch
-
- Apply ckrm/kernel/<kernver>/ckrm-<ckrmversion>.patch to a mainline kernel
- tree with version <kernver>.
-
- If CRBCE will be used, additionally apply the following patches, in order:
- delayacctg-<ckrmversion>.patch
- relayfs-<ckrmversion>.patch
-
-
-2. Configure
-
-Select appropriate configuration options:
-
-a. for taskclasses
-
- General Setup-->Class Based Kernel Resource Management
-
- [*] Class Based Kernel Resource Management
- <M> Resource Class File System (User API)
- [*] Class Manager for Task Groups
- <M> Number of Tasks Resource Manager
-
-b. To test socket_classes and multiple accept queue controller
-
- General Setup-->Class Based Kernel Resource Management
- [*] Class Based Kernel Resource Management
- <M> Resource Class File System (User API)
- [*] Class Manager for socket groups
- <M> Multiple Accept Queues Resource Manager
-
- Device Drivers-->Networking Support-->Networking options-->
- [*] Network packet filtering (replaces ipchains)
- [*] IP: TCP Multiple accept queues support
-
-c. To test CRBCE later (requires 2a.)
-
- File Systems-->Pseudo filesystems-->
- <M> Relayfs filesystem support
- (enable all sub fields)
-
- General Setup-->
- [*] Enable delay accounting
-
-
-3. Build, boot into kernel
-
-4. Enable rcfs
-
- # insmod <patchedtree>/fs/rcfs/rcfs.ko
- # mount -t rcfs rcfs /rcfs
-
- This will create the directories /rcfs/taskclass and
- /rcfs/socketclass which are the "roots" of subtrees for creating
- taskclasses and socketclasses respectively.
-
-5. Load numtasks and listenaq controllers
-
- # insmod <patchedtree>/kernel/ckrm/ckrm_tasks.ko
- # insmod <patchedtree>/kernel/ckrm/ckrm_listenaq.ko
+++ /dev/null
-0. Lifecycle of a LRU Page:
-----------------------------
-These are the events in a page's lifecycle:
- - allocation of the page
- there are multiple high level page alloc functions; __alloc_pages()
- is the lowest level function that does the real allocation.
- - get into LRU list (active list or inactive list)
- - get out of LRU list
- - freeing the page
- there are multiple high level page free functions; free_pages_bulk()
- is the lowest level function that does the real free.
-
-When the memory subsystem runs low on LRU pages, pages are reclaimed by
- - moving pages from active list to inactive list (refill_inactive_zone())
- - freeing pages from the inactive list (shrink_zone)
-depending on the recent usage of the page(approximately).
-
-1. Introduction
----------------
-Memory resource controller controls the number of lru physical pages
-(active and inactive list) a class uses. It does not restrict any
-other physical pages (slabs etc.,)
-
-For simplicity, this document will always refer lru physical pages as
-physical pages or simply pages.
-
-There are two parameters(that are set by the user) that affect the number
-of pages a class is allowed to have in active/inactive list.
-They are
- - guarantee - specifies the number of pages a class is
- guaranteed to get. In other words, if a class is using less than
- 'guarantee' number of pages, its pages will not be freed when the
- memory subsystem tries to free some pages.
- - limit - specifies the maximum number of pages a class can get;
- 'limit' in essence can be considered as the 'hard limit'
-
-Rest of this document details how these two parameters are used in the
-memory allocation logic.
-
-Note that the numbers that are specified in the shares file do not
-directly correspond to the number of pages. But, the user can make
-it so by making the total_guarantee and max_limit of the default class
-(/rcfs/taskclass) to be the total number of pages(given in config file)
-available in the system.
-
- for example:
- # cd /rcfs/taskclass
- # cat config
- res=mem;tot_pages=239778,active=60473,inactive=135285,free=44555
- # cat shares
- res=mem,guarantee=-2,limit=-2,total_guarantee=100,max_limit=100
-
- "tot_pages=239778" above mean there are 239778 lru pages in
- the system.
-
- By making total_guarantee and max_limit to be same as this number at
- this level (/rcfs/taskclass), one can make guarantee and limit in all
- classes refer to the number of pages.
-
- # echo 'res=mem,total_guarantee=239778,max_limit=239778' > shares
- # cat shares
- res=mem,guarantee=-2,limit=-2,total_guarantee=239778,max_limit=239778
-
-
-The number of pages a class can use be anywhere between its guarantee and
-limit. CKRM memory controller springs into action when the system needs
-to choose a victim page to swap out. While the number of pages a class can
-have allocated may be anywhere between its guarantee and limit, victim
-pages will be chosen from classes that are above their guarantee.
-
-Pages will be freed from classes that are close to their "limit" before
-freeing pages from the classes that are close to their guarantee. Pages
-belonging to classes that are below their guarantee will not be chosen as
-a victim.
-
-2. Core Design
---------------------------
-
-CKRM memory resource controller taps at appropriate low level memory
-management functions to associate a page with a class and to charge
-a class that brings the page to the LRU list.
-
-2.1 Changes in page allocation function(__alloc_pages())
---------------------------------------------------------
-- If the class that the current task belong to is over 110% of its 'limit',
- allocation of page(s) fail.
-- After successful allocation of a page, the page is attached with the class
- to which the current task belongs to.
-- Note that the class is _not_ charged for the page(s) here.
-
-2.2 Changes in page free(free_pages_bulk())
--------------------------------------------
-- page is freed from the class it belongs to.
-
-2.3 Adding/Deleting page to active/inactive list
--------------------------------------------------
-When a page is added to the active or inactive list, the class that the
-page belongs to is charged for the page usage.
-
-When a page is deleted from the active or inactive list, the class that the
-page belongs to is credited back.
-
-If a class uses up to its limit, attempt is made to shrink the class's usage
-to 90% of its limit, in order to help the class stay within its limit.
-But, if the class is aggressive, and keeps going over the class's limit
-often (more than 10 shrink events in 10 seconds), then the memory resource
-controller gives up on the class and doesn't try to shrink the class, which
-will eventually lead the class to reach its 110% of its limit and then the
-page allocations will start failing.
-
-2.4 Changes in the page reclamation path (refill_inactive_zone and shrink_zone)
--------------------------------------------------------------------------------
-Pages will be moved from active to inactive list(refill_inactive_zone) and
-pages from inactive list will be freed in the following order:
-(range is calculated by subtracting 'guarantee' from 'limit')
- - Classes that are over 110% of their range
- - Classes that are over 100% of their range
- - Classes that are over 75% of their range
- - Classes that are over 50% of their range
- - Classes that are over 25% of their range
- - Classes whose parent is over 110% of its range
- - Classes that are over their guarantee
-
-2.5 Handling of Shared pages
-----------------------------
-Even if a mm is shared by tasks, the pages that belong to the mm will be
-charged against the individual tasks that bring the page into LRU.
-
-But, when any task that is using a mm moves to a different class or exits,
-then all pages that belong to the mm will be charged against the richest
-class among the tasks that are using the mm.
-
-Note: Shared page handling need to be improved with a better policy.
-
+++ /dev/null
-Installation
-------------
-
-1. Configure "Class based physical memory controller" under CKRM (see
- Documentation/ckrm/installation)
-
-2. Reboot the system with the new kernel.
-
-3. Verify that the memory controller is present by reading the file
- /rcfs/taskclass/config (should show a line with res=mem)
-
-Usage
------
-
-For brevity, unless otherwise specified all the following commands are
-executed in the default class (/rcfs/taskclass).
-
-Initially, the systemwide default class gets 100% of the LRU pages, and the
-config file displays the total number of physical pages.
-
- # cd /rcfs/taskclass
- # cat config
- res=mem;tot_pages=239778,active=60473,inactive=135285,free=44555
- # cat shares
- res=mem,guarantee=-2,limit=-2,total_guarantee=100,max_limit=100
-
- tot_pages - total number of pages
- active - number of pages in the active list ( sum of all zones)
- inactive - number of pages in the inactive list ( sum of all zones )
- free - number of free pages (sum of all pages)
-
-  By making total_guarantee and max_limit the same as tot_pages, one can
-  make the numbers in the shares file be the same as the number of pages for a
- class.
-
- # echo 'res=mem,total_guarantee=239778,max_limit=239778' > shares
- # cat shares
- res=mem,guarantee=-2,limit=-2,total_guarantee=239778,max_limit=239778
-
-
-Class creation
---------------
-
- # mkdir c1
-
-Its initial share is don't care. The parent's share values will be unchanged.
-
-Setting a new class share
--------------------------
-
- # echo 'res=mem,guarantee=25000,limit=50000' > c1/shares
-
- # cat c1/shares
- res=mem,guarantee=25000,limit=50000,total_guarantee=100,max_limit=100
-
- 'guarantee' specifies the number of pages this class entitled to get
- 'limit' is the maximum number of pages this class can get.
-
-Monitoring
-----------
-
-stats file shows statistics of the page usage of a class
- # cat stats
- ----------- Memory Resource stats start -----------
- Number of pages used(including pages lent to children): 196654
- Number of pages guaranteed: 239778
- Maximum limit of pages: 239778
- Total number of pages available(after serving guarantees to children): 214778
- Number of pages lent to children: 0
- Number of pages borrowed from the parent: 0
- ----------- Memory Resource stats end -----------
-
+++ /dev/null
-Rule-based Classification Engine (RBCE)
--------------------------------------------
-
-The ckrm/rbce directory contains the sources for two classification engines
-called rbce and crbce. Both are optional, built as kernel modules and share much
-of their codebase. Only one classification engine (CE) can be loaded at a time
-in CKRM.
-
-
-With RBCE, user can specify rules for how tasks are classified to a
-class. Rules are specified by one or more attribute-value pairs and
-an associated class. The tasks that match all the attr-value pairs
-will get classified to the class attached with the rule.
-
-The file rbce_info under /rcfs/ce directory details the functionality
-of different files available under the directory and also details
-about attributes that are used to define rules.
-
-order: When multiple rules are defined the rules are executed
- according to the order of a rule. Order can be specified
- while defining a rule. If order is not specified, the
- highest order will be assigned to the rule(i.e, the new
-		rule will be executed after all the previously defined
-		rules evaluate false). So, order of rules is important as that
- will decide, which class a task will get assigned to. For
- example, if we have the two following rules: r1:
- uid=1004,order=10,class=/rcfs/taskclass/c1 r2:
- uid=1004,cmd=grep,order=20,class=/rcfs/taskclass/c2 then,
- the task "grep" executed by user 1004 will always be
- assigned to class /rcfs/taskclass/c1, as rule r1 will be
- executed before r2 and the task successfully matched the
- rule's attr-value pairs. Rule r2 will never be consulted
- for the command. Note: The order in which the rules are
- displayed(by ls) has no correlation with the order of the
- rule.
-
-dependency: Rules can be defined to depend on another rule. i.e a
- rule can be dependent on one rule and has its own
- additional attr-value pairs. the dependent rule will
- evaluate true only if all the attr-value pairs of both
- rules are satisfied. ex: r1: gid=502,class=/rcfs/taskclass
-		r2: depend=r1,cmd=grep,class=/rcfs/taskclass/c1 r2 is a
- dependent rule that depends on r1, a task will be assigned
- to /rcfs/taskclass/c1 if its gid is 502 and the executable
- command name is "grep". If a task's gid is 502 but the
- command name is _not_ "grep" then it will be assigned to
- /rcfs/taskclass
-
- Note: The order of dependent rule must be _lesser_ than the
- rule it depends on, so that it is evaluated _before the
- base rule is evaluated. Otherwise the base rule will
- evaluate true and the task will be assigned to the class of
- that rule without the dependent rule ever getting
- evaluated. In the example above, order of r2 must be lesser
- than order of r1.
-
-app_tag: a task can be attached with a tag(ascii string), that becomes
- an attribute of that task and rules can be defined with the
- tag value.
-
-state: states are at two levels in RBCE. The entire RBCE can be
-		enabled or disabled by writing 1 or 0 to the file
- rbce_state under /rcfs/ce. Disabling RBCE, would mean that
- the rules defined in RBCE will not be utilized for
- classifying a task to a class. A specific rule can be
- enabled/disabled by changing the state of that rule. Once
- it is disabled, the rule will not be evaluated.
+++ /dev/null
-Usage of CKRM with RBCE
---------------------------
-
-0. Ensure that a CKRM-enabled kernel with following options configured
-   has been compiled. At a minimum, core, rcfs and at least one
- classtype. For testing, it is recommended all classtypes and
- resource controllers be compiled as modules.
-
-1. Change ckrm/rbce/Makefile's KDIR to point to this compiled kernel's source
- tree and call make
-
-2. Load rbce module.
- # insmod ckrm/rbce/rbce.ko
- Note that /rcfs has to be mounted before this.
- Note: this command should populate the directory /rcfs/ce with files
- rbce_reclassify, rbce_tag, rbce_info, rbce_state and a directory
- rules.
-
- Note2: If these are not created automatically, just create them by
- using the commands touch and mkdir.(bug that needs to be fixed)
-
-3. Defining a rule
- Rules are defined by creating(by writing) to a file under the
-   /rcfs/ce/rules directory by concatenating multiple attribute value
- pairs.
-
-   Note that the classes must be defined before defining rules that
-   use the classes. eg: the command # echo
-   "uid=1004,class=/rcfs/taskclass/c1" > /rcfs/ce/rules/r1 will define
-   a rule r1 that classifies all tasks belonging to user id 1004 to class
- /rcfs/taskclass/c1
-
-4. Viewing a rule
- read the corresponding file.
- to read rule r1, issue the command:
- # cat /rcfs/ce/rules/r1
-
-5. Changing a rule
-
- Changing a rule is done the same way as defining a rule, the new
- rule will include the old set of attr-value pairs slapped with new
- attr-value pairs. eg: if the current r2 is
- uid=1004,depend=r1,class=/rcfs/taskclass/c1
- (r1 as defined in step 3)
-
- the command:
- # echo gid=502 > /rcfs/ce/rules/r1
- will change the rule to
- r1: uid=1004,gid=502,depend=r1,class=/rcfs/taskclass/c1
-
- the command:
- # echo uid=1005 > /rcfs/ce/rules/r1
- will change the rule to
- r1: uid=1005,class=/rcfs/taskclass/c1
-
- the command:
- # echo class=/rcfs/taskclass/c2 > /rcfs/ce/rules/r1
- will change the rule to
- r1: uid=1004,depend=r1,class=/rcfs/taskclass/c2
-
- the command:
- # echo depend=r4 > /rcfs/ce/rules/r1
- will change the rule to
- r1: uid=1004,depend=r4,class=/rcfs/taskclass/c2
-
- the command:
- # echo +depend=r4 > /rcfs/ce/rules/r1
- will change the rule to
- r1: uid=1004,depend=r1,depend=r4,class=/rcfs/taskclass/c2
-
- the command:
- # echo -depend=r1 > /rcfs/ce/rules/r1
- will change the rule to
- r1: uid=1004,class=/rcfs/taskclass/c2
-
-6. Checking the state of RBCE
- State(enabled/disabled) of RBCE can be checked by reading the file
- /rcfs/ce/rbce_state, it will show 1(enabled) or 0(disabled).
- By default, RBCE is enabled(1).
- ex: # cat /rcfs/ce/rbce_state
-
-7. Changing the state of RBCE
- State of RBCE can be changed by writing 1(enable) or 0(disable).
-   ex: # echo 1 > /rcfs/ce/rbce_state
-
-8. Checking the state of a rule
- State of a rule is displayed in the rule. Rule can be viewed by
- reading the rule file. ex: # cat /rcfs/ce/rules/r1
-
-9. Changing the state of a rule
-
- State of a rule can be changed by writing "state=1"(enable) or
-   "state=0"(disable) to the corresponding rule file. By default, the
- rule is enabled when defined. ex: to disable an existing rule r1,
- issue the command
- # echo "state=0" > /rcfs/ce/rules/r1
-
-
To create the ip2mkdev shell script change to a convenient directory (/tmp
works just fine) and run the following command:
- unshar Documentation/computone.txt
+ unshar /usr/src/linux/Documentation/computone.txt
(This file)
You should now have a file ip2mkdev in your current working directory with
Herbert Valerio Riedel
Kyle McMartin
Adam J. Richter
- Fruhwirth Clemens (i586)
- Linus Torvalds (i586)
CAST5 algorithm contributors:
Kartikey Mahendra Bhatt (original developers unknown, FSF copyright).
-TEA/XTEA algorithm contributors:
- Aaron Grothe
-
-Khazad algorithm contributors:
- Aaron Grothe
-
Generic scatterwalk code by Adam J. Richter <adam@yggdrasil.com>
Please send any credits updates or corrections to:
32 = /dev/ttyDB0 DataBooster serial port 0
...
39 = /dev/ttyDB7 DataBooster serial port 7
- 40 = /dev/ttySG0 SGI Altix console port
205 char Low-density serial ports (alternate device)
0 = /dev/culu0 Callout device for ttyLU0
32 = /dev/cudb0 Callout device for ttyDB0
...
39 = /dev/cudb7 Callout device for ttyDB7
- 40 = /dev/cusg0 Callout device for ttySG0
206 char OnStream SC-x0 tape devices
0 = /dev/osst0 First OnStream SCSI tape, mode 0
Supporting Tools:
-----------------
Supporting tools include digiDload, digiConfig, buildPCI, and ditty. See
-drivers/char/README.epca for more details. Note,
+/usr/src/linux/Documentation/README.epca.dir/user.doc for more details. Note,
this driver REQUIRES that digiDload be executed prior to it being used.
Failure to do this will result in an ENODEV error.
binary-only firmware.
The DVB drivers will be converted to use the request_firmware()
-hotplug interface (see Documentation/firmware_class/).
+hotplug interface (see linux/Documentation/firmware_class/).
(CONFIG_FW_LOADER)
The firmware can be loaded automatically via the hotplug manager
Hotplug Firmware Loading for 2.6 kernels
----------------------------------------
For 2.6 kernels the firmware is loaded at the point that the driver module is
-loaded. See Documentation/dvb/firmware.txt for more information.
+loaded. See linux/Documentation/dvb/firmware.txt for more information.
mv STB_PC_T.bin /usr/lib/hotplug/firmware/dvb-ttusb-dec-2000t.fw
mv STB_PC_X.bin /usr/lib/hotplug/firmware/dvb-ttusb-dec-2540t.fw
loff_t *, int);
unsigned long (*get_unmapped_area)(struct file *, unsigned long,
unsigned long, unsigned long, unsigned long);
- int (*check_flags)(int);
- int (*dir_notify)(struct file *, unsigned long);
};
locking rules:
sendfile: no
sendpage: no
get_unmapped_area: no
-check_flags: no
-dir_notify: no
->llseek() locking has moved from llseek to the individual llseek
implementations. If your fs is not using generic_file_llseek, you
and thrash the system to death, so large and/or important servers will want to
set this value to 0.
-nr_hugepages and hugetlb_shm_group
-----------------------------------
-
-nr_hugepages configures number of hugetlb page reserved for the system.
-
-hugetlb_shm_group contains group id that is allowed to create SysV shared
-memory segment using hugetlb page.
-
-laptop_mode
------------
-
-laptop_mode is a knob that controls "laptop mode". All the things that are
-controlled by this knob are discussed in Documentation/laptop-mode.txt.
-
-block_dump
-----------
-
-block_dump enables block I/O debugging when set to a nonzero value. More
-information on block I/O debugging is in Documentation/laptop-mode.txt.
-
2.5 /proc/sys/dev - Device specific parameters
----------------------------------------------
command to write value into these files, thereby changing the default settings
of the kernel.
------------------------------------------------------------------------------
+
+
+
+
+
+
+
*
-* Documentation/filesystems/udf.txt
+* ./Documentation/filesystems/udf.txt
*
UDF Filesystem version 0.9.8.1
though not all of them are actually meaningful to the kernel. Boot
loader authors who need additional command line options for the boot
loader itself should get them registered in
-Documentation/kernel-parameters.txt to make sure they will not
+linux/Documentation/kernel-parameters.txt to make sure they will not
conflict with actual kernel options now or in the future.
vga=<mode>
module outside the kernel is to use the kernel build system,
kbuild. Use the following command-line:
-make -C path/to/kernel/src M=$PWD modules
+make -C path/to/kernel/src SUBDIRS=$PWD modules
This requires that a makefile exits made in accordance to
Documentation/kbuild/makefiles.txt. Read that file for more details on
# Invokes the kernel build system to come back to the current
# directory and build yourmodule.ko.
default:
- make -C ${KERNEL_SOURCE} M=`pwd` modules
+ make -C ${KERNEL_SOURCE} SUBDIRS=`pwd` modules
Document Author: Bart Samwel (bart@samwel.tk)
Date created: January 2, 2004
-Last modified: July 10, 2004
+Last modified: April 3, 2004
Introduction
------------
-Laptop mode is used to minimize the time that the hard disk needs to be spun up,
+Laptopmode is used to minimize the time that the hard disk needs to be spun up,
to conserve battery power on laptops. It has been reported to cause significant
power savings.
--------
* Introduction
-* Installation
+* The short story
* Caveats
-* The Details
+* The details
* Tips & Tricks
* Control script
* ACPI integration
* Monitoring tool
-Installation
-------------
+The short story
+---------------
To use laptop mode, you don't need to set any kernel configuration options
-or anything. Simply install all the files included in this document, and
-laptop mode will automatically be started when you're on battery. For
-your convenience, a tarball containing an installer can be downloaded at:
+or anything. You simply need to run the laptop_mode control script (which
+is included in this document) as follows:
+
+# laptop_mode start
-http://www.xs4all.nl/~bsamwel/laptop_mode/tools
+Then set your harddisk spindown time to a relatively low value with hdparm:
-To configure laptop mode, you need to edit the configuration file, which is
-located in /etc/default/laptop-mode on Debian-based systems, or in
-/etc/sysconfig/laptop-mode on other systems.
+hdparm -S 4 /dev/hda
-Unfortunately, automatic enabling of laptop mode does not work for
-laptops that don't have ACPI. On those laptops, you need to start laptop
-mode manually. To start laptop mode, run "laptop_mode start", and to
-stop it, run "laptop_mode stop". (Note: The laptop mode tools package now
-has experimental support for APM, you might want to try that first.)
+The value -S 4 means 20 seconds idle time before spindown. Your harddisk will
+now only spin up when a disk cache miss occurs, or at least once every 10
+minutes to write back any pending changes.
+
+To stop laptop_mode, run "laptop_mode stop".
Caveats
-------
-* The downside of laptop mode is that you have a chance of losing up to 10
- minutes of work. If you cannot afford this, don't use it! The supplied ACPI
- scripts automatically turn off laptop mode when the battery almost runs out,
- so that you won't lose any data at the end of your battery life.
+* The downside of laptop mode is that you have a chance of losing up
+ to 10 minutes of work. If you cannot afford this, don't use it! It's
+ wise to turn OFF laptop mode when you're almost out of battery --
+ although this will make the battery run out faster, at least you'll
+ lose less work when it actually runs out. I'm still looking for someone
+ to submit instructions on how to turn off laptop mode when battery is low,
+ e.g., using ACPI events. I don't have a laptop myself, so if you do and
+ you care to contribute such instructions, please do.
* Most desktop hard drives have a very limited lifetime measured in spindown
cycles, typically about 50.000 times (it's usually listed on the spec sheet).
* If you have your filesystems listed as type "auto" in fstab, like I did, then
the control script will not recognize them as filesystems that need remounting.
- You must list the filesystems with their true type instead.
* It has been reported that some versions of the mutt mail client use file access
times to determine whether a folder contains new mail. If you use mutt and
- experience this, you must disable the noatime remounting by setting the option
- DO_REMOUNT_NOATIME to 0 in the configuration file.
+ experience this, you must disable the noatime remounting in the control script
+ by setting DO_REMOUNT_NOATIME=0.
-The Details
+The details
-----------
-Laptop mode is controlled by the knob /proc/sys/vm/laptop_mode. This knob is
+Laptop-mode is controlled by the flag /proc/sys/vm/laptop_mode. This flag is
present for all kernels that have the laptop mode patch, regardless of any
-configuration options. When the knob is set, any physical disk I/O (that might
-have caused the hard disk to spin up) causes Linux to flush all dirty blocks. The
-result of this is that after a disk has spun down, it will not be spun up
-anymore to write dirty blocks, because those blocks had already been written
-immediately after the most recent read operation. The value of the laptop_mode
-knob determines the time between the occurrence of disk I/O and when the flush
-is triggered. A sensible value for the knob is 5 seconds. Setting the knob to
-0 disables laptop mode.
+configuration options. When the flag is set, any physical disk read operation
+(that might have caused the hard disk to spin up) causes Linux to flush all dirty
+blocks. The result of this is that after a disk has spun down, it will not be spun
+up anymore to write dirty blocks, because those blocks had already been written
+immediately after the most recent read operation.
To increase the effectiveness of the laptop_mode strategy, the laptop_mode
control script increases dirty_expire_centisecs and dirty_writeback_centisecs in
all block dirtyings done to files. This makes it possible to debug why a disk
needs to spin up, and to increase battery life even more. The output of
block_dump is written to the kernel output, and it can be retrieved using
-"dmesg". When you use block_dump and your kernel logging level also includes
-kernel debugging messages, you probably want to turn off klogd, otherwise
+"dmesg". When you use block_dump, you may want to turn off klogd, otherwise
the output of block_dump will be logged, causing disk activity that is not
normally there.
+If 10 minutes is too much or too little downtime for you, you can configure
+this downtime as follows. In the control script, set the MAX_AGE value to the
+maximum number of seconds of disk downtime that you would like. You should
+then set your filesystem's commit interval to the same value. The dirty ratio
+is also configurable from the control script.
-Configuration
--------------
-
-The laptop mode configuration file is located in /etc/default/laptop-mode on
-Debian-based systems, or in /etc/sysconfig/laptop-mode on other systems. It
-contains the following options:
-
-MAX_AGE:
-
-Maximum time, in seconds, of hard drive spindown time that you are
-confortable with. Worst case, it's possible that you could lose this
-amount of work if your battery fails while you're in laptop mode.
-
-MINIMUM_BATTERY_MINUTES:
-
-Automatically disable laptop mode if the remaining number of minutes of
-battery power is less than this value. Default is 10 minutes.
-
-AC_HD/BATT_HD:
-
-The idle timeout that should be set on your hard drive when laptop mode
-is active (BATT_HD) and when it is not active (AC_HD). The defaults are
-20 seconds (value 4) for BATT_HD and 2 hours (value 244) for AC_HD. The
-possible values are those listed in the manual page for "hdparm" for the
-"-S" option.
-
-HD:
-
-The devices for which the spindown timeout should be adjusted by laptop mode.
-Default is /dev/hda. If you specify multiple devices, separate them by a space.
-
-READAHEAD:
-
-Disk readahead, in 512-byte sectors, while laptop mode is active. A large
-readahead can prevent disk accesses for things like executable pages (which are
-loaded on demand while the application executes) and sequentially accessed data
-(MP3s).
-
-DO_REMOUNTS:
-
-The control script automatically remounts any mounted journaled filesystems
-with approriate commit interval options. When this option is set to 0, this
-feature is disabled.
-
-DO_REMOUNT_NOATIME:
-
-When remounting, should the filesystems be remounted with the noatime option?
-Normally, this is set to "1" (enabled), but there may be programs that require
-access time recording.
-
-DIRTY_RATIO:
+If you don't like the idea of the control script remounting your filesystems
+for you, you can change DO_REMOUNTS to 0 in the script.
-The percentage of memory that is allowed to contain "dirty" or unsaved data
-before a writeback is forced, while laptop mode is active. Corresponds to
-the /proc/sys/vm/dirty_ratio sysctl.
-
-DIRTY_BACKGROUND_RATIO:
-
-The percentage of memory that is allowed to contain "dirty" or unsaved data
-after a forced writeback is done due to an exceeding of DIRTY_RATIO. Set
-this nice and low. This corresponds to the /proc/sys/vm/dirty_background_ratio
-sysctl.
-
-Note that the behaviour of dirty_background_ratio is quite different
-when laptop mode is active and when it isn't. When laptop mode is inactive,
-dirty_background_ratio is the threshold percentage at which background writeouts
-start taking place. When laptop mode is active, however, background writeouts
-are disabled, and the dirty_background_ratio only determines how much writeback
-is done when dirty_ratio is reached.
-
-DO_CPU:
-
-Enable CPU frequency scaling when in laptop mode. (Requires CPUFreq to be setup.
-See Documentation/cpu-freq/user-guide.txt for more info. Disabled by default.)
-
-CPU_MAXFREQ:
-
-When on battery, what is the maximum CPU speed that the system should use? Legal
-values are "slowest" for the slowest speed that your CPU is able to operate at,
-or a value listed in /sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies.
+Thanks to Kiko Piris, the control script can be used to enable laptop mode on
+both the Linux 2.4 and 2.6 series.
Tips & Tricks
-------------
* Bartek Kania reports getting up to 50 minutes of extra battery life (on top
- of his regular 3 to 3.5 hours) using a spindown time of 5 seconds (BATT_HD=1).
+ of his regular 3 to 3.5 hours) using very aggressive power management (hdparm
+ -B1) and a spindown time of 5 seconds (hdparm -S1).
-* You can spin down the disk while playing MP3, by setting disk readahead
- to 8MB (READAHEAD=16384). Effectively, the disk will read a complete MP3 at
+* You can spin down the disk while playing MP3, by setting the disk readahead
+ to 8MB (hdparm -a 16384). Effectively, the disk will read a complete MP3 at
once, and will then spin down while the MP3 is playing. (Thanks to Bartek
Kania.)
this on powerbooks too. I hope that this is a piece of information that
might be useful to the Laptop Mode patch or it's users."
+* One thing which will cause disks to spin up is not-present application
+ and dynamic library text pages. The kernel will load program text off disk
+ on-demand, so each time you invoke an application feature for the first
+ time, the kernel needs to spin the disk up to go and fetch that part of the
+ application.
+
+ So it is useful to increase the disk readahead parameter greatly, so that
+ the kernel will pull all of the executable's pages into memory on the first
+ pagefault.
+
+ The supplied script does this.
+
* In syslog.conf, you can prefix entries with a dash ``-'' to omit syncing the
file after every logging. When you're using laptop-mode and your disk doesn't
spin down, this is a likely culprit.
(http://noflushd.sourceforge.net/), it seems that noflushd prevents laptop-mode
from doing its thing.
-* If you're worried about your data, you might want to consider using a USB
- memory stick or something like that as a "working area". (Be aware though
- that flash memory can only handle a limited number of writes, and overuse
- may wear out your memory stick pretty quickly. Do _not_ use journalling
- filesystems on flash memory sticks.)
-
-
-Configuration file for control and ACPI battery scripts
--------------------------------------------------------
-
-This allows the tunables to be changed for the scripts via an external
-configuration file
-
-It should be installed as /etc/default/laptop-mode on Debian, and as
-/etc/sysconfig/laptop-mode on Red Hat, SUSE, Mandrake, and other work-alikes.
-
---------------------CONFIG FILE BEGIN-------------------------------------------
-# Maximum time, in seconds, of hard drive spindown time that you are
-# confortable with. Worst case, it's possible that you could lose this
-# amount of work if your battery fails you while in laptop mode.
-#MAX_AGE=600
-
-# Automatically disable laptop mode when the number of minutes of battery
-# that you have left goes below this threshold.
-MINIMUM_BATTERY_MINUTES=10
-
-# Read-ahead, in 512-byte sectors. You can spin down the disk while playing MP3/OGG
-# by setting the disk readahead to 8MB (READAHEAD=16384). Effectively, the disk
-# will read a complete MP3 at once, and will then spin down while the MP3/OGG is
-# playing.
-#READAHEAD=4096
-
-# Shall we remount journaled fs. with appropiate commit interval? (1=yes)
-#DO_REMOUNTS=1
-
-# And shall we add the "noatime" option to that as well? (1=yes)
-#DO_REMOUNT_NOATIME=1
-
-# Dirty synchronous ratio. At this percentage of dirty pages the process
-# which
-# calls write() does its own writeback
-#DIRTY_RATIO=40
-
-#
-# Allowed dirty background ratio, in percent. Once DIRTY_RATIO has been
-# exceeded, the kernel will wake pdflush which will then reduce the amount
-# of dirty memory to dirty_background_ratio. Set this nice and low, so once
-# some writeout has commenced, we do a lot of it.
-#
-#DIRTY_BACKGROUND_RATIO=5
-
-# kernel default dirty buffer age
-#DEF_AGE=30
-#DEF_UPDATE=5
-#DEF_DIRTY_BACKGROUND_RATIO=10
-#DEF_DIRTY_RATIO=40
-#DEF_XFS_AGE_BUFFER=15
-#DEF_XFS_SYNC_INTERVAL=30
-#DEF_XFS_BUFD_INTERVAL=1
-
-# This must be adjusted manually to the value of HZ in the running kernel
-# on 2.4, until the XFS people change their 2.4 external interfaces to work in
-# centisecs. This can be automated, but it's a work in progress that still
-# needs# some fixes. On 2.6 kernels, XFS uses USER_HZ instead of HZ for
-# external interfaces, and that is currently always set to 100. So you don't
-# need to change this on 2.6.
-#XFS_HZ=100
-
-# Should the maximum CPU frequency be adjusted down while on battery?
-# Requires CPUFreq to be setup.
-# See Documentation/cpu-freq/user-guide.txt for more info
-#DO_CPU=0
-
-# When on battery what is the maximum CPU speed that the system should
-# use? Legal values are "slowest" for the slowest speed that your
-# CPU is able to operate at, or a value listed in:
-# /sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies
-# Only applicable if DO_CPU=1.
-#CPU_MAXFREQ=slowest
-
-# Idle timeout for your hard drive (man hdparm for valid values, -S option)
-# Default is 2 hours on AC (AC_HD=244) and 20 seconds for battery (BATT_HD=4).
-#AC_HD=244
-#BATT_HD=4
-
-# The drives for which to adjust the idle timeout. Separate them by a space,
-# e.g. HD="/dev/hda /dev/hdb".
-#HD="/dev/hda"
-
-# Set the spindown timeout on a hard drive?
-#DO_HD=1
-
---------------------CONFIG FILE END---------------------------------------------
-
Control script
--------------
-Please note that this control script works for the Linux 2.4 and 2.6 series (thanks
-to Kiko Piris).
+Please note that this control script works for the Linux 2.4 and 2.6 series.
---------------------CONTROL SCRIPT BEGIN----------------------------------------
+--------------------CONTROL SCRIPT BEGIN------------------------------------------
#!/bin/bash
# start or stop laptop_mode, best run by a power management daemon when
#############################################################################
-# Source config
-if [ -f /etc/default/laptop-mode ] ; then
- # Debian
- . /etc/default/laptop-mode
-elif [ -f /etc/sysconfig/laptop-mode ] ; then
- # Others
- . /etc/sysconfig/laptop-mode
-fi
-
-# Don't raise an error if the config file is incomplete
-# set defaults instead:
-
-# Maximum time, in seconds, of hard drive spindown time that you are
-# confortable with. Worst case, it's possible that you could lose this
-# amount of work if your battery fails you while in laptop mode.
-MAX_AGE=${MAX_AGE:-'600'}
+# Age time, in seconds. should be put into a sysconfig file
+MAX_AGE=600
# Read-ahead, in kilobytes
-READAHEAD=${READAHEAD:-'4096'}
+READAHEAD=4096
# Shall we remount journaled fs. with appropiate commit interval? (1=yes)
-DO_REMOUNTS=${DO_REMOUNTS:-'1'}
+DO_REMOUNTS=1
# And shall we add the "noatime" option to that as well? (1=yes)
-DO_REMOUNT_NOATIME=${DO_REMOUNT_NOATIME:-'1'}
-
-# Shall we adjust the idle timeout on a hard drive?
-DO_HD=${DO_HD:-'1'}
-
-# Adjust idle timeout on which hard drive?
-HD="${HD:-'/dev/hda'}"
-
-# spindown time for HD (hdparm -S values)
-AC_HD=${AC_HD:-'244'}
-BATT_HD=${BATT_HD:-'4'}
+DO_REMOUNT_NOATIME=1
# Dirty synchronous ratio. At this percentage of dirty pages the process which
# calls write() does its own writeback
-DIRTY_RATIO=${DIRTY_RATIO:-'40'}
-
-# cpu frequency scaling
-# See Documentation/cpu-freq/user-guide.txt for more info
-DO_CPU=${CPU_MANAGE:-'0'}
-CPU_MAXFREQ=${CPU_MAXFREQ:-'slowest'}
+DIRTY_RATIO=40
#
# Allowed dirty background ratio, in percent. Once DIRTY_RATIO has been
# of dirty memory to dirty_background_ratio. Set this nice and low, so once
# some writeout has commenced, we do a lot of it.
#
-DIRTY_BACKGROUND_RATIO=${DIRTY_BACKGROUND_RATIO:-'5'}
+DIRTY_BACKGROUND_RATIO=5
# kernel default dirty buffer age
-DEF_AGE=${DEF_AGE:-'30'}
-DEF_UPDATE=${DEF_UPDATE:-'5'}
-DEF_DIRTY_BACKGROUND_RATIO=${DEF_DIRTY_BACKGROUND_RATIO:-'10'}
-DEF_DIRTY_RATIO=${DEF_DIRTY_RATIO:-'40'}
-DEF_XFS_AGE_BUFFER=${DEF_XFS_AGE_BUFFER:-'15'}
-DEF_XFS_SYNC_INTERVAL=${DEF_XFS_SYNC_INTERVAL:-'30'}
-DEF_XFS_BUFD_INTERVAL=${DEF_XFS_BUFD_INTERVAL:-'1'}
+DEF_AGE=30
+DEF_UPDATE=5
+DEF_DIRTY_BACKGROUND_RATIO=10
+DEF_DIRTY_RATIO=40
+DEF_XFS_AGE_BUFFER=15
+DEF_XFS_SYNC_INTERVAL=30
+DEF_XFS_BUFD_INTERVAL=1
# This must be adjusted manually to the value of HZ in the running kernel
# on 2.4, until the XFS people change their 2.4 external interfaces to work in
# some fixes. On 2.6 kernels, XFS uses USER_HZ instead of HZ for external
# interfaces, and that is currently always set to 100. So you don't need to
# change this on 2.6.
-XFS_HZ=${XFS_HZ:-'100'}
+XFS_HZ=100
#############################################################################
fi
}
-deduce_fstype () {
- MP="$1"
- # My root filesystem unfortunately has
- # type "unknown" in /etc/mtab. If we encounter
- # "unknown", we try to get the type from fstab.
- cat /etc/fstab |
- grep -v '^#' |
- while read FSTAB_DEV FSTAB_MP FSTAB_FST FSTAB_OPTS FSTAB_DUMP FSTAB_DUMP ; do
- if [ "$FSTAB_MP" = "$MP" ]; then
- echo $FSTAB_FST
- exit 0
- fi
- done
-}
if [ $DO_REMOUNT_NOATIME -eq 1 ] ; then
NOATIME_OPT=",noatime"
if [ $DO_REMOUNTS -eq 1 ]; then
cat /etc/mtab | while read DEV MP FST OPTS DUMP PASS ; do
PARSEDOPTS="$(parse_mount_opts "$OPTS")"
- if [ "$FST" = 'unknown' ]; then
- FST=$(deduce_fstype $MP)
- fi
case "$FST" in
"ext3"|"reiserfs")
PARSEDOPTS="$(parse_mount_opts commit "$OPTS")"
fi
done
fi
- if [ $DO_HD -eq 1 ] ; then
- for THISHD in $HD ; do
- /sbin/hdparm -S $BATT_HD $THISHD > /dev/null 2>&1
- /sbin/hdparm -B 1 $THISHD > /dev/null 2>&1
- done
- fi
- if [ $DO_CPU -eq 1 -a -e /sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_min_freq ]; then
- if [ $CPU_MAXFREQ = 'slowest' ]; then
- CPU_MAXFREQ=`cat /sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_min_freq`
- fi
- echo $CPU_MAXFREQ > /sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq
- fi
echo "."
;;
stop)
if [ $DO_REMOUNTS -eq 1 ] ; then
cat /etc/mtab | while read DEV MP FST OPTS DUMP PASS ; do
# Reset commit and atime options to defaults.
- if [ "$FST" = 'unknown' ]; then
- FST=$(deduce_fstype $MP)
- fi
case "$FST" in
"ext3"|"reiserfs")
PARSEDOPTS="$(parse_mount_opts_wfstab $DEV commit $OPTS)"
fi
done
fi
- if [ $DO_HD -eq 1 ] ; then
- for THISHD in $HD ; do
- /sbin/hdparm -S $AC_HD $THISHD > /dev/null 2>&1
- /sbin/hdparm -B 255 $THISHD > /dev/null 2>&1
- done
- fi
- if [ $DO_CPU -eq 1 -a -e /sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_min_freq ]; then
- echo `cat /sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq` > /sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq
- fi
echo "."
;;
*)
esac
exit 0
---------------------CONTROL SCRIPT END------------------------------------------
+--------------------CONTROL SCRIPT END--------------------------------------------
ACPI integration
----------------
Dax Kelson submitted this so that the ACPI acpid daemon will
-kick off the laptop_mode script and run hdparm. The part that
-automatically disables laptop mode when the battery is low was
-writen by Jan Topinski.
+kick off the laptop_mode script and run hdparm.
------------------/etc/acpi/events/ac_adapter BEGIN------------------------------
+---------------------------/etc/acpi/events/ac_adapter BEGIN-------------------------------------------
event=ac_adapter
-action=/etc/acpi/actions/ac.sh %e
-----------------/etc/acpi/events/ac_adapter END---------------------------------
-
+action=/etc/acpi/actions/battery.sh
+---------------------------/etc/acpi/events/ac_adapter END-------------------------------------------
------------------/etc/acpi/events/battery BEGIN---------------------------------
-event=battery.*
-action=/etc/acpi/actions/battery.sh %e
-----------------/etc/acpi/events/battery END------------------------------------
+---------------------------/etc/acpi/actions/battery.sh BEGIN-------------------------------------------
+#!/bin/sh
+# cpu throttling
+# cat /proc/acpi/processor/CPU0/throttling for more info
+ACAD_THR=0
+BATT_THR=2
-----------------/etc/acpi/actions/ac.sh BEGIN-----------------------------------
-#!/bin/bash
+# spindown time for HD (man hdparm for valid values)
+# I prefer 2 hours for acad and 20 seconds for batt
+ACAD_HD=244
+BATT_HD=4
-# ac on/offline event handler
+# ac/battery event handler
-status=`awk '/^state: / { print $2 }' /proc/acpi/ac_adapter/$2/state`
+status=`awk '/^state: / { print $2 }' /proc/acpi/ac_adapter/AC/state`
case $status in
"on-line")
+ echo "Setting HD spindown for AC mode."
/sbin/laptop_mode stop
+ /sbin/hdparm -S $ACAD_HD /dev/hda > /dev/null 2>&1
+ /sbin/hdparm -B 255 /dev/hda > /dev/null 2>&1
+ #echo -n $ACAD_CPU:$ACAD_THR > /proc/acpi/processor/CPU0/limit
exit 0
;;
"off-line")
+ echo "Setting HD spindown for battery mode."
/sbin/laptop_mode start
+ /sbin/hdparm -S $BATT_HD /dev/hda > /dev/null 2>&1
+ /sbin/hdparm -B 1 /dev/hda > /dev/null 2>&1
+ #echo -n $BATT_CPU:$BATT_THR > /proc/acpi/processor/CPU0/limit
exit 0
;;
esac
----------------------------/etc/acpi/actions/ac.sh END--------------------------
-
-
----------------------------/etc/acpi/actions/battery.sh BEGIN-------------------
-#! /bin/bash
-
-# Automatically disable laptop mode when the battery almost runs out.
-
-BATT_INFO=/proc/acpi/battery/$2/state
-
-if [[ -f /proc/sys/vm/laptop_mode ]]
-then
- LM=`cat /proc/sys/vm/laptop_mode`
- if [[ $LM -gt 0 ]]
- then
- if [[ -f $BATT_INFO ]]
- then
- # Source the config file only now that we know we need
- if [ -f /etc/default/laptop-mode ] ; then
- # Debian
- . /etc/default/laptop-mode
- elif [ -f /etc/sysconfig/laptop-mode ] ; then
- # Others
- . /etc/sysconfig/laptop-mode
- fi
- MINIMUM_BATTERY_MINUTES=${MINIMUM_BATTERY_MINUTES:-'10'}
-
- ACTION="`cat $BATT_INFO | grep charging | cut -c 26-`"
- if [[ ACTION -eq "discharging" ]]
- then
- PRESENT_RATE=`cat $BATT_INFO | grep "present rate:" | sed "s/.* \([0-9][0-9]* \).*/\1/" `
- REMAINING=`cat $BATT_INFO | grep "remaining capacity:" | sed "s/.* \([0-9][0-9]* \).*/\1/" `
- fi
- if (($REMAINING * 60 / $PRESENT_RATE < $MINIMUM_BATTERY_MINUTES))
- then
- /sbin/laptop_mode stop
- fi
- else
- logger -p daemon.warning "You are using laptop mode and your battery interface $BATT_INFO is missing. This may lead to loss of data when the battery runs out. Check kernel ACPI support and /proc/acpi/battery folder, and edit /etc/acpi/battery.sh to set BATT_INFO to the correct path."
- fi
- fi
-fi
----------------------------/etc/acpi/actions/battery.sh END--------------------
-
+---------------------------/etc/acpi/actions/battery.sh END-------------------------------------------
Monitoring tool
---------------
Bartek Kania submitted this, it can be used to measure how much time your disk
spends spun up/down.
----------------------------dslm.c BEGIN-----------------------------------------
+---------------------------dslm.c BEGIN-------------------------------------------
/*
* Simple Disk Sleep Monitor
* by Bartek Kania
return 0;
}
----------------------------dslm.c END-------------------------------------------
+---------------------------dslm.c END---------------------------------------------
This should not cause problems for anybody, since everybody using a
2.1.x kernel should have updated their C library to a suitable version
-anyway (see the file "Documentation/Changes".)
+anyway (see the file "linux/Documentation/Changes".)
1.2 Allow Mixed Locks Again
---------------------------
The current list of parameters can be found in the files:
linux/net/TUNABLE
- Documentation/networking/ip-sysctl.txt
+ linux/Documentation/networking/ip-sysctl.txt
Some of these are accessible via the sysctl interface, and many more are
scheduled to be added in this way. For example, some parameters related
The SliceCOM board doesn't require firmware. You can have 4 of these cards
in one machine. The driver doesn't (yet) support shared interrupts, so
you will need a separate IRQ line for every board.
-Read Documentation/networking/slicecom.txt for help on configuring
+Read linux/Documentation/networking/slicecom.txt for help on configuring
this adapter.
THE HDLC/PPP LINE PROTOCOL DRIVER
you have to enable it with a boot time parameter. Prior to 2.4.2-ac18
the NMI-oopser is enabled unconditionally on x86 SMP boxes.
-On x86-64 the NMI oopser is on by default. On 64bit Intel CPUs
-it uses IO-APIC by default and on AMD it uses local APIC.
-
[ feel free to send bug reports, suggestions and patches to
Ingo Molnar <mingo@redhat.com> or the Linux SMP mailing
list at <linux-smp@vger.kernel.org> ]
A lot of the assembly code currently runs in real mode, which means
absolute addresses are used instead of virtual addresses as in the
rest of the kernel. To translate an absolute address to a virtual
-address you can lookup in System.map, add __PAGE_OFFSET (0x10000000
+address you can lookup in System.map, add __PAGE_OFFSET (0xc0000000
currently).
code tried to access.
Typical values for the System Responder address are addresses larger
-than __PAGE_OFFSET (0x10000000) which mean a virtual address didn't
+than __PAGE_OFFSET (0xc0000000) which mean a virtual address didn't
get translated to a physical address before real-mode code tried to
access it.
General Registers as specified by ABI
+ FPU Registers must not be used in kernel mode
+
Control Registers
CR 0 (Recovery Counter) used for ptrace
CR 8 (Protection ID) per-process value*
CR 9, 12, 13 (PIDS) unused
CR10 (CCR) lazy FPU saving*
-CR11 as specified by ABI (SAR)
+CR11 as specified by ABI
CR14 (interruption vector) initialized to fault_vector
CR15 (EIEM) initialized to all ones*
CR16 (Interval Timer) read for cycle count/write starts Interval Tmr
CR17-CR22 interruption parameters
-CR19 Interrupt Instruction Register
-CR20 Interrupt Space Register
-CR21 Interrupt Offset Register
-CR22 Interrupt PSW
CR23 (EIRR) read for pending interrupts/write clears bits
CR24 (TR 0) Kernel Space Page Directory Pointer
CR25 (TR 1) User Space Page Directory Pointer
should be sent to the mailing list available through the suspend2
website, and not to the Linux Kernel Mailing List. We are working
toward merging suspend2 into the mainline kernel.
-
-Q: Kernel thread must voluntarily freeze itself (call 'refrigerator'). But
-I found some kernel threads don't do it, and they don't freeze, and
-so the system can't sleep. Is this a known behavior?
-
-A: All such kernel threads need to be fixed, one by one. Select place
-where it is safe to be frozen (no kernel semaphores should be held at
-that point and it must be safe to sleep there), and add:
-
- if (current->flags & PF_FREEZE)
- refrigerator(PF_FREEZE);
-
-Q: What is the difference between between "platform", "shutdown" and
-"firmware" in /sys/power/disk?
-
-A:
-
-shutdown: save state in linux, then tell bios to powerdown
-
-platform: save state in linux, then tell bios to powerdown and blink
- "suspended led"
-
-firmware: tell bios to save state itself [needs BIOS-specific suspend
- partition, and has very little to do with swsusp]
-
-"platform" is actually right thing to do, but "shutdown" is most
-reliable.
+++ /dev/null
-===========================================================================
- HVCS
- IBM "Hypervisor Virtual Console Server" Installation Guide
- for Linux Kernel 2.6.4+
- Copyright (C) 2004 IBM Corporation
-
-===========================================================================
-NOTE:Eight space tabs are the optimum editor setting for reading this file.
-===========================================================================
-
- Author(s) : Ryan S. Arnold <rsa@us.ibm.com>
- Date Created: March, 02, 2004
- Last Changed: July, 07, 2004
-
----------------------------------------------------------------------------
-Table of contents:
-
- 1. Driver Introduction:
- 2. System Requirements
- 3. Build Options:
- 3.1 Built-in:
- 3.2 Module:
- 4. Installation:
- 5. Connection:
- 6. Disconnection:
- 7. Configuration:
- 8. Questions & Answers:
- 9. Reporting Bugs:
-
----------------------------------------------------------------------------
-1. Driver Introduction:
-
-This is the device driver for the IBM Hypervisor Virtual Console Server,
-"hvcs". The IBM hvcs provides a tty driver interface to allow Linux user
-space applications access to the system consoles of logically partitioned
-operating systems (Linux and AIX) running on the same partitioned Power5
-ppc64 system. Physical hardware consoles per partition are not practical
-on this hardware so system consoles are accessed by this driver using
-firmware interfaces to virtual terminal devices.
-
----------------------------------------------------------------------------
-2. System Requirements:
-
-This device driver was written using 2.6.4 Linux kernel APIs and will only
-build and run on kernels of this version or later.
-
-This driver was written to operate solely on IBM Power5 ppc64 hardware
-though some care was taken to abstract the architecture dependent firmware
-calls from the driver code.
-
-Sysfs must be mounted on the system so that the user can determine which
-major and minor numbers are associated with each vty-server. Directions
-for sysfs mounting are outside the scope of this document.
-
----------------------------------------------------------------------------
-3. Build Options:
-
-The hvcs driver registers itself as a tty driver. The tty layer
-dynamically allocates a block of major and minor numbers in a quantity
-requested by the registering driver. The hvcs driver asks the tty layer
-for 64 of these major/minor numbers by default to use for hvcs device node
-entries.
-
-If the default number of device entries is adequate then this driver can be
-built into the kernel. If not, the default can be over-ridden by inserting
-the driver as a module with insmod parameters.
-
----------------------------------------------------------------------------
-3.1 Built-in:
-
-The following menuconfig example demonstrates selecting to build this
-driver into the kernel.
-
- Device Drivers --->
- Character devices --->
- <*> IBM Hypervisor Virtual Console Server Support
-
-Begin the kernel make process.
-
----------------------------------------------------------------------------
-3.2 Module:
-
-The following menuconfig example demonstrates selecting to build this
-driver as a kernel module.
-
- Device Drivers --->
- Character devices --->
- <M> IBM Hypervisor Virtual Console Server Support
-
-The make process will build the following kernel modules:
-
- hvcs.ko
- hvcserver.ko
-
-To insert the module with the default allocation execute the following
-commands in the order they appear:
-
- insmod hvcserver.ko
- insmod hvcs.ko
-
-The hvcserver module contains architecture specific firmware calls and must
-be inserted first, otherwise the hvcs module will not find some of the
-symbols it expects.
-
-To override the default use an insmod parameter as follows (requesting 4
-tty devices as an example):
-
- insmod hvcs.ko hvcs_parm_num_devs=4
-
-There is a maximum number of dev entries that can be specified on insmod.
-We think that 1024 is currently a decent maximum number of server adapters
-to allow. This can always be changed by modifying the constant in the
-source file before building.
-
-NOTE: The length of time it takes to insmod the driver seems to be related
-to the number of tty interfaces the registering driver requests.
-
-In order to remove the driver module execute the following command:
-
- rmmod hvcs.ko
-
-The recommended method for installing hvcs as a module is to use depmod to
-build a current modules.dep file in /lib/modules/`uname -r` and then
-execute:
-
-modprobe hvcs hvcs_parm_num_devs=4
-
-The modules.dep file indicates that hvcserver.ko needs to be inserted
-before hvcs.ko and modprobe uses this file to smartly insert the modules in
-the proper order.
-
-The following modprobe command is used to remove hvcs and hvcserver in the
-proper order:
-
-modprobe -r hvcs
-
----------------------------------------------------------------------------
-4. Installation:
-
-The tty layer creates sysfs entries which contain the major and minor
-numbers allocated for the hvcs driver. The following snippet of "tree"
-output of the sysfs directory shows where these numbers are presented:
-
- sys/
- |-- *other sysfs base dirs*
- |
- |-- class
- | |-- *other classes of devices*
- | |
- | `-- tty
- | |-- *other tty devices*
- | |
- | |-- hvcs0
- | | `-- dev
- | |-- hvcs1
- | | `-- dev
- | |-- hvcs2
- | | `-- dev
- | |-- hvcs3
- | | `-- dev
- | |
- | |-- *other tty devices*
- |
- |-- *other sysfs base dirs*
-
-For the above examples the following output is a result of cat'ing the
-"dev" entry in the hvcs directory:
-
- Pow5:/sys/class/tty/hvcs0/ # cat dev
- 254:0
-
- Pow5:/sys/class/tty/hvcs1/ # cat dev
- 254:1
-
- Pow5:/sys/class/tty/hvcs2/ # cat dev
- 254:2
-
- Pow5:/sys/class/tty/hvcs3/ # cat dev
- 254:3
-
-The output from reading the "dev" attribute is the char device major and
-minor numbers that the tty layer has allocated for this driver's use. Most
-systems running hvcs will already have the device entries created or udev
-will do it automatically.
-
-Given the example output above, to manually create a /dev/hvcs* node entry
-mknod can be used as follows:
-
- mknod /dev/hvcs0 c 254 0
- mknod /dev/hvcs1 c 254 1
- mknod /dev/hvcs2 c 254 2
- mknod /dev/hvcs3 c 254 3
-
-Using mknod to manually create the device entries makes these device nodes
-persistent. Once created they will exist prior to the driver insmod.
-
-Attempting to connect an application to /dev/hvcs* prior to insertion of
-the hvcs module will result in an error message similar to the following:
-
- "/dev/hvcs*: No such device".
-
-NOTE: Just because there is a device node present doesn't mean that there
-is a vty-server device configured for that node.
-
----------------------------------------------------------------------------
-5. Connection
-
-Since this driver controls devices that provide a tty interface a user can
-interact with the device node entries using any standard tty-interactive
-method (e.g. "cat", "dd", "echo"). The intent of this driver however, is
-to provide real time console interaction with a Linux partition's console,
-which requires the use of applications that provide bi-directional,
-interactive I/O with a tty device.
-
-Applications (e.g. "minicom" and "screen") that act as terminal emulators
-or perform terminal type control sequence conversion on the data being
-passed through them are NOT acceptable for providing interactive console
-I/O. These programs often emulate antiquated terminal types (vt100 and
-ANSI) and expect inbound data to take the form of one of these supported
-terminal types but they either do not convert, or do not _adequately_
-convert, outbound data into the terminal type of the terminal which invoked
-them (though screen makes an attempt and can apparently be configured with
-much termcap wrestling.)
-
-For this reason kermit and cu are two of the recommended applications for
-interacting with a Linux console via an hvcs device. These programs simply
-act as a conduit for data transfer to and from the tty device. They do not
-require inbound data to take the form of a particular terminal type, nor do
-they cook outbound data to a particular terminal type.
-
-In order to ensure proper functioning of console applications one must make
-sure that once connected to a /dev/hvcs console that the console's $TERM
-env variable is set to the exact terminal type of the terminal emulator
-used to launch the interactive I/O application. If one is using xterm and
-kermit to connect to /dev/hvcs0 when the console prompt becomes available
-one should "export TERM=xterm" on the console. This tells ncurses
-applications that are invoked from the console that they should output
-control sequences that xterm can understand.
-
-As a precautionary measure an hvcs user should always "exit" from their
-session before disconnecting an application such as kermit from the device
-node. If this is not done, the next user to connect to the console will
-continue using the previous user's logged in session which includes
-using the $TERM variable that the previous user supplied.
-
----------------------------------------------------------------------------
-6. Disconnection
-
-As a security feature to prevent the delivery of stale data to an
-unintended target the Power5 system firmware disables the fetching of data
-and discards that data when a connection between a vty-server and a vty has
-been severed. As an example, when a vty-server is immediately disconnected
-from a vty following output of data to the vty the vty adapter may not have
-enough time between when it received the data interrupt and when the
-connection was severed to fetch the data from firmware before the fetch is
-disabled by firmware.
-
-When hvcs is being used to serve consoles this behavior is not a huge issue
-because the adapter stays connected for large amounts of time following
-almost all data writes. When hvcs is being used as a tty conduit to tunnel
-data between two partitions [see Q & A below] this is a huge problem
-because the standard Linux behavior when cat'ing or dd'ing data to a device
-is to open the tty, send the data, and then close the tty. If this driver
-manually terminated vty-server connections on tty close this would close
-the vty-server and vty connection before the target vty has had a chance to
-fetch the data.
-
-Additionally, disconnecting a vty-server and vty only on module removal or
-adapter removal is impractical because other vty-servers in other
-partitions may require the usage of the target vty at any time.
-
-Due to this behavioral restriction disconnection of vty-servers from the
-connected vty is a manual procedure using a write to a sysfs attribute
-outlined below, on the other hand the initial vty-server connection to a
-vty is established automatically by this driver. Manual vty-server
-connection is never required.
-
-In order to terminate the connection between a vty-server and vty the
-"vterm_state" sysfs attribute within each vty-server's sysfs entry is used.
-Reading this attribute reveals the current connection state of the
-vty-server adapter. A zero means that the vty-server is not connected to a
-vty. A one indicates that a connection is active.
-
-Writing a '0' (zero) to the vterm_state attribute will disconnect the VTERM
-connection between the vty-server and target vty ONLY if the vterm_state
-previously read '1'. The write directive is ignored if the vterm_state
-read '0' or if any value other than '0' was written to the vterm_state
-attribute. The following example will show the method used for verifying
-the vty-server connection status and disconnecting a vty-server connection.
-
- Pow5:/sys/bus/vio/drivers/hvcs/30000004 # cat vterm_state
- 1
-
- Pow5:/sys/bus/vio/drivers/hvcs/30000004 # echo 0 > vterm_state
-
- Pow5:/sys/bus/vio/drivers/hvcs/30000004 # cat vterm_state
- 0
-
-All vty-server connections are automatically terminated when the device is
-hotplug removed and when the module is removed.
-
----------------------------------------------------------------------------
-7. Configuration
-
-Each vty-server has a sysfs entry in the /sys/devices/vio directory, which
-is symlinked in several other sysfs tree directories, notably under the
-hvcs driver entry, which looks like the following example:
-
- Pow5:/sys/bus/vio/drivers/hvcs # ls
- . .. 30000003 30000004 rescan
-
-By design, firmware notifies the hvcs driver of vty-server lifetimes and
-partner vty removals but not the addition of partner vtys. Since an HMC
-Super Admin can add partner info dynamically we have provided the hvcs
-driver sysfs directory with the "rescan" update attribute which will query
-firmware and update the partner info for all the vty-servers that this
-driver manages. Writing a '1' to the attribute triggers the update. An
-explicit example follows:
-
- Pow5:/sys/bus/vio/drivers/hvcs # echo 1 > rescan
-
-Reading the attribute will indicate a state of '1' or '0'. A one indicates
-that an update is in process. A zero indicates that an update has
-completed or was never executed.
-
-Vty-server entries in this directory are a 32 bit partition unique unit
-address that is created by firmware. An example vty-server sysfs entry
-looks like the following:
-
- Pow5:/sys/bus/vio/drivers/hvcs/30000004 # ls
- . current_vty devspec partner_clcs vterm_state
- .. detach_state name partner_vtys
-
-Each entry is provided, by default with a "name" attribute. Reading the
-"name" attribute will reveal the device type as shown in the following
-example:
-
- Pow5:/sys/bus/vio/drivers/hvcs/30000003 # cat name
- vty-server
-
-Each entry is also provided, by default, with a "devspec" attribute which
-reveals the full device specification when read, as shown in the following
-example:
-
- Pow5:/sys/bus/vio/drivers/hvcs/30000004 # cat devspec
- /vdevice/vty-server@30000004
-
-Each vty-server sysfs dir is provided with two read-only attributes that
-provide lists of easily parsed partner vty data: "partner_vtys" and
-"partner_clcs".
-
- Pow5:/sys/bus/vio/drivers/hvcs/30000004 # cat partner_vtys
- 30000000
- 30000001
- 30000002
- 30000000
- 30000000
-
- Pow5:/sys/bus/vio/drivers/hvcs/30000004 # cat partner_clcs
- U5112.428.103048A-V3-C0
- U5112.428.103048A-V3-C2
- U5112.428.103048A-V3-C3
- U5112.428.103048A-V4-C0
- U5112.428.103048A-V5-C0
-
-Reading partner_vtys returns a list of partner vtys. Vty unit address
-numbering is only per-partition-unique so entries will frequently repeat.
-
-Reading partner_clcs returns a list of "converged location codes" which are
-composed of a system serial number followed by "-V*", where the '*' is the
-target partition number, and "-C*", where the '*' is the slot of the
-adapter. The first vty partner corresponds to the first clc item, the
-second vty partner to the second clc item, etc.
-
-A vty-server can only be connected to a single vty at a time. The entry,
-"current_vty" prints the clc of the currently selected partner vty when
-read.
-
-The current_vty can be changed by writing a valid partner clc to the entry
-as in the following example:
-
- Pow5:/sys/bus/vio/drivers/hvcs/30000004 # echo U5112.428.10304
- 8A-V4-C0 > current_vty
-
-Changing the current_vty when a vty-server is already connected to a vty
-does not affect the current connection. The change takes effect when the
-currently open connection is freed.
-
-Information on the "vterm_state" attribute was covered earlier on the
-chapter entitled "disconnection".
-
----------------------------------------------------------------------------
-8. Questions & Answers:
-===========================================================================
-Q: What are the security concerns involving hvcs?
-
-A: There are three main security concerns:
-
- 1. The creator of the /dev/hvcs* nodes has the ability to restrict
- the access of the device entries to certain users or groups. It
- may be best to create a special hvcs group privilege for providing
- access to system consoles.
-
- 2. To provide network security when grabbing the console it is
- suggested that the user connect to the console hosting partition
- using a secure method, such as SSH or sit at a hardware console.
-
- 3. Make sure to exit the user session when done with a console or
- the next vty-server connection (which may be from another
- partition) will experience the previously logged in session.
-
----------------------------------------------------------------------------
-Q: How do I multiplex a console that I grab through hvcs so that other
-people can see it:
-
-A: You can use "screen" to directly connect to the /dev/hvcs* device and
-setup a session on your machine with the console group privileges. As
-pointed out earlier by default screen doesn't provide the termcap settings
-for most terminal emulators to provide adequate character conversion from
-term type "screen" to others. This means that curses based programs may
-not display properly in screen sessions.
-
----------------------------------------------------------------------------
-Q: Why are the colors all messed up?
-Q: Why are the control characters acting strange or not working?
-Q: Why is the console output all strange and unintelligible?
-
-A: Please see the preceding section on "Connection" for a discussion of how
-applications can affect the display of character control sequences.
-Additionally, just because you logged into the console using and xterm
-doesn't mean someone else didn't log into the console with the HMC console
-(vt320) before you and leave the session logged in. The best thing to do
-is to export TERM to the terminal type of your terminal emulator when you
-get the console. Additionally make sure to "exit" the console before you
-disconnect from the console. This will ensure that the next user gets
-their own TERM type set when they login.
-
----------------------------------------------------------------------------
-Q: When I try to CONNECT kermit to an hvcs device I get:
-"Sorry, can't open connection: /dev/hvcs*"What is happening?
-
-A: Some other Power5 console mechanism has a connection to the vty and
-isn't giving it up. You can try to force disconnect the consoles from the
-HMC by right clicking on the partition and then selecting "close terminal".
-Otherwise you have to hunt down the people who have console authority. It
-is possible that you already have the console open using another kermit
-session and just forgot about it. Please review the console options for
-Power5 systems to determine the many ways a system console can be held.
-
-OR
-
-A: Another user may not have a connectivity method currently attached to a
-/dev/hvcs device but the vterm_state may reveal that they still have the
-vty-server connection established. They need to free this using the method
-outlined in the section on "Disconnection" in order for others to connect
-to the target vty.
-
-OR
-
-A: The user profile you are using to execute kermit probably doesn't have
-permissions to use the /dev/hvcs* device.
-
-OR
-
-A: You probably haven't inserted the hvcs.ko module yet but the /dev/hvcs*
-entry still exists (on systems without udev).
-
-OR
-
-A: There is not a corresponding vty-server device that maps to an existing
-/dev/hvcs* entry.
-
----------------------------------------------------------------------------
-Q: When I try to CONNECT kermit to an hvcs device I get:
-"Sorry, write access to UUCP lockfile directory denied."
-
-A: The /dev/hvcs* entry you have specified doesn't exist where you said it
-does? Maybe you haven't inserted the module (on systems with udev).
-
----------------------------------------------------------------------------
-Q: If I already have one Linux partition installed can I use hvcs on said
-partition to provide the console for the install of a second Linux
-partition?
-
-A: Yes granted that your are connected to the /dev/hvcs* device using
-kermit or cu or some other program that doesn't provide terminal emulation.
-
----------------------------------------------------------------------------
-Q: Can I connect to more than one partition's console at a time using this
-driver?
-
-A: Yes. Of course this means that there must be more than one vty-server
-configured for this partition and each must point to a disconnected vty.
-
----------------------------------------------------------------------------
-Q: Does the hvcs driver support dynamic (hotplug) addition of devices?
-
-A: Yes, if you have dlpar and hotplug enabled for your system and it has
-been built into the kernel the hvcs drivers is configured to dynamically
-handle additions of new devices and removals of unused devices.
-
----------------------------------------------------------------------------
-Q: Can I use /dev/hvcs* as a conduit to another partition and use a tty
-device on that partition as the other end of the pipe?
-
-A: Yes, on Power5 platforms the hvc_console driver provides a tty interface
-for extra /dev/hvc* devices (where /dev/hvc0 is most likely the console).
-In order to get a tty conduit working between the two partitions the HMC
-Super Admin must create an additional "serial server" for the target
-partition with the HMC gui which will show up as /dev/hvc* when the target
-partition is rebooted.
-
-The HMC Super Admin then creates an additional "serial client" for the
-current partition and points this at the target partition's newly created
-"serial server" adapter (remember the slot). This shows up as an
-additional /dev/hvcs* device.
-
-Now a program on the target system can be configured to read or write to
-/dev/hvc* and another program on the current partition can be configured to
-read or write to /dev/hvcs*. Now you have a tty conduit between two
-partitions.
-
----------------------------------------------------------------------------
-9. Reporting Bugs:
-
-The proper channel for reporting bugs is either through the Linux OS
-distribution company that provided your OS or by posting issues to the
-ppc64 development mailing list at:
-
-linuxppc64-dev@lists.linuxppc.org
-
-This request is to provide a documented and searchable public exchange
-of the problems and solutions surrounding this driver for the benefit of
-all users.
+++ /dev/null
-Linux 2.6.x on MPC52xx family
------------------------------
-
-For the latest info, go to http://www.246tNt.com/mpc52xx/state.txt
-
-To compile/use :
-
- - U-Boot:
- # <edit Makefile to set ARCH=ppc & CROSS_COMPILE=... ( also EXTRAVERSION
- if you wish to ).
- # make lite5200_defconfig
- # make uImage
-
- then, on U-boot:
- => tftpboot 200000 uImage
- => tftpboot 400000 pRamdisk
- => bootm 200000 400000
-
- - DBug:
- # <edit Makefile to set ARCH=ppc & CROSS_COMPILE=... ( also EXTRAVERSION
- if you wish to ).
- # make lite5200_defconfig
- # cp your_initrd.gz arch/ppc/boot/images/ramdisk.image.gz
- # make zImage.initrd
- # make
-
- then in DBug:
- DBug> dn -i zImage.initrd.lite5200
-
-
-Some remarks :
- - The port is named mpc52xxx, and config options are PPC_MPC52xx. The MGT5100
- is not supported, and I'm not sure anyone is interesting in working on it
- so. I didn't took 5xxx because there's apparently a lot of 5xxx that have
- nothing to do with the MPC5200. I also included the 'MPC' for the same
- reason.
- - Of course, I inspired myself from the 2.4 port. If you think I forgot to
- mention you/your company in the copyright of some code, I'll correct it
- ASAP.
- - The codes wants the MBAR to be set at 0xf0000000 by the bootloader. It's
- mapped 1:1 with the MMU. If for whatever reason, you want to change this,
- beware that some code depends on the 0xf0000000 address and other depends
- on the 1:1 mapping.
- - Most of the code assumes that port multiplexing, frequency selection, ...
- has already been done. IMHO this should be done as early as possible, in
- the bootloader. If for whatever reason you can't do it there, do it in the
- platform setup code (if U-Boot) or in the arch/ppc/boot/simple/... (if
- DBug)
Then notify /sbin/init that /etc/inittab has changed, by issuing
the telinit command with the q operand:
- cd Documentation/s390
+ cd /usr/src/linux/Documentation/s390
sh config3270.sh
sh /tmp/mkdev3270
telinit q
Documentation
=============
There is a SCSI documentation directory within the kernel source tree,
-typically Documentation/scsi . Most documents are in plain
+typically /usr/src/linux/Documentation/scsi . Most documents are in plain
(i.e. ASCII) text. This file is named scsi_mid_low_api.txt and can be
found in that directory. A more recent copy of this document may be found
at http://www.torque.net/scsi/scsi_mid_low_api.txt.gz .
<para>
More precise information can be found in
- <filename>Documentation/sound/alsa/ControlNames.txt</filename>.
+ <filename>alsa-kernel/Documentation/sound/alsa/ControlNames.txt</filename>.
</para>
</section>
</section>
The callback is much more complicated than the text-file
version. You need to use a low-level i/o functions such as
<function>copy_from/to_user()</function> to transfer the
- data.
+ data. Also, you have to keep tracking the file position, too.
<informalexample>
<programlisting>
static long my_file_io_read(snd_info_entry_t *entry,
void *file_private_data,
struct file *file,
- char *buf,
- unsigned long count,
- unsigned long pos)
+ char *buf, long count)
{
long size = count;
- if (pos + size > local_max_size)
- size = local_max_size - pos;
- if (copy_to_user(buf, local_data + pos, size))
+ if (file->f_pos + size > local_max_size)
+ size = local_max_size - file->f_pos;
+ if (copy_to_user(buf, local_data + file->f_pos, size))
return -EFAULT;
+ file->f_pos += size;
return size;
}
]]>
# insmod awe_wave
(Be sure to load awe_wave after sb!)
- See Documentation/sound/oss/AWE32 for
+ See /usr/src/linux/Documentation/sound/oss/AWE32 for
more details.
9. (only for obsolete systems) If you don't have /dev/sequencer
========
0.1.0 11/20/1998 First version, draft
1.0.0 11/1998 Alan Cox changes, incorporation in 2.2.0
- as Documentation/sound/oss/Introduction
+ as /usr/src/linux/Documentation/sound/oss/Introduction
1.1.0 6/30/1999 Second version, added notes on making the drivers,
added info on multiple sound cards of similar types,]
added more diagnostics info, added info about esd.
4) OSS's WWW site at http://www.opensound.com.
-5) All the files in Documentation/sound.
+5) All the files in linux/Documentation/sound.
6) The comments and code in linux/drivers/sound.
This documentation is relevant for the PAS16 driver (pas2_card.c and
friends) under kernel version 2.3.99 and later. If you are
unfamiliar with configuring sound under Linux, please read the
-Sound-HOWTO, Documentation/sound/oss/Introduction and other
+Sound-HOWTO, linux/Documentation/sound/oss/Introduction and other
relevant docs first.
The following information is relevant information from README.OSS
The new stuff for 2.3.99 and later
============================================================================
-The following configuration options from Documentation/Configure.help
+The following configuration options from linux/Documentation/Configure.help
are relevant to configuring the PAS16:
Sound card support
dev/ device specific information (eg dev/cdrom/info)
fs/ specific filesystems
filehandle, inode, dentry and quota tuning
- binfmt_misc <Documentation/binfmt_misc.txt>
+ binfmt_misc <linux/Documentation/binfmt_misc.txt>
kernel/ global kernel info / tuning
miscellaneous stuff
net/ networking stuff, for documentation look in:
- <Documentation/networking/>
+ <linux/Documentation/networking/>
proc/ <empty>
sunrpc/ SUN Remote Procedure Call (NFS)
vm/ memory management tuning
- dirty_writeback_centisecs
- max_map_count
- min_free_kbytes
-- laptop_mode
-- block_dump
==============================================================
dirty_ratio, dirty_background_ratio, dirty_expire_centisecs,
-dirty_writeback_centisecs, vfs_cache_pressure, laptop_mode,
-block_dump:
+dirty_writeback_centisecs, vfs_cache_pressure:
See Documentation/filesystems/proc.txt
NOTE:
The USB subsystem now has a substantial section in "The Linux Kernel API"
- guide (in Documentation/DocBook), generated from the current source
+ guide (in linux/Documentation/DocBook), generated from the current source
code. This particular documentation file isn't particularly current or
complete; don't rely on it except for a quick overview.
2000-July-12
For USB help other than the readme files that are located in
-Documentation/usb/*, see the following:
+linux/Documentation/usb/*, see the following:
Linux-USB project: http://www.linux-usb.org
mirrors at http://www.suse.cz/development/linux-usb/
Information - video4linux:
http://roadrunner.swansea.linux.org.uk/v4lapi.shtml
-Documentation/video4linux/API.html
+/usr/src/linux/Documentation/video4linux/API.html
/usr/include/linux/videodev.h
Information - video4linux/mjpeg extensions:
used to change the file attributes on hugetlbfs.
Also, it is important to note that no such mount command is required if the
-applications are going to use only shmat/shmget system calls. It is possible
-for same or different applications to use any combination of mmaps and shm*
-calls. Though the mount of filesystem will be required for using mmaps.
+applications are going to use only shmat/shmget system calls. Users who
+wish to use hugetlb page via shared memory segment should be a member of
+a supplementary group and system admin needs to configure that gid into
+/proc/sys/vm/hugetlb_shm_group. It is possible for same or different
+applications to use any combination of mmaps and shm* calls. Though the
+mount of filesystem will be required for using mmaps.
/* Example of using hugepage in user application using Sys V shared memory
* system calls. In this example, app is requesting memory of size 256MB that
+++ /dev/null
-
-debug_switch:
-
- 0 1
-
- 1 2
-
- 2 4
-
- 3 8
-
- 4 16
-
- 5 32
-
- 6 64
-
- 7 128
-
-
-debug_xid:
-
- 0 1 "alloc_vx_info(%d) = %p\n"
- "dealloc_vx_info(%p)"
- "loc_vx_info(%d) = %p (not available)"
- "loc_vx_info(%d) = %p (found)"
- "loc_vx_info(%d) = %p (new)"
-
- 1 2 "alloc_vx_info(%d)*"
- "loc_vx_info(%d)*"
- "locate_vx_info(%d)"
-
- 2 4 "get_vx_info(%p[#%d.%d])"
- "put_vx_info(%p[#%d.%d])"
-
- 3 8 "set_vx_info(%p[#%d.%d.%d])"
- "clr_vx_info(%p[#%d.%d.%d])"
- "rcu_free_vx_info(%p): uc=%d"
-
- 4 16 "__hash_vx_info: %p[#%d]"
- "__unhash_vx_info: %p[#%d]"
- "__vx_dynamic_id: [#%d]"
-
- 5 32 "vx_migrate_task(%p,%p[#%d.%d])"
- "task_get_vx_info(%p)"
-
- 6 64
-
- 7 128
-
-
-debug_nid:
-
- 0 1 "alloc_nx_info() = %p"
- "dealloc_nx_info(%p)"
- "loc_nx_info(%d) = %p (not available)"
- "loc_nx_info(%d) = %p (found)"
- "loc_nx_info(%d) = %p (new)"
-
- 1 2 "alloc_nx_info(%d)*"
- "loc_nx_info(%d)*"
-
- 2 4 "get_nx_info(%p[#%d.%d])"
- "put_nx_info(%p[#%d.%d])"
-
- 3 8 "set_nx_info(%p[#%d.%d.%d])"
- "clr_nx_info(%p[#%d.%d.%d])"
- "rcu_free_nx_info(%p): uc=%d"
-
- 4 16 "__hash_nx_info: %p[#%d]"
- "__unhash_nx_info: %p[#%d]"
- "__nx_dynamic_id: [#%d]"
-
- 5 32 "nx_migrate_task(%p,%p[#%d.%d])"
- "task_get_nx_info(%p)"
- "create_nx_info()"
-
- 6 64
-
- 7 128
-
-
-debug_dlim:
-
- 0 1 "alloc_dl_info(%p,%d) = %p"
- "dealloc_dl_info(%p)"
- "locate_dl_info(%p,#%d) = %p"
-
- 1 2 "alloc_dl_info(%p,%d)*"
-
- 2 4 "get_dl_info(%p[#%d.%d])"
- "put_dl_info(%p[#%d.%d])"
-
- 3 8 "rcu_free_dl_info(%p)"
- "__hash_dl_info: %p[#%d]"
- "__unhash_dl_info: %p[#%d]"
-
-
- 4 16 "ALLOC (%p,#%d)%c inode (%d)"
- "FREE (%p,#%d)%c inode"
-
- 5 32 "ALLOC (%p,#%d)%c %lld bytes (%d)"
- "FREE (%p,#%d)%c %lld bytes"
-
- 6 64 "ADJUST: %lld,%lld on %d,%d [mult=%d]"
-
- 7 128 "ext3_has_free_blocks(%p): free=%u, root=%u"
- "ext3_has_free_blocks(%p): %u<%u+1, %c, %u!=%u r=%d"
-
-
-
-debug_cvirt:
-
-
- 0 1
-
- 1 2
-
- 2 4 "vx_map_tgid: %p/%llx: %d -> %d"
- "vx_rmap_tgid: %p/%llx: %d -> %d"
-
- 3 8
-
- 4 16
-
- 5 32
-
- 6 64
-
- 7 128
-
-
-
-debug_net:
-
-
- 0 1
-
- 1 2
-
- 2 4 "tcp_in_list(%p) %p,%p;%lx"
-
- 3 8 "inet_bind(%p) %p,%p;%lx"
-
- 4 16 "ip_route_connect(%p) %p,%p;%lx"
-
- 5 32 "tcp_ipv4_addr_conflict(%p,%p) %p,%p;%lx %p,%p;%lx"
-
- 6 64 "sk: %p [#%d] (from %d)"
- "sk,req: %p [#%d] (from %d)"
- "sk,egf: %p [#%d] (from %d)"
- "sk,egn: %p [#%d] (from %d)"
- "tw: %p [#%d] (from %d)"
-
- 7 128 "__sock_sendmsg: %p[%p,%p,%p;%d]:%d/%d"
- "__sock_recvmsg: %p[%p,%p,%p;%d]:%d/%d"
-
-
-
-
-debug_limit:
-
- n 2^n "vx_acc_cres[%5d,%s,%2d]: %5d%s"
- "vx_cres_avail[%5d,%s,%2d]: %5ld > %5d + %5d"
-
- m 2^m "vx_acc_page[%5d,%s,%2d]: %5d%s"
- "vx_acc_pages[%5d,%s,%2d]: %5d += %5d"
- "vx_pages_avail[%5d,%s,%2d]: %5ld > %5d + %5d"
-
-
DEFXX FDDI NETWORK DRIVER
P: Maciej W. Rozycki
-M: macro@linux-mips.org
+M: macro@ds2.pg.gda.pl
S: Maintained
DELL LAPTOP SMM DRIVER
L: linuxppc-embedded@lists.linuxppc.org
S: Maintained
-LINUX FOR POWERPC EMBEDDED PPC8XX AND BOOT CODE
-P: Tom Rini
-M: trini@kernel.crashing.org
-W: http://www.penguinppc.org/
-L: linuxppc-embedded@lists.linuxppc.org
-S: Maintained
-
LINUX FOR POWERPC EMBEDDED PPC85XX
P: Kumar Gala
M: kumar.gala@freescale.com
L: linux-scsi@vger.kernel.org
S: Maintained
-M68K ARCHITECTURE
-P: Geert Uytterhoeven
-M: geert@linux-m68k.org
-P: Roman Zippel
-M: zippel@linux-m68k.org
+M68K
+P: Jes Sorensen
+M: jes@trained-monkey.org
+W: http://www.clark.net/pub/lawrencc/linux/index.html
L: linux-m68k@lists.linux-m68k.org
-W: http://www.linux-m68k.org/
-W: http://linux-m68k-cvs.ubb.ca/
S: Maintained
M68K ON APPLE MACINTOSH
P: David Woodhouse
M: dwmw2@redhat.com
W: http://www.linux-mtd.infradead.org/
-L: linux-mtd@lists.infradead.org
+L: mtd@infradead.org
S: Maintained
MICROTEK X6 SCANNER
M: jmorris@redhat.com
P: Hideaki YOSHIFUJI
M: yoshfuji@linux-ipv6.org
-P: Patrick McHardy
-M: kaber@coreworks.de
L: netdev@oss.sgi.com
S: Maintained
ONSTREAM SCSI TAPE DRIVER
P: Willem Riede
M: osst@riede.org
-L: osst-users@lists.sourceforge.net
+L: osst@linux1.onstream.nl
L: linux-scsi@vger.kernel.org
S: Maintained
S: Maintained
SPARC (sparc32):
-P: William L. Irwin
-M: wli@holomorphy.com
+P: Keith M. Wesolowski
+M: wesolows@foobazco.org
L: sparclinux@vger.kernel.org
S: Maintained
VERSION = 2
PATCHLEVEL = 6
-SUBLEVEL = 8
-EXTRAVERSION = -1.521.2.5.planetlab
+SUBLEVEL = 7
+EXTRAVERSION = -1.492-ckrm.E15-vs1.9.1
NAME=Zonked Quokka
# *DOCUMENTATION*
KBUILD_CHECKSRC = 0
endif
-# Use make M=dir to specify directory of external module to build
+# Use make M=dir to specify direcotry of external module to build
# Old syntax make ... SUBDIRS=$PWD is still supported
# Setting the environment variable KBUILD_EXTMOD take precedence
ifdef SUBDIRS
_all: modules
endif
+# Make sure we're not wasting cpu-cycles doing locale handling, yet do make
+# sure error messages appear in the user-desired language
+ifdef LC_ALL
+LANG := $(LC_ALL)
+LC_ALL :=
+endif
+LC_COLLATE := C
+LC_CTYPE := C
+export LANG LC_ALL LC_COLLATE LC_CTYPE
+
srctree := $(if $(KBUILD_SRC),$(KBUILD_SRC),$(CURDIR))
TOPDIR := $(srctree)
# FIXME - TOPDIR is obsolete, use srctree/objtree
$(sort $(vmlinux-objs)) arch/$(ARCH)/kernel/vmlinux.lds.s: $(vmlinux-dirs) ;
-# Handle descending into subdirectories listed in $(vmlinux-dirs)
-# Preset locale variables to speed up the build process. Limit locale
-# tweaks to this spot to avoid wrong language settings when running
-# make menuconfig etc.
-# Error messages still appears in the original language
+# Handle descending into subdirectories listed in $(vmlinux-dirs)
.PHONY: $(vmlinux-dirs)
$(vmlinux-dirs): prepare-all scripts
# A multi level approach is used. prepare1 is updated first, then prepare0.
# prepare-all is the collection point for the prepare targets.
-.PHONY: prepare-all prepare prepare0 prepare1 prepare2
-
-# prepare 2 generate Makefile to be placed in output directory, if
-# using a seperate output directory. This allows convinient use
-# of make in output directory
-prepare2:
- $(Q)if [ ! $(srctree) -ef $(objtree) ]; then \
- $(CONFIG_SHELL) $(srctree)/scripts/mkmakefile \
- $(srctree) $(objtree) $(VERSION) $(PATCHLEVEL) \
- > $(objtree)/Makefile; \
- fi
+.PHONY: prepare-all prepare prepare0 prepare1
# prepare1 is used to check if we are building in a separate output directory,
# and if so do:
# 1) Check that make has not been executed in the kernel src $(srctree)
# 2) Create the include2 directory, used for the second asm symlink
-prepare1: prepare2
+prepare1:
ifneq ($(KBUILD_SRC),)
@echo ' Using $(srctree) as source for kernel'
$(Q)if [ -h $(srctree)/include/asm -o -f $(srctree)/.config ]; then \
sleep 1; \
fi
@rm -rf $(MODLIB)/kernel
- @rm -f $(MODLIB)/source
+ @rm -f $(MODLIB)/build
@mkdir -p $(MODLIB)/kernel
- @ln -s $(srctree) $(MODLIB)/source
- @if [ ! $(objtree) -ef $(MODLIB)/build ]; then \
- rm -f $(MODLIB)/build ; \
- ln -s $(objtree) $(MODLIB)/build ; \
- fi
+ @ln -s $(TOPDIR) $(MODLIB)/build
$(Q)$(MAKE) -rR -f $(srctree)/scripts/Makefile.modinst
# If System.map exists, run depmod. This deliberately does not have a
# ---------------------------------------------------------------------------
define all-sources
- ( find $(srctree) $(RCS_FIND_IGNORE) \
+ ( find . $(RCS_FIND_IGNORE) \
\( -name include -o -name arch \) -prune -o \
-name '*.[chS]' -print; \
- find $(srctree)/arch/$(ARCH) $(RCS_FIND_IGNORE) \
+ find arch/$(ARCH) $(RCS_FIND_IGNORE) \
-name '*.[chS]' -print; \
- find $(srctree)/security/selinux/include $(RCS_FIND_IGNORE) \
+ find security/selinux/include $(RCS_FIND_IGNORE) \
-name '*.[chS]' -print; \
- find $(srctree)/include $(RCS_FIND_IGNORE) \
+ find include $(RCS_FIND_IGNORE) \
\( -name config -o -name 'asm-*' \) -prune \
-o -name '*.[chS]' -print; \
- find $(srctree)/include/asm-$(ARCH) $(RCS_FIND_IGNORE) \
+ find include/asm-$(ARCH) $(RCS_FIND_IGNORE) \
-name '*.[chS]' -print; \
- find $(srctree)/include/asm-generic $(RCS_FIND_IGNORE) \
+ find include/asm-generic $(RCS_FIND_IGNORE) \
-name '*.[chS]' -print )
endef
- There are various README files in the Documentation/ subdirectory:
these typically contain kernel-specific installation notes for some
- drivers for example. See Documentation/00-INDEX for a list of what
+ drivers for example. See ./Documentation/00-INDEX for a list of what
is contained in each file. Please read the Changes file, as it
contains information about the problems, which may result by upgrading
your kernel.
Compiling and running the 2.6.xx kernels requires up-to-date
versions of various software packages. Consult
- Documentation/Changes for the minimum version numbers required
+ ./Documentation/Changes for the minimum version numbers required
and how to get updates for these packages. Beware that using
excessively old versions of these packages can cause indirect
errors that are very difficult to track down, so don't assume that
gcc 2.91.66 (egcs-1.1.2), and gcc 2.7.2.3 are known to miscompile
some parts of the kernel, and are *no longer supported*.
Also remember to upgrade your binutils package (for as/ld/nm and company)
- if necessary. For more information, refer to Documentation/Changes.
+ if necessary. For more information, refer to ./Documentation/Changes.
Please note that you can still run a.out user programs with this kernel.
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
jmp $31, do_sys_ptrace
.end sys_ptrace
- .align 4
- .globl sys_execve
- .ent sys_execve
-sys_execve:
- .prologue 0
- mov $sp, $19
- jmp $31, do_sys_execve
-.end sys_execve
-
- .align 4
- .globl osf_sigprocmask
- .ent osf_sigprocmask
-osf_sigprocmask:
- .prologue 0
- mov $sp, $18
- jmp $31, do_osf_sigprocmask
-.end osf_sigprocmask
-
.align 4
.globl alpha_ni_syscall
.ent alpha_ni_syscall
/*
* sys_execve() executes a new program.
+ *
+ * This works due to the alpha calling sequence: the first 6 args
+ * are gotten from registers, while the rest is on the stack, so
+ * we get a0-a5 for free, and then magically find "struct pt_regs"
+ * on the stack for us..
+ *
+ * Don't do this at home.
*/
asmlinkage int
-do_sys_execve(char __user *ufilename, char __user * __user *argv,
- char __user * __user *envp, struct pt_regs *regs)
+sys_execve(char __user *ufilename, char __user * __user *argv,
+ char __user * __user *envp,
+ unsigned long a3, unsigned long a4, unsigned long a5,
+ struct pt_regs regs)
{
int error;
char *filename;
error = PTR_ERR(filename);
if (IS_ERR(filename))
goto out;
- error = do_execve(filename, argv, envp, regs);
+ error = do_execve(filename, argv, envp, ®s);
putname(filename);
out:
return error;
* operation, as all of this is local to this thread.
*/
asmlinkage unsigned long
-do_osf_sigprocmask(int how, unsigned long newmask, struct pt_regs *regs)
+osf_sigprocmask(int how, unsigned long newmask, long a2, long a3,
+ long a4, long a5, struct pt_regs regs)
{
unsigned long oldmask = -EINVAL;
recalc_sigpending();
spin_unlock_irq(¤t->sighand->siglock);
- regs->r0 = 0; /* special no error return */
+ (®s)->r0 = 0; /* special no error return */
}
return oldmask;
}
* Copyright (C) 2001-2002 Jan-Benedict Glaw <jbglaw@lug-owl.de>
*
* This driver is at all a modified version of Erik Mouw's
- * Documentation/DocBook/procfs_example.c, so: thank
+ * ./linux/Documentation/DocBook/procfs_example.c, so: thank
* you, Erik! He can be reached via email at
* <J.A.K.Mouw@its.tudelft.nl>. It is based on an idea
* provided by DEC^WCompaq^WIntel's "Jumpstart" CD. They
.quad alpha_ni_syscall
.quad alpha_ni_syscall /* 220 */
.quad alpha_ni_syscall
-#ifdef CONFIG_TUX
- .quad __sys_tux
-#else
-# ifdef CONFIG_TUX_MODULE
- .quad sys_tux
-# else
.quad alpha_ni_syscall
-# endif
-#endif
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall /* 225 */
# Select various configuration options depending on the machine type
config DISCONTIGMEM
bool
- depends on ARCH_EDB7211 || ARCH_SA1100 || (ARCH_LH7A40X && !LH7A40X_CONTIGMEM)
+ depends on ARCH_EDB7211 || ARCH_SA1100 || (ARCH_LH7A40X && !LH7A40X_SROMLL)
default y
help
Say Y to support efficient handling of discontiguous physical memory,
help
This enables the CPUfreq driver for ARM Integrator CPUs.
- For details, take a look at <file:Documentation/cpu-freq>.
+ For details, take a look at linux/Documentation/cpu-freq.
If in doubt, say Y.
tune-$(CONFIG_CPU_V6) :=-mtune=strongarm
# Need -Uarm for gcc < 3.x
-CFLAGS +=-mapcs-32 $(arch-y) $(tune-y) $(call check_gcc,-malignment-traps,-mshort-load-bytes) -msoft-float -Wa,-mno-fpu -Uarm
+CFLAGS +=-mapcs-32 $(arch-y) $(tune-y) -mshort-load-bytes -msoft-float -Wa,-mno-fpu -Uarm
AFLAGS +=-mapcs-32 $(arch-y) $(tune-y) -msoft-float -Wa,-mno-fpu
-CHECK := $(CHECK) -D__arm__=1
-
#Default value
DATAADDR := .
--defsym params_phys=$(PARAMS_PHYS) -T
AFLAGS_initrd.o :=-DINITRD=\"$(INITRD)\"
-targets := bootp init.o kernel.o initrd.o
+targets := bootp bootp.lds init.o kernel.o initrd.o
# Note that bootp.lds picks up kernel.o and initrd.o
-$(obj)/bootp: $(src)/bootp.lds $(addprefix $(obj)/,init.o kernel.o initrd.o) FORCE
+$(obj)/bootp: $(addprefix $(obj)/,bootp.lds init.o kernel.o initrd.o) FORCE
$(call if_changed,ld)
@:
.type _start, #function
.globl _start
-_start: add lr, pc, #-0x8 @ lr = current load addr
- adr r13, data
+_start: adr r13, data
ldmia r13!, {r4-r6} @ r5 = dest, r6 = length
- add r4, r4, lr @ r4 = initrd_start + load addr
bl move @ move the initrd
/*
CFLAGS_font.o := -Dstatic=
$(obj)/font.o: $(FONTC)
-$(obj)/vmlinux.lds: $(obj)/vmlinux.lds.in arch/arm/boot/Makefile .config
+$(obj)/vmlinux.lds: $(obj)/vmlinux.lds.in Makefile arch/arm/boot/Makefile .config
@sed "$(SEDFLAGS)" < $< > $@
$(obj)/misc.o: $(obj)/misc.c include/asm/arch/uncompress.h lib/inflate.c
mov r0, #0x30
mcr p15, 0, r0, c1, c0, 0
mov r0, #0x13
- msr cpsr_cxsf, r0
+ msr cpsr, r0
mov r12, #0x03000000 @ point to LEDs
orr r12, r12, #0x00020000
orr r12, r12, #0xba00
/* Ensure all interrupts are off and MMU disabled */
mrs r0, cpsr
orr r0, r0, #0xc0
- msr cpsr_cxsf, r0
+ msr cpsr, r0
adr lr, 1b
orr lr, lr, #0x10000000
# CONFIG_LLC is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
#
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
-# CONFIG_POSIX_MQUEUE is not set
# CONFIG_BSD_PROCESS_ACCT is not set
CONFIG_SYSCTL=y
-# CONFIG_AUDIT is not set
-CONFIG_LOG_BUF_SHIFT=17
-# CONFIG_HOTPLUG is not set
+CONFIG_LOG_BUF_SHIFT=16
# CONFIG_IKCONFIG is not set
# CONFIG_EMBEDDED is not set
CONFIG_KALLSYMS=y
-# CONFIG_KALLSYMS_ALL is not set
-# CONFIG_KALLSYMS_EXTRA_PASS is not set
CONFIG_FUTEX=y
CONFIG_EPOLL=y
CONFIG_IOSCHED_NOOP=y
CONFIG_IOSCHED_AS=y
CONFIG_IOSCHED_DEADLINE=y
-CONFIG_IOSCHED_CFQ=y
-CONFIG_CC_OPTIMIZE_FOR_SIZE=y
#
# Loadable module support
#
# System Type
#
+# CONFIG_ARCH_ADIFCC is not set
+# CONFIG_ARCH_ANAKIN is not set
# CONFIG_ARCH_CLPS7500 is not set
# CONFIG_ARCH_CLPS711X is not set
# CONFIG_ARCH_CO285 is not set
+# CONFIG_ARCH_PXA is not set
# CONFIG_ARCH_EBSA110 is not set
# CONFIG_ARCH_CAMELOT is not set
# CONFIG_ARCH_FOOTBRIDGE is not set
# CONFIG_ARCH_INTEGRATOR is not set
# CONFIG_ARCH_IOP3XX is not set
-# CONFIG_ARCH_IXP4XX is not set
# CONFIG_ARCH_L7200 is not set
-# CONFIG_ARCH_PXA is not set
# CONFIG_ARCH_RPC is not set
# CONFIG_ARCH_SA1100 is not set
-CONFIG_ARCH_S3C2410=y
# CONFIG_ARCH_SHARK is not set
-# CONFIG_ARCH_LH7A40X is not set
-# CONFIG_ARCH_OMAP is not set
-# CONFIG_ARCH_VERSATILE_PB is not set
+CONFIG_ARCH_S3C2410=y
+
+#
+# CLPS711X/EP721X Implementations
+#
+
+#
+# Epxa10db
+#
+
+#
+# Footbridge Implementations
+#
+
+#
+# IOP3xx Implementation Options
+#
+# CONFIG_ARCH_IOP310 is not set
+# CONFIG_ARCH_IOP321 is not set
+
+#
+# IOP3xx Chipset Features
+#
+
+#
+# Intel PXA250/210 Implementations
+#
+
+#
+# SA11x0 Implementations
+#
#
# S3C2410 Implementations
#
CONFIG_ARCH_BAST=y
-# CONFIG_ARCH_H1940 is not set
-# CONFIG_ARCH_SMDK2410 is not set
-CONFIG_MACH_VR1000=y
#
# Processor Type
# General setup
#
# CONFIG_ZBOOT_ROM is not set
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_ZBOOT_ROM_TEXT=0
+CONFIG_ZBOOT_ROM_BSS=0
+# CONFIG_HOTPLUG is not set
#
# At least one math emulation must be selected
CONFIG_FPE_NWFPE=y
CONFIG_FPE_NWFPE_XP=y
# CONFIG_FPE_FASTFPE is not set
-# CONFIG_VFP is not set
CONFIG_BINFMT_ELF=y
CONFIG_BINFMT_AOUT=y
# CONFIG_BINFMT_MISC is not set
#
# Generic Driver Options
#
-CONFIG_PREVENT_FIRMWARE_BUILD=y
-# CONFIG_DEBUG_DRIVER is not set
# CONFIG_PM is not set
# CONFIG_PREEMPT is not set
# CONFIG_ARTHUR is not set
-CONFIG_S3C2410_DMA=y
-# CONFIG_S3C2410_DMA_DEBUG is not set
CONFIG_CMDLINE="root=/dev/hda1 ro init=/bin/bash console=ttySAC0"
CONFIG_ALIGNMENT_TRAP=y
CONFIG_PARPORT=y
CONFIG_PARPORT_PC=y
CONFIG_PARPORT_PC_CML1=y
+# CONFIG_PARPORT_SERIAL is not set
CONFIG_PARPORT_PC_FIFO=y
CONFIG_PARPORT_PC_SUPERIO=y
# CONFIG_PARPORT_ARC is not set
#
# Plug and Play support
#
+# CONFIG_PNP is not set
#
# Block devices
# CONFIG_NET_IPIP is not set
# CONFIG_NET_IPGRE is not set
# CONFIG_ARPD is not set
+# CONFIG_INET_ECN is not set
# CONFIG_SYN_COOKIES is not set
# CONFIG_INET_AH is not set
# CONFIG_INET_ESP is not set
# CONFIG_INET_IPCOMP is not set
# CONFIG_IPV6 is not set
+# CONFIG_DECNET is not set
+# CONFIG_BRIDGE is not set
# CONFIG_NETFILTER is not set
#
# SCTP Configuration (EXPERIMENTAL)
#
+CONFIG_IPV6_SCTP__=y
# CONFIG_IP_SCTP is not set
# CONFIG_ATM is not set
-# CONFIG_BRIDGE is not set
# CONFIG_VLAN_8021Q is not set
-# CONFIG_DECNET is not set
# CONFIG_LLC2 is not set
# CONFIG_IPX is not set
# CONFIG_ATALK is not set
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# QoS and/or fair queueing
#
# CONFIG_NET_SCHED is not set
-# CONFIG_NET_CLS_ROUTE is not set
#
# Network testing
#
# CONFIG_NET_PKTGEN is not set
-# CONFIG_NETPOLL is not set
-# CONFIG_NET_POLL_CONTROLLER is not set
-# CONFIG_HAMRADIO is not set
-# CONFIG_IRDA is not set
-# CONFIG_BT is not set
CONFIG_NETDEVICES=y
# CONFIG_DUMMY is not set
# CONFIG_BONDING is not set
#
# Ethernet (10000 Mbit)
#
+# CONFIG_PLIP is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
#
-# Token Ring devices
+# Wireless LAN (non-hamradio)
#
+# CONFIG_NET_RADIO is not set
+# CONFIG_HOSTAP is not set
#
-# Wireless LAN (non-hamradio)
+# Token Ring devices
#
-# CONFIG_NET_RADIO is not set
+# CONFIG_SHAPER is not set
#
# Wan interfaces
#
# CONFIG_WAN is not set
-# CONFIG_PLIP is not set
-# CONFIG_PPP is not set
-# CONFIG_SLIP is not set
-# CONFIG_SHAPER is not set
-# CONFIG_NETCONSOLE is not set
+
+#
+# Amateur Radio support
+#
+# CONFIG_HAMRADIO is not set
+
+#
+# IrDA (infrared) support
+#
+# CONFIG_IRDA is not set
+
+#
+# Bluetooth support
+#
+# CONFIG_BT is not set
#
# ATA/ATAPI/MFM/RLL support
#
# Please see Documentation/ide.txt for help/info on IDE drives
#
-# CONFIG_BLK_DEV_IDE_SATA is not set
CONFIG_BLK_DEV_IDEDISK=y
# CONFIG_IDEDISK_MULTI_MODE is not set
+# CONFIG_IDEDISK_STROKE is not set
CONFIG_BLK_DEV_IDECD=y
CONFIG_BLK_DEV_IDETAPE=m
CONFIG_BLK_DEV_IDEFLOPPY=m
#
# IDE chipset support/bugfixes
#
-CONFIG_IDE_GENERIC=y
-# CONFIG_IDE_ARM is not set
+CONFIG_BLK_DEV_IDE_BAST=y
# CONFIG_BLK_DEV_IDEDMA is not set
# CONFIG_IDEDMA_AUTO is not set
+# CONFIG_DMA_NONPCI is not set
# CONFIG_BLK_DEV_HD is not set
#
#
# CONFIG_SCSI is not set
-#
-# Fusion MPT device support
-#
-
-#
-# IEEE 1394 (FireWire) support
-#
-# CONFIG_IEEE1394 is not set
-
#
# I2O device support
#
#
# ISDN subsystem
#
-# CONFIG_ISDN is not set
+# CONFIG_ISDN_BOOL is not set
#
# Input device support
CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
# CONFIG_INPUT_JOYDEV is not set
# CONFIG_INPUT_TSDEV is not set
+# CONFIG_INPUT_TSLIBDEV is not set
# CONFIG_INPUT_EVDEV is not set
# CONFIG_INPUT_EVBUG is not set
# CONFIG_GAMEPORT is not set
CONFIG_SOUND_GAMEPORT=y
CONFIG_SERIO=y
-# CONFIG_SERIO_I8042 is not set
+CONFIG_SERIO_I8042=y
CONFIG_SERIO_SERPORT=y
# CONFIG_SERIO_CT82C710 is not set
# CONFIG_SERIO_PARKBD is not set
# Input Device Drivers
#
CONFIG_INPUT_KEYBOARD=y
-# CONFIG_KEYBOARD_ATKBD is not set
+CONFIG_KEYBOARD_ATKBD=y
# CONFIG_KEYBOARD_SUNKBD is not set
-# CONFIG_KEYBOARD_LKKBD is not set
# CONFIG_KEYBOARD_XTKBD is not set
# CONFIG_KEYBOARD_NEWTON is not set
CONFIG_INPUT_MOUSE=y
CONFIG_MOUSE_PS2=y
+# CONFIG_MOUSE_PS2_SYNAPTICS is not set
# CONFIG_MOUSE_SERIAL is not set
-# CONFIG_MOUSE_VSXXXAA is not set
# CONFIG_INPUT_JOYSTICK is not set
# CONFIG_INPUT_TOUCHSCREEN is not set
# CONFIG_INPUT_MISC is not set
#
CONFIG_SERIAL_S3C2410=y
CONFIG_SERIAL_S3C2410_CONSOLE=y
-# CONFIG_SERIAL_BAST_SIO is not set
+# CONFIG_SERIAL_DZ is not set
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
CONFIG_UNIX98_PTYS=y
-CONFIG_LEGACY_PTYS=y
-CONFIG_LEGACY_PTY_COUNT=256
+CONFIG_UNIX98_PTY_COUNT=256
CONFIG_PRINTER=y
# CONFIG_LP_CONSOLE is not set
CONFIG_PPDEV=y
# CONFIG_TIPAR is not set
-# CONFIG_QIC02_TAPE is not set
-
-#
-# IPMI
-#
-# CONFIG_IPMI_HANDLER is not set
-
-#
-# Watchdog Cards
-#
-# CONFIG_WATCHDOG is not set
-# CONFIG_NVRAM is not set
-CONFIG_RTC=y
-# CONFIG_DTLK is not set
-# CONFIG_R3964 is not set
-# CONFIG_APPLICOM is not set
-
-#
-# Ftape, the floppy tape device driver
-#
-# CONFIG_FTAPE is not set
-# CONFIG_AGP is not set
-# CONFIG_DRM is not set
-# CONFIG_RAW_DRIVER is not set
#
# I2C support
#
# CONFIG_I2C_AMD756 is not set
# CONFIG_I2C_AMD8111 is not set
-# CONFIG_I2C_ISA is not set
-# CONFIG_I2C_PARPORT is not set
-# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_PHILIPSPAR is not set
# CONFIG_SCx200_ACB is not set
#
-# Hardware Sensors Chip support
+# I2C Hardware Sensors Chip support
#
CONFIG_I2C_SENSOR=m
# CONFIG_SENSORS_ADM1021 is not set
-# CONFIG_SENSORS_ASB100 is not set
-# CONFIG_SENSORS_DS1621 is not set
-# CONFIG_SENSORS_FSCHER is not set
-# CONFIG_SENSORS_GL518SM is not set
+CONFIG_SENSORS_EEPROM=m
# CONFIG_SENSORS_IT87 is not set
CONFIG_SENSORS_LM75=m
CONFIG_SENSORS_LM78=m
-# CONFIG_SENSORS_LM80 is not set
-# CONFIG_SENSORS_LM83 is not set
CONFIG_SENSORS_LM85=m
-# CONFIG_SENSORS_LM90 is not set
-# CONFIG_SENSORS_MAX1619 is not set
# CONFIG_SENSORS_VIA686A is not set
# CONFIG_SENSORS_W83781D is not set
-# CONFIG_SENSORS_W83L785TS is not set
-# CONFIG_SENSORS_W83627HF is not set
#
-# Other I2C Chip support
+# L3 serial bus support
#
-CONFIG_SENSORS_EEPROM=m
-# CONFIG_SENSORS_PCF8574 is not set
-# CONFIG_SENSORS_PCF8591 is not set
-# CONFIG_SENSORS_RTC8564 is not set
-# CONFIG_I2C_DEBUG_CORE is not set
-# CONFIG_I2C_DEBUG_ALGO is not set
-# CONFIG_I2C_DEBUG_BUS is not set
-# CONFIG_I2C_DEBUG_CHIP is not set
+# CONFIG_L3 is not set
+
+#
+# Mice
+#
+# CONFIG_BUSMOUSE is not set
+# CONFIG_QIC02_TAPE is not set
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+# CONFIG_WATCHDOG is not set
+# CONFIG_NVRAM is not set
+CONFIG_RTC=y
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+# CONFIG_FTAPE is not set
+# CONFIG_AGP is not set
+# CONFIG_DRM is not set
+# CONFIG_RAW_DRIVER is not set
#
# Multimedia devices
#
# CONFIG_DVB is not set
+#
+# MMC/SD Card support
+#
+# CONFIG_MMC is not set
+
#
# File systems
#
CONFIG_FAT_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
-CONFIG_FAT_DEFAULT_CODEPAGE=437
-CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
# CONFIG_NTFS_FS is not set
#
# Pseudo filesystems
#
CONFIG_PROC_FS=y
-CONFIG_SYSFS=y
# CONFIG_DEVFS_FS is not set
+CONFIG_DEVPTS_FS=y
# CONFIG_DEVPTS_FS_XATTR is not set
# CONFIG_TMPFS is not set
# CONFIG_HUGETLBFS is not set
# CONFIG_ADFS_FS is not set
# CONFIG_AFFS_FS is not set
# CONFIG_HFS_FS is not set
-# CONFIG_HFSPLUS_FS is not set
# CONFIG_BEFS_FS is not set
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
CONFIG_LOCKD=y
# CONFIG_EXPORTFS is not set
CONFIG_SUNRPC=y
-# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_SUNRPC_GSS is not set
# CONFIG_SMB_FS is not set
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
+# CONFIG_INTERMEZZO_FS is not set
# CONFIG_AFS_FS is not set
#
CONFIG_SOLARIS_X86_PARTITION=y
# CONFIG_UNIXWARE_DISKLABEL is not set
# CONFIG_LDM_PARTITION is not set
+# CONFIG_NEC98_PARTITION is not set
# CONFIG_SGI_PARTITION is not set
# CONFIG_ULTRIX_PARTITION is not set
# CONFIG_SUN_PARTITION is not set
# CONFIG_EFI_PARTITION is not set
+CONFIG_NLS=y
#
# Native Language Support
#
-CONFIG_NLS=y
CONFIG_NLS_DEFAULT="iso8859-1"
# CONFIG_NLS_CODEPAGE_437 is not set
# CONFIG_NLS_CODEPAGE_737 is not set
# CONFIG_NLS_ISO8859_8 is not set
# CONFIG_NLS_CODEPAGE_1250 is not set
# CONFIG_NLS_CODEPAGE_1251 is not set
-# CONFIG_NLS_ASCII is not set
# CONFIG_NLS_ISO8859_1 is not set
# CONFIG_NLS_ISO8859_2 is not set
# CONFIG_NLS_ISO8859_3 is not set
# CONFIG_NLS_KOI8_U is not set
# CONFIG_NLS_UTF8 is not set
-#
-# Profiling support
-#
-# CONFIG_PROFILING is not set
-
#
# Graphics support
#
# CONFIG_LOGO is not set
#
-# Sound
+# Misc devices
#
-# CONFIG_SOUND is not set
#
-# Misc devices
+# Multimedia Capabilities Port drivers
#
+# CONFIG_MCP is not set
#
-# USB support
+# Console Switches
#
+# CONFIG_SWITCHES is not set
#
-# USB Gadget Support
+# USB support
#
# CONFIG_USB_GADGET is not set
# CONFIG_DEBUG_BUGVERBOSE is not set
# CONFIG_DEBUG_ERRORS is not set
CONFIG_DEBUG_LL=y
-# CONFIG_DEBUG_ICEDCC is not set
+CONFIG_DEBUG_LL_PRINTK=y
CONFIG_DEBUG_S3C2410_PORT=y
CONFIG_DEBUG_S3C2410_UART=0
#
# Library routines
#
-# CONFIG_CRC_CCITT is not set
CONFIG_CRC32=y
-# CONFIG_LIBCRC32C is not set
CONFIG_ZLIB_INFLATE=y
CONFIG_ZLIB_DEFLATE=y
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_LLC is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
# CONFIG_CPU_IS_SLOW is not set
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_ECONET_AUNUDP is not set
# CONFIG_ECONET_NATIVE is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
CONFIG_ECONET_AUNUDP=y
CONFIG_ECONET_NATIVE=y
CONFIG_WAN_ROUTER=m
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
#
CONFIG_EXPERIMENTAL=y
# CONFIG_CLEAN_COMPILE is not set
+CONFIG_STANDALONE=y
CONFIG_BROKEN=y
CONFIG_BROKEN_ON_SMP=y
#
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
-# CONFIG_POSIX_MQUEUE is not set
# CONFIG_BSD_PROCESS_ACCT is not set
CONFIG_SYSCTL=y
-# CONFIG_AUDIT is not set
CONFIG_LOG_BUF_SHIFT=16
-# CONFIG_HOTPLUG is not set
# CONFIG_IKCONFIG is not set
# CONFIG_EMBEDDED is not set
CONFIG_KALLSYMS=y
-# CONFIG_KALLSYMS_ALL is not set
-# CONFIG_KALLSYMS_EXTRA_PASS is not set
CONFIG_FUTEX=y
CONFIG_EPOLL=y
CONFIG_IOSCHED_NOOP=y
CONFIG_IOSCHED_AS=y
CONFIG_IOSCHED_DEADLINE=y
-CONFIG_IOSCHED_CFQ=y
-CONFIG_CC_OPTIMIZE_FOR_SIZE=y
#
# Loadable module support
#
# System Type
#
+# CONFIG_ARCH_ADIFCC is not set
+# CONFIG_ARCH_ANAKIN is not set
# CONFIG_ARCH_CLPS7500 is not set
# CONFIG_ARCH_CLPS711X is not set
# CONFIG_ARCH_CO285 is not set
+# CONFIG_ARCH_PXA is not set
# CONFIG_ARCH_EBSA110 is not set
# CONFIG_ARCH_CAMELOT is not set
# CONFIG_ARCH_FOOTBRIDGE is not set
# CONFIG_ARCH_INTEGRATOR is not set
# CONFIG_ARCH_IOP3XX is not set
-# CONFIG_ARCH_IXP4XX is not set
# CONFIG_ARCH_L7200 is not set
-# CONFIG_ARCH_PXA is not set
# CONFIG_ARCH_RPC is not set
# CONFIG_ARCH_SA1100 is not set
-CONFIG_ARCH_S3C2410=y
# CONFIG_ARCH_SHARK is not set
-# CONFIG_ARCH_LH7A40X is not set
-# CONFIG_ARCH_OMAP is not set
-# CONFIG_ARCH_VERSATILE_PB is not set
+CONFIG_ARCH_S3C2410=y
+
+#
+# CLPS711X/EP721X Implementations
+#
+
+#
+# Epxa10db
+#
+
+#
+# Footbridge Implementations
+#
+
+#
+# IOP3xx Implementation Options
+#
+# CONFIG_ARCH_IOP310 is not set
+# CONFIG_ARCH_IOP321 is not set
+
+#
+# IOP3xx Chipset Features
+#
+
+#
+# Intel PXA250/210 Implementations
+#
+
+#
+# SA11x0 Implementations
+#
#
# S3C2410 Implementations
#
CONFIG_ARCH_BAST=y
CONFIG_ARCH_H1940=y
-CONFIG_ARCH_SMDK2410=y
-CONFIG_MACH_VR1000=y
#
# Processor Type
# General setup
#
# CONFIG_ZBOOT_ROM is not set
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_ZBOOT_ROM_TEXT=0
+CONFIG_ZBOOT_ROM_BSS=0
+# CONFIG_HOTPLUG is not set
#
# At least one math emulation must be selected
CONFIG_FPE_NWFPE=y
CONFIG_FPE_NWFPE_XP=y
# CONFIG_FPE_FASTFPE is not set
-# CONFIG_VFP is not set
CONFIG_BINFMT_ELF=y
CONFIG_BINFMT_AOUT=y
# CONFIG_BINFMT_MISC is not set
#
# Generic Driver Options
#
-CONFIG_STANDALONE=y
-CONFIG_PREVENT_FIRMWARE_BUILD=y
-# CONFIG_DEBUG_DRIVER is not set
# CONFIG_PM is not set
# CONFIG_PREEMPT is not set
# CONFIG_ARTHUR is not set
CONFIG_PARPORT=y
CONFIG_PARPORT_PC=y
CONFIG_PARPORT_PC_CML1=y
+# CONFIG_PARPORT_SERIAL is not set
CONFIG_PARPORT_PC_FIFO=y
CONFIG_PARPORT_PC_SUPERIO=y
# CONFIG_PARPORT_ARC is not set
# CONFIG_MTD_JEDECPROBE is not set
CONFIG_MTD_GEN_PROBE=y
# CONFIG_MTD_CFI_ADV_OPTIONS is not set
-CONFIG_MTD_MAP_BANK_WIDTH_1=y
-CONFIG_MTD_MAP_BANK_WIDTH_2=y
-CONFIG_MTD_MAP_BANK_WIDTH_4=y
-# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
-# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
-# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
-CONFIG_MTD_CFI_I1=y
-CONFIG_MTD_CFI_I2=y
-# CONFIG_MTD_CFI_I4 is not set
-# CONFIG_MTD_CFI_I8 is not set
CONFIG_MTD_CFI_INTELEXT=y
# CONFIG_MTD_CFI_AMDSTD is not set
# CONFIG_MTD_CFI_STAA is not set
-CONFIG_MTD_CFI_UTIL=y
# CONFIG_MTD_RAM is not set
# CONFIG_MTD_ROM is not set
# CONFIG_MTD_ABSENT is not set
# Self-contained MTD device drivers
#
# CONFIG_MTD_SLRAM is not set
-# CONFIG_MTD_PHRAM is not set
# CONFIG_MTD_MTDRAM is not set
# CONFIG_MTD_BLKMTD is not set
#
# Plug and Play support
#
+# CONFIG_PNP is not set
#
# Block devices
# CONFIG_NET_IPIP is not set
# CONFIG_NET_IPGRE is not set
# CONFIG_ARPD is not set
+# CONFIG_INET_ECN is not set
# CONFIG_SYN_COOKIES is not set
# CONFIG_INET_AH is not set
# CONFIG_INET_ESP is not set
# CONFIG_INET_IPCOMP is not set
# CONFIG_IPV6 is not set
+# CONFIG_DECNET is not set
+# CONFIG_BRIDGE is not set
# CONFIG_NETFILTER is not set
#
# SCTP Configuration (EXPERIMENTAL)
#
+CONFIG_IPV6_SCTP__=y
# CONFIG_IP_SCTP is not set
# CONFIG_ATM is not set
-# CONFIG_BRIDGE is not set
# CONFIG_VLAN_8021Q is not set
-# CONFIG_DECNET is not set
# CONFIG_LLC2 is not set
# CONFIG_IPX is not set
# CONFIG_ATALK is not set
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# QoS and/or fair queueing
#
# CONFIG_NET_SCHED is not set
-# CONFIG_NET_CLS_ROUTE is not set
#
# Network testing
#
# CONFIG_NET_PKTGEN is not set
-# CONFIG_NETPOLL is not set
-# CONFIG_NET_POLL_CONTROLLER is not set
-# CONFIG_HAMRADIO is not set
-# CONFIG_IRDA is not set
-# CONFIG_BT is not set
CONFIG_NETDEVICES=y
# CONFIG_DUMMY is not set
# CONFIG_BONDING is not set
#
# Ethernet (10000 Mbit)
#
+# CONFIG_PLIP is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
#
-# Token Ring devices
+# Wireless LAN (non-hamradio)
#
+# CONFIG_NET_RADIO is not set
+# CONFIG_HOSTAP is not set
#
-# Wireless LAN (non-hamradio)
+# Token Ring devices
#
-# CONFIG_NET_RADIO is not set
+# CONFIG_SHAPER is not set
#
# Wan interfaces
#
# CONFIG_WAN is not set
-# CONFIG_PLIP is not set
-# CONFIG_PPP is not set
-# CONFIG_SLIP is not set
-# CONFIG_SHAPER is not set
-# CONFIG_NETCONSOLE is not set
+
+#
+# Amateur Radio support
+#
+# CONFIG_HAMRADIO is not set
+
+#
+# IrDA (infrared) support
+#
+# CONFIG_IRDA is not set
+
+#
+# Bluetooth support
+#
+# CONFIG_BT is not set
#
# ATA/ATAPI/MFM/RLL support
#
# Please see Documentation/ide.txt for help/info on IDE drives
#
-# CONFIG_BLK_DEV_IDE_SATA is not set
CONFIG_BLK_DEV_IDEDISK=y
# CONFIG_IDEDISK_MULTI_MODE is not set
+# CONFIG_IDEDISK_STROKE is not set
CONFIG_BLK_DEV_IDECD=y
CONFIG_BLK_DEV_IDETAPE=m
CONFIG_BLK_DEV_IDEFLOPPY=m
#
# IDE chipset support/bugfixes
#
-CONFIG_IDE_GENERIC=y
-# CONFIG_IDE_ARM is not set
+CONFIG_BLK_DEV_IDE_BAST=y
# CONFIG_BLK_DEV_IDEDMA is not set
# CONFIG_IDEDMA_AUTO is not set
+# CONFIG_DMA_NONPCI is not set
# CONFIG_BLK_DEV_HD is not set
#
#
# CONFIG_SCSI is not set
-#
-# Fusion MPT device support
-#
-
-#
-# IEEE 1394 (FireWire) support
-#
-# CONFIG_IEEE1394 is not set
-
#
# I2O device support
#
#
# ISDN subsystem
#
-# CONFIG_ISDN is not set
+# CONFIG_ISDN_BOOL is not set
#
# Input device support
CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
# CONFIG_INPUT_JOYDEV is not set
# CONFIG_INPUT_TSDEV is not set
+# CONFIG_INPUT_TSLIBDEV is not set
# CONFIG_INPUT_EVDEV is not set
# CONFIG_INPUT_EVBUG is not set
CONFIG_INPUT_KEYBOARD=y
CONFIG_KEYBOARD_ATKBD=y
# CONFIG_KEYBOARD_SUNKBD is not set
-# CONFIG_KEYBOARD_LKKBD is not set
# CONFIG_KEYBOARD_XTKBD is not set
# CONFIG_KEYBOARD_NEWTON is not set
CONFIG_INPUT_MOUSE=y
CONFIG_MOUSE_PS2=y
+# CONFIG_MOUSE_PS2_SYNAPTICS is not set
# CONFIG_MOUSE_SERIAL is not set
-# CONFIG_MOUSE_VSXXXAA is not set
# CONFIG_INPUT_JOYSTICK is not set
# CONFIG_INPUT_TOUCHSCREEN is not set
# CONFIG_INPUT_MISC is not set
# CONFIG_DIGI is not set
# CONFIG_MOXA_INTELLIO is not set
# CONFIG_MOXA_SMARTIO is not set
+# CONFIG_ISI is not set
+# CONFIG_SYNCLINK is not set
# CONFIG_SYNCLINKMP is not set
# CONFIG_N_HDLC is not set
# CONFIG_RISCOM8 is not set
CONFIG_SERIAL_S3C2410=y
CONFIG_SERIAL_S3C2410_CONSOLE=y
CONFIG_SERIAL_BAST_SIO=y
+# CONFIG_SERIAL_DZ is not set
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
CONFIG_UNIX98_PTYS=y
-CONFIG_LEGACY_PTYS=y
-CONFIG_LEGACY_PTY_COUNT=256
+CONFIG_UNIX98_PTY_COUNT=256
CONFIG_PRINTER=y
# CONFIG_LP_CONSOLE is not set
CONFIG_PPDEV=y
# CONFIG_TIPAR is not set
-# CONFIG_QIC02_TAPE is not set
-
-#
-# IPMI
-#
-# CONFIG_IPMI_HANDLER is not set
-
-#
-# Watchdog Cards
-#
-# CONFIG_WATCHDOG is not set
-# CONFIG_NVRAM is not set
-CONFIG_RTC=y
-# CONFIG_DTLK is not set
-# CONFIG_R3964 is not set
-
-#
-# Ftape, the floppy tape device driver
-#
-# CONFIG_AGP is not set
-# CONFIG_DRM is not set
-# CONFIG_RAW_DRIVER is not set
#
# I2C support
#
# CONFIG_I2C_AMD756 is not set
# CONFIG_I2C_AMD8111 is not set
-# CONFIG_I2C_ISA is not set
-# CONFIG_I2C_PARPORT is not set
-# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_PHILIPSPAR is not set
# CONFIG_SCx200_ACB is not set
#
-# Hardware Sensors Chip support
+# I2C Hardware Sensors Chip support
#
CONFIG_I2C_SENSOR=m
# CONFIG_SENSORS_ADM1021 is not set
-# CONFIG_SENSORS_ADM1025 is not set
-# CONFIG_SENSORS_ADM1031 is not set
-# CONFIG_SENSORS_ASB100 is not set
-# CONFIG_SENSORS_DS1621 is not set
-# CONFIG_SENSORS_FSCHER is not set
-# CONFIG_SENSORS_GL518SM is not set
+CONFIG_SENSORS_EEPROM=m
# CONFIG_SENSORS_IT87 is not set
CONFIG_SENSORS_LM75=m
-# CONFIG_SENSORS_LM77 is not set
CONFIG_SENSORS_LM78=m
-# CONFIG_SENSORS_LM80 is not set
-# CONFIG_SENSORS_LM83 is not set
CONFIG_SENSORS_LM85=m
-# CONFIG_SENSORS_LM90 is not set
-# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_VIA686A is not set
# CONFIG_SENSORS_W83781D is not set
-# CONFIG_SENSORS_W83L785TS is not set
-# CONFIG_SENSORS_W83627HF is not set
#
-# Other I2C Chip support
+# L3 serial bus support
#
-CONFIG_SENSORS_EEPROM=m
-# CONFIG_SENSORS_PCF8574 is not set
-# CONFIG_SENSORS_PCF8591 is not set
-# CONFIG_SENSORS_RTC8564 is not set
-# CONFIG_I2C_DEBUG_CORE is not set
-# CONFIG_I2C_DEBUG_ALGO is not set
-# CONFIG_I2C_DEBUG_BUS is not set
-# CONFIG_I2C_DEBUG_CHIP is not set
+# CONFIG_L3 is not set
+
+#
+# Mice
+#
+# CONFIG_BUSMOUSE is not set
+# CONFIG_QIC02_TAPE is not set
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+# CONFIG_WATCHDOG is not set
+# CONFIG_NVRAM is not set
+CONFIG_RTC=y
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+# CONFIG_FTAPE is not set
+# CONFIG_AGP is not set
+# CONFIG_DRM is not set
+# CONFIG_RAW_DRIVER is not set
#
# Multimedia devices
#
# CONFIG_DVB is not set
+#
+# MMC/SD Card support
+#
+# CONFIG_MMC is not set
+
#
# File systems
#
# Pseudo filesystems
#
CONFIG_PROC_FS=y
-CONFIG_SYSFS=y
# CONFIG_DEVFS_FS is not set
+CONFIG_DEVPTS_FS=y
# CONFIG_DEVPTS_FS_XATTR is not set
# CONFIG_TMPFS is not set
# CONFIG_HUGETLBFS is not set
# CONFIG_ADFS_FS is not set
# CONFIG_AFFS_FS is not set
# CONFIG_HFS_FS is not set
-# CONFIG_HFSPLUS_FS is not set
# CONFIG_BEFS_FS is not set
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
CONFIG_JFFS_FS=y
CONFIG_JFFS_FS_VERBOSE=0
-# CONFIG_JFFS_PROC_FS is not set
CONFIG_JFFS2_FS=y
CONFIG_JFFS2_FS_DEBUG=0
# CONFIG_JFFS2_FS_NAND is not set
-# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
-CONFIG_JFFS2_ZLIB=y
-CONFIG_JFFS2_RTIME=y
-# CONFIG_JFFS2_RUBIN is not set
# CONFIG_CRAMFS is not set
# CONFIG_VXFS_FS is not set
# CONFIG_HPFS_FS is not set
CONFIG_LOCKD=y
# CONFIG_EXPORTFS is not set
CONFIG_SUNRPC=y
-# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_SUNRPC_GSS is not set
# CONFIG_SMB_FS is not set
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
+# CONFIG_INTERMEZZO_FS is not set
# CONFIG_AFS_FS is not set
#
CONFIG_SOLARIS_X86_PARTITION=y
# CONFIG_UNIXWARE_DISKLABEL is not set
# CONFIG_LDM_PARTITION is not set
+# CONFIG_NEC98_PARTITION is not set
# CONFIG_SGI_PARTITION is not set
# CONFIG_ULTRIX_PARTITION is not set
# CONFIG_SUN_PARTITION is not set
# CONFIG_EFI_PARTITION is not set
+CONFIG_NLS=y
#
# Native Language Support
#
-CONFIG_NLS=y
CONFIG_NLS_DEFAULT="iso8859-1"
# CONFIG_NLS_CODEPAGE_437 is not set
# CONFIG_NLS_CODEPAGE_737 is not set
# CONFIG_NLS_ISO8859_8 is not set
# CONFIG_NLS_CODEPAGE_1250 is not set
# CONFIG_NLS_CODEPAGE_1251 is not set
-# CONFIG_NLS_ASCII is not set
# CONFIG_NLS_ISO8859_1 is not set
# CONFIG_NLS_ISO8859_2 is not set
# CONFIG_NLS_ISO8859_3 is not set
# CONFIG_NLS_KOI8_U is not set
# CONFIG_NLS_UTF8 is not set
-#
-# Profiling support
-#
-# CONFIG_PROFILING is not set
-
#
# Graphics support
#
# CONFIG_LOGO is not set
#
-# Sound
+# Misc devices
#
-# CONFIG_SOUND is not set
#
-# Misc devices
+# Multimedia Capabilities Port drivers
#
+# CONFIG_MCP is not set
#
-# USB support
+# Console Switches
#
+# CONFIG_SWITCHES is not set
#
-# USB Gadget Support
+# USB support
#
# CONFIG_USB_GADGET is not set
# CONFIG_DEBUG_BUGVERBOSE is not set
# CONFIG_DEBUG_ERRORS is not set
CONFIG_DEBUG_LL=y
+CONFIG_DEBUG_LL_PRINTK=y
# CONFIG_DEBUG_ICEDCC is not set
CONFIG_DEBUG_S3C2410_PORT=y
CONFIG_DEBUG_S3C2410_UART=0
#
# Library routines
#
-# CONFIG_CRC_CCITT is not set
CONFIG_CRC32=y
-# CONFIG_LIBCRC32C is not set
CONFIG_ZLIB_INFLATE=y
CONFIG_ZLIB_DEFLATE=y
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
return err;
}
-static ssize_t apm_read(struct file *fp, char __user *buf, size_t count, loff_t *ppos)
+static ssize_t apm_read(struct file *fp, char *buf, size_t count, loff_t *ppos)
{
struct apm_user *as = fp->private_data;
apm_event_t event;
bl do_DataAbort
disable_irq r0
ldr r0, [sp, #S_PSR]
- msr spsr_cxsf, r0
+ msr spsr, r0
ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
.align 5
strne r0, [r0, -r0] @ bug()
#endif
ldr r0, [sp, #S_PSR] @ irqs are already disabled
- msr spsr_cxsf, r0
+ msr spsr, r0
ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
.ltorg
1: disable_irq r0
ldr lr, [sp, #S_PSR] @ Get SVC cpsr
- msr spsr_cxsf, lr
+ msr spsr, lr
ldmia sp, {r0 - pc}^ @ Restore SVC registers
.align 5
bl do_PrefetchAbort @ call abort handler
disable_irq r0
ldr r0, [sp, #S_PSR]
- msr spsr_cxsf, r0
+ msr spsr, r0
ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
.align 5
mrs r13, cpsr
bic r13, r13, #MODE_MASK
orr r13, r13, #MODE_SVC
- msr spsr_cxsf, r13 @ switch to SVC_32 mode
+ msr spsr, r13 @ switch to SVC_32 mode
and lr, lr, #15
ldr lr, [pc, lr, lsl #2]
mrs r13, cpsr
bic r13, r13, #MODE_MASK
orr r13, r13, #MODE_SVC
- msr spsr_cxsf, r13 @ switch to SVC_32 mode
+ msr spsr, r13 @ switch to SVC_32 mode
and lr, lr, #15
ldr lr, [pc, lr, lsl #2]
mrs r13, cpsr
bic r13, r13, #MODE_MASK
orr r13, r13, #MODE_SVC
- msr spsr_cxsf, r13 @ switch to SVC_32 mode
+ msr spsr, r13 @ switch to SVC_32 mode
ands lr, lr, #15
ldr lr, [pc, lr, lsl #2]
mrs r13, cpsr
bic r13, r13, #MODE_MASK
orr r13, r13, #MODE_SVC
- msr spsr_cxsf, r13 @ switch to SVC_32 mode
+ msr spsr, r13 @ switch to SVC_32 mode
and lr, lr, #15
ldr lr, [pc, lr, lsl #2]
ldr r0, [sp, #S_PSR] @ Get calling cpsr
sub lr, lr, #4
str lr, [r8]
- msr spsr_cxsf, r0
+ msr spsr, r0
ldmia sp, {r0 - lr}^ @ Get calling r0 - lr
mov r0, r0
ldr lr, [sp, #S_PC] @ Get PC
ldr r1, [sp, #S_PSR] @ Get calling cpsr
disable_irq ip @ disable IRQs
ldr lr, [sp, #S_PC]! @ Get PC
- msr spsr_cxsf, r1 @ save in spsr_svc
+ msr spsr, r1 @ save in spsr_svc
ldmdb sp, {r0 - lr}^ @ Get calling r0 - lr
mov r0, r0
add sp, sp, #S_FRAME_SIZE - S_PC
.macro fast_restore_user_regs
ldr r1, [sp, #S_OFF + S_PSR] @ get calling cpsr
ldr lr, [sp, #S_OFF + S_PC]! @ get pc
- msr spsr_cxsf, r1 @ save in spsr_svc
+ msr spsr, r1 @ save in spsr_svc
ldmdb sp, {r1 - lr}^ @ get calling r1 - lr
mov r0, r0
add sp, sp, #S_FRAME_SIZE - S_PC
.macro slow_restore_user_regs
ldr r1, [sp, #S_PSR] @ get calling cpsr
ldr lr, [sp, #S_PC]! @ get pc
- msr spsr_cxsf, r1 @ save in spsr_svc
+ msr spsr, r1 @ save in spsr_svc
ldmdb sp, {r0 - lr}^ @ get calling r1 - lr
mov r0, r0
add sp, sp, #S_FRAME_SIZE - S_PC
* Copy data from IO memory space to "real" memory space.
* This needs to be optimized.
*/
-void _memcpy_fromio(void *to, unsigned long from, size_t count)
+void _memcpy_fromio(void * to, unsigned long from, size_t count)
{
- unsigned char *t = to;
while (count) {
count--;
- *t = readb(from);
- t++;
+ *(char *) to = readb(from);
+ ((char *) to)++;
from++;
}
}
* Copy data from "real" memory space to IO memory space.
* This needs to be optimized.
*/
-void _memcpy_toio(unsigned long to, const void *from, size_t count)
+void _memcpy_toio(unsigned long to, const void * from, size_t count)
{
- const unsigned char *f = from;
while (count) {
count--;
- writeb(*f, to);
- f++;
+ writeb(*(char *) from, to);
+ ((char *) from)++;
to++;
}
}
info.si_signo = SIGTRAP;
info.si_errno = 0;
info.si_code = TRAP_BRKPT;
- info.si_addr = (void __user *)instruction_pointer(regs);
+ info.si_addr = (void *)instruction_pointer(regs);
force_sig_info(SIGTRAP, &info, tsk);
}
__put_user_error(NULL, &frame->uc.uc_link, err);
memset(&stack, 0, sizeof(stack));
- stack.ss_sp = (void __user *)current->sas_ss_sp;
+ stack.ss_sp = (void *)current->sas_ss_sp;
stack.ss_flags = sas_ss_flags(regs->ARM_sp);
stack.ss_size = current->sas_ss_size;
err |= __copy_to_user(&frame->uc.uc_stack, &stack, sizeof(stack));
union semun fourth;
if (!ptr)
return -EINVAL;
- if (get_user(fourth.__pad, (void __user * __user *) ptr))
+ if (get_user(fourth.__pad, (void __user **) ptr))
return -EFAULT;
return sys_semctl (first, second, third, fourth);
}
unsigned int instr;
struct undef_hook *hook;
siginfo_t info;
- void __user *pc;
+ void *pc;
/*
* According to the ARM ARM, PC is 2 or 4 bytes ahead,
*/
regs->ARM_pc -= correction;
- pc = (void __user *)instruction_pointer(regs);
+ pc = (void *)instruction_pointer(regs);
if (thumb_mode(regs)) {
- get_user(instr, (u16 __user *)pc);
+ get_user(instr, (u16 *)pc);
} else {
- get_user(instr, (u32 __user *)pc);
+ get_user(instr, (u32 *)pc);
}
spin_lock_irq(&undef_lock);
info.si_signo = SIGILL;
info.si_errno = 0;
info.si_code = ILL_ILLTRP;
- info.si_addr = (void __user *)instruction_pointer(regs) -
+ info.si_addr = (void *)instruction_pointer(regs) -
(thumb_mode(regs) ? 2 : 4);
force_sig_info(SIGILL, &info, current);
info.si_signo = SIGILL;
info.si_errno = 0;
info.si_code = ILL_ILLTRP;
- info.si_addr = (void __user *)instruction_pointer(regs) -
+ info.si_addr = (void *)instruction_pointer(regs) -
(thumb_mode(regs) ? 2 : 4);
force_sig_info(SIGILL, &info, current);
info.si_signo = SIGILL;
info.si_errno = 0;
info.si_code = ILL_ILLOPC;
- info.si_addr = (void __user *)addr;
+ info.si_addr = (void *)addr;
force_sig_info(SIGILL, &info, current);
die_if_kernel("unknown data abort code", regs, instr);
#define CPSR2SPSR(rt) \
mrs rt, cpsr; \
- msr spsr_cxsf, rt
+ msr spsr, rt
@ Purpose: call an expansion card loader to read bytes.
@ Proto : char read_loader(int offset, char *card_base, char *loader);
EXPORT_SYMBOL(pci_set_dma_mask);
EXPORT_SYMBOL(pci_dac_set_dma_mask);
EXPORT_SYMBOL(pci_set_consistent_dma_mask);
-EXPORT_SYMBOL(ixp4xx_pci_read);
-EXPORT_SYMBOL(ixp4xx_pci_write);
.flags = IORESOURCE_MEM,
};
-static struct platform_device coyote_flash = {
+static struct platform_device coyote_flash_device = {
.name = "IXP4XX-Flash",
.id = 0,
.dev = {
.resource = &coyote_flash_resource,
};
-static struct platform_device *coyote_devices[] __initdata = {
- &coyote_flash
-};
-
static void __init coyote_init(void)
{
- platform_add_devices(&coyote_devices, ARRAY_SIZE(coyote_devices));
+ platform_add_device(&coyote_flash_device);
}
MACHINE_START(ADI_COYOTE, "ADI Engineering IXP4XX Coyote Development Platform")
.flags = IORESOURCE_MEM,
};
-static struct platform_device ixdp425_flash = {
+static struct platform_device ixdp425_flash_device = {
.name = "IXP4XX-Flash",
.id = 0,
.dev = {
.num_resources = 0
};
-static struct platform_device *ixdp425_devices[] __initdata = {
- &ixdp425_i2c_controller,
- &ixdp425_flash
-};
-
static void __init ixdp425_init(void)
{
- platform_add_devices(&ixdp425_devices, ARRAY_SIZE(ixdp425_devices));
+ platform_add_device(&ixdp425_flash_device);
+ platform_add_device(&ixdp425_i2c_controller);
}
MACHINE_START(IXDP425, "Intel IXDP425 Development Platform")
.flags = IORESOURCE_MEM,
};
-static struct platform_device prpmc1100_flash = {
+static struct platform_device prpmc1100_flash_device = {
.name = "IXP4XX-Flash",
.id = 0,
.dev = {
.resource = &prpmc1100_flash_resource,
};
-static struct platform_device *prpmc1100_devices[] __initdata = {
- &prpmc1100_flash
-};
-
static void __init prpmc1100_init(void)
{
- platform_add_devices(&prpmc1100_devices, ARRAY_SIZE(prpmc1100_devices));
+ platform_add_device(&prpmc1100_flash_device);
}
MACHINE_START(PRPMC1100, "Motorola PrPMC1100")
# Object file lists.
-obj-y := time.o
+obj-y := fiq.o time.o
+# generic.o
obj-$(CONFIG_MACH_KEV7A400) += arch-kev7a400.o irq-lh7a400.o
-obj-$(CONFIG_MACH_LPD7A400) += arch-lpd7a40x.o irq-lh7a400.o
-obj-$(CONFIG_MACH_LPD7A404) += arch-lpd7a40x.o irq-lh7a404.o
+obj-$(CONFIG_MACH_LPD7A400) += arch-lpd7a40x.o ide-lpd7a40x.o irq-lh7a400.o
+obj-$(CONFIG_MACH_LPD7A404) += arch-lpd7a40x.o ide-lpd7a40x.o irq-lh7a404.o
obj-m :=
obj-n :=
#ifdef CONFIG_MACH_LPD7A400
extern void lh7a400_init_irq (void);
-extern void lh7a40x_init_time (void);
MACHINE_START (LPD7A400, "Logic Product Development LPD7A400-10")
MAINTAINER ("Marc Singer")
BOOT_PARAMS (0xc0000100)
MAPIO (lpd7a400_map_io)
INITIRQ (lh7a400_init_irq)
- INITTIME (lh7a40x_init_time)
INIT_MACHINE (lpd7a40x_init)
MACHINE_END
--- /dev/null
+/*
+ * linux/arch/arm/lib/lh7a400-fiqhandler.S
+ * Copyright (C) 2002, Lineo, Inc.
+ * based on linux/arch/arm/lib/floppydma.S, which is
+ * Copyright (C) 1995, 1996 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+ .text
+
+ .global fiqhandler_end
+
+ @ register usage:
+ @ r8 &interrupt controller registers
+ @ r9 &gpio registers
+ @ r11 work
+ @ r12 work
+
+ENTRY(fiqhandler)
+
+ @ read the status register to find out which FIQ this is
+
+ ldr r12, [r8] @ intc->status
+ and r12, r12, #0xf @ only interested in low-order 4 bits
+
+ @ translate FIQ 0:3 to IRQ 23:26
+ @ disable this FIQ and enable the corresponding IRQ
+
+ str r12, [r8, #0xc] @ disable this FIQ
+ mov r12, r12, lsl #23 @ get the corresopnding IRQ bit
+ str r12, [r8, #0x8] @ enable that IRQ
+
+ subs pc, lr, #4
+fiqhandler_end:
+
--- /dev/null
+/* arch/arm/mach-lh7a40x/ide-lpd7a40x.c
+ *
+ * Copyright (C) 2004 Logic Product Development
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ */
+
+
+#include <linux/config.h>
+#include <linux/ide.h>
+
+#include <asm/io.h>
+
+#define IOBARRIER_READ readl (IOBARRIER_VIRT)
+
+static u8 lpd7a40x_ide_inb (unsigned long port)
+{
+ u16 v = (u16) readw (port & ~0x1);
+ IOBARRIER_READ;
+ if (port & 0x1)
+ v >>= 8;
+ return v & 0xff;
+}
+
+static u16 lpd7a40x_ide_inw (unsigned long port)
+{
+ u16 v = (u16) readw (port);
+ IOBARRIER_READ;
+ return v;
+}
+
+static void lpd7a40x_ide_insw (unsigned long port, void *addr, u32 count)
+{
+ while (count--) {
+ *((u16*) addr)++ = (u16) readw (port);
+ IOBARRIER_READ;
+ }
+}
+
+static u32 lpd7a40x_ide_inl (unsigned long port)
+{
+ u32 v = (u16) readw (port);
+ IOBARRIER_READ;
+ v |= (u16) readw (port + 2);
+ IOBARRIER_READ;
+
+ return v;
+}
+
+static void lpd7a40x_ide_insl (unsigned long port, void *addr, u32 count)
+{
+ while (count--) {
+ *((u16*) addr)++ = (u16) readw (port);
+ IOBARRIER_READ;
+ *((u16*) addr)++ = (u16) readw (port + 2);
+ IOBARRIER_READ;
+ }
+}
+
+/* lpd7a40x_ide_outb -- this function is complicated by the fact that
+ * the user wants to be able to do byte IO and the hardware cannot.
+ * In order to write the high byte, we need to write a short. So, we
+ * read before writing in order to maintain the register values that
+ * shouldn't change. This isn't a good idea for the data IO registers
+ * since reading from them will not return the current value. We
+ * expect that this function handles the control register adequately.
+*/
+
+static void lpd7a40x_ide_outb (u8 valueUser, unsigned long port)
+{
+ /* Block writes to SELECT register. Draconian, but the only
+ * way to cope with this hardware configuration without
+ * modifying the SELECT_DRIVE call in the ide driver. */
+ if ((port & 0xf) == 0x6)
+ return;
+
+ if (port & 0x1) { /* Perform read before write. Only
+ * the COMMAND register needs
+ * this. */
+ u16 value = (u16) readw (port & ~0x1);
+ IOBARRIER_READ;
+ value = (value & 0x00ff) | (valueUser << 8);
+ writew (value, port & ~0x1);
+ IOBARRIER_READ;
+ }
+ else { /* Allow low-byte writes which seem to
+ * be OK. */
+ writeb (valueUser, port);
+ IOBARRIER_READ;
+ }
+}
+
+static void lpd7a40x_ide_outbsync (ide_drive_t *drive, u8 value,
+ unsigned long port)
+{
+ lpd7a40x_ide_outb (value, port);
+}
+
+static void lpd7a40x_ide_outw (u16 value, unsigned long port)
+{
+ writew (value, port);
+ IOBARRIER_READ;
+}
+
+static void lpd7a40x_ide_outsw (unsigned long port, void *addr, u32 count)
+{
+ while (count-- > 0) {
+ writew (*((u16*) addr)++, port);
+ IOBARRIER_READ;
+ }
+}
+
+static void lpd7a40x_ide_outl (u32 value, unsigned long port)
+{
+ writel (value, port);
+ IOBARRIER_READ;
+}
+
+static void lpd7a40x_ide_outsl (unsigned long port, void *addr, u32 count)
+{
+ while (count-- > 0) {
+ writel (*((u32*) addr)++, port);
+ IOBARRIER_READ;
+ }
+}
+
+void lpd7a40x_SELECT_DRIVE (ide_drive_t *drive)
+{
+ unsigned jifStart = jiffies;
+#define WAIT_TIME (30*HZ/1000)
+
+ /* Check for readiness. */
+ while ((HWIF(drive)->INB(IDE_STATUS_REG) & 0x40) == 0)
+ if (jifStart <= jiffies + WAIT_TIME)
+ return;
+
+ /* Only allow one drive.
+ For more information, see Documentation/arm/Sharp-LH/ */
+ if (drive->select.all & (1<<4))
+ return;
+
+ /* OUTW so that the IDLE_IMMEDIATE (and not NOP) command is sent. */
+ HWIF(drive)->OUTW(drive->select.all | 0xe100, IDE_SELECT_REG);
+}
+
+void lpd7a40x_hwif_ioops (ide_hwif_t *hwif)
+{
+ hwif->mmio = 2; /* Just for show */
+ hwif->irq = IDE_NO_IRQ; /* Stop this probing */
+
+ hwif->OUTB = lpd7a40x_ide_outb;
+ hwif->OUTBSYNC = lpd7a40x_ide_outbsync;
+ hwif->OUTW = lpd7a40x_ide_outw;
+ hwif->OUTL = lpd7a40x_ide_outl;
+ hwif->OUTSW = lpd7a40x_ide_outsw;
+ hwif->OUTSL = lpd7a40x_ide_outsl;
+ hwif->INB = lpd7a40x_ide_inb;
+ hwif->INW = lpd7a40x_ide_inw;
+ hwif->INL = lpd7a40x_ide_inl;
+ hwif->INSW = lpd7a40x_ide_insw;
+ hwif->INSL = lpd7a40x_ide_insl;
+ hwif->selectproc = lpd7a40x_SELECT_DRIVE;
+}
<http://www.fsforth.de>
config MACH_VR1000
- bool "Thorcom VR1000"
+ bool "Simtec VR1000"
help
- Say Y here if you are using the Thorcom VR1000 board.
-
- This linux port is currently being maintained by Simtec, on behalf
- of Thorcom. Any queries, please contact Thorcom first.
+ Say Y here if you are using the Simtec VR1000 board.
endmenu
# Object file lists.
-obj-y := s3c2410.o irq.o time.o gpio.o
+obj-y := s3c2410.o irq.o time.o
obj-m :=
obj-n :=
obj- :=
+++ /dev/null
-/* linux/arch/arm/mach-s3c2410/gpio.c
- *
- * Copyright (c) 2004 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- *
- * S3C2410 GPIO support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-
-#include <asm/hardware.h>
-#include <asm/irq.h>
-#include <asm/io.h>
-
-#include <asm/arch/regs-gpio.h>
-
-void s3c2410_gpio_cfgpin(unsigned int pin, unsigned int function)
-{
- unsigned long base = S3C2410_GPIO_BASE(pin);
- unsigned long shift = 1;
- unsigned long mask = 3;
- unsigned long con;
- unsigned long flags;
-
- if (pin < S3C2410_GPIO_BANKB) {
- shift = 0;
- mask = 1;
- }
-
- mask <<= S3C2410_GPIO_OFFSET(pin);
-
- local_irq_save(flags);
-
- con = __raw_readl(base + 0x00);
-
- con &= mask << shift;
- con |= function;
-
- __raw_writel(con, base + 0x00);
-
- local_irq_restore(flags);
-}
-
-void s3c2410_gpio_pullup(unsigned int pin, unsigned int to)
-{
- unsigned long base = S3C2410_GPIO_BASE(pin);
- unsigned long offs = S3C2410_GPIO_OFFSET(pin);
- unsigned long flags;
- unsigned long up;
-
- if (pin < S3C2410_GPIO_BANKB)
- return;
-
- local_irq_save(flags);
-
- up = __raw_readl(base + 0x08);
- up &= 1 << offs;
- up |= to << offs;
- __raw_writel(up, base + 0x08);
-
- local_irq_restore(flags);
-}
-
-void s3c2410_gpio_setpin(unsigned int pin, unsigned int to)
-{
- unsigned long base = S3C2410_GPIO_BASE(pin);
- unsigned long offs = S3C2410_GPIO_OFFSET(pin);
- unsigned long flags;
- unsigned long dat;
-
- local_irq_save(flags);
-
- dat = __raw_readl(base + 0x04);
- dat &= 1 << offs;
- dat |= to << offs;
- __raw_writel(dat, base + 0x04);
-
- local_irq_restore(flags);
-}
[0] = {
.hwport = 0,
.flags = 0,
- .clock = &s3c2410_pclk,
+ .clock = &s3c2410_hclk,
.ucon = 0x3c5,
.ulcon = 0x03,
.ufcon = 0x51,
[1] = {
.hwport = 1,
.flags = 0,
- .clock = &s3c2410_pclk,
+ .clock = &s3c2410_hclk,
.ucon = 0x245,
.ulcon = 0x03,
.ufcon = 0x00,
[2] = {
.hwport = 2,
.flags = 0,
- .clock = &s3c2410_pclk,
+ .clock = &s3c2410_hclk,
.ucon = 0x3c5,
.ulcon = 0x43,
.ufcon = 0x51,
void __init smdk2410_init_time(void)
{
- s3c2410_init_time();
+ s3c2401_init_time();
}
MACHINE_START(SMDK2410, "SMDK2410") /* @TODO: request a new identifier and switch
* published by the Free Software Foundation.
*
* Modifications:
- * 06-Aug-2004 BJD Fixed call to time initialisation
* 12-Jul-2004 BJD Renamed machine
* 16-May-2003 BJD Created initial version
* 16-Aug-2003 BJD Fixed header files and copyright, added URL
void __init vr1000_init_time(void)
{
- s3c2410_init_time();
+ s3c2401_init_time();
}
MACHINE_START(VR1000, "Thorcom-VR1000")
extern void s3c2410_init_irq(void);
-extern void s3c2410_init_time(void);
+extern s3c2410_init_time(void);
MAPIO(collie_map_io)
INITIRQ(sa1100_init_irq)
INIT_MACHINE(collie_init)
- INITTIME(sa1100_init_time)
MACHINE_END
static int __init blockops_check(void)
{
register unsigned int err asm("r4") = 0;
- unsigned int err_pos = 1;
unsigned int cache_type;
int i;
unregister_undef_hook(&blockops_hook);
- for (i = 0; i < ARRAY_SIZE(func); i++, err_pos <<= 1)
- printk("%30s: %ssupported\n", func[i], err & err_pos ? "not " : "");
+ for (i = 0; i < ARRAY_SIZE(func); i++, err >>= 1)
+ printk("%30s: %ssupported\n", func[i], err & 1 ? "not " : "");
if ((err & 8) == 0) {
printk(" --> Using %s block cache invalidate\n",
/* We must not map this if we have highmem enabled */
pte = pte_offset_map(pmd, addr);
printk(", *pte=%08lx", pte_val(*pte));
+#ifdef CONFIG_CPU_32
printk(", *ppte=%08lx", pte_val(pte[-PTRS_PER_PTE]));
+#endif
pte_unmap(pte);
#endif
} while(0);
si.si_signo = SIGSEGV;
si.si_errno = 0;
si.si_code = code;
- si.si_addr = (void __user *)addr;
+ si.si_addr = (void *)addr;
force_sig_info(SIGSEGV, &si, tsk);
}
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
-#define TABLE_SIZE (2 * PTRS_PER_PTE * sizeof(pte_t))
+#ifdef CONFIG_CPU_32
+#define TABLE_OFFSET (PTRS_PER_PTE)
+#else
+#define TABLE_OFFSET 0
+#endif
+
+#define TABLE_SIZE ((TABLE_OFFSET + PTRS_PER_PTE) * sizeof(pte_t))
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
*/
reserve_bootmem_node(pgdat, __pa(&_stext), &_end - &_stext);
+#ifdef CONFIG_CPU_32
/*
* Reserve the page tables. These are already in use,
* and can only be in node 0.
*/
reserve_bootmem_node(pgdat, __pa(swapper_pg_dir),
PTRS_PER_PGD * sizeof(pgd_t));
-
+#endif
/*
* And don't forget to reserve the allocator bitmap,
* which will be freed later.
*/
arch_adjust_zones(node, zone_size, zhole_size);
- free_area_init_node(node, pgdat, NULL, zone_size,
+ free_area_init_node(node, pgdat, 0, zone_size,
bdata->node_boot_start >> PAGE_SHIFT, zhole_size);
}
* stack+task struct. Use the same method as 'current' uses to
* reach them.
*/
-register unsigned long *user_registers asm("sl");
+register unsigned int *user_registers asm("sl");
#define GET_USERREG() (user_registers)
#include <asm/uaccess.h>
-static inline void loadSingle(const unsigned int Fn, const unsigned int __user *pMem)
+static inline void loadSingle(const unsigned int Fn, const unsigned int *pMem)
{
FPA11 *fpa11 = GET_FPA11();
fpa11->fType[Fn] = typeSingle;
get_user(fpa11->fpreg[Fn].fSingle, pMem);
}
-static inline void loadDouble(const unsigned int Fn, const unsigned int __user *pMem)
+static inline void loadDouble(const unsigned int Fn, const unsigned int *pMem)
{
FPA11 *fpa11 = GET_FPA11();
unsigned int *p;
}
#ifdef CONFIG_FPE_NWFPE_XP
-static inline void loadExtended(const unsigned int Fn, const unsigned int __user *pMem)
+static inline void loadExtended(const unsigned int Fn, const unsigned int *pMem)
{
FPA11 *fpa11 = GET_FPA11();
unsigned int *p;
}
#endif
-static inline void loadMultiple(const unsigned int Fn, const unsigned int __user *pMem)
+static inline void loadMultiple(const unsigned int Fn, const unsigned int *pMem)
{
FPA11 *fpa11 = GET_FPA11();
register unsigned int *p;
}
}
-static inline void storeSingle(const unsigned int Fn, unsigned int __user *pMem)
+static inline void storeSingle(const unsigned int Fn, unsigned int *pMem)
{
FPA11 *fpa11 = GET_FPA11();
union {
put_user(val.i[0], pMem);
}
-static inline void storeDouble(const unsigned int Fn, unsigned int __user *pMem)
+static inline void storeDouble(const unsigned int Fn, unsigned int *pMem)
{
FPA11 *fpa11 = GET_FPA11();
union {
}
#ifdef CONFIG_FPE_NWFPE_XP
-static inline void storeExtended(const unsigned int Fn, unsigned int __user *pMem)
+static inline void storeExtended(const unsigned int Fn, unsigned int *pMem)
{
FPA11 *fpa11 = GET_FPA11();
union {
}
#endif
-static inline void storeMultiple(const unsigned int Fn, unsigned int __user *pMem)
+static inline void storeMultiple(const unsigned int Fn, unsigned int *pMem)
{
FPA11 *fpa11 = GET_FPA11();
register unsigned int nType, *p;
unsigned int PerformLDF(const unsigned int opcode)
{
- unsigned int __user *pBase, *pAddress, *pFinal;
- unsigned int nRc = 1, write_back = WRITE_BACK(opcode);
+ unsigned int *pBase, *pAddress, *pFinal, nRc = 1,
+ write_back = WRITE_BACK(opcode);
- pBase = (unsigned int __user *) readRegister(getRn(opcode));
+ pBase = (unsigned int *) readRegister(getRn(opcode));
if (REG_PC == getRn(opcode)) {
pBase += 2;
write_back = 0;
}
if (write_back)
- writeRegister(getRn(opcode), (unsigned long) pFinal);
+ writeRegister(getRn(opcode), (unsigned int) pFinal);
return nRc;
}
unsigned int PerformSTF(const unsigned int opcode)
{
- unsigned int __user *pBase, *pAddress, *pFinal;
- unsigned int nRc = 1, write_back = WRITE_BACK(opcode);
+ unsigned int *pBase, *pAddress, *pFinal, nRc = 1,
+ write_back = WRITE_BACK(opcode);
SetRoundingMode(ROUND_TO_NEAREST);
- pBase = (unsigned int __user *) readRegister(getRn(opcode));
+ pBase = (unsigned int *) readRegister(getRn(opcode));
if (REG_PC == getRn(opcode)) {
pBase += 2;
write_back = 0;
}
if (write_back)
- writeRegister(getRn(opcode), (unsigned long) pFinal);
+ writeRegister(getRn(opcode), (unsigned int) pFinal);
return nRc;
}
unsigned int PerformLFM(const unsigned int opcode)
{
- unsigned int __user *pBase, *pAddress, *pFinal;
- unsigned int i, Fd, write_back = WRITE_BACK(opcode);
+ unsigned int i, Fd, *pBase, *pAddress, *pFinal,
+ write_back = WRITE_BACK(opcode);
- pBase = (unsigned int __user *) readRegister(getRn(opcode));
+ pBase = (unsigned int *) readRegister(getRn(opcode));
if (REG_PC == getRn(opcode)) {
pBase += 2;
write_back = 0;
}
if (write_back)
- writeRegister(getRn(opcode), (unsigned long) pFinal);
+ writeRegister(getRn(opcode), (unsigned int) pFinal);
return 1;
}
unsigned int PerformSFM(const unsigned int opcode)
{
- unsigned int __user *pBase, *pAddress, *pFinal;
- unsigned int i, Fd, write_back = WRITE_BACK(opcode);
+ unsigned int i, Fd, *pBase, *pAddress, *pFinal,
+ write_back = WRITE_BACK(opcode);
- pBase = (unsigned int __user *) readRegister(getRn(opcode));
+ pBase = (unsigned int *) readRegister(getRn(opcode));
if (REG_PC == getRn(opcode)) {
pBase += 2;
write_back = 0;
}
if (write_back)
- writeRegister(getRn(opcode), (unsigned long) pFinal);
+ writeRegister(getRn(opcode), (unsigned int) pFinal);
return 1;
}
#ifdef CONFIG_DEBUG_USER
printk(KERN_DEBUG
- "NWFPE: %s[%d] takes exception %08x at %p from %08lx\n",
+ "NWFPE: %s[%d] takes exception %08x at %p from %08x\n",
current->comm, current->pid, flags,
__builtin_return_address(0), GET_USERREG()[15]);
#endif
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-static inline unsigned long readRegister(const unsigned int nReg)
+extern __inline__
+unsigned int readRegister(const unsigned int nReg)
{
/* Note: The CPU thinks it has dealt with the current instruction.
As a result the program counter has been advanced to the next
for this in this routine. LDF/STF instructions with Rn = PC
depend on the PC being correct, as they use PC+8 in their
address calculations. */
- unsigned long *userRegisters = GET_USERREG();
+ unsigned int *userRegisters = GET_USERREG();
unsigned int val = userRegisters[nReg];
if (REG_PC == nReg)
val -= 4;
return val;
}
-static inline void
-writeRegister(const unsigned int nReg, const unsigned long val)
+extern __inline__
+void writeRegister(const unsigned int nReg, const unsigned int val)
{
- unsigned long *userRegisters = GET_USERREG();
+ unsigned int *userRegisters = GET_USERREG();
userRegisters[nReg] = val;
}
-static inline unsigned long readCPSR(void)
+extern __inline__
+unsigned int readCPSR(void)
{
return (readRegister(REG_CPSR));
}
-static inline void writeCPSR(const unsigned long val)
+extern __inline__
+void writeCPSR(const unsigned int val)
{
writeRegister(REG_CPSR, val);
}
-static inline unsigned long readConditionCodes(void)
+extern __inline__
+unsigned int readConditionCodes(void)
{
#ifdef __FPEM_TEST__
return (0);
#endif
}
-static inline void writeConditionCodes(const unsigned long val)
+extern __inline__
+void writeConditionCodes(const unsigned int val)
{
- unsigned long *userRegisters = GET_USERREG();
- unsigned long rval;
+ unsigned int *userRegisters = GET_USERREG();
+ unsigned int rval;
/*
* Operate directly on userRegisters since
* the CPSR may be the PC register itself.
rval = userRegisters[REG_CPSR] & ~CC_MASK;
userRegisters[REG_CPSR] = rval | (val & CC_MASK);
}
+
+extern __inline__
+unsigned int readMemoryInt(unsigned int *pMem)
+{
+ return *pMem;
+}
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_LAPB is not set
# CONFIG_NET_DIVERT is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
menu "Kernel hacking"
-config CRASH_DUMP
- tristate "Crash dump support (EXPERIMENTAL)"
- depends on EXPERIMENTAL
- default n
- ---help---
- Say Y here to enable saving an image of system memory when a panic
- or other error occurs. Dumps can also be forced with the SysRq+d
- key if MAGIC_SYSRQ is enabled.
-
-config CRASH_DUMP_BLOCKDEV
- tristate "Crash dump block device driver"
- depends on CRASH_DUMP
- help
- Say Y to allow saving crash dumps directly to a disk device.
-
-config CRASH_DUMP_NETDEV
- tristate "Crash dump network device driver"
- depends on CRASH_DUMP
- help
- Say Y to allow saving crash dumps over a network device.
-
-config CRASH_DUMP_MEMDEV
- bool "Crash dump staged memory driver"
- depends on CRASH_DUMP
- help
- Say Y to allow intermediate saving crash dumps in spare
- memory pages which would then be written out to disk
- later.
-
-config CRASH_DUMP_SOFTBOOT
- bool "Save crash dump across a soft reboot"
- depends on CRASH_DUMP_MEMDEV
- help
- Say Y to allow a crash dump to be preserved in memory
- pages across a soft reboot and written out to disk
- thereafter. For this to work, CRASH_DUMP must be
- configured as part of the kernel (not as a module).
-
-config CRASH_DUMP_COMPRESS_RLE
- tristate "Crash dump RLE compression"
- depends on CRASH_DUMP
- help
- Say Y to allow saving dumps with Run Length Encoding compression.
-
-config CRASH_DUMP_COMPRESS_GZIP
- tristate "Crash dump GZIP compression"
- select ZLIB_INFLATE
- select ZLIB_DEFLATE
- depends on CRASH_DUMP
- help
- Say Y to allow saving dumps with Gnu Zip compression.
-
config DEBUG_KERNEL
bool "Kernel debugging"
help
libs-y += arch/i386/lib/
core-y += arch/i386/kernel/ \
arch/i386/mm/ \
- arch/i386/$(mcore-y)/ \
- arch/i386/crypto/
+ arch/i386/$(mcore-y)/
drivers-$(CONFIG_MATH_EMULATION) += arch/i386/math-emu/
drivers-$(CONFIG_PCI) += arch/i386/pci/
# must be linked after kernel/
install: $(BOOTIMAGE)
sh $(srctree)/$(src)/install.sh $(KERNELRELEASE) $< System.map "$(INSTALL_PATH)"
- if [ -f init/kerntypes.o ]; then cp init/kerntypes.o $(INSTALL_PATH)/Kerntypes; fi
--- /dev/null
+#
+# linux/arch/i386/boot/compressed/Makefile
+#
+# create a compressed vmlinux image from the original vmlinux
+#
+
+targets := vmlinux vmlinux.bin vmlinux.bin.gz head.o misc.o piggy.o
+EXTRA_AFLAGS := -traditional
+
+LDFLAGS_vmlinux := -Ttext $(IMAGE_OFFSET) -e startup_32
+
+$(obj)/vmlinux: $(obj)/head.o $(obj)/misc.o $(obj)/piggy.o FORCE
+ $(call if_changed,ld)
+ @:
+
+$(obj)/vmlinux.bin: vmlinux FORCE
+ $(call if_changed,objcopy)
+
+$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
+ $(call if_changed,gzip)
+
+LDFLAGS_piggy.o := -r --format binary --oformat elf32-i386 -T
+
+$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.gz FORCE
+ $(call if_changed,ld)
--- /dev/null
+/*
+ * linux/boot/head.S
+ *
+ * Copyright (C) 1991, 1992, 1993 Linus Torvalds
+ */
+
+/*
+ * head.S contains the 32-bit startup code.
+ *
+ * NOTE!!! Startup happens at absolute address 0x00001000, which is also where
+ * the page directory will exist. The startup code will be overwritten by
+ * the page directory. [According to comments etc elsewhere on a compressed
+ * kernel it will end up at 0x1000 + 1Mb I hope so as I assume this. - AC]
+ *
+ * Page 0 is deliberately kept safe, since System Management Mode code in
+ * laptops may need to access the BIOS data stored there. This is also
+ * useful for future device drivers that either access the BIOS via VM86
+ * mode.
+ */
+
+/*
+ * High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996
+ */
+.text
+
+#include <linux/linkage.h>
+#include <asm/segment.h>
+
+ .globl startup_32
+
+startup_32:
+ cld
+ cli
+ movl $(__BOOT_DS),%eax
+ movl %eax,%ds
+ movl %eax,%es
+ movl %eax,%fs
+ movl %eax,%gs
+
+ lss stack_start,%esp
+ xorl %eax,%eax
+1: incl %eax # check that A20 really IS enabled
+ movl %eax,0x000000 # loop forever if it isn't
+ cmpl %eax,0x100000
+ je 1b
+
+/*
+ * Initialize eflags. Some BIOS's leave bits like NT set. This would
+ * confuse the debugger if this code is traced.
+ * XXX - best to initialize before switching to protected mode.
+ */
+ pushl $0
+ popfl
+/*
+ * Clear BSS
+ */
+ xorl %eax,%eax
+ movl $_edata,%edi
+ movl $_end,%ecx
+ subl %edi,%ecx
+ cld
+ rep
+ stosb
+/*
+ * Do the decompression, and jump to the new kernel..
+ */
+ subl $16,%esp # place for structure on the stack
+ movl %esp,%eax
+ pushl %esi # real mode pointer as second arg
+ pushl %eax # address of structure as first arg
+ call decompress_kernel
+ orl %eax,%eax
+ jnz 3f
+ popl %esi # discard address
+ popl %esi # real mode pointer
+ xorl %ebx,%ebx
+ ljmp $(__BOOT_CS), $0x100000
+
+/*
+ * We come here, if we were loaded high.
+ * We need to move the move-in-place routine down to 0x1000
+ * and then start it with the buffer addresses in registers,
+ * which we got from the stack.
+ */
+3:
+ movl $move_routine_start,%esi
+ movl $0x1000,%edi
+ movl $move_routine_end,%ecx
+ subl %esi,%ecx
+ addl $3,%ecx
+ shrl $2,%ecx
+ cld
+ rep
+ movsl
+
+ popl %esi # discard the address
+ popl %ebx # real mode pointer
+ popl %esi # low_buffer_start
+ popl %ecx # lcount
+ popl %edx # high_buffer_start
+ popl %eax # hcount
+ movl $0x100000,%edi
+ cli # make sure we don't get interrupted
+ ljmp $(__BOOT_CS), $0x1000 # and jump to the move routine
+
+/*
+ * Routine (template) for moving the decompressed kernel in place,
+ * if we were high loaded. This _must_ PIC-code !
+ */
+move_routine_start:
+ movl %ecx,%ebp
+ shrl $2,%ecx
+ rep
+ movsl
+ movl %ebp,%ecx
+ andl $3,%ecx
+ rep
+ movsb
+ movl %edx,%esi
+ movl %eax,%ecx # NOTE: rep movsb won't move if %ecx == 0
+ addl $3,%ecx
+ shrl $2,%ecx
+ rep
+ movsl
+ movl %ebx,%esi # Restore setup pointer
+ xorl %ebx,%ebx
+ ljmp $(__BOOT_CS), $0x100000
+move_routine_end:
--- /dev/null
+/*
+ * misc.c
+ *
+ * This is a collection of several routines from gzip-1.0.3
+ * adapted for Linux.
+ *
+ * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
+ * puts by Nick Holloway 1993, better puts by Martin Mares 1995
+ * High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996
+ */
+
+#include <linux/linkage.h>
+#include <linux/vmalloc.h>
+#include <linux/tty.h>
+#include <asm/io.h>
+#ifdef STANDARD_MEMORY_BIOS_CALL
+#undef STANDARD_MEMORY_BIOS_CALL
+#endif
+
+/*
+ * gzip declarations
+ */
+
+#define OF(args) args
+#define STATIC static
+
+#undef memset
+#undef memcpy
+
+/*
+ * Why do we do this? Don't ask me..
+ *
+ * Incomprehensible are the ways of bootloaders.
+ */
+static void* memset(void *, int, size_t);
+static void* memcpy(void *, __const void *, size_t);
+#define memzero(s, n) memset ((s), 0, (n))
+
+typedef unsigned char uch;
+typedef unsigned short ush;
+typedef unsigned long ulg;
+
+#define WSIZE 0x8000 /* Window size must be at least 32k, */
+ /* and a power of two */
+
+static uch *inbuf; /* input buffer */
+static uch window[WSIZE]; /* Sliding window buffer */
+
+static unsigned insize = 0; /* valid bytes in inbuf */
+static unsigned inptr = 0; /* index of next byte to be processed in inbuf */
+static unsigned outcnt = 0; /* bytes in output buffer */
+
+/* gzip flag byte */
+#define ASCII_FLAG 0x01 /* bit 0 set: file probably ASCII text */
+#define CONTINUATION 0x02 /* bit 1 set: continuation of multi-part gzip file */
+#define EXTRA_FIELD 0x04 /* bit 2 set: extra field present */
+#define ORIG_NAME 0x08 /* bit 3 set: original file name present */
+#define COMMENT 0x10 /* bit 4 set: file comment present */
+#define ENCRYPTED 0x20 /* bit 5 set: file is encrypted */
+#define RESERVED 0xC0 /* bit 6,7: reserved */
+
+#define get_byte() (inptr < insize ? inbuf[inptr++] : fill_inbuf())
+
+/* Diagnostic functions */
+#ifdef DEBUG
+# define Assert(cond,msg) {if(!(cond)) error(msg);}
+# define Trace(x) fprintf x
+# define Tracev(x) {if (verbose) fprintf x ;}
+# define Tracevv(x) {if (verbose>1) fprintf x ;}
+# define Tracec(c,x) {if (verbose && (c)) fprintf x ;}
+# define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;}
+#else
+# define Assert(cond,msg)
+# define Trace(x)
+# define Tracev(x)
+# define Tracevv(x)
+# define Tracec(c,x)
+# define Tracecv(c,x)
+#endif
+
+static int fill_inbuf(void);
+static void flush_window(void);
+static void error(char *m);
+static void gzip_mark(void **);
+static void gzip_release(void **);
+
+/*
+ * This is set up by the setup-routine at boot-time
+ */
+static unsigned char *real_mode; /* Pointer to real-mode data */
+
+#define EXT_MEM_K (*(unsigned short *)(real_mode + 0x2))
+#ifndef STANDARD_MEMORY_BIOS_CALL
+#define ALT_MEM_K (*(unsigned long *)(real_mode + 0x1e0))
+#endif
+#define SCREEN_INFO (*(struct screen_info *)(real_mode+0))
+
+extern char input_data[];
+extern int input_len;
+
+static long bytes_out = 0;
+static uch *output_data;
+static unsigned long output_ptr = 0;
+
+static void *malloc(int size);
+static void free(void *where);
+
+static void puts(const char *);
+
+extern int end;
+static long free_mem_ptr = (long)&end;
+static long free_mem_end_ptr;
+
+#define INPLACE_MOVE_ROUTINE 0x1000
+#define LOW_BUFFER_START 0x2000
+#define LOW_BUFFER_MAX 0x90000
+#define HEAP_SIZE 0x3000
+static unsigned int low_buffer_end, low_buffer_size;
+static int high_loaded =0;
+static uch *high_buffer_start /* = (uch *)(((ulg)&end) + HEAP_SIZE)*/;
+
+static char *vidmem = (char *)0xa0000;
+static int lines, cols;
+
+#ifdef CONFIG_X86_NUMAQ
+static void * xquad_portio = NULL;
+#endif
+
+#include "../../../../lib/inflate.c"
+
+static void *malloc(int size)
+{
+ void *p;
+
+ if (size <0) error("Malloc error");
+ if (free_mem_ptr <= 0) error("Memory error");
+
+ free_mem_ptr = (free_mem_ptr + 3) & ~3; /* Align */
+
+ p = (void *)free_mem_ptr;
+ free_mem_ptr += size;
+
+ if (free_mem_ptr >= free_mem_end_ptr)
+ error("Out of memory");
+
+ return p;
+}
+
+static void free(void *where)
+{ /* Don't care */
+}
+
+static void gzip_mark(void **ptr)
+{
+ *ptr = (void *) free_mem_ptr;
+}
+
+static void gzip_release(void **ptr)
+{
+ free_mem_ptr = (long) *ptr;
+}
+
+static void scroll(void)
+{
+ int i;
+
+ memcpy ( vidmem, vidmem + cols * 2, ( lines - 1 ) * cols * 2 );
+ for ( i = ( lines - 1 ) * cols * 2; i < lines * cols * 2; i += 2 )
+ vidmem[i] = ' ';
+}
+
+static void puts(const char *s)
+{
+ int x,y,pos;
+ char c;
+
+ x = SCREEN_INFO.orig_x;
+ y = SCREEN_INFO.orig_y;
+
+ while ( ( c = *s++ ) != '\0' ) {
+ if ( c == '\n' ) {
+ x = 0;
+ if ( ++y >= lines ) {
+ scroll();
+ y--;
+ }
+ } else {
+ vidmem [ ( x + cols * y ) * 2 ] = c;
+ if ( ++x >= cols ) {
+ x = 0;
+ if ( ++y >= lines ) {
+ scroll();
+ y--;
+ }
+ }
+ }
+ }
+
+ SCREEN_INFO.orig_x = x;
+ SCREEN_INFO.orig_y = y;
+
+ pos = x + cols * y; /* Update cursor position */
+ while (!(inb_p(0x60) & 4));
+ outb_p(0x49, 0x62);
+ outb_p(pos & 0xff, 0x60);
+ outb_p((pos >> 8) & 0xff, 0x60);
+}
+
+static void* memset(void* s, int c, size_t n)
+{
+ int i;
+ char *ss = (char*)s;
+
+ for (i=0;i<n;i++) ss[i] = c;
+ return s;
+}
+
+static void* memcpy(void* __dest, __const void* __src,
+ size_t __n)
+{
+ int i;
+ char *d = (char *)__dest, *s = (char *)__src;
+
+ for (i=0;i<__n;i++) d[i] = s[i];
+ return __dest;
+}
+
+/* ===========================================================================
+ * Fill the input buffer. This is called only when the buffer is empty
+ * and at least one byte is really needed.
+ */
+static int fill_inbuf(void)
+{
+ if (insize != 0) {
+ error("ran out of input data");
+ }
+
+ inbuf = input_data;
+ insize = input_len;
+ inptr = 1;
+ return inbuf[0];
+}
+
+/* ===========================================================================
+ * Write the output window window[0..outcnt-1] and update crc and bytes_out.
+ * (Used for the decompressed data only.)
+ */
+static void flush_window_low(void)
+{
+ ulg c = crc; /* temporary variable */
+ unsigned n;
+ uch *in, *out, ch;
+
+ in = window;
+ out = &output_data[output_ptr];
+ for (n = 0; n < outcnt; n++) {
+ ch = *out++ = *in++;
+ c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8);
+ }
+ crc = c;
+ bytes_out += (ulg)outcnt;
+ output_ptr += (ulg)outcnt;
+ outcnt = 0;
+}
+
+static void flush_window_high(void)
+{
+ ulg c = crc; /* temporary variable */
+ unsigned n;
+ uch *in, ch;
+ in = window;
+ for (n = 0; n < outcnt; n++) {
+ ch = *output_data++ = *in++;
+ if ((ulg)output_data == low_buffer_end) output_data=high_buffer_start;
+ c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8);
+ }
+ crc = c;
+ bytes_out += (ulg)outcnt;
+ outcnt = 0;
+}
+
+static void flush_window(void)
+{
+ if (high_loaded) flush_window_high();
+ else flush_window_low();
+}
+
+static void error(char *x)
+{
+ puts("\n\n");
+ puts(x);
+ puts("\n\n -- System halted");
+
+ while(1); /* Halt */
+}
+
+#define STACK_SIZE (4096)
+
+long user_stack [STACK_SIZE];
+
+struct {
+ long * a;
+ short b;
+ } stack_start = { & user_stack [STACK_SIZE] , __BOOT_DS };
+
+static void setup_normal_output_buffer(void)
+{
+#ifdef STANDARD_MEMORY_BIOS_CALL
+ if (EXT_MEM_K < 1024) error("Less than 2MB of memory");
+#else
+ if ((ALT_MEM_K > EXT_MEM_K ? ALT_MEM_K : EXT_MEM_K) < 1024) error("Less than 2MB of memory");
+#endif
+ output_data = (char *)0x100000; /* Points to 1M */
+ free_mem_end_ptr = (long)real_mode;
+}
+
+struct moveparams {
+ uch *low_buffer_start; int lcount;
+ uch *high_buffer_start; int hcount;
+};
+
+static void setup_output_buffer_if_we_run_high(struct moveparams *mv)
+{
+ high_buffer_start = (uch *)(((ulg)&end) + HEAP_SIZE);
+#ifdef STANDARD_MEMORY_BIOS_CALL
+ if (EXT_MEM_K < (3*1024)) error("Less than 4MB of memory");
+#else
+ if ((ALT_MEM_K > EXT_MEM_K ? ALT_MEM_K : EXT_MEM_K) < (3*1024)) error("Less than 4MB of memory");
+#endif
+ mv->low_buffer_start = output_data = (char *)LOW_BUFFER_START;
+ low_buffer_end = ((unsigned int)real_mode > LOW_BUFFER_MAX
+ ? LOW_BUFFER_MAX : (unsigned int)real_mode) & ~0xfff;
+ low_buffer_size = low_buffer_end - LOW_BUFFER_START;
+ high_loaded = 1;
+ free_mem_end_ptr = (long)high_buffer_start;
+ if ( (0x100000 + low_buffer_size) > ((ulg)high_buffer_start)) {
+ high_buffer_start = (uch *)(0x100000 + low_buffer_size);
+ mv->hcount = 0; /* say: we need not to move high_buffer */
+ }
+ else mv->hcount = -1;
+ mv->high_buffer_start = high_buffer_start;
+}
+
+static void close_output_buffer_if_we_run_high(struct moveparams *mv)
+{
+ if (bytes_out > low_buffer_size) {
+ mv->lcount = low_buffer_size;
+ if (mv->hcount)
+ mv->hcount = bytes_out - low_buffer_size;
+ } else {
+ mv->lcount = bytes_out;
+ mv->hcount = 0;
+ }
+}
+
+
+asmlinkage int decompress_kernel(struct moveparams *mv, void *rmode)
+{
+ real_mode = rmode;
+
+ vidmem = (char *)(((unsigned int)SCREEN_INFO.orig_video_page) << 4);
+
+ lines = SCREEN_INFO.orig_video_lines;
+ cols = SCREEN_INFO.orig_video_cols;
+
+ if (free_mem_ptr < 0x100000) setup_normal_output_buffer();
+ else setup_output_buffer_if_we_run_high(mv);
+
+ makecrc();
+ puts("Uncompressing Linux... ");
+ gunzip();
+ puts("Ok, booting the kernel.\n");
+ if (high_loaded) close_output_buffer_if_we_run_high(mv);
+ return high_loaded;
+}
+
+/* We don't actually check for stack overflows this early. */
+__asm__(".globl mcount ; mcount: ret\n");
+
--- /dev/null
+SECTIONS
+{
+ .data : {
+ input_len = .;
+ LONG(input_data_end - input_data) input_data = .;
+ *(.data)
+ input_data_end = .;
+ }
+}
--- /dev/null
+/*
+ * $Id: build.c,v 1.5 1997/05/19 12:29:58 mj Exp $
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * Copyright (C) 1997 Martin Mares
+ */
+
+/*
+ * This file builds a disk-image from three different files:
+ *
+ * - bootsect: exactly 512 bytes of 8086 machine code, loads the rest
+ * - setup: 8086 machine code, sets up system parm
+ * - system: 80386 code for actual system
+ *
+ * It does some checking that all files are of the correct type, and
+ * just writes the result to stdout, removing headers and padding to
+ * the right amount. It also writes some system data to stderr.
+ */
+
+/*
+ * Changes by tytso to allow root device specification
+ * High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996
+ * Cross compiling fixes by Gertjan van Wingerde, July 1996
+ * Rewritten by Martin Mares, April 1997
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/sysmacros.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <asm/boot.h>
+
+typedef unsigned char byte;
+typedef unsigned short word;
+typedef unsigned long u32;
+
+#define DEFAULT_MAJOR_ROOT 0
+#define DEFAULT_MINOR_ROOT 0
+
+/* Minimal number of setup sectors (see also bootsect.S) */
+#define SETUP_SECTS 4
+
+byte buf[1024];
+int fd;
+int is_big_kernel;
+
+void die(const char * str, ...)
+{
+ va_list args;
+ va_start(args, str);
+ vfprintf(stderr, str, args);
+ fputc('\n', stderr);
+ exit(1);
+}
+
+void file_open(const char *name)
+{
+ if ((fd = open(name, O_RDONLY, 0)) < 0)
+ die("Unable to open `%s': %m", name);
+}
+
+void usage(void)
+{
+ die("Usage: build [-b] bootsect setup system [rootdev] [> image]");
+}
+
+int main(int argc, char ** argv)
+{
+ unsigned int i, c, sz, setup_sectors;
+ u32 sys_size;
+ byte major_root, minor_root;
+ struct stat sb;
+
+ if (argc > 2 && !strcmp(argv[1], "-b"))
+ {
+ is_big_kernel = 1;
+ argc--, argv++;
+ }
+ if ((argc < 4) || (argc > 5))
+ usage();
+ if (argc > 4) {
+ if (!strcmp(argv[4], "CURRENT")) {
+ if (stat("/", &sb)) {
+ perror("/");
+ die("Couldn't stat /");
+ }
+ major_root = major(sb.st_dev);
+ minor_root = minor(sb.st_dev);
+ } else if (strcmp(argv[4], "FLOPPY")) {
+ if (stat(argv[4], &sb)) {
+ perror(argv[4]);
+ die("Couldn't stat root device.");
+ }
+ major_root = major(sb.st_rdev);
+ minor_root = minor(sb.st_rdev);
+ } else {
+ major_root = 0;
+ minor_root = 0;
+ }
+ } else {
+ major_root = DEFAULT_MAJOR_ROOT;
+ minor_root = DEFAULT_MINOR_ROOT;
+ }
+ fprintf(stderr, "Root device is (%d, %d)\n", major_root, minor_root);
+
+ file_open(argv[1]);
+ i = read(fd, buf, sizeof(buf));
+ fprintf(stderr,"Boot sector %d bytes.\n",i);
+ if (i != 512)
+ die("Boot block must be exactly 512 bytes");
+ if (buf[510] != 0x55 || buf[511] != 0xaa)
+ die("Boot block hasn't got boot flag (0xAA55)");
+ buf[508] = minor_root;
+ buf[509] = major_root;
+ if (write(1, buf, 512) != 512)
+ die("Write call failed");
+ close (fd);
+
+ file_open(argv[2]); /* Copy the setup code */
+ for (i=0 ; (c=read(fd, buf, sizeof(buf)))>0 ; i+=c )
+ if (write(1, buf, c) != c)
+ die("Write call failed");
+ if (c != 0)
+ die("read-error on `setup'");
+ close (fd);
+
+ setup_sectors = (i + 511) / 512; /* Pad unused space with zeros */
+ if (!(setup_sectors & 1))
+ setup_sectors++; /* setup_sectors must be odd on NEC PC-9800 */
+ fprintf(stderr, "Setup is %d bytes.\n", i);
+ memset(buf, 0, sizeof(buf));
+ while (i < setup_sectors * 512) {
+ c = setup_sectors * 512 - i;
+ if (c > sizeof(buf))
+ c = sizeof(buf);
+ if (write(1, buf, c) != c)
+ die("Write call failed");
+ i += c;
+ }
+
+ file_open(argv[3]);
+ if (fstat (fd, &sb))
+ die("Unable to stat `%s': %m", argv[3]);
+ sz = sb.st_size;
+ fprintf (stderr, "System is %d kB\n", sz/1024);
+ sys_size = (sz + 15) / 16;
+ /* 0x40000*16 = 4.0 MB, reasonable estimate for the current maximum */
+ if (sys_size > (is_big_kernel ? 0x40000 : DEF_SYSSIZE))
+ die("System is too big. Try using %smodules.",
+ is_big_kernel ? "" : "bzImage or ");
+ while (sz > 0) {
+ int l, n;
+
+ l = (sz > sizeof(buf)) ? sizeof(buf) : sz;
+ if ((n=read(fd, buf, l)) != l) {
+ if (n < 0)
+ die("Error reading %s: %m", argv[3]);
+ else
+ die("%s: Unexpected EOF", argv[3]);
+ }
+ if (write(1, buf, l) != l)
+ die("Write failed");
+ sz -= l;
+ }
+ close(fd);
+
+ if (lseek(1, 497, SEEK_SET) != 497) /* Write sizes to the bootsector */
+ die("Output: seek failed");
+ buf[0] = setup_sectors;
+ if (write(1, buf, 1) != 1)
+ die("Write of setup sector count failed");
+ if (lseek(1, 500, SEEK_SET) != 500)
+ die("Output: seek failed");
+ buf[0] = (sys_size & 0xff);
+ buf[1] = ((sys_size >> 8) & 0xff);
+ if (write(1, buf, 2) != 2)
+ die("Write of image length failed");
+
+ return 0; /* Everything is OK */
+}
+++ /dev/null
-#
-# i386/crypto/Makefile
-#
-# Arch-specific CryptoAPI modules.
-#
-
-obj-$(CONFIG_CRYPTO_AES_586) += aes-i586.o
-
-aes-i586-y := aes-i586-asm.o aes.o
+++ /dev/null
-// -------------------------------------------------------------------------
-// Copyright (c) 2001, Dr Brian Gladman < >, Worcester, UK.
-// All rights reserved.
-//
-// LICENSE TERMS
-//
-// The free distribution and use of this software in both source and binary
-// form is allowed (with or without changes) provided that:
-//
-// 1. distributions of this source code include the above copyright
-// notice, this list of conditions and the following disclaimer//
-//
-// 2. distributions in binary form include the above copyright
-// notice, this list of conditions and the following disclaimer
-// in the documentation and/or other associated materials//
-//
-// 3. the copyright holder's name is not used to endorse products
-// built using this software without specific written permission.
-//
-//
-// ALTERNATIVELY, provided that this notice is retained in full, this product
-// may be distributed under the terms of the GNU General Public License (GPL),
-// in which case the provisions of the GPL apply INSTEAD OF those given above.
-//
-// Copyright (c) 2004 Linus Torvalds <torvalds@osdl.org>
-// Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
-
-// DISCLAIMER
-//
-// This software is provided 'as is' with no explicit or implied warranties
-// in respect of its properties including, but not limited to, correctness
-// and fitness for purpose.
-// -------------------------------------------------------------------------
-// Issue Date: 29/07/2002
-
-.file "aes-i586-asm.S"
-.text
-
-// aes_rval aes_enc_blk(const unsigned char in_blk[], unsigned char out_blk[], const aes_ctx cx[1])//
-// aes_rval aes_dec_blk(const unsigned char in_blk[], unsigned char out_blk[], const aes_ctx cx[1])//
-
-#define tlen 1024 // length of each of 4 'xor' arrays (256 32-bit words)
-
-// offsets to parameters with one register pushed onto stack
-
-#define in_blk 8 // input byte array address parameter
-#define out_blk 12 // output byte array address parameter
-#define ctx 16 // AES context structure
-
-// offsets in context structure
-
-#define ekey 0 // encryption key schedule base address
-#define nrnd 256 // number of rounds
-#define dkey 260 // decryption key schedule base address
-
-// register mapping for encrypt and decrypt subroutines
-
-#define r0 eax
-#define r1 ebx
-#define r2 ecx
-#define r3 edx
-#define r4 esi
-#define r5 edi
-#define r6 ebp
-
-#define eaxl al
-#define eaxh ah
-#define ebxl bl
-#define ebxh bh
-#define ecxl cl
-#define ecxh ch
-#define edxl dl
-#define edxh dh
-
-#define _h(reg) reg##h
-#define h(reg) _h(reg)
-
-#define _l(reg) reg##l
-#define l(reg) _l(reg)
-
-// This macro takes a 32-bit word representing a column and uses
-// each of its four bytes to index into four tables of 256 32-bit
-// words to obtain values that are then xored into the appropriate
-// output registers r0, r1, r4 or r5.
-
-// Parameters:
-// %1 out_state[0]
-// %2 out_state[1]
-// %3 out_state[2]
-// %4 out_state[3]
-// %5 table base address
-// %6 input register for the round (destroyed)
-// %7 scratch register for the round
-
-#define do_col(a1, a2, a3, a4, a5, a6, a7) \
- movzx %l(a6),%a7; \
- xor a5(,%a7,4),%a1; \
- movzx %h(a6),%a7; \
- shr $16,%a6; \
- xor a5+tlen(,%a7,4),%a2; \
- movzx %l(a6),%a7; \
- movzx %h(a6),%a6; \
- xor a5+2*tlen(,%a7,4),%a3; \
- xor a5+3*tlen(,%a6,4),%a4;
-
-// initialise output registers from the key schedule
-
-#define do_fcol(a1, a2, a3, a4, a5, a6, a7, a8) \
- mov 0 a8,%a1; \
- movzx %l(a6),%a7; \
- mov 12 a8,%a2; \
- xor a5(,%a7,4),%a1; \
- mov 4 a8,%a4; \
- movzx %h(a6),%a7; \
- shr $16,%a6; \
- xor a5+tlen(,%a7,4),%a2; \
- movzx %l(a6),%a7; \
- movzx %h(a6),%a6; \
- xor a5+3*tlen(,%a6,4),%a4; \
- mov %a3,%a6; \
- mov 8 a8,%a3; \
- xor a5+2*tlen(,%a7,4),%a3;
-
-// initialise output registers from the key schedule
-
-#define do_icol(a1, a2, a3, a4, a5, a6, a7, a8) \
- mov 0 a8,%a1; \
- movzx %l(a6),%a7; \
- mov 4 a8,%a2; \
- xor a5(,%a7,4),%a1; \
- mov 12 a8,%a4; \
- movzx %h(a6),%a7; \
- shr $16,%a6; \
- xor a5+tlen(,%a7,4),%a2; \
- movzx %l(a6),%a7; \
- movzx %h(a6),%a6; \
- xor a5+3*tlen(,%a6,4),%a4; \
- mov %a3,%a6; \
- mov 8 a8,%a3; \
- xor a5+2*tlen(,%a7,4),%a3;
-
-
-// original Gladman had conditional saves to MMX regs.
-#define save(a1, a2) \
- mov %a2,4*a1(%esp)
-
-#define restore(a1, a2) \
- mov 4*a2(%esp),%a1
-
-// This macro performs a forward encryption cycle. It is entered with
-// the first previous round column values in r0, r1, r4 and r5 and
-// exits with the final values in the same registers, using the MMX
-// registers mm0-mm1 or the stack for temporary storage
-
-// mov current column values into the MMX registers
-#define fwd_rnd(arg, table) \
- /* mov current column values into the MMX registers */ \
- mov %r0,%r2; \
- save (0,r1); \
- save (1,r5); \
- \
- /* compute new column values */ \
- do_fcol(r0,r5,r4,r1,table, r2,r3, arg); \
- do_col (r4,r1,r0,r5,table, r2,r3); \
- restore(r2,0); \
- do_col (r1,r0,r5,r4,table, r2,r3); \
- restore(r2,1); \
- do_col (r5,r4,r1,r0,table, r2,r3);
-
-// This macro performs an inverse encryption cycle. It is entered with
-// the first previous round column values in r0, r1, r4 and r5 and
-// exits with the final values in the same registers, using the MMX
-// registers mm0-mm1 or the stack for temporary storage
-
-#define inv_rnd(arg, table) \
- /* mov current column values into the MMX registers */ \
- mov %r0,%r2; \
- save (0,r1); \
- save (1,r5); \
- \
- /* compute new column values */ \
- do_icol(r0,r1,r4,r5, table, r2,r3, arg); \
- do_col (r4,r5,r0,r1, table, r2,r3); \
- restore(r2,0); \
- do_col (r1,r4,r5,r0, table, r2,r3); \
- restore(r2,1); \
- do_col (r5,r0,r1,r4, table, r2,r3);
-
-// AES (Rijndael) Encryption Subroutine
-
-.global aes_enc_blk
-
-.extern ft_tab
-.extern fl_tab
-
-.align 4
-
-aes_enc_blk:
- push %ebp
- mov ctx(%esp),%ebp // pointer to context
- xor %eax,%eax
-
-// CAUTION: the order and the values used in these assigns
-// rely on the register mappings
-
-1: push %ebx
- mov in_blk+4(%esp),%r2
- push %esi
- mov nrnd(%ebp),%r3 // number of rounds
- push %edi
- lea ekey(%ebp),%r6 // key pointer
-
-// input four columns and xor in first round key
-
- mov (%r2),%r0
- mov 4(%r2),%r1
- mov 8(%r2),%r4
- mov 12(%r2),%r5
- xor (%r6),%r0
- xor 4(%r6),%r1
- xor 8(%r6),%r4
- xor 12(%r6),%r5
-
- sub $8,%esp // space for register saves on stack
- add $16,%r6 // increment to next round key
- sub $10,%r3
- je 4f // 10 rounds for 128-bit key
- add $32,%r6
- sub $2,%r3
- je 3f // 12 rounds for 128-bit key
- add $32,%r6
-
-2: fwd_rnd( -64(%r6) ,ft_tab) // 14 rounds for 128-bit key
- fwd_rnd( -48(%r6) ,ft_tab)
-3: fwd_rnd( -32(%r6) ,ft_tab) // 12 rounds for 128-bit key
- fwd_rnd( -16(%r6) ,ft_tab)
-4: fwd_rnd( (%r6) ,ft_tab) // 10 rounds for 128-bit key
- fwd_rnd( +16(%r6) ,ft_tab)
- fwd_rnd( +32(%r6) ,ft_tab)
- fwd_rnd( +48(%r6) ,ft_tab)
- fwd_rnd( +64(%r6) ,ft_tab)
- fwd_rnd( +80(%r6) ,ft_tab)
- fwd_rnd( +96(%r6) ,ft_tab)
- fwd_rnd(+112(%r6) ,ft_tab)
- fwd_rnd(+128(%r6) ,ft_tab)
- fwd_rnd(+144(%r6) ,fl_tab) // last round uses a different table
-
-// move final values to the output array. CAUTION: the
-// order of these assigns rely on the register mappings
-
- add $8,%esp
- mov out_blk+12(%esp),%r6
- mov %r5,12(%r6)
- pop %edi
- mov %r4,8(%r6)
- pop %esi
- mov %r1,4(%r6)
- pop %ebx
- mov %r0,(%r6)
- pop %ebp
- mov $1,%eax
- ret
-
-// AES (Rijndael) Decryption Subroutine
-
-.global aes_dec_blk
-
-.extern it_tab
-.extern il_tab
-
-.align 4
-
-aes_dec_blk:
- push %ebp
- mov ctx(%esp),%ebp // pointer to context
- xor %eax,%eax
-
-// CAUTION: the order and the values used in these assigns
-// rely on the register mappings
-
-1: push %ebx
- mov in_blk+4(%esp),%r2
- push %esi
- mov nrnd(%ebp),%r3 // number of rounds
- push %edi
- lea dkey(%ebp),%r6 // key pointer
- mov %r3,%r0
- shl $4,%r0
- add %r0,%r6
-
-// input four columns and xor in first round key
-
- mov (%r2),%r0
- mov 4(%r2),%r1
- mov 8(%r2),%r4
- mov 12(%r2),%r5
- xor (%r6),%r0
- xor 4(%r6),%r1
- xor 8(%r6),%r4
- xor 12(%r6),%r5
-
- sub $8,%esp // space for register saves on stack
- sub $16,%r6 // increment to next round key
- sub $10,%r3
- je 4f // 10 rounds for 128-bit key
- sub $32,%r6
- sub $2,%r3
- je 3f // 12 rounds for 128-bit key
- sub $32,%r6
-
-2: inv_rnd( +64(%r6), it_tab) // 14 rounds for 128-bit key
- inv_rnd( +48(%r6), it_tab)
-3: inv_rnd( +32(%r6), it_tab) // 12 rounds for 128-bit key
- inv_rnd( +16(%r6), it_tab)
-4: inv_rnd( (%r6), it_tab) // 10 rounds for 128-bit key
- inv_rnd( -16(%r6), it_tab)
- inv_rnd( -32(%r6), it_tab)
- inv_rnd( -48(%r6), it_tab)
- inv_rnd( -64(%r6), it_tab)
- inv_rnd( -80(%r6), it_tab)
- inv_rnd( -96(%r6), it_tab)
- inv_rnd(-112(%r6), it_tab)
- inv_rnd(-128(%r6), it_tab)
- inv_rnd(-144(%r6), il_tab) // last round uses a different table
-
-// move final values to the output array. CAUTION: the
-// order of these assigns rely on the register mappings
-
- add $8,%esp
- mov out_blk+12(%esp),%r6
- mov %r5,12(%r6)
- pop %edi
- mov %r4,8(%r6)
- pop %esi
- mov %r1,4(%r6)
- pop %ebx
- mov %r0,(%r6)
- pop %ebp
- mov $1,%eax
- ret
-
+++ /dev/null
-/*
- *
- * Glue Code for optimized 586 assembler version of AES
- *
- * Copyright (c) 2002, Dr Brian Gladman <>, Worcester, UK.
- * All rights reserved.
- *
- * LICENSE TERMS
- *
- * The free distribution and use of this software in both source and binary
- * form is allowed (with or without changes) provided that:
- *
- * 1. distributions of this source code include the above copyright
- * notice, this list of conditions and the following disclaimer;
- *
- * 2. distributions in binary form include the above copyright
- * notice, this list of conditions and the following disclaimer
- * in the documentation and/or other associated materials;
- *
- * 3. the copyright holder's name is not used to endorse products
- * built using this software without specific written permission.
- *
- * ALTERNATIVELY, provided that this notice is retained in full, this product
- * may be distributed under the terms of the GNU General Public License (GPL),
- * in which case the provisions of the GPL apply INSTEAD OF those given above.
- *
- * DISCLAIMER
- *
- * This software is provided 'as is' with no explicit or implied warranties
- * in respect of its properties, including, but not limited to, correctness
- * and/or fitness for purpose.
- *
- * Copyright (c) 2003, Adam J. Richter <adam@yggdrasil.com> (conversion to
- * 2.5 API).
- * Copyright (c) 2003, 2004 Fruhwirth Clemens <clemens@endorphin.org>
- * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
- *
- */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/crypto.h>
-#include <linux/linkage.h>
-
-asmlinkage void aes_enc_blk(const u8 *src, u8 *dst, void *ctx);
-asmlinkage void aes_dec_blk(const u8 *src, u8 *dst, void *ctx);
-
-#define AES_MIN_KEY_SIZE 16
-#define AES_MAX_KEY_SIZE 32
-#define AES_BLOCK_SIZE 16
-#define AES_KS_LENGTH 4 * AES_BLOCK_SIZE
-#define RC_LENGTH 29
-
-struct aes_ctx {
- u32 ekey[AES_KS_LENGTH];
- u32 rounds;
- u32 dkey[AES_KS_LENGTH];
-};
-
-#define WPOLY 0x011b
-#define u32_in(x) le32_to_cpu(*(const u32 *)(x))
-#define bytes2word(b0, b1, b2, b3) \
- (((u32)(b3) << 24) | ((u32)(b2) << 16) | ((u32)(b1) << 8) | (b0))
-
-/* define the finite field multiplies required for Rijndael */
-#define f2(x) ((x) ? pow[log[x] + 0x19] : 0)
-#define f3(x) ((x) ? pow[log[x] + 0x01] : 0)
-#define f9(x) ((x) ? pow[log[x] + 0xc7] : 0)
-#define fb(x) ((x) ? pow[log[x] + 0x68] : 0)
-#define fd(x) ((x) ? pow[log[x] + 0xee] : 0)
-#define fe(x) ((x) ? pow[log[x] + 0xdf] : 0)
-#define fi(x) ((x) ? pow[255 - log[x]]: 0)
-
-static inline u32 upr(u32 x, int n)
-{
- return (x << 8 * n) | (x >> (32 - 8 * n));
-}
-
-static inline u8 bval(u32 x, int n)
-{
- return x >> 8 * n;
-}
-
-/* The forward and inverse affine transformations used in the S-box */
-#define fwd_affine(x) \
- (w = (u32)x, w ^= (w<<1)^(w<<2)^(w<<3)^(w<<4), 0x63^(u8)(w^(w>>8)))
-
-#define inv_affine(x) \
- (w = (u32)x, w = (w<<1)^(w<<3)^(w<<6), 0x05^(u8)(w^(w>>8)))
-
-static u32 rcon_tab[RC_LENGTH];
-
-u32 ft_tab[4][256];
-u32 fl_tab[4][256];
-u32 ls_tab[4][256];
-u32 im_tab[4][256];
-u32 il_tab[4][256];
-u32 it_tab[4][256];
-
-void gen_tabs(void)
-{
- u32 i, w;
- u8 pow[512], log[256];
-
- /*
- * log and power tables for GF(2^8) finite field with
- * WPOLY as modular polynomial - the simplest primitive
- * root is 0x03, used here to generate the tables.
- */
- i = 0; w = 1;
-
- do {
- pow[i] = (u8)w;
- pow[i + 255] = (u8)w;
- log[w] = (u8)i++;
- w ^= (w << 1) ^ (w & 0x80 ? WPOLY : 0);
- } while (w != 1);
-
- for(i = 0, w = 1; i < RC_LENGTH; ++i) {
- rcon_tab[i] = bytes2word(w, 0, 0, 0);
- w = f2(w);
- }
-
- for(i = 0; i < 256; ++i) {
- u8 b;
-
- b = fwd_affine(fi((u8)i));
- w = bytes2word(f2(b), b, b, f3(b));
-
- /* tables for a normal encryption round */
- ft_tab[0][i] = w;
- ft_tab[1][i] = upr(w, 1);
- ft_tab[2][i] = upr(w, 2);
- ft_tab[3][i] = upr(w, 3);
- w = bytes2word(b, 0, 0, 0);
-
- /*
- * tables for last encryption round
- * (may also be used in the key schedule)
- */
- fl_tab[0][i] = w;
- fl_tab[1][i] = upr(w, 1);
- fl_tab[2][i] = upr(w, 2);
- fl_tab[3][i] = upr(w, 3);
-
- /*
- * table for key schedule if fl_tab above is
- * not of the required form
- */
- ls_tab[0][i] = w;
- ls_tab[1][i] = upr(w, 1);
- ls_tab[2][i] = upr(w, 2);
- ls_tab[3][i] = upr(w, 3);
-
- b = fi(inv_affine((u8)i));
- w = bytes2word(fe(b), f9(b), fd(b), fb(b));
-
- /* tables for the inverse mix column operation */
- im_tab[0][b] = w;
- im_tab[1][b] = upr(w, 1);
- im_tab[2][b] = upr(w, 2);
- im_tab[3][b] = upr(w, 3);
-
- /* tables for a normal decryption round */
- it_tab[0][i] = w;
- it_tab[1][i] = upr(w,1);
- it_tab[2][i] = upr(w,2);
- it_tab[3][i] = upr(w,3);
-
- w = bytes2word(b, 0, 0, 0);
-
- /* tables for last decryption round */
- il_tab[0][i] = w;
- il_tab[1][i] = upr(w,1);
- il_tab[2][i] = upr(w,2);
- il_tab[3][i] = upr(w,3);
- }
-}
-
-#define four_tables(x,tab,vf,rf,c) \
-( tab[0][bval(vf(x,0,c),rf(0,c))] ^ \
- tab[1][bval(vf(x,1,c),rf(1,c))] ^ \
- tab[2][bval(vf(x,2,c),rf(2,c))] ^ \
- tab[3][bval(vf(x,3,c),rf(3,c))] \
-)
-
-#define vf1(x,r,c) (x)
-#define rf1(r,c) (r)
-#define rf2(r,c) ((r-c)&3)
-
-#define inv_mcol(x) four_tables(x,im_tab,vf1,rf1,0)
-#define ls_box(x,c) four_tables(x,fl_tab,vf1,rf2,c)
-
-#define ff(x) inv_mcol(x)
-
-#define ke4(k,i) \
-{ \
- k[4*(i)+4] = ss[0] ^= ls_box(ss[3],3) ^ rcon_tab[i]; \
- k[4*(i)+5] = ss[1] ^= ss[0]; \
- k[4*(i)+6] = ss[2] ^= ss[1]; \
- k[4*(i)+7] = ss[3] ^= ss[2]; \
-}
-
-#define kel4(k,i) \
-{ \
- k[4*(i)+4] = ss[0] ^= ls_box(ss[3],3) ^ rcon_tab[i]; \
- k[4*(i)+5] = ss[1] ^= ss[0]; \
- k[4*(i)+6] = ss[2] ^= ss[1]; k[4*(i)+7] = ss[3] ^= ss[2]; \
-}
-
-#define ke6(k,i) \
-{ \
- k[6*(i)+ 6] = ss[0] ^= ls_box(ss[5],3) ^ rcon_tab[i]; \
- k[6*(i)+ 7] = ss[1] ^= ss[0]; \
- k[6*(i)+ 8] = ss[2] ^= ss[1]; \
- k[6*(i)+ 9] = ss[3] ^= ss[2]; \
- k[6*(i)+10] = ss[4] ^= ss[3]; \
- k[6*(i)+11] = ss[5] ^= ss[4]; \
-}
-
-#define kel6(k,i) \
-{ \
- k[6*(i)+ 6] = ss[0] ^= ls_box(ss[5],3) ^ rcon_tab[i]; \
- k[6*(i)+ 7] = ss[1] ^= ss[0]; \
- k[6*(i)+ 8] = ss[2] ^= ss[1]; \
- k[6*(i)+ 9] = ss[3] ^= ss[2]; \
-}
-
-#define ke8(k,i) \
-{ \
- k[8*(i)+ 8] = ss[0] ^= ls_box(ss[7],3) ^ rcon_tab[i]; \
- k[8*(i)+ 9] = ss[1] ^= ss[0]; \
- k[8*(i)+10] = ss[2] ^= ss[1]; \
- k[8*(i)+11] = ss[3] ^= ss[2]; \
- k[8*(i)+12] = ss[4] ^= ls_box(ss[3],0); \
- k[8*(i)+13] = ss[5] ^= ss[4]; \
- k[8*(i)+14] = ss[6] ^= ss[5]; \
- k[8*(i)+15] = ss[7] ^= ss[6]; \
-}
-
-#define kel8(k,i) \
-{ \
- k[8*(i)+ 8] = ss[0] ^= ls_box(ss[7],3) ^ rcon_tab[i]; \
- k[8*(i)+ 9] = ss[1] ^= ss[0]; \
- k[8*(i)+10] = ss[2] ^= ss[1]; \
- k[8*(i)+11] = ss[3] ^= ss[2]; \
-}
-
-#define kdf4(k,i) \
-{ \
- ss[0] = ss[0] ^ ss[2] ^ ss[1] ^ ss[3]; \
- ss[1] = ss[1] ^ ss[3]; \
- ss[2] = ss[2] ^ ss[3]; \
- ss[3] = ss[3]; \
- ss[4] = ls_box(ss[(i+3) % 4], 3) ^ rcon_tab[i]; \
- ss[i % 4] ^= ss[4]; \
- ss[4] ^= k[4*(i)]; \
- k[4*(i)+4] = ff(ss[4]); \
- ss[4] ^= k[4*(i)+1]; \
- k[4*(i)+5] = ff(ss[4]); \
- ss[4] ^= k[4*(i)+2]; \
- k[4*(i)+6] = ff(ss[4]); \
- ss[4] ^= k[4*(i)+3]; \
- k[4*(i)+7] = ff(ss[4]); \
-}
-
-#define kd4(k,i) \
-{ \
- ss[4] = ls_box(ss[(i+3) % 4], 3) ^ rcon_tab[i]; \
- ss[i % 4] ^= ss[4]; \
- ss[4] = ff(ss[4]); \
- k[4*(i)+4] = ss[4] ^= k[4*(i)]; \
- k[4*(i)+5] = ss[4] ^= k[4*(i)+1]; \
- k[4*(i)+6] = ss[4] ^= k[4*(i)+2]; \
- k[4*(i)+7] = ss[4] ^= k[4*(i)+3]; \
-}
-
-#define kdl4(k,i) \
-{ \
- ss[4] = ls_box(ss[(i+3) % 4], 3) ^ rcon_tab[i]; \
- ss[i % 4] ^= ss[4]; \
- k[4*(i)+4] = (ss[0] ^= ss[1]) ^ ss[2] ^ ss[3]; \
- k[4*(i)+5] = ss[1] ^ ss[3]; \
- k[4*(i)+6] = ss[0]; \
- k[4*(i)+7] = ss[1]; \
-}
-
-#define kdf6(k,i) \
-{ \
- ss[0] ^= ls_box(ss[5],3) ^ rcon_tab[i]; \
- k[6*(i)+ 6] = ff(ss[0]); \
- ss[1] ^= ss[0]; \
- k[6*(i)+ 7] = ff(ss[1]); \
- ss[2] ^= ss[1]; \
- k[6*(i)+ 8] = ff(ss[2]); \
- ss[3] ^= ss[2]; \
- k[6*(i)+ 9] = ff(ss[3]); \
- ss[4] ^= ss[3]; \
- k[6*(i)+10] = ff(ss[4]); \
- ss[5] ^= ss[4]; \
- k[6*(i)+11] = ff(ss[5]); \
-}
-
-#define kd6(k,i) \
-{ \
- ss[6] = ls_box(ss[5],3) ^ rcon_tab[i]; \
- ss[0] ^= ss[6]; ss[6] = ff(ss[6]); \
- k[6*(i)+ 6] = ss[6] ^= k[6*(i)]; \
- ss[1] ^= ss[0]; \
- k[6*(i)+ 7] = ss[6] ^= k[6*(i)+ 1]; \
- ss[2] ^= ss[1]; \
- k[6*(i)+ 8] = ss[6] ^= k[6*(i)+ 2]; \
- ss[3] ^= ss[2]; \
- k[6*(i)+ 9] = ss[6] ^= k[6*(i)+ 3]; \
- ss[4] ^= ss[3]; \
- k[6*(i)+10] = ss[6] ^= k[6*(i)+ 4]; \
- ss[5] ^= ss[4]; \
- k[6*(i)+11] = ss[6] ^= k[6*(i)+ 5]; \
-}
-
-#define kdl6(k,i) \
-{ \
- ss[0] ^= ls_box(ss[5],3) ^ rcon_tab[i]; \
- k[6*(i)+ 6] = ss[0]; \
- ss[1] ^= ss[0]; \
- k[6*(i)+ 7] = ss[1]; \
- ss[2] ^= ss[1]; \
- k[6*(i)+ 8] = ss[2]; \
- ss[3] ^= ss[2]; \
- k[6*(i)+ 9] = ss[3]; \
-}
-
-#define kdf8(k,i) \
-{ \
- ss[0] ^= ls_box(ss[7],3) ^ rcon_tab[i]; \
- k[8*(i)+ 8] = ff(ss[0]); \
- ss[1] ^= ss[0]; \
- k[8*(i)+ 9] = ff(ss[1]); \
- ss[2] ^= ss[1]; \
- k[8*(i)+10] = ff(ss[2]); \
- ss[3] ^= ss[2]; \
- k[8*(i)+11] = ff(ss[3]); \
- ss[4] ^= ls_box(ss[3],0); \
- k[8*(i)+12] = ff(ss[4]); \
- ss[5] ^= ss[4]; \
- k[8*(i)+13] = ff(ss[5]); \
- ss[6] ^= ss[5]; \
- k[8*(i)+14] = ff(ss[6]); \
- ss[7] ^= ss[6]; \
- k[8*(i)+15] = ff(ss[7]); \
-}
-
-#define kd8(k,i) \
-{ \
- u32 __g = ls_box(ss[7],3) ^ rcon_tab[i]; \
- ss[0] ^= __g; \
- __g = ff(__g); \
- k[8*(i)+ 8] = __g ^= k[8*(i)]; \
- ss[1] ^= ss[0]; \
- k[8*(i)+ 9] = __g ^= k[8*(i)+ 1]; \
- ss[2] ^= ss[1]; \
- k[8*(i)+10] = __g ^= k[8*(i)+ 2]; \
- ss[3] ^= ss[2]; \
- k[8*(i)+11] = __g ^= k[8*(i)+ 3]; \
- __g = ls_box(ss[3],0); \
- ss[4] ^= __g; \
- __g = ff(__g); \
- k[8*(i)+12] = __g ^= k[8*(i)+ 4]; \
- ss[5] ^= ss[4]; \
- k[8*(i)+13] = __g ^= k[8*(i)+ 5]; \
- ss[6] ^= ss[5]; \
- k[8*(i)+14] = __g ^= k[8*(i)+ 6]; \
- ss[7] ^= ss[6]; \
- k[8*(i)+15] = __g ^= k[8*(i)+ 7]; \
-}
-
-#define kdl8(k,i) \
-{ \
- ss[0] ^= ls_box(ss[7],3) ^ rcon_tab[i]; \
- k[8*(i)+ 8] = ss[0]; \
- ss[1] ^= ss[0]; \
- k[8*(i)+ 9] = ss[1]; \
- ss[2] ^= ss[1]; \
- k[8*(i)+10] = ss[2]; \
- ss[3] ^= ss[2]; \
- k[8*(i)+11] = ss[3]; \
-}
-
-static int
-aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags)
-{
- int i;
- u32 ss[8];
- struct aes_ctx *ctx = ctx_arg;
-
- /* encryption schedule */
-
- ctx->ekey[0] = ss[0] = u32_in(in_key);
- ctx->ekey[1] = ss[1] = u32_in(in_key + 4);
- ctx->ekey[2] = ss[2] = u32_in(in_key + 8);
- ctx->ekey[3] = ss[3] = u32_in(in_key + 12);
-
- switch(key_len) {
- case 16:
- for (i = 0; i < 9; i++)
- ke4(ctx->ekey, i);
- kel4(ctx->ekey, 9);
- ctx->rounds = 10;
- break;
-
- case 24:
- ctx->ekey[4] = ss[4] = u32_in(in_key + 16);
- ctx->ekey[5] = ss[5] = u32_in(in_key + 20);
- for (i = 0; i < 7; i++)
- ke6(ctx->ekey, i);
- kel6(ctx->ekey, 7);
- ctx->rounds = 12;
- break;
-
- case 32:
- ctx->ekey[4] = ss[4] = u32_in(in_key + 16);
- ctx->ekey[5] = ss[5] = u32_in(in_key + 20);
- ctx->ekey[6] = ss[6] = u32_in(in_key + 24);
- ctx->ekey[7] = ss[7] = u32_in(in_key + 28);
- for (i = 0; i < 6; i++)
- ke8(ctx->ekey, i);
- kel8(ctx->ekey, 6);
- ctx->rounds = 14;
- break;
-
- default:
- *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
- return -EINVAL;
- }
-
- /* decryption schedule */
-
- ctx->dkey[0] = ss[0] = u32_in(in_key);
- ctx->dkey[1] = ss[1] = u32_in(in_key + 4);
- ctx->dkey[2] = ss[2] = u32_in(in_key + 8);
- ctx->dkey[3] = ss[3] = u32_in(in_key + 12);
-
- switch (key_len) {
- case 16:
- kdf4(ctx->dkey, 0);
- for (i = 1; i < 9; i++)
- kd4(ctx->dkey, i);
- kdl4(ctx->dkey, 9);
- break;
-
- case 24:
- ctx->dkey[4] = ff(ss[4] = u32_in(in_key + 16));
- ctx->dkey[5] = ff(ss[5] = u32_in(in_key + 20));
- kdf6(ctx->dkey, 0);
- for (i = 1; i < 7; i++)
- kd6(ctx->dkey, i);
- kdl6(ctx->dkey, 7);
- break;
-
- case 32:
- ctx->dkey[4] = ff(ss[4] = u32_in(in_key + 16));
- ctx->dkey[5] = ff(ss[5] = u32_in(in_key + 20));
- ctx->dkey[6] = ff(ss[6] = u32_in(in_key + 24));
- ctx->dkey[7] = ff(ss[7] = u32_in(in_key + 28));
- kdf8(ctx->dkey, 0);
- for (i = 1; i < 6; i++)
- kd8(ctx->dkey, i);
- kdl8(ctx->dkey, 6);
- break;
- }
- return 0;
-}
-
-static inline void aes_encrypt(void *ctx, u8 *dst, const u8 *src)
-{
- aes_enc_blk(src, dst, ctx);
-}
-static inline void aes_decrypt(void *ctx, u8 *dst, const u8 *src)
-{
- aes_dec_blk(src, dst, ctx);
-}
-
-
-static struct crypto_alg aes_alg = {
- .cra_name = "aes",
- .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct aes_ctx),
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(aes_alg.cra_list),
- .cra_u = {
- .cipher = {
- .cia_min_keysize = AES_MIN_KEY_SIZE,
- .cia_max_keysize = AES_MAX_KEY_SIZE,
- .cia_setkey = aes_set_key,
- .cia_encrypt = aes_encrypt,
- .cia_decrypt = aes_decrypt
- }
- }
-};
-
-static int __init aes_init(void)
-{
- gen_tabs();
- return crypto_register_alg(&aes_alg);
-}
-
-static void __exit aes_fini(void)
-{
- crypto_unregister_alg(&aes_alg);
-}
-
-module_init(aes_init);
-module_exit(aes_fini);
-
-MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, i586 asm optimized");
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_AUTHOR("Fruhwirth Clemens, James Morris, Brian Gladman, Adam Richter");
-MODULE_ALIAS("aes");
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
+ /* Can't seek (pwrite) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
memset(line, 0, LINE_SIZE);
if (len > LINE_SIZE)
len = LINE_SIZE;
switch (cmd) {
default:
- return -ENOTTY;
+ return -ENOIOCTLCMD;
case MTRRIOC_ADD_ENTRY:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
.long sys_madvise
.long sys_getdents64 /* 220 */
.long sys_fcntl64
-#ifdef CONFIG_TUX
- .long __sys_tux
-#else
-# ifdef CONFIG_TUX_MODULE
- .long sys_tux
-# else
- .long sys_ni_syscall
-# endif
-#endif
+ .long sys_ni_syscall /* reserved for TUX */
.long sys_ni_syscall
.long sys_gettid
.long sys_readahead /* 225 */
.long sys_mq_notify
.long sys_mq_getsetattr
.long sys_ni_syscall /* reserved for kexec */
- .long sys_ioprio_set
- .long sys_ioprio_get /* 285 */
syscall_table_size=(.-sys_call_table)
#include <linux/tty.h>
#include <linux/highmem.h>
#include <linux/time.h>
-#include <linux/nmi.h>
#include <asm/semaphore.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/nmi.h>
#include <asm/ist.h>
-#include <asm/e820.h>
extern void dump_thread(struct pt_regs *, struct user *);
extern spinlock_t rtc_lock;
EXPORT_SYMBOL(csum_partial);
EXPORT_SYMBOL_GPL(empty_zero_page);
-
-#ifdef CONFIG_CRASH_DUMP_MODULE
-#ifdef CONFIG_SMP
-extern irq_desc_t irq_desc[NR_IRQS];
-extern unsigned long irq_affinity[NR_IRQS];
-extern void stop_this_cpu(void *);
-EXPORT_SYMBOL(irq_desc);
-EXPORT_SYMBOL(irq_affinity);
-EXPORT_SYMBOL(stop_this_cpu);
-EXPORT_SYMBOL(dump_send_ipi);
-#endif
-extern int pfn_is_ram(unsigned long);
-EXPORT_SYMBOL(pfn_is_ram);
-#ifdef ARCH_HAS_NMI_WATCHDOG
-EXPORT_SYMBOL(touch_nmi_watchdog);
-#endif
-#endif
} irq_2_pin[PIN_MAP_SIZE];
int vector_irq[NR_VECTORS] = { [0 ... NR_VECTORS - 1] = -1};
-#ifdef CONFIG_PCI_MSI
+#ifdef CONFIG_PCI_USE_VECTOR
#define vector_to_irq(vector) \
(platform_legacy_irq(vector) ? vector : vector_irq[vector])
#else
/* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */
u8 irq_vector[NR_IRQ_VECTORS] = { FIRST_DEVICE_VECTOR , 0 };
-#ifdef CONFIG_PCI_MSI
+#ifdef CONFIG_PCI_USE_VECTOR
int assign_irq_vector(int irq)
#else
int __init assign_irq_vector(int irq)
reg_00.raw = io_apic_read(apic, 0);
spin_unlock_irqrestore(&ioapic_lock, flags);
if (reg_00.bits.ID != mp_ioapics[apic].mpc_apicid)
- printk("could not set ID!\n");
+ panic("could not set ID!\n");
else
printk(" ok.\n");
}
}
}
-#ifdef CONFIG_PCI_MSI
+#ifdef CONFIG_PCI_USE_VECTOR
static unsigned int startup_edge_ioapic_vector(unsigned int vector)
{
int irq = vector_to_irq(vector);
return;
}
printk(" failed :(.\n");
- panic("IO-APIC + timer doesn't work! Try using the 'noapic' kernel parameter\n");
+ panic("IO-APIC + timer doesn't work! pester mingo@redhat.com");
}
/*
}
-/*
- * These should really be __section__(".bss.page_aligned") as well, but
- * gcc's 3.0 and earlier don't handle that correctly.
- */
-static char softirq_stack[NR_CPUS * THREAD_SIZE] __attribute__((__aligned__(THREAD_SIZE)));
-static char hardirq_stack[NR_CPUS * THREAD_SIZE] __attribute__((__aligned__(THREAD_SIZE)));
+static char softirq_stack[NR_CPUS * THREAD_SIZE] __attribute__((__aligned__(THREAD_SIZE), __section__(".bss.page_aligned")));
+static char hardirq_stack[NR_CPUS * THREAD_SIZE] __attribute__((__aligned__(THREAD_SIZE), __section__(".bss.page_aligned")));
/*
* allocate per-cpu stacks for hardirq and for softirq processing
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/sysdev.h>
-#include <linux/dump.h>
#include <asm/smp.h>
#include <asm/mtrr.h>
bust_spinlocks(1);
printk("NMI Watchdog detected LOCKUP on CPU%d, eip %08lx, registers:\n", cpu, regs->eip);
show_registers(regs);
- dump("NMI Watchdog detected LOCKUP", regs);
printk("console shuts up ...\n");
console_silent();
spin_unlock(&nmi_print_lock);
return 0;
}
+/*
+ * Get a random word:
+ */
+static inline unsigned int get_random_int(void)
+{
+ unsigned int val = 0;
+
+ if (!exec_shield_randomize)
+ return 0;
+
+#ifdef CONFIG_X86_HAS_TSC
+ rdtscl(val);
+#endif
+ val += current->pid + jiffies + (int)&val;
+
+ /*
+ * Use IP's RNG. It suits our purpose perfectly: it re-keys itself
+ * every second, from the entropy pool (and thus creates a limited
+ * drain on it), and uses halfMD4Transform within the second. We
+ * also spice it with the TSC (if available), jiffies, PID and the
+ * stack address:
+ */
+ return secure_ip_id(val);
+}
unsigned long arch_align_stack(unsigned long sp)
{
return sp & ~0xf;
}
+#if SHLIB_BASE >= 0x01000000
+# error SHLIB_BASE must be under 16MB!
+#endif
+
+static unsigned long
+arch_get_unmapped_nonexecutable_area(struct mm_struct *mm, unsigned long addr, unsigned long len)
+{
+ struct vm_area_struct *vma, *prev_vma;
+ unsigned long stack_limit;
+ int first_time = 1;
+
+ if (!mm->mmap_top) {
+ printk("hm, %s:%d, !mmap_top.\n", current->comm, current->pid);
+ mm->mmap_top = mmap_top();
+ }
+ stack_limit = mm->mmap_top;
+
+ /* requested length too big for entire address space */
+ if (len > TASK_SIZE)
+ return -ENOMEM;
+
+ /* dont allow allocations above current stack limit */
+ if (mm->non_executable_cache > stack_limit)
+ mm->non_executable_cache = stack_limit;
+
+ /* requesting a specific address */
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(mm, addr);
+ if (TASK_SIZE - len >= addr &&
+ (!vma || addr + len <= vma->vm_start))
+ return addr;
+ }
+
+ /* make sure it can fit in the remaining address space */
+ if (mm->non_executable_cache < len)
+ return -ENOMEM;
+
+ /* either no address requested or cant fit in requested address hole */
+try_again:
+ addr = (mm->non_executable_cache - len)&PAGE_MASK;
+ do {
+ if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
+ return -ENOMEM;
+
+ /* new region fits between prev_vma->vm_end and vma->vm_start, use it */
+ if (addr+len <= vma->vm_start && (!prev_vma || (addr >= prev_vma->vm_end))) {
+ /* remember the address as a hint for next time */
+ mm->non_executable_cache = addr;
+ return addr;
+
+ /* pull non_executable_cache down to the first hole */
+ } else if (mm->non_executable_cache == vma->vm_end)
+ mm->non_executable_cache = vma->vm_start;
+
+ /* try just below the current vma->vm_start */
+ addr = vma->vm_start-len;
+ } while (len <= vma->vm_start);
+ /* if hint left us with no space for the requested mapping try again */
+ if (first_time) {
+ first_time = 0;
+ mm->non_executable_cache = stack_limit;
+ goto try_again;
+ }
+ return -ENOMEM;
+}
+
+static unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len)
+{
+ unsigned long range = end - len - start;
+ if (end <= start + len)
+ return 0;
+ return PAGE_ALIGN(get_random_int() % range + start);
+}
+
+static inline unsigned long
+stock_arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ unsigned long start_addr;
+
+ if (len > TASK_SIZE)
+ return -ENOMEM;
+
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(mm, addr);
+ if (TASK_SIZE - len >= addr &&
+ (!vma || addr + len <= vma->vm_start))
+ return addr;
+ }
+ start_addr = addr = mm->free_area_cache;
+
+full_search:
+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
+ /* At this point: (!vma || addr < vma->vm_end). */
+ if (TASK_SIZE - len < addr) {
+ /*
+ * Start a new search - just in case we missed
+ * some holes.
+ */
+ if (start_addr != TASK_UNMAPPED_BASE) {
+ start_addr = addr = TASK_UNMAPPED_BASE;
+ goto full_search;
+ }
+ return -ENOMEM;
+ }
+ if (!vma || addr + len <= vma->vm_start) {
+ /*
+ * Remember the place where we stopped the search:
+ */
+ mm->free_area_cache = addr + len;
+ return addr;
+ }
+ addr = vma->vm_end;
+ }
+}
+
+unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
+ unsigned long len0, unsigned long pgoff, unsigned long flags,
+ unsigned long prot)
+{
+ unsigned long addr = addr0, len = len0;
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ int ascii_shield = 0;
+ unsigned long tmp;
+
+ /*
+ * Fall back to the old layout:
+ */
+ if (!(current->flags & PF_RELOCEXEC))
+ return stock_arch_get_unmapped_area(filp, addr0, len0, pgoff, flags);
+ if (len > TASK_SIZE)
+ return -ENOMEM;
+
+ if (!addr && (prot & PROT_EXEC) && !(flags & MAP_FIXED))
+ addr = randomize_range(SHLIB_BASE, 0x01000000, len);
+
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(mm, addr);
+ if (TASK_SIZE - len >= addr &&
+ (!vma || addr + len <= vma->vm_start)) {
+ return addr;
+ }
+ }
+
+ if (prot & PROT_EXEC) {
+ ascii_shield = 1;
+ addr = SHLIB_BASE;
+ } else {
+ /* this can fail if the stack was unlimited */
+ if ((tmp = arch_get_unmapped_nonexecutable_area(mm, addr, len)) != -ENOMEM)
+ return tmp;
+search_upper:
+ addr = PAGE_ALIGN(arch_align_stack(TASK_UNMAPPED_BASE));
+ }
+
+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
+ /* At this point: (!vma || addr < vma->vm_end). */
+ if (TASK_SIZE - len < addr) {
+ return -ENOMEM;
+ }
+ if (!vma || addr + len <= vma->vm_start) {
+ /*
+ * Must not let a PROT_EXEC mapping get into the
+ * brk area:
+ */
+ if (ascii_shield && (addr + len > mm->brk)) {
+ ascii_shield = 0;
+ goto search_upper;
+ }
+ /*
+ * Up until the brk area we randomize addresses
+ * as much as possible:
+ */
+ if (ascii_shield && (addr >= 0x01000000)) {
+ tmp = randomize_range(0x01000000, mm->brk, len);
+ vma = find_vma(mm, tmp);
+ if (TASK_SIZE - len >= tmp &&
+ (!vma || tmp + len <= vma->vm_start))
+ return tmp;
+ }
+ /*
+ * Ok, randomization didnt work out - return
+ * the result of the linear search:
+ */
+ return addr;
+ }
+ addr = vma->vm_end;
+ }
+}
void arch_add_exec_range(struct mm_struct *mm, unsigned long limit)
{
current->mm->brk = new_brk;
}
+/*
+ * Top of mmap area (just below the process stack).
+ * leave an at least ~128 MB hole. Randomize it.
+ */
+#define MIN_GAP (128*1024*1024)
+#define MAX_GAP (TASK_SIZE/6*5)
+
+unsigned long mmap_top(void)
+{
+ unsigned long gap = 0;
+
+ gap = current->rlim[RLIMIT_STACK].rlim_cur;
+ if (gap < MIN_GAP)
+ gap = MIN_GAP;
+ else if (gap > MAX_GAP)
+ gap = MAX_GAP;
+
+ gap = arch_align_stack(gap) & PAGE_MASK;
+
+ return TASK_SIZE - gap;
+}
+
*/
#define LOWMEMSIZE() (0x9f000)
-unsigned long crashdump_addr = 0xdeadbeef;
-
static void __init parse_cmdline_early (char ** cmdline_p)
{
char c = ' ', *to = command_line, *from = saved_command_line;
if (c == ' ' && !memcmp(from, "highmem=", 8))
highmem_pages = memparse(from+8, &from) >> PAGE_SHIFT;
- if (c == ' ' && !memcmp(from, "crashdump=", 10))
- crashdump_addr = memparse(from+10, &from);
-
c = *(from++);
if (!c)
break;
static char * __init machine_specific_memory_setup(void);
-#ifdef CONFIG_CRASH_DUMP_SOFTBOOT
-extern void crashdump_reserve(void);
-#endif
-
/*
* Determine if we were loaded by an EFI loader. If so, then we have also been
* passed the efi memmap, systab, etc., so we should use these data structures
#endif
-#ifdef CONFIG_CRASH_DUMP_SOFTBOOT
- crashdump_reserve(); /* Preserve crash dump state from prev boot */
-#endif
-
dmi_scan_machine();
#ifdef CONFIG_X86_GENERICARCH
See vsyscall-sigreturn.S. */
extern void __user __kernel_sigreturn;
extern void __user __kernel_rt_sigreturn;
+extern SYSENTER_RETURN;
static void setup_frame(int sig, struct k_sigaction *ka,
sigset_t *set, struct pt_regs * regs)
#include <linux/mc146818rtc.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
-#include <linux/dump.h>
#include <asm/mtrr.h>
#include <asm/tlbflush.h>
*/
cfg = __prepare_ICR(shortcut, vector);
- if (vector == DUMP_VECTOR) {
- /*
- * Setup DUMP IPI to be delivered as an NMI
- */
- cfg = (cfg&~APIC_VECTOR_MASK)|APIC_DM_NMI;
- }
-
/*
* Send the IPI. The write to APIC_ICR fires this off.
*/
* program the ICR
*/
cfg = __prepare_ICR(0, vector);
-
- if (vector == DUMP_VECTOR) {
- /*
- * Setup DUMP IPI to be delivered as an NMI
- */
- cfg = (cfg&~APIC_VECTOR_MASK)|APIC_DM_NMI;
- }
+
/*
* Send the IPI. The write to APIC_ICR fires this off.
*/
on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
}
-void dump_send_ipi(void)
-{
- send_IPI_allbutself(DUMP_VECTOR);
-}
-
/*
* this function sends a 'reschedule' IPI to another CPU.
* it goes straight through and wastes no time serializing
return 0;
}
-void stop_this_cpu (void * dummy)
+static void stop_this_cpu (void * dummy)
{
/*
* Remove this CPU:
local_irq_enable();
}
-EXPORT_SYMBOL(smp_send_stop);
-
/*
* Reschedule call back. Nothing to do,
* all the work is done automatically when
atomic_inc(&call_data->finished);
}
}
+
union semun fourth;
if (!ptr)
return -EINVAL;
- if (get_user(fourth.__pad, (void __user * __user *) ptr))
+ if (get_user(fourth.__pad, (void * __user *) ptr))
return -EFAULT;
return sys_semctl (first, second, third, fourth);
}
extern void SYSENTER_RETURN_OFFSET;
-unsigned int vdso_enabled = 0;
+unsigned int vdso_enabled = 1;
void map_vsyscall(void)
{
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
#include <linux/version.h>
-#include <linux/dump.h>
#ifdef CONFIG_EISA
#include <linux/ioport.h>
show_registers(regs);
if (netdump_func)
netdump_func(regs);
- dump((char *)str, regs);
bust_spinlocks(0);
die_owner = -1;
spin_unlock_irq(&die_lock);
{
printk("Uhhuh. NMI received. Dazed and confused, but trying to continue\n");
printk("You probably have a hardware problem with your RAM chips\n");
- panic("Halting\n");
+
/* Clear and disable the memory parity error line. */
clear_mem_error(reason);
}
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/init.h>
-#include <linux/dmi.h>
#include <asm/mach-bigsmp/mach_apic.h>
#include <asm/mach-bigsmp/mach_apicdef.h>
#include <asm/mach-bigsmp/mach_ipi.h>
#include <asm/mach-default/mach_mpparse.h>
-static int dmi_bigsmp; /* can be set by dmi scanners */
-
-static __init int hp_ht_bigsmp(struct dmi_system_id *d)
-{
-#ifdef CONFIG_X86_GENERICARCH
- printk(KERN_NOTICE "%s detected: force use of apic=bigsmp\n", d->ident);
- dmi_bigsmp = 1;
-#endif
- return 0;
-}
-
-
-static struct dmi_system_id __initdata bigsmp_dmi_table[] = {
- { hp_ht_bigsmp, "HP ProLiant DL760 G2", {
- DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
- DMI_MATCH(DMI_BIOS_VERSION, "P44-"),
- }},
-
- { hp_ht_bigsmp, "HP ProLiant DL740", {
- DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
- DMI_MATCH(DMI_BIOS_VERSION, "P47-"),
- }},
- { }
-};
-
+int dmi_bigsmp; /* can be set by dmi scanners */
static __init int probe_bigsmp(void)
{
- dmi_check_system(bigsmp_dmi_table);
return dmi_bigsmp;
}
# Makefile for the linux i386-specific parts of the memory manager.
#
-obj-y := init.o pgtable.o fault.o ioremap.o extable.o pageattr.o mmap.o
+obj-y := init.o pgtable.o fault.o ioremap.o extable.o pageattr.o
obj-$(CONFIG_DISCONTIGMEM) += discontig.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
void __init set_max_mapnr_init(void)
{
#ifdef CONFIG_HIGHMEM
- struct zone *high0 = &NODE_DATA(0)->node_zones[ZONE_HIGHMEM];
- if (high0->spanned_pages > 0)
- highmem_start_page = high0->zone_mem_map;
- else
- highmem_start_page = pfn_to_page(max_low_pfn+1);
+ highmem_start_page = NODE_DATA(0)->node_zones[ZONE_HIGHMEM].zone_mem_map;
num_physpages = highend_pfn;
#else
num_physpages = max_low_pfn;
extern int is_available_memory(efi_memory_desc_t *);
-static inline int page_is_ram(unsigned long pagenr)
+int page_is_ram(unsigned long pagenr)
{
int i;
unsigned long addr, end;
return 0;
}
-/* To enable modules to check if a page is in RAM */
-int pfn_is_ram(unsigned long pfn)
-{
- return (page_is_ram(pfn));
-}
-
-
-/*
- * devmem_is_allowed() checks to see if /dev/mem access to a certain address is
- * valid. The argument is a physical page number.
- *
- *
- * On x86, access has to be given to the first megabyte of ram because that area
- * contains bios code and data regions used by X and dosemu and similar apps.
- * Access has to be given to non-kernel-ram areas as well, these contain the PCI
- * mmio resources as well as potential bios/acpi data regions.
- */
-int devmem_is_allowed(unsigned long pagenr)
-{
- if (pagenr <= 256)
- return 1;
- if (!page_is_ram(pagenr))
- return 1;
- return 0;
-}
-
-
pte_t *kmap_pte;
EXPORT_SYMBOL(kmap_pte);
+++ /dev/null
-/*
- * linux/arch/i386/mm/mmap.c
- *
- * flexible mmap layout support
- *
- * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- *
- * Started by Ingo Molnar <mingo@elte.hu>
- */
-
-#include <linux/personality.h>
-#include <linux/mm.h>
-
-/*
- * Top of mmap area (just below the process stack).
- *
- * Leave an at least ~128 MB hole.
- */
-#define MIN_GAP (128*1024*1024)
-#define MAX_GAP (TASK_SIZE/6*5)
-
-static inline unsigned long mmap_base(struct mm_struct *mm)
-{
- unsigned long gap = current->rlim[RLIMIT_STACK].rlim_cur;
-
- if (gap < MIN_GAP)
- gap = MIN_GAP;
- else if (gap > MAX_GAP)
- gap = MAX_GAP;
-
- return TASK_SIZE - (gap & PAGE_MASK);
-}
-
-/*
- * This function, called very early during the creation of a new
- * process VM image, sets up which VM layout function to use:
- */
-void arch_pick_mmap_layout(struct mm_struct *mm)
-{
- /*
- * Fall back to the standard layout if the personality
- * bit is set, or if the expected stack growth is unlimited:
- */
- if (sysctl_legacy_va_layout || (current->personality & ADDR_COMPAT_LAYOUT) ||
- current->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY) {
- mm->mmap_base = TASK_UNMAPPED_BASE;
- mm->get_unmapped_area = arch_get_unmapped_area;
- mm->unmap_area = arch_unmap_area;
- } else {
- mm->mmap_base = mmap_base(mm);
- mm->get_unmapped_area = arch_get_unmapped_area_topdown;
- mm->get_unmapped_exec_area = arch_get_unmapped_exec_area;
- mm->unmap_area = arch_unmap_area_topdown;
- }
-}
if ( dev2->irq && dev2->irq != irq && \
(!(pci_probe & PCI_USE_PIRQ_MASK) || \
((1 << dev2->irq) & mask)) ) {
-#ifndef CONFIG_PCI_MSI
+#ifndef CONFIG_PCI_USE_VECTOR
printk(KERN_INFO "IRQ routing conflict for %s, have irq %d, want irq %d\n",
pci_name(dev2), dev2->irq, irq);
#endif
}
dev = temp_dev;
if (irq >= 0) {
-#ifdef CONFIG_PCI_MSI
+#ifdef CONFIG_PCI_USE_VECTOR
if (!platform_legacy_irq(irq))
irq = IO_APIC_VECTOR(irq);
#endif
config DISCONTIGMEM
bool "Discontiguous memory support"
- depends on (IA64_DIG || IA64_SGI_SN2 || IA64_GENERIC || IA64_HP_ZX1) && NUMA && VIRTUAL_MEM_MAP
+ depends on (IA64_DIG || IA64_SGI_SN2 || IA64_GENERIC) && NUMA && VIRTUAL_MEM_MAP
default y if (IA64_SGI_SN2 || IA64_GENERIC) && NUMA
help
Say Y to support efficient handling of discontiguous physical memory,
See <file:Documentation/vm/numa> for more.
config IA64_CYCLONE
- bool "Cyclone (EXA) Time Source support"
+ bool "Support Cyclone(EXA) Time Source"
help
- Say Y here to enable support for IBM EXA Cyclone time source.
- If you're unsure, answer N.
+ Say Y here to enable support for IBM EXA Cyclone time source.
+ If you're unsure, answer N.
config IOSAPIC
bool
core-$(CONFIG_IA64_DIG) += arch/ia64/dig/
core-$(CONFIG_IA64_GENERIC) += arch/ia64/dig/
core-$(CONFIG_IA64_HP_ZX1) += arch/ia64/dig/
-core-$(CONFIG_IA64_SGI_SN2) += arch/ia64/sn/
-
+ifeq ($(CONFIG_DISCONTIGMEM),y)
+ core-$(CONFIG_IA64_SGI_SN2) += arch/ia64/sn/
+endif
drivers-$(CONFIG_PCI) += arch/ia64/pci/
drivers-$(CONFIG_IA64_HP_SIM) += arch/ia64/hp/sim/
drivers-$(CONFIG_IA64_HP_ZX1) += arch/ia64/hp/common/ arch/ia64/hp/zx1/
-drivers-$(CONFIG_IA64_GENERIC) += arch/ia64/hp/common/ arch/ia64/hp/zx1/ arch/ia64/hp/sim/ arch/ia64/sn/
+drivers-$(CONFIG_IA64_GENERIC) += arch/ia64/hp/common/ arch/ia64/hp/zx1/ arch/ia64/hp/sim/
+ifeq ($(CONFIG_DISCONTIGMEM),y)
+drivers-$(CONFIG_IA64_GENERIC) += arch/ia64/sn/
+endif
drivers-$(CONFIG_OPROFILE) += arch/ia64/oprofile/
boot := arch/ia64/hp/sim/boot
#
CONFIG_EXPERIMENTAL=y
CONFIG_CLEAN_COMPILE=y
+CONFIG_STANDALONE=y
#
# General setup
#
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
-CONFIG_POSIX_MQUEUE=y
# CONFIG_BSD_PROCESS_ACCT is not set
CONFIG_SYSCTL=y
-# CONFIG_AUDIT is not set
CONFIG_LOG_BUF_SHIFT=20
-CONFIG_HOTPLUG=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
# CONFIG_EMBEDDED is not set
CONFIG_KALLSYMS=y
-CONFIG_KALLSYMS_ALL=y
-# CONFIG_KALLSYMS_EXTRA_PASS is not set
CONFIG_FUTEX=y
CONFIG_EPOLL=y
CONFIG_IOSCHED_NOOP=y
CONFIG_IOSCHED_AS=y
CONFIG_IOSCHED_DEADLINE=y
-CONFIG_IOSCHED_CFQ=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
#
CONFIG_OBSOLETE_MODPARM=y
CONFIG_MODVERSIONS=y
CONFIG_KMOD=y
-CONFIG_STOP_MACHINE=y
#
# Processor type and features
CONFIG_MMU=y
CONFIG_RWSEM_XCHGADD_ALGORITHM=y
CONFIG_TIME_INTERPOLATION=y
-CONFIG_EFI=y
+# CONFIG_ITANIUM is not set
+CONFIG_MCKINLEY=y
CONFIG_IA64_GENERIC=y
# CONFIG_IA64_DIG is not set
+# CONFIG_IA64_HP_SIM is not set
# CONFIG_IA64_HP_ZX1 is not set
# CONFIG_IA64_SGI_SN2 is not set
-# CONFIG_IA64_HP_SIM is not set
-# CONFIG_ITANIUM is not set
-CONFIG_MCKINLEY=y
# CONFIG_IA64_PAGE_SIZE_4KB is not set
# CONFIG_IA64_PAGE_SIZE_8KB is not set
CONFIG_IA64_PAGE_SIZE_16KB=y
# CONFIG_IA64_PAGE_SIZE_64KB is not set
+CONFIG_ACPI=y
+CONFIG_ACPI_INTERPRETER=y
+CONFIG_ACPI_KERNEL_CONFIG=y
CONFIG_IA64_L1_CACHE_SHIFT=7
+# CONFIG_MCKINLEY_ASTEP_SPECIFIC is not set
CONFIG_NUMA=y
-CONFIG_VIRTUAL_MEM_MAP=y
CONFIG_DISCONTIGMEM=y
-CONFIG_IA64_CYCLONE=y
+CONFIG_VIRTUAL_MEM_MAP=y
+CONFIG_IA64_MCA=y
+CONFIG_PM=y
CONFIG_IOSAPIC=y
CONFIG_FORCE_MAX_ZONEORDER=18
+# CONFIG_HUGETLB_PAGE_SIZE_4GB is not set
+# CONFIG_HUGETLB_PAGE_SIZE_1GB is not set
+# CONFIG_HUGETLB_PAGE_SIZE_256MB is not set
+# CONFIG_HUGETLB_PAGE_SIZE_64MB is not set
+CONFIG_HUGETLB_PAGE_SIZE_16MB=y
+# CONFIG_HUGETLB_PAGE_SIZE_4MB is not set
+# CONFIG_HUGETLB_PAGE_SIZE_1MB is not set
+# CONFIG_HUGETLB_PAGE_SIZE_256KB is not set
+# CONFIG_IA64_PAL_IDLE is not set
CONFIG_SMP=y
-CONFIG_NR_CPUS=512
-CONFIG_HOTPLUG_CPU=y
# CONFIG_PREEMPT is not set
-CONFIG_HAVE_DEC_LOCK=y
CONFIG_IA32_SUPPORT=y
CONFIG_COMPAT=y
+CONFIG_HAVE_DEC_LOCK=y
CONFIG_PERFMON=y
CONFIG_IA64_PALINFO=y
-
-#
-# Firmware Drivers
-#
+CONFIG_EFI=y
CONFIG_EFI_VARS=y
-CONFIG_EFI_PCDP=y
+CONFIG_NR_CPUS=512
CONFIG_BINFMT_ELF=y
CONFIG_BINFMT_MISC=m
-#
-# Power management and ACPI
-#
-CONFIG_PM=y
-CONFIG_ACPI=y
-
#
# ACPI (Advanced Configuration and Power Interface) Support
#
CONFIG_ACPI_BOOT=y
-CONFIG_ACPI_INTERPRETER=y
CONFIG_ACPI_BUTTON=m
CONFIG_ACPI_FAN=m
CONFIG_ACPI_PROCESSOR=m
CONFIG_ACPI_POWER=y
CONFIG_ACPI_PCI=y
CONFIG_ACPI_SYSTEM=y
-
-#
-# Bus options (PCI, PCMCIA)
-#
CONFIG_PCI=y
CONFIG_PCI_DOMAINS=y
-# CONFIG_PCI_MSI is not set
CONFIG_PCI_LEGACY_PROC=y
CONFIG_PCI_NAMES=y
+CONFIG_HOTPLUG=y
#
# PCI Hotplug Support
# CONFIG_HOTPLUG_PCI_FAKE is not set
CONFIG_HOTPLUG_PCI_ACPI=m
# CONFIG_HOTPLUG_PCI_CPCI is not set
-# CONFIG_HOTPLUG_PCI_PCIE is not set
-# CONFIG_HOTPLUG_PCI_SHPC is not set
#
# PCMCIA/CardBus support
# CONFIG_PCMCIA is not set
#
-# Device Drivers
+# Parallel port support
#
+# CONFIG_PARPORT is not set
#
# Generic Driver Options
#
-CONFIG_STANDALONE=y
-CONFIG_PREVENT_FIRMWARE_BUILD=y
# CONFIG_FW_LOADER is not set
-# CONFIG_DEBUG_DRIVER is not set
#
# Memory Technology Devices (MTD)
#
# CONFIG_MTD is not set
-#
-# Parallel port support
-#
-# CONFIG_PARPORT is not set
-
#
# Plug and Play support
#
+# CONFIG_PNP is not set
#
# Block devices
#
+# CONFIG_BLK_DEV_FD is not set
# CONFIG_BLK_CPQ_DA is not set
# CONFIG_BLK_CPQ_CISS_DA is not set
# CONFIG_BLK_DEV_DAC960 is not set
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_NBD=m
-# CONFIG_BLK_DEV_SX8 is not set
CONFIG_BLK_DEV_RAM=m
CONFIG_BLK_DEV_RAM_SIZE=4096
+# CONFIG_BLK_DEV_INITRD is not set
#
# ATA/ATAPI/MFM/RLL support
#
# Please see Documentation/ide.txt for help/info on IDE drives
#
-# CONFIG_BLK_DEV_IDE_SATA is not set
CONFIG_BLK_DEV_IDEDISK=y
# CONFIG_IDEDISK_MULTI_MODE is not set
+# CONFIG_IDEDISK_STROKE is not set
CONFIG_BLK_DEV_IDECD=y
# CONFIG_BLK_DEV_IDETAPE is not set
CONFIG_BLK_DEV_IDEFLOPPY=y
#
# IDE chipset support/bugfixes
#
-CONFIG_IDE_GENERIC=y
CONFIG_BLK_DEV_IDEPCI=y
# CONFIG_IDEPCI_SHARE_IRQ is not set
# CONFIG_BLK_DEV_OFFBOARD is not set
# CONFIG_BLK_DEV_PDC202XX_OLD is not set
# CONFIG_BLK_DEV_PDC202XX_NEW is not set
# CONFIG_BLK_DEV_SVWKS is not set
-CONFIG_BLK_DEV_SGIIOC4=y
# CONFIG_BLK_DEV_SIIMAGE is not set
# CONFIG_BLK_DEV_SLC90E66 is not set
# CONFIG_BLK_DEV_TRM290 is not set
# CONFIG_BLK_DEV_VIA82CXXX is not set
-# CONFIG_IDE_ARM is not set
CONFIG_BLK_DEV_IDEDMA=y
# CONFIG_IDEDMA_IVB is not set
CONFIG_IDEDMA_AUTO=y
# CONFIG_BLK_DEV_HD is not set
+#
+# IEEE 1394 (FireWire) support (EXPERIMENTAL)
+#
+# CONFIG_IEEE1394 is not set
+
+#
+# I2O device support
+#
+# CONFIG_I2O is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=m
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+CONFIG_MD_RAID5=m
+CONFIG_MD_MULTIPATH=m
+CONFIG_BLK_DEV_DM=m
+
+#
+# Fusion MPT device support
+#
+CONFIG_FUSION=y
+CONFIG_FUSION_BOOT=y
+CONFIG_FUSION_MAX_SGE=40
+# CONFIG_FUSION_ISENSE is not set
+# CONFIG_FUSION_CTL is not set
+
#
# SCSI device support
#
# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
#
# CONFIG_SCSI_MULTI_LUN is not set
+CONFIG_SCSI_REPORT_LUNS=y
# CONFIG_SCSI_CONSTANTS is not set
# CONFIG_SCSI_LOGGING is not set
-#
-# SCSI Transport Attributes
-#
-CONFIG_SCSI_SPI_ATTRS=y
-CONFIG_SCSI_FC_ATTRS=y
-
#
# SCSI low-level drivers
#
# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
-# CONFIG_SCSI_3W_9XXX is not set
# CONFIG_SCSI_ACARD is not set
# CONFIG_SCSI_AACRAID is not set
# CONFIG_SCSI_AIC7XXX is not set
# CONFIG_SCSI_AIC7XXX_OLD is not set
# CONFIG_SCSI_AIC79XX is not set
+# CONFIG_SCSI_ADVANSYS is not set
# CONFIG_SCSI_MEGARAID is not set
# CONFIG_SCSI_SATA is not set
# CONFIG_SCSI_BUSLOGIC is not set
+# CONFIG_SCSI_CPQFCTS is not set
# CONFIG_SCSI_DMX3191D is not set
# CONFIG_SCSI_EATA is not set
# CONFIG_SCSI_EATA_PIO is not set
CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
# CONFIG_SCSI_SYM53C8XX_IOMAPPED is not set
-# CONFIG_SCSI_IPR is not set
# CONFIG_SCSI_QLOGIC_ISP is not set
CONFIG_SCSI_QLOGIC_FC=y
# CONFIG_SCSI_QLOGIC_FC_FIRMWARE is not set
CONFIG_SCSI_QLOGIC_1280=y
-CONFIG_SCSI_QLA2XXX=y
-CONFIG_SCSI_QLA21XX=m
-CONFIG_SCSI_QLA22XX=m
-CONFIG_SCSI_QLA2300=m
-CONFIG_SCSI_QLA2322=m
-# CONFIG_SCSI_QLA6312 is not set
-# CONFIG_SCSI_QLA6322 is not set
# CONFIG_SCSI_DC395x is not set
# CONFIG_SCSI_DC390T is not set
+# CONFIG_SCSI_NSP32 is not set
# CONFIG_SCSI_DEBUG is not set
-#
-# Multi-device support (RAID and LVM)
-#
-CONFIG_MD=y
-CONFIG_BLK_DEV_MD=m
-CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
-CONFIG_MD_RAID1=m
-CONFIG_MD_RAID5=m
-CONFIG_MD_RAID6=m
-CONFIG_MD_MULTIPATH=m
-CONFIG_BLK_DEV_DM=m
-CONFIG_DM_CRYPT=m
-CONFIG_DM_SNAPSHOT=m
-CONFIG_DM_MIRROR=m
-CONFIG_DM_ZERO=m
-
-#
-# Fusion MPT device support
-#
-CONFIG_FUSION=y
-CONFIG_FUSION_MAX_SGE=40
-# CONFIG_FUSION_ISENSE is not set
-# CONFIG_FUSION_CTL is not set
-
-#
-# IEEE 1394 (FireWire) support
-#
-# CONFIG_IEEE1394 is not set
-
-#
-# I2O device support
-#
-# CONFIG_I2O is not set
-
#
# Networking support
#
# CONFIG_NET_IPGRE is not set
# CONFIG_IP_MROUTE is not set
CONFIG_ARPD=y
+# CONFIG_INET_ECN is not set
CONFIG_SYN_COOKIES=y
# CONFIG_INET_AH is not set
# CONFIG_INET_ESP is not set
# CONFIG_INET_IPCOMP is not set
# CONFIG_IPV6 is not set
+# CONFIG_DECNET is not set
+# CONFIG_BRIDGE is not set
# CONFIG_NETFILTER is not set
#
# SCTP Configuration (EXPERIMENTAL)
#
+CONFIG_IPV6_SCTP__=y
# CONFIG_IP_SCTP is not set
# CONFIG_ATM is not set
-# CONFIG_BRIDGE is not set
# CONFIG_VLAN_8021Q is not set
-# CONFIG_DECNET is not set
# CONFIG_LLC2 is not set
# CONFIG_IPX is not set
# CONFIG_ATALK is not set
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# QoS and/or fair queueing
#
# CONFIG_NET_SCHED is not set
-# CONFIG_NET_CLS_ROUTE is not set
#
# Network testing
#
# CONFIG_NET_PKTGEN is not set
-CONFIG_NETPOLL=y
-# CONFIG_NETPOLL_RX is not set
-# CONFIG_NETPOLL_TRAP is not set
-CONFIG_NET_POLL_CONTROLLER=y
-# CONFIG_HAMRADIO is not set
-# CONFIG_IRDA is not set
-# CONFIG_BT is not set
CONFIG_NETDEVICES=y
-CONFIG_DUMMY=m
-# CONFIG_BONDING is not set
-# CONFIG_EQUALIZER is not set
-# CONFIG_TUN is not set
-# CONFIG_ETHERTAP is not set
#
# ARCnet devices
#
# CONFIG_ARCNET is not set
+CONFIG_DUMMY=m
+# CONFIG_BONDING is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_ETHERTAP is not set
#
# Ethernet (10 or 100Mbit)
CONFIG_TULIP=m
# CONFIG_TULIP_MWI is not set
# CONFIG_TULIP_MMIO is not set
-# CONFIG_TULIP_NAPI is not set
# CONFIG_DE4X5 is not set
# CONFIG_WINBOND_840 is not set
# CONFIG_DM9102 is not set
# CONFIG_AMD8111_ETH is not set
# CONFIG_ADAPTEC_STARFIRE is not set
# CONFIG_B44 is not set
-# CONFIG_FORCEDETH is not set
# CONFIG_DGRS is not set
CONFIG_EEPRO100=m
# CONFIG_EEPRO100_PIO is not set
CONFIG_E100=m
-# CONFIG_E100_NAPI is not set
# CONFIG_FEALNX is not set
# CONFIG_NATSEMI is not set
# CONFIG_NE2K_PCI is not set
# CONFIG_EPIC100 is not set
# CONFIG_SUNDANCE is not set
# CONFIG_VIA_RHINE is not set
-# CONFIG_VIA_VELOCITY is not set
#
# Ethernet (1000 Mbit)
# CONFIG_HAMACHI is not set
# CONFIG_YELLOWFIN is not set
# CONFIG_R8169 is not set
+# CONFIG_SIS190 is not set
# CONFIG_SK98LIN is not set
CONFIG_TIGON3=y
# Ethernet (10000 Mbit)
#
# CONFIG_IXGB is not set
-# CONFIG_S2IO is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
#
-# Token Ring devices
+# Wireless LAN (non-hamradio)
#
-# CONFIG_TR is not set
+# CONFIG_NET_RADIO is not set
#
-# Wireless LAN (non-hamradio)
+# Token Ring devices
#
-# CONFIG_NET_RADIO is not set
+# CONFIG_TR is not set
+# CONFIG_NET_FC is not set
+# CONFIG_SHAPER is not set
#
# Wan interfaces
#
# CONFIG_WAN is not set
-# CONFIG_FDDI is not set
-# CONFIG_HIPPI is not set
-# CONFIG_PPP is not set
-# CONFIG_SLIP is not set
-# CONFIG_NET_FC is not set
-# CONFIG_SHAPER is not set
-CONFIG_NETCONSOLE=y
#
-# ISDN subsystem
+# Amateur Radio support
#
-# CONFIG_ISDN is not set
+# CONFIG_HAMRADIO is not set
+
+#
+# IrDA (infrared) support
+#
+# CONFIG_IRDA is not set
#
-# Telephony Support
+# Bluetooth support
#
-# CONFIG_PHONE is not set
+# CONFIG_BT is not set
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN_BOOL is not set
#
# Input device support
CONFIG_INPUT_KEYBOARD=y
CONFIG_KEYBOARD_ATKBD=y
# CONFIG_KEYBOARD_SUNKBD is not set
-# CONFIG_KEYBOARD_LKKBD is not set
# CONFIG_KEYBOARD_XTKBD is not set
# CONFIG_KEYBOARD_NEWTON is not set
CONFIG_INPUT_MOUSE=y
CONFIG_MOUSE_PS2=y
# CONFIG_MOUSE_SERIAL is not set
-# CONFIG_MOUSE_VSXXXAA is not set
# CONFIG_INPUT_JOYSTICK is not set
# CONFIG_INPUT_TOUCHSCREEN is not set
# CONFIG_INPUT_MISC is not set
CONFIG_HW_CONSOLE=y
CONFIG_SERIAL_NONSTANDARD=y
# CONFIG_ROCKETPORT is not set
-# CONFIG_CYCLADES is not set
# CONFIG_SYNCLINK is not set
# CONFIG_SYNCLINKMP is not set
# CONFIG_N_HDLC is not set
# CONFIG_STALDRV is not set
+CONFIG_SGI_L1_SERIAL=y
+CONFIG_SGI_L1_SERIAL_CONSOLE=y
#
# Serial drivers
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_ACPI=y
+CONFIG_SERIAL_8250_HCDP=y
CONFIG_SERIAL_8250_NR_UARTS=6
CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
#
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
-CONFIG_SERIAL_SGI_L1_CONSOLE=y
CONFIG_UNIX98_PTYS=y
-CONFIG_LEGACY_PTYS=y
-CONFIG_LEGACY_PTY_COUNT=256
+CONFIG_UNIX98_PTY_COUNT=256
+
+#
+# I2C support
+#
+# CONFIG_I2C is not set
+
+#
+# I2C Algorithms
+#
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# I2C Hardware Sensors Chip support
+#
+# CONFIG_I2C_SENSOR is not set
+
+#
+# Mice
+#
+# CONFIG_BUSMOUSE is not set
# CONFIG_QIC02_TAPE is not set
#
#
# CONFIG_WATCHDOG is not set
# CONFIG_HW_RANDOM is not set
+# CONFIG_NVRAM is not set
+# CONFIG_GEN_RTC is not set
CONFIG_EFI_RTC=y
# CONFIG_DTLK is not set
# CONFIG_R3964 is not set
CONFIG_DRM_MGA=m
CONFIG_DRM_SIS=m
CONFIG_RAW_DRIVER=m
-CONFIG_HPET=y
-# CONFIG_HPET_RTC_IRQ is not set
-CONFIG_HPET_MMAP=y
CONFIG_MAX_RAW_DEVS=256
#
-# I2C support
+# Multimedia devices
#
-# CONFIG_I2C is not set
+# CONFIG_VIDEO_DEV is not set
#
-# Dallas's 1-wire bus
+# Digital Video Broadcasting Devices
#
-# CONFIG_W1 is not set
+# CONFIG_DVB is not set
#
-# Misc devices
+# File systems
#
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_XATTR=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+CONFIG_REISERFS_FS=m
+# CONFIG_REISERFS_CHECK is not set
+# CONFIG_REISERFS_PROC_INFO is not set
+# CONFIG_JFS_FS is not set
+CONFIG_FS_POSIX_ACL=y
+CONFIG_XFS_FS=y
+# CONFIG_XFS_RT is not set
+# CONFIG_XFS_QUOTA is not set
+# CONFIG_XFS_POSIX_ACL is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_QUOTA is not set
+CONFIG_AUTOFS_FS=y
+CONFIG_AUTOFS4_FS=y
#
-# Multimedia devices
+# CD-ROM/DVD Filesystems
#
-# CONFIG_VIDEO_DEV is not set
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+# CONFIG_ZISOFS is not set
+CONFIG_UDF_FS=m
#
-# Digital Video Broadcasting Devices
+# DOS/FAT/NT Filesystems
#
-# CONFIG_DVB is not set
+CONFIG_FAT_FS=y
+# CONFIG_MSDOS_FS is not set
+CONFIG_VFAT_FS=y
+CONFIG_NTFS_FS=m
+# CONFIG_NTFS_DEBUG is not set
+# CONFIG_NTFS_RW is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+# CONFIG_DEVFS_FS is not set
+CONFIG_DEVPTS_FS=y
+# CONFIG_DEVPTS_FS_XATTR is not set
+CONFIG_TMPFS=y
+CONFIG_HUGETLBFS=y
+CONFIG_HUGETLB_PAGE=y
+CONFIG_RAMFS=y
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Network File Systems
+#
+CONFIG_NFS_FS=m
+CONFIG_NFS_V3=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_DIRECTIO=y
+CONFIG_NFSD=m
+CONFIG_NFSD_V3=y
+CONFIG_NFSD_V4=y
+CONFIG_NFSD_TCP=y
+CONFIG_LOCKD=m
+CONFIG_LOCKD_V4=y
+CONFIG_EXPORTFS=m
+CONFIG_SUNRPC=m
+# CONFIG_SUNRPC_GSS is not set
+CONFIG_SMB_FS=m
+CONFIG_SMB_NLS_DEFAULT=y
+CONFIG_SMB_NLS_REMOTE="cp437"
+CONFIG_CIFS=m
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_INTERMEZZO_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_BSD_DISKLABEL is not set
+# CONFIG_MINIX_SUBPARTITION is not set
+# CONFIG_SOLARIS_X86_PARTITION is not set
+# CONFIG_UNIXWARE_DISKLABEL is not set
+# CONFIG_LDM_PARTITION is not set
+# CONFIG_NEC98_PARTITION is not set
+CONFIG_SGI_PARTITION=y
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+CONFIG_EFI_PARTITION=y
+
+#
+# Native Language Support
+#
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_UTF8=m
#
# Graphics support
# Advanced Linux Sound Architecture
#
CONFIG_SND=m
-CONFIG_SND_TIMER=m
-CONFIG_SND_PCM=m
-CONFIG_SND_HWDEP=m
-CONFIG_SND_RAWMIDI=m
CONFIG_SND_SEQUENCER=m
CONFIG_SND_SEQ_DUMMY=m
CONFIG_SND_OSSEMUL=y
#
# Generic devices
#
-CONFIG_SND_MPU401_UART=m
-CONFIG_SND_OPL3_LIB=m
CONFIG_SND_DUMMY=m
CONFIG_SND_VIRMIDI=m
CONFIG_SND_MTPAV=m
#
# PCI devices
#
-CONFIG_SND_AC97_CODEC=m
# CONFIG_SND_ALI5451 is not set
-# CONFIG_SND_ATIIXP is not set
-# CONFIG_SND_AU8810 is not set
-# CONFIG_SND_AU8820 is not set
-# CONFIG_SND_AU8830 is not set
# CONFIG_SND_AZT3328 is not set
-# CONFIG_SND_BT87X is not set
CONFIG_SND_CS46XX=m
CONFIG_SND_CS46XX_NEW_DSP=y
CONFIG_SND_CS4281=m
CONFIG_SND_EMU10K1=m
# CONFIG_SND_KORG1212 is not set
-# CONFIG_SND_MIXART is not set
# CONFIG_SND_NM256 is not set
# CONFIG_SND_RME32 is not set
# CONFIG_SND_RME96 is not set
# CONFIG_SND_ES1968 is not set
# CONFIG_SND_MAESTRO3 is not set
CONFIG_SND_FM801=m
-# CONFIG_SND_FM801_TEA575X is not set
# CONFIG_SND_ICE1712 is not set
# CONFIG_SND_ICE1724 is not set
# CONFIG_SND_INTEL8X0 is not set
-# CONFIG_SND_INTEL8X0M is not set
# CONFIG_SND_SONICVIBES is not set
# CONFIG_SND_VIA82XX is not set
# CONFIG_SND_VX222 is not set
# USB Host Controller Drivers
#
CONFIG_USB_EHCI_HCD=m
-# CONFIG_USB_EHCI_SPLIT_ISO is not set
-# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
CONFIG_USB_OHCI_HCD=m
CONFIG_USB_UHCI_HCD=m
# CONFIG_USB_PRINTER is not set
CONFIG_USB_STORAGE=m
# CONFIG_USB_STORAGE_DEBUG is not set
-# CONFIG_USB_STORAGE_RW_DETECT is not set
# CONFIG_USB_STORAGE_DATAFAB is not set
# CONFIG_USB_STORAGE_FREECOM is not set
# CONFIG_USB_STORAGE_ISD200 is not set
# CONFIG_USB_WACOM is not set
# CONFIG_USB_KBTAB is not set
# CONFIG_USB_POWERMATE is not set
-# CONFIG_USB_MTOUCH is not set
-# CONFIG_USB_EGALAX is not set
# CONFIG_USB_XPAD is not set
-# CONFIG_USB_ATI_REMOTE is not set
#
# USB Imaging devices
#
# CONFIG_USB_MDC800 is not set
+# CONFIG_USB_SCANNER is not set
# CONFIG_USB_MICROTEK is not set
# CONFIG_USB_HPUSBSCSI is not set
#
# USB Miscellaneous drivers
#
-# CONFIG_USB_EMI62 is not set
-# CONFIG_USB_EMI26 is not set
# CONFIG_USB_TIGL is not set
# CONFIG_USB_AUERSWALD is not set
# CONFIG_USB_RIO500 is not set
# CONFIG_USB_LEGOTOWER is not set
# CONFIG_USB_LCD is not set
-# CONFIG_USB_LED is not set
-# CONFIG_USB_CYTHERM is not set
-# CONFIG_USB_PHIDGETSERVO is not set
# CONFIG_USB_TEST is not set
-
-#
-# USB Gadget Support
-#
# CONFIG_USB_GADGET is not set
-#
-# File systems
-#
-CONFIG_EXT2_FS=y
-CONFIG_EXT2_FS_XATTR=y
-CONFIG_EXT2_FS_POSIX_ACL=y
-CONFIG_EXT2_FS_SECURITY=y
-CONFIG_EXT3_FS=y
-CONFIG_EXT3_FS_XATTR=y
-CONFIG_EXT3_FS_POSIX_ACL=y
-CONFIG_EXT3_FS_SECURITY=y
-CONFIG_JBD=y
-# CONFIG_JBD_DEBUG is not set
-CONFIG_FS_MBCACHE=y
-CONFIG_REISERFS_FS=m
-# CONFIG_REISERFS_CHECK is not set
-# CONFIG_REISERFS_PROC_INFO is not set
-# CONFIG_REISERFS_FS_XATTR is not set
-# CONFIG_JFS_FS is not set
-CONFIG_FS_POSIX_ACL=y
-CONFIG_XFS_FS=y
-# CONFIG_XFS_RT is not set
-# CONFIG_XFS_QUOTA is not set
-# CONFIG_XFS_SECURITY is not set
-# CONFIG_XFS_POSIX_ACL is not set
-# CONFIG_MINIX_FS is not set
-# CONFIG_ROMFS_FS is not set
-# CONFIG_QUOTA is not set
-CONFIG_AUTOFS_FS=y
-CONFIG_AUTOFS4_FS=y
-
-#
-# CD-ROM/DVD Filesystems
-#
-CONFIG_ISO9660_FS=m
-CONFIG_JOLIET=y
-# CONFIG_ZISOFS is not set
-CONFIG_UDF_FS=m
-CONFIG_UDF_NLS=y
-
-#
-# DOS/FAT/NT Filesystems
-#
-CONFIG_FAT_FS=y
-# CONFIG_MSDOS_FS is not set
-CONFIG_VFAT_FS=y
-CONFIG_NTFS_FS=m
-# CONFIG_NTFS_DEBUG is not set
-# CONFIG_NTFS_RW is not set
-
-#
-# Pseudo filesystems
-#
-CONFIG_PROC_FS=y
-CONFIG_PROC_KCORE=y
-CONFIG_SYSFS=y
-# CONFIG_DEVFS_FS is not set
-# CONFIG_DEVPTS_FS_XATTR is not set
-CONFIG_TMPFS=y
-CONFIG_HUGETLBFS=y
-CONFIG_HUGETLB_PAGE=y
-CONFIG_RAMFS=y
-
-#
-# Miscellaneous filesystems
-#
-# CONFIG_ADFS_FS is not set
-# CONFIG_AFFS_FS is not set
-# CONFIG_HFS_FS is not set
-# CONFIG_HFSPLUS_FS is not set
-# CONFIG_BEFS_FS is not set
-# CONFIG_BFS_FS is not set
-# CONFIG_EFS_FS is not set
-# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
-# CONFIG_CRAMFS is not set
-# CONFIG_VXFS_FS is not set
-# CONFIG_HPFS_FS is not set
-# CONFIG_QNX4FS_FS is not set
-# CONFIG_SYSV_FS is not set
-# CONFIG_UFS_FS is not set
-
-#
-# Network File Systems
-#
-CONFIG_NFS_FS=m
-CONFIG_NFS_V3=y
-CONFIG_NFS_V4=y
-CONFIG_NFS_DIRECTIO=y
-CONFIG_NFSD=m
-CONFIG_NFSD_V3=y
-CONFIG_NFSD_V4=y
-CONFIG_NFSD_TCP=y
-CONFIG_LOCKD=m
-CONFIG_LOCKD_V4=y
-CONFIG_EXPORTFS=m
-CONFIG_SUNRPC=m
-CONFIG_SUNRPC_GSS=m
-CONFIG_RPCSEC_GSS_KRB5=m
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
-CONFIG_SMB_NLS_REMOTE="cp437"
-CONFIG_CIFS=m
-# CONFIG_CIFS_STATS is not set
-# CONFIG_CIFS_XATTR is not set
-# CONFIG_CIFS_POSIX is not set
-# CONFIG_NCP_FS is not set
-# CONFIG_CODA_FS is not set
-# CONFIG_AFS_FS is not set
-
-#
-# Partition Types
-#
-CONFIG_PARTITION_ADVANCED=y
-# CONFIG_ACORN_PARTITION is not set
-# CONFIG_OSF_PARTITION is not set
-# CONFIG_AMIGA_PARTITION is not set
-# CONFIG_ATARI_PARTITION is not set
-# CONFIG_MAC_PARTITION is not set
-CONFIG_MSDOS_PARTITION=y
-# CONFIG_BSD_DISKLABEL is not set
-# CONFIG_MINIX_SUBPARTITION is not set
-# CONFIG_SOLARIS_X86_PARTITION is not set
-# CONFIG_UNIXWARE_DISKLABEL is not set
-# CONFIG_LDM_PARTITION is not set
-CONFIG_SGI_PARTITION=y
-# CONFIG_ULTRIX_PARTITION is not set
-# CONFIG_SUN_PARTITION is not set
-CONFIG_EFI_PARTITION=y
-
-#
-# Native Language Support
-#
-CONFIG_NLS=y
-CONFIG_NLS_DEFAULT="iso8859-1"
-CONFIG_NLS_CODEPAGE_437=m
-CONFIG_NLS_CODEPAGE_737=m
-CONFIG_NLS_CODEPAGE_775=m
-CONFIG_NLS_CODEPAGE_850=m
-CONFIG_NLS_CODEPAGE_852=m
-CONFIG_NLS_CODEPAGE_855=m
-CONFIG_NLS_CODEPAGE_857=m
-CONFIG_NLS_CODEPAGE_860=m
-CONFIG_NLS_CODEPAGE_861=m
-CONFIG_NLS_CODEPAGE_862=m
-CONFIG_NLS_CODEPAGE_863=m
-CONFIG_NLS_CODEPAGE_864=m
-CONFIG_NLS_CODEPAGE_865=m
-CONFIG_NLS_CODEPAGE_866=m
-CONFIG_NLS_CODEPAGE_869=m
-CONFIG_NLS_CODEPAGE_936=m
-CONFIG_NLS_CODEPAGE_950=m
-CONFIG_NLS_CODEPAGE_932=m
-CONFIG_NLS_CODEPAGE_949=m
-CONFIG_NLS_CODEPAGE_874=m
-CONFIG_NLS_ISO8859_8=m
-CONFIG_NLS_CODEPAGE_1250=m
-CONFIG_NLS_CODEPAGE_1251=m
-# CONFIG_NLS_ASCII is not set
-CONFIG_NLS_ISO8859_1=m
-CONFIG_NLS_ISO8859_2=m
-CONFIG_NLS_ISO8859_3=m
-CONFIG_NLS_ISO8859_4=m
-CONFIG_NLS_ISO8859_5=m
-CONFIG_NLS_ISO8859_6=m
-CONFIG_NLS_ISO8859_7=m
-CONFIG_NLS_ISO8859_9=m
-CONFIG_NLS_ISO8859_13=m
-CONFIG_NLS_ISO8859_14=m
-CONFIG_NLS_ISO8859_15=m
-CONFIG_NLS_KOI8_R=m
-CONFIG_NLS_KOI8_U=m
-CONFIG_NLS_UTF8=m
-
#
# Library routines
#
-# CONFIG_CRC_CCITT is not set
CONFIG_CRC32=y
-# CONFIG_LIBCRC32C is not set
#
# HP Simulator drivers
# CONFIG_IA64_DEBUG_CMPXCHG is not set
# CONFIG_IA64_DEBUG_IRQ is not set
# CONFIG_DEBUG_INFO is not set
-CONFIG_SYSVIPC_COMPAT=y
#
# Security options
# CONFIG_CRYPTO_HMAC is not set
# CONFIG_CRYPTO_NULL is not set
# CONFIG_CRYPTO_MD4 is not set
-CONFIG_CRYPTO_MD5=m
+# CONFIG_CRYPTO_MD5 is not set
# CONFIG_CRYPTO_SHA1 is not set
# CONFIG_CRYPTO_SHA256 is not set
# CONFIG_CRYPTO_SHA512 is not set
-CONFIG_CRYPTO_DES=m
+# CONFIG_CRYPTO_DES is not set
# CONFIG_CRYPTO_BLOWFISH is not set
# CONFIG_CRYPTO_TWOFISH is not set
# CONFIG_CRYPTO_SERPENT is not set
-# CONFIG_CRYPTO_AES_GENERIC is not set
+# CONFIG_CRYPTO_AES is not set
# CONFIG_CRYPTO_CAST5 is not set
# CONFIG_CRYPTO_CAST6 is not set
-# CONFIG_CRYPTO_TEA is not set
-# CONFIG_CRYPTO_ARC4 is not set
# CONFIG_CRYPTO_DEFLATE is not set
-# CONFIG_CRYPTO_MICHAEL_MIC is not set
-# CONFIG_CRYPTO_CRC32C is not set
# CONFIG_CRYPTO_TEST is not set
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_EMBEDDED is not set
CONFIG_KALLSYMS=y
CONFIG_KALLSYMS_ALL=y
-# CONFIG_KALLSYMS_EXTRA_PASS is not set
CONFIG_FUTEX=y
CONFIG_EPOLL=y
CONFIG_IOSCHED_NOOP=y
# Firmware Drivers
#
CONFIG_EFI_VARS=y
-# CONFIG_EFI_PCDP is not set
CONFIG_BINFMT_ELF=y
# CONFIG_BINFMT_MISC is not set
# CONFIG_SCSI_AIC7XXX is not set
# CONFIG_SCSI_AIC7XXX_OLD is not set
# CONFIG_SCSI_AIC79XX is not set
+# CONFIG_SCSI_ADVANSYS is not set
# CONFIG_SCSI_MEGARAID is not set
CONFIG_SCSI_SATA=y
# CONFIG_SCSI_SATA_SVW is not set
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_SYNCLINKMP is not set
# CONFIG_N_HDLC is not set
# CONFIG_STALDRV is not set
+CONFIG_SGI_L1_SERIAL=y
+CONFIG_SGI_L1_SERIAL_CONSOLE=y
#
# Serial drivers
#
# Non-8250 serial port support
#
-CONFIG_SERIAL_CORE=y
-CONFIG_SERIAL_SGI_L1_CONSOLE=y
CONFIG_UNIX98_PTYS=y
CONFIG_LEGACY_PTYS=y
CONFIG_LEGACY_PTY_COUNT=256
#
# CONFIG_I2C is not set
-#
-# Dallas's 1-wire bus
-#
-# CONFIG_W1 is not set
-
#
# Misc devices
#
CONFIG_JOLIET=y
# CONFIG_ZISOFS is not set
CONFIG_UDF_FS=m
-CONFIG_UDF_NLS=y
#
# DOS/FAT/NT Filesystems
# CONFIG_BEFS_FS is not set
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
-# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
# CONFIG_CRAMFS is not set
# CONFIG_VXFS_FS is not set
# CONFIG_HPFS_FS is not set
#
# Library routines
#
-# CONFIG_CRC_CCITT is not set
CONFIG_CRC32=y
# CONFIG_LIBCRC32C is not set
CONFIG_ZLIB_INFLATE=m
# CONFIG_CRYPTO_AES is not set
# CONFIG_CRYPTO_CAST5 is not set
# CONFIG_CRYPTO_CAST6 is not set
-# CONFIG_CRYPTO_TEA is not set
# CONFIG_CRYPTO_ARC4 is not set
CONFIG_CRYPTO_DEFLATE=m
# CONFIG_CRYPTO_MICHAEL_MIC is not set
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_BSD_PROCESS_ACCT=y
-# CONFIG_BSD_PROCESS_ACCT_V3 is not set
CONFIG_SYSCTL=y
# CONFIG_AUDIT is not set
CONFIG_LOG_BUF_SHIFT=16
# CONFIG_EMBEDDED is not set
CONFIG_KALLSYMS=y
# CONFIG_KALLSYMS_ALL is not set
-# CONFIG_KALLSYMS_EXTRA_PASS is not set
CONFIG_FUTEX=y
CONFIG_EPOLL=y
CONFIG_IOSCHED_NOOP=y
# Firmware Drivers
#
CONFIG_EFI_VARS=y
-CONFIG_EFI_PCDP=y
CONFIG_BINFMT_ELF=y
CONFIG_BINFMT_MISC=y
#
# Generic Driver Options
#
-CONFIG_PREVENT_FIRMWARE_BUILD=y
# CONFIG_FW_LOADER is not set
# CONFIG_DEBUG_DRIVER is not set
CONFIG_BLK_DEV_LOOP=y
# CONFIG_BLK_DEV_CRYPTOLOOP is not set
# CONFIG_BLK_DEV_NBD is not set
-# CONFIG_BLK_DEV_SX8 is not set
+# CONFIG_BLK_DEV_CARMEL is not set
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=4096
CONFIG_BLK_DEV_INITRD=y
#
# Please see Documentation/ide.txt for help/info on IDE drives
#
-# CONFIG_BLK_DEV_IDE_SATA is not set
CONFIG_BLK_DEV_IDEDISK=y
CONFIG_IDEDISK_MULTI_MODE=y
CONFIG_BLK_DEV_IDECD=y
# SCSI low-level drivers
#
# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
-# CONFIG_SCSI_3W_9XXX is not set
# CONFIG_SCSI_ACARD is not set
# CONFIG_SCSI_AACRAID is not set
# CONFIG_SCSI_AIC7XXX is not set
CONFIG_MD_MULTIPATH=m
CONFIG_BLK_DEV_DM=m
# CONFIG_DM_CRYPT is not set
-# CONFIG_DM_SNAPSHOT is not set
-# CONFIG_DM_MIRROR is not set
-# CONFIG_DM_ZERO is not set
#
# Fusion MPT device support
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# QoS and/or fair queueing
#
# CONFIG_NET_SCHED is not set
-# CONFIG_NET_CLS_ROUTE is not set
#
# Network testing
# CONFIG_EPIC100 is not set
# CONFIG_SUNDANCE is not set
# CONFIG_VIA_RHINE is not set
-# CONFIG_VIA_VELOCITY is not set
#
# Ethernet (1000 Mbit)
#
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_HCDP=y
CONFIG_SERIAL_8250_ACPI=y
CONFIG_SERIAL_8250_NR_UARTS=4
# CONFIG_SERIAL_8250_EXTENDED is not set
# CONFIG_DRM_MGA is not set
# CONFIG_DRM_SIS is not set
# CONFIG_RAW_DRIVER is not set
-# CONFIG_HPET is not set
#
# I2C support
#
# CONFIG_I2C_SENSOR is not set
# CONFIG_SENSORS_ADM1021 is not set
-# CONFIG_SENSORS_ADM1025 is not set
-# CONFIG_SENSORS_ADM1031 is not set
# CONFIG_SENSORS_ASB100 is not set
# CONFIG_SENSORS_DS1621 is not set
# CONFIG_SENSORS_FSCHER is not set
# CONFIG_SENSORS_GL518SM is not set
# CONFIG_SENSORS_IT87 is not set
# CONFIG_SENSORS_LM75 is not set
-# CONFIG_SENSORS_LM77 is not set
# CONFIG_SENSORS_LM78 is not set
# CONFIG_SENSORS_LM80 is not set
# CONFIG_SENSORS_LM83 is not set
# CONFIG_I2C_DEBUG_BUS is not set
# CONFIG_I2C_DEBUG_CHIP is not set
-#
-# Dallas's 1-wire bus
-#
-# CONFIG_W1 is not set
-
#
# Misc devices
#
# CONFIG_FB_ASILIANT is not set
# CONFIG_FB_IMSTT is not set
CONFIG_FB_RIVA=m
-CONFIG_FB_RIVA_I2C=y
# CONFIG_FB_MATROX is not set
# CONFIG_FB_RADEON_OLD is not set
CONFIG_FB_RADEON=m
# CONFIG_MDA_CONSOLE is not set
CONFIG_DUMMY_CONSOLE=y
CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_PCI_CONSOLE=y
# CONFIG_FONTS is not set
CONFIG_FONT_8x8=y
CONFIG_FONT_8x16=y
CONFIG_JOLIET=y
# CONFIG_ZISOFS is not set
CONFIG_UDF_FS=y
-CONFIG_UDF_NLS=y
#
# DOS/FAT/NT Filesystems
CONFIG_FAT_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
-CONFIG_FAT_DEFAULT_CODEPAGE=437
-CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
# CONFIG_NTFS_FS is not set
#
# CONFIG_BEFS_FS is not set
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
-# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
# CONFIG_CRAMFS is not set
# CONFIG_VXFS_FS is not set
# CONFIG_HPFS_FS is not set
CONFIG_NFSD=y
CONFIG_NFSD_V3=y
# CONFIG_NFSD_V4 is not set
-# CONFIG_NFSD_TCP is not set
+CONFIG_NFSD_TCP=y
CONFIG_LOCKD=y
CONFIG_LOCKD_V4=y
CONFIG_EXPORTFS=y
# CONFIG_SOLARIS_X86_PARTITION is not set
# CONFIG_UNIXWARE_DISKLABEL is not set
# CONFIG_LDM_PARTITION is not set
+# CONFIG_NEC98_PARTITION is not set
# CONFIG_SGI_PARTITION is not set
# CONFIG_ULTRIX_PARTITION is not set
# CONFIG_SUN_PARTITION is not set
CONFIG_NLS=y
CONFIG_NLS_DEFAULT="iso8859-1"
CONFIG_NLS_CODEPAGE_437=y
-# CONFIG_NLS_CODEPAGE_737 is not set
-# CONFIG_NLS_CODEPAGE_775 is not set
-# CONFIG_NLS_CODEPAGE_850 is not set
-# CONFIG_NLS_CODEPAGE_852 is not set
-# CONFIG_NLS_CODEPAGE_855 is not set
-# CONFIG_NLS_CODEPAGE_857 is not set
-# CONFIG_NLS_CODEPAGE_860 is not set
-# CONFIG_NLS_CODEPAGE_861 is not set
-# CONFIG_NLS_CODEPAGE_862 is not set
-# CONFIG_NLS_CODEPAGE_863 is not set
-# CONFIG_NLS_CODEPAGE_864 is not set
-# CONFIG_NLS_CODEPAGE_865 is not set
-# CONFIG_NLS_CODEPAGE_866 is not set
-# CONFIG_NLS_CODEPAGE_869 is not set
-# CONFIG_NLS_CODEPAGE_936 is not set
-# CONFIG_NLS_CODEPAGE_950 is not set
-# CONFIG_NLS_CODEPAGE_932 is not set
-# CONFIG_NLS_CODEPAGE_949 is not set
-# CONFIG_NLS_CODEPAGE_874 is not set
-# CONFIG_NLS_ISO8859_8 is not set
+CONFIG_NLS_CODEPAGE_737=y
+CONFIG_NLS_CODEPAGE_775=y
+CONFIG_NLS_CODEPAGE_850=y
+CONFIG_NLS_CODEPAGE_852=y
+CONFIG_NLS_CODEPAGE_855=y
+CONFIG_NLS_CODEPAGE_857=y
+CONFIG_NLS_CODEPAGE_860=y
+CONFIG_NLS_CODEPAGE_861=y
+CONFIG_NLS_CODEPAGE_862=y
+CONFIG_NLS_CODEPAGE_863=y
+CONFIG_NLS_CODEPAGE_864=y
+CONFIG_NLS_CODEPAGE_865=y
+CONFIG_NLS_CODEPAGE_866=y
+CONFIG_NLS_CODEPAGE_869=y
+CONFIG_NLS_CODEPAGE_936=y
+CONFIG_NLS_CODEPAGE_950=y
+CONFIG_NLS_CODEPAGE_932=y
+CONFIG_NLS_CODEPAGE_949=y
+CONFIG_NLS_CODEPAGE_874=y
+CONFIG_NLS_ISO8859_8=y
# CONFIG_NLS_CODEPAGE_1250 is not set
-# CONFIG_NLS_CODEPAGE_1251 is not set
-CONFIG_NLS_ASCII=y
+CONFIG_NLS_CODEPAGE_1251=y
CONFIG_NLS_ISO8859_1=y
-# CONFIG_NLS_ISO8859_2 is not set
-# CONFIG_NLS_ISO8859_3 is not set
-# CONFIG_NLS_ISO8859_4 is not set
-# CONFIG_NLS_ISO8859_5 is not set
-# CONFIG_NLS_ISO8859_6 is not set
-# CONFIG_NLS_ISO8859_7 is not set
-# CONFIG_NLS_ISO8859_9 is not set
-# CONFIG_NLS_ISO8859_13 is not set
-# CONFIG_NLS_ISO8859_14 is not set
-# CONFIG_NLS_ISO8859_15 is not set
-# CONFIG_NLS_KOI8_R is not set
-# CONFIG_NLS_KOI8_U is not set
-# CONFIG_NLS_UTF8 is not set
+CONFIG_NLS_ISO8859_2=y
+CONFIG_NLS_ISO8859_3=y
+CONFIG_NLS_ISO8859_4=y
+CONFIG_NLS_ISO8859_5=y
+CONFIG_NLS_ISO8859_6=y
+CONFIG_NLS_ISO8859_7=y
+CONFIG_NLS_ISO8859_9=y
+CONFIG_NLS_ISO8859_13=y
+CONFIG_NLS_ISO8859_14=y
+CONFIG_NLS_ISO8859_15=y
+CONFIG_NLS_KOI8_R=y
+CONFIG_NLS_KOI8_U=y
+CONFIG_NLS_UTF8=y
#
# Library routines
#
-# CONFIG_CRC_CCITT is not set
CONFIG_CRC32=y
# CONFIG_LIBCRC32C is not set
# CONFIG_CRYPTO_AES is not set
# CONFIG_CRYPTO_CAST5 is not set
# CONFIG_CRYPTO_CAST6 is not set
-# CONFIG_CRYPTO_TEA is not set
# CONFIG_CRYPTO_ARC4 is not set
# CONFIG_CRYPTO_DEFLATE is not set
# CONFIG_CRYPTO_MICHAEL_MIC is not set
ASSERT(((unsigned long) ioc->res_hint & (sizeof(unsigned long) - 1UL)) == 0);
ASSERT(res_ptr < res_end);
- /*
- * N.B. REO/Grande defect AR2305 can cause TLB fetch timeouts
- * if a TLB entry is purged while in use. sba_mark_invalid()
- * purges IOTLB entries in power-of-two sizes, so we also
- * allocate IOVA space in power-of-two sizes.
- */
- bits_wanted = 1UL << get_iovp_order(bits_wanted << PAGE_SHIFT);
-
if (likely(bits_wanted == 1)) {
unsigned int bitshiftcnt;
for(; res_ptr < res_end ; res_ptr++) {
int bits_not_wanted = size >> iovp_shift;
unsigned long m;
- /* Round up to power-of-two size: see AR2305 note above */
- bits_not_wanted = 1UL << get_iovp_order(bits_not_wanted << PAGE_SHIFT);
for (; bits_not_wanted > 0 ; res_ptr++) {
if (unlikely(bits_not_wanted > BITS_PER_LONG)) {
{
}
-static void
-hpsim_set_affinity_noop (unsigned int a, cpumask_t b)
-{
-}
-
static struct hw_interrupt_type irq_type_hp_sim = {
.typename = "hpsim",
.startup = hpsim_irq_startup,
.disable = hpsim_irq_noop,
.ack = hpsim_irq_noop,
.end = hpsim_irq_noop,
- .set_affinity = hpsim_set_affinity_noop,
+ .set_affinity = (void (*)(unsigned int, unsigned long)) hpsim_irq_noop,
};
void __init
printk(KERN_WARNING "%s: set_multicast_list called\n", dev->name);
}
+#ifdef CONFIG_NET_FASTROUTE
+static int
+simeth_accept_fastpath(struct net_device *dev, struct dst_entry *dst)
+{
+ printk(KERN_WARNING "%s: simeth_accept_fastpath called\n", dev->name);
+ return -1;
+}
+#endif
+
__initcall(simeth_probe);
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/security.h>
-#include <linux/vs_memory.h>
#include <asm/param.h>
#include <asm/signal.h>
#undef SET_PERSONALITY
#define SET_PERSONALITY(ex, ibcs2) elf32_set_personality()
-#define elf_read_implies_exec(ex, have_pt_gnu_stack) (!(have_pt_gnu_stack))
-
/* Ugly but avoids duplication */
#include "../../../fs/binfmt_elf.c"
set_personality(PER_LINUX32);
current->thread.map_base = IA32_PAGE_OFFSET/3;
current->thread.task_size = IA32_PAGE_OFFSET; /* use what Linux/x86 uses... */
+ current->thread.flags |= IA64_THREAD_XSTACK; /* data must be executable */
set_fs(USER_DS); /* set addr limit for new TASK_SIZE */
}
data8 compat_clock_gettime /* 265 */
data8 compat_clock_getres
data8 compat_clock_nanosleep
- data8 compat_statfs64
- data8 compat_fstatfs64
+ data8 sys_statfs64
+ data8 sys_fstatfs64
data8 sys_tgkill /* 270 */
data8 compat_sys_utimes
data8 sys32_fadvise64_64
if (BAD_MADT_ENTRY(lapic, end))
return -EINVAL;
+ acpi_table_print_madt_entry(header);
+
if (lapic->address) {
iounmap((void *) ipi_base_addr);
ipi_base_addr = (unsigned long) ioremap(lapic->address, 0);
if (BAD_MADT_ENTRY(lsapic, end))
return -EINVAL;
- if (lsapic->flags.enabled) {
+ acpi_table_print_madt_entry(header);
+
+ printk(KERN_INFO "CPU %d (0x%04x)", total_cpus, (lsapic->id << 8) | lsapic->eid);
+
+ if (!lsapic->flags.enabled)
+ printk(" disabled");
+ else {
+ printk(" enabled");
#ifdef CONFIG_SMP
smp_boot_data.cpu_phys_id[available_cpus] = (lsapic->id << 8) | lsapic->eid;
+ if (hard_smp_processor_id()
+ == (unsigned int) smp_boot_data.cpu_phys_id[available_cpus])
+ printk(" (BSP)");
#endif
++available_cpus;
}
+ printk("\n");
+
total_cpus++;
return 0;
}
if (BAD_MADT_ENTRY(lacpi_nmi, end))
return -EINVAL;
+ acpi_table_print_madt_entry(header);
+
/* TBD: Support lapic_nmi entries */
return 0;
}
if (BAD_MADT_ENTRY(iosapic, end))
return -EINVAL;
+ acpi_table_print_madt_entry(header);
+
iosapic_init(iosapic->address, iosapic->global_irq_base);
return 0;
if (BAD_MADT_ENTRY(plintsrc, end))
return -EINVAL;
+ acpi_table_print_madt_entry(header);
+
/*
* Get vector assignment for this interrupt, set attributes,
* and program the IOSAPIC routing table.
if (BAD_MADT_ENTRY(p, end))
return -EINVAL;
+ acpi_table_print_madt_entry(header);
+
iosapic_override_isa_irq(p->bus_irq, p->global_irq,
(p->flags.polarity == 1) ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
(p->flags.trigger == 1) ? IOSAPIC_EDGE : IOSAPIC_LEVEL);
if (BAD_MADT_ENTRY(nmi_src, end))
return -EINVAL;
+ acpi_table_print_madt_entry(header);
+
/* TBD: Support nimsrc entries */
return 0;
}
-static void __init
-acpi_madt_oem_check (char *oem_id, char *oem_table_id)
+/* Hook from generic ACPI tables.c */
+void __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
if (!strncmp(oem_id, "IBM", 3) &&
- (!strncmp(oem_table_id, "SERMOW", 6))) {
+ (!strncmp(oem_table_id, "SERMOW", 6))){
- /*
- * Unfortunately ITC_DRIFT is not yet part of the
+ /* Unfortunatly ITC_DRIFT is not yet part of the
* official SAL spec, so the ITC_DRIFT bit is not
* set by the BIOS on this hardware.
*/
sal_platform_features |= IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT;
- cyclone_setup();
+ /*Start cyclone clock*/
+ cyclone_setup(0);
}
}
#define CYCLONE_TIMER_FREQ 100000000
int use_cyclone;
-void __init cyclone_setup(void)
+int __init cyclone_setup(char *str)
{
use_cyclone = 1;
+ return 1;
}
static u32* volatile cyclone_timer; /* Cyclone MPMC0 register */
data8 sys_syslog
data8 sys_setitimer
data8 sys_getitimer
-#ifdef CONFIG_TUX
- data8 __sys_tux // 1120 /* was: ia64_oldstat */
-#else
-# ifdef CONFIG_TUX_MODULE
- data8 sys_tux // 1120 /* was: ia64_oldstat */
-# else
data8 sys_ni_syscall // 1120 /* was: ia64_oldstat */
-# endif
-#endif
data8 sys_ni_syscall /* was: ia64_oldlstat */
data8 sys_ni_syscall /* was: ia64_oldfstat */
data8 sys_vhangup
data8 sys_mq_notify
data8 sys_mq_getsetattr
data8 sys_ni_syscall // reserved for kexec_load
- data8 sys_vserver
+ data8 sys_ni_syscall
data8 sys_ni_syscall // 1270
data8 sys_ni_syscall
data8 sys_ni_syscall
EXPORT_SYMBOL(__strncpy_from_user);
EXPORT_SYMBOL(__strnlen_user);
-#define __KERNEL_SYSCALLS__
#include <asm/unistd.h>
EXPORT_SYMBOL(__ia64_syscall);
-EXPORT_SYMBOL(execve);
-EXPORT_SYMBOL(clone);
/* from arch/ia64/lib */
extern void __divsi3(void);
/*
* This is updated when the user sets irq affinity via /proc
*/
-cpumask_t __cacheline_aligned pending_irq_cpumask[NR_IRQS];
-static unsigned long pending_irq_redir[BITS_TO_LONGS(NR_IRQS)];
+cpumask_t __cacheline_aligned pending_irq_cpumask[NR_IRQS];
#ifdef CONFIG_IA64_GENERIC
irq_desc_t * __ia64_irq_desc (unsigned int irq)
int prelen;
irq_desc_t *desc = irq_descp(irq);
unsigned long flags;
- int redir = 0;
if (!desc->handler->set_affinity)
return -EIO;
prelen = 0;
if (tolower(*rbuf) == 'r') {
prelen = strspn(rbuf, "Rr ");
- redir++;
+ irq |= IA64_IRQ_REDIRECTED;
}
err = cpumask_parse(buffer+prelen, count-prelen, new_value);
spin_lock_irqsave(&desc->lock, flags);
pending_irq_cpumask[irq] = new_value;
- if (redir)
- set_bit(irq, pending_irq_redir);
- else
- clear_bit(irq, pending_irq_redir);
spin_unlock_irqrestore(&desc->lock, flags);
return full_count;
/* note - we hold desc->lock */
cpumask_t tmp;
irq_desc_t *desc = irq_descp(irq);
- int redir = test_bit(irq, pending_irq_redir);
if (!cpus_empty(pending_irq_cpumask[irq])) {
cpus_and(tmp, pending_irq_cpumask[irq], cpu_online_map);
if (unlikely(!cpus_empty(tmp))) {
- desc->handler->set_affinity(irq | (redir ? IA64_IRQ_REDIRECTED : 0),
- pending_irq_cpumask[irq]);
+ desc->handler->set_affinity(irq, pending_irq_cpumask[irq]);
}
cpus_clear(pending_irq_cpumask[irq]);
}
*/
static int cpe_poll_enabled = 1;
-extern void salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe);
+static int cpe_vector = -1;
-static int mca_init;
+extern void salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe);
/*
* IA64_MCA log support
#ifdef CONFIG_ACPI
-static int cpe_vector = -1;
-
static irqreturn_t
ia64_mca_cpe_int_handler (int cpe_irq, void *arg, struct pt_regs *ptregs)
{
}
IA64_MCA_DEBUG("%s: corrected platform error "
- "vector %#x registered\n", __FUNCTION__, cpev);
+ "vector %#x setup and enabled\n", __FUNCTION__, cpev);
}
#endif /* CONFIG_ACPI */
/*
* ia64_mca_cmc_vector_setup
*
- * Setup the corrected machine check vector register in the processor.
- * (The interrupt is masked on boot. ia64_mca_late_init unmask this.)
- * This function is invoked on a per-processor basis.
+ * Setup the corrected machine check vector register in the processor and
+ * unmask interrupt. This function is invoked on a per-processor basis.
*
* Inputs
* None
cmcv_reg_t cmcv;
cmcv.cmcv_regval = 0;
- cmcv.cmcv_mask = 1; /* Mask/disable interrupt at first */
+ cmcv.cmcv_mask = 0; /* Unmask/enable interrupt */
cmcv.cmcv_vector = IA64_CMC_VECTOR;
ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
IA64_MCA_DEBUG("%s: CPU %d corrected "
- "machine check vector %#x registered.\n",
+ "machine check vector %#x setup and enabled.\n",
__FUNCTION__, smp_processor_id(), IA64_CMC_VECTOR);
IA64_MCA_DEBUG("%s: CPU %d CMCV = %#016lx\n",
*/
register_percpu_irq(IA64_CMC_VECTOR, &cmci_irqaction);
register_percpu_irq(IA64_CMCP_VECTOR, &cmcp_irqaction);
- ia64_mca_cmc_vector_setup(); /* Setup vector on BSP */
+ ia64_mca_cmc_vector_setup(); /* Setup vector on BSP & enable */
/* Setup the MCA rendezvous interrupt vector */
register_percpu_irq(IA64_MCA_RENDEZ_VECTOR, &mca_rdzv_irqaction);
#ifdef CONFIG_ACPI
/* Setup the CPEI/P vector and handler */
- cpe_vector = acpi_request_vector(ACPI_INTERRUPT_CPEI);
- register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);
+ {
+ irq_desc_t *desc;
+ unsigned int irq;
+
+ cpe_vector = acpi_request_vector(ACPI_INTERRUPT_CPEI);
+
+ if (cpe_vector >= 0) {
+ for (irq = 0; irq < NR_IRQS; ++irq)
+ if (irq_to_vector(irq) == cpe_vector) {
+ desc = irq_descp(irq);
+ desc->status |= IRQ_PER_CPU;
+ setup_irq(irq, &mca_cpe_irqaction);
+ }
+ ia64_mca_register_cpev(cpe_vector);
+ }
+ register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);
+ }
#endif
/* Initialize the areas set aside by the OS to buffer the
ia64_log_init(SAL_INFO_TYPE_CMC);
ia64_log_init(SAL_INFO_TYPE_CPE);
- mca_init = 1;
printk(KERN_INFO "MCA related initialization done\n");
}
static int __init
ia64_mca_late_init(void)
{
- if (!mca_init)
- return 0;
-
- /* Setup the CMCI/P vector and handler */
init_timer(&cmc_poll_timer);
cmc_poll_timer.function = ia64_mca_cmc_poll;
- /* Unmask/enable the vector */
+ /* Reset to the correct state */
cmc_polling_enabled = 0;
- schedule_work(&cmc_enable_work);
-
- IA64_MCA_DEBUG("%s: CMCI/P setup and enabled.\n", __FUNCTION__);
-#ifdef CONFIG_ACPI
- /* Setup the CPEI/P vector and handler */
init_timer(&cpe_poll_timer);
cpe_poll_timer.function = ia64_mca_cpe_poll;
- {
- irq_desc_t *desc;
- unsigned int irq;
-
- if (cpe_vector >= 0) {
- /* If platform supports CPEI, enable the irq. */
- cpe_poll_enabled = 0;
- for (irq = 0; irq < NR_IRQS; ++irq)
- if (irq_to_vector(irq) == cpe_vector) {
- desc = irq_descp(irq);
- desc->status |= IRQ_PER_CPU;
- setup_irq(irq, &mca_cpe_irqaction);
- }
- ia64_mca_register_cpev(cpe_vector);
- IA64_MCA_DEBUG("%s: CPEI/P setup and enabled.\n", __FUNCTION__);
- } else {
- /* If platform doesn't support CPEI, get the timer going. */
- if (cpe_poll_enabled) {
- ia64_mca_cpe_poll(0UL);
- IA64_MCA_DEBUG("%s: CPEP setup and enabled.\n", __FUNCTION__);
- }
- }
+#ifdef CONFIG_ACPI
+ /* If platform doesn't support CPEI, get the timer going. */
+ if (cpe_vector < 0 && cpe_poll_enabled) {
+ ia64_mca_cpe_poll(0UL);
+ } else {
+ cpe_poll_enabled = 0;
}
#endif
static inline unsigned long
pfm_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags, unsigned long exec)
{
- return get_unmapped_area(file, addr, len, pgoff, flags);
+ return get_unmapped_area(file, addr, len, pgoff, flags, 0);
}
mntput(pfmfs_mnt);
}
+static loff_t
+pfm_lseek(struct file *file, loff_t offset, int whence)
+{
+ DPRINT(("pfm_lseek called\n"));
+ return -ESPIPE;
+}
+
static ssize_t
pfm_read(struct file *filp, char *buf, size_t size, loff_t *ppos)
{
DPRINT(("message is too small ctx=%p (>=%ld)\n", ctx, sizeof(pfm_msg_t)));
return -EINVAL;
}
+ /*
+ * seeks are not allowed on message queues
+ */
+ if (ppos != &filp->f_pos) return -ESPIPE;
PROTECT_CTX(ctx, flags);
static struct file_operations pfm_file_ops = {
- .llseek = no_llseek,
+ .llseek = pfm_lseek,
.read = pfm_read,
.write = pfm_write,
.poll = pfm_poll,
*/
if (task == current || ctx->ctx_fl_system) return 0;
- /*
- * if context is UNLOADED we are safe to go
- */
- if (state == PFM_CTX_UNLOADED) return 0;
-
/*
* no command can operate on a zombie context
*/
}
/*
- * context is LOADED or MASKED. Some commands may need to have
- * the task stopped.
- *
+ * if context is UNLOADED, MASKED we are safe to go
+ */
+ if (state != PFM_CTX_LOADED) return 0;
+
+ /*
+ * context is LOADED, we must make sure the task is stopped
* We could lift this restriction for UP but it would mean that
* the user has no guarantee the task would not run between
* two successive calls to perfmonctl(). That's probably OK.
return error;
}
+void
+ia64_set_personality (struct elf64_hdr *elf_ex, int ibcs2_interpreter)
+{
+ set_personality(PER_LINUX);
+ if (elf_ex->e_flags & EF_IA_64_LINUX_EXECUTABLE_STACK)
+ current->thread.flags |= IA64_THREAD_XSTACK;
+ else
+ current->thread.flags &= ~IA64_THREAD_XSTACK;
+}
+
pid_t
kernel_thread (int (*fn)(void *), void *arg, unsigned long flags)
{
struct inode *inode = file->f_dentry->d_inode;
struct proc_dir_entry *entry = PDE(inode);
struct salinfo_data *data = entry->data;
+ void *saldata;
+ size_t size;
u8 *buf;
u64 bufsize;
buf = NULL;
bufsize = 0;
}
- return simple_read_from_buffer(buffer, count, ppos, buf, bufsize);
+ if (*ppos >= bufsize)
+ return 0;
+
+ saldata = buf + file->f_pos;
+ size = bufsize - file->f_pos;
+ if (size > count)
+ size = count;
+ if (copy_to_user(buffer, saldata, size))
+ return -EFAULT;
+
+ *ppos += size;
+ return size;
}
static void
}
#endif
- /* enable IA-64 Machine Check Abort Handling unless disabled */
- if (!strstr(saved_command_line, "nomca"))
- ia64_mca_init();
-
+ /* enable IA-64 Machine Check Abort Handling */
+ ia64_mca_init();
+
platform_setup(cmdline_p);
paging_init();
}
smp_setup_percpu_timer();
- ia64_mca_cmc_vector_setup(); /* Setup vector on AP */
+ ia64_mca_cmc_vector_setup(); /* Setup vector on AP & enable */
#ifdef CONFIG_PERFMON
pfm_init_percpu();
grow = PAGE_SIZE >> PAGE_SHIFT;
if (address - vma->vm_start > current->rlim[RLIMIT_STACK].rlim_cur
- || (((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) >
- current->rlim[RLIMIT_AS].rlim_cur))
+ || (((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) > current->rlim[RLIMIT_AS].rlim_cur))
return -ENOMEM;
- if (!vx_vmpages_avail(vma->vm_mm, grow) ||
- ((vma->vm_flags & VM_LOCKED) &&
- !vx_vmlocked_avail(vma->vm_mm, grow)))
+ if (!vx_vmpages_avail(vma->vm_mm, grow)
return -ENOMEM;
vma->vm_end += PAGE_SIZE;
// vma->vm_mm->total_vm += grow;
low = pgt_cache_water[0];
high = pgt_cache_water[1];
- preempt_disable();
if (pgtable_cache_size > (u64) high) {
do {
if (pgd_quicklist)
free_page((unsigned long)pmd_alloc_one_fast(0, 0));
} while (pgtable_cache_size > (u64) low);
}
- preempt_enable();
}
void
vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
vma->vm_end = vma->vm_start + PAGE_SIZE;
vma->vm_page_prot = protection_map[VM_DATA_DEFAULT_FLAGS & 0x7];
- vma->vm_flags = VM_DATA_DEFAULT_FLAGS | VM_GROWSUP;
+ vma->vm_flags = VM_READ|VM_WRITE|VM_MAYREAD|VM_MAYWRITE|VM_GROWSUP;
insert_vm_struct(current->mm, vma);
}
{
struct page *page;
/*
- * EFI uses 4KB pages while the kernel can use 4KB or bigger.
+ * EFI uses 4KB pages while the kernel can use 4KB or bigger.
* Thus EFI and the kernel may have different page sizes. It is
* therefore possible to have the initrd share the same page as
* the end of the kernel (given current setup).
if (!fsyscall_table[i] || nolwsys)
fsyscall_table[i] = sys_call_table[i] | 1;
}
- setup_gate();
+ setup_gate(); /* setup gate pages before we free up boot memory... */
#ifdef CONFIG_IA32_SUPPORT
ia32_boot_gdt_init();
*
* This code is executed once for each Hub chip.
*/
-static void __init
+static void
per_hub_init(cnodeid_t cnode)
{
nasid_t nasid;
klhwg_add_all_modules(hwgraph_root);
klhwg_add_all_nodes(hwgraph_root);
- for (cnode = 0; cnode < numionodes; cnode++)
+ for (cnode = 0; cnode < numionodes; cnode++) {
+ extern void per_hub_init(cnodeid_t);
per_hub_init(cnode);
+ }
/*
*
ii_icrb0_d_u_t icrbd; /* II CRB Register D */
ii_ibcr_u_t ibcr;
ii_icmr_u_t icmr;
- ii_ieclr_u_t ieclr;
BTE_PRINTK(("bte_error_handler(%p) - %d\n", err_nodepda,
imem.ii_imem_fld_s.i_b0_esd = imem.ii_imem_fld_s.i_b1_esd = 1;
REMOTE_HUB_S(nasid, IIO_IMEM, imem.ii_imem_regval);
- /* Clear IBLS0/1 error bits */
- ieclr.ii_ieclr_regval = 0;
- if (err_nodepda->bte_if[0].bh_error != BTE_SUCCESS)
- ieclr.ii_ieclr_fld_s.i_e_bte_0 = 1;
- if (err_nodepda->bte_if[1].bh_error != BTE_SUCCESS)
- ieclr.ii_ieclr_fld_s.i_e_bte_1 = 1;
- REMOTE_HUB_S(nasid, IIO_IECLR, ieclr.ii_ieclr_regval);
-
/* Reinitialize both BTE state machines. */
ibcr.ii_ibcr_regval = REMOTE_HUB_L(nasid, IIO_IBCR);
ibcr.ii_ibcr_fld_s.i_soft_reset = 1;
#include <asm/sn/pda.h>
#include <asm/sn/sn2/shubio.h>
#include <asm/nodedata.h>
+#include <asm/delay.h>
#include <linux/bootmem.h>
#include <linux/string.h>
bte_copy(u64 src, u64 dest, u64 len, u64 mode, void *notification)
{
u64 transfer_size;
- u64 transfer_stat;
struct bteinfo_s *bte;
bte_result_t bte_status;
unsigned long irq_flags;
if (!(mode & BTE_WACQUIRE)) {
return BTEFAIL_NOTAVAIL;
}
+
+ /* Wait until a bte is available. */
+ udelay(1);
} while (1);
return BTE_SUCCESS;
}
- while ((transfer_stat = *bte->most_rcnt_na) == -1UL) {
+ while (*bte->most_rcnt_na == -1UL) {
}
BTE_PRINTKV((" Delay Done. IBLS = 0x%lx, most_rcnt_na = 0x%lx\n",
BTE_LNSTAT_LOAD(bte), *bte->most_rcnt_na));
- if (transfer_stat & IBLS_ERROR) {
- bte_status = transfer_stat & ~IBLS_ERROR;
+ if (*bte->most_rcnt_na & IBLS_ERROR) {
+ bte_status = *bte->most_rcnt_na & ~IBLS_ERROR;
*bte->most_rcnt_na = 0L;
} else {
bte_status = BTE_SUCCESS;
}
u8
-sn_irq_to_vector(unsigned int irq)
+sn_irq_to_vector(u8 irq)
{
return(irq);
}
*oemdata_size = 0;
vfree(*oemdata);
*oemdata = NULL;
- if (efi_guidcmp(guid, SAL_PLAT_SPECIFIC_ERR_SECT_GUID) == 0 ||
- efi_guidcmp(guid, SAL_PLAT_MEM_DEV_ERR_SECT_GUID) == 0)
+ if (efi_guidcmp(guid, SAL_PLAT_SPECIFIC_ERR_SECT_GUID) == 0)
return sn_platform_plat_specific_err_print(sect_header, oemdata, oemdata_size);
return 0;
}
return 1;
}
-void bvme6000_reset(void)
+void bvme6000_reset()
{
volatile PitRegsPtr pit = (PitRegsPtr)BVME_PIT_BASE;
# CONFIG_LLC is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
_060_real_lock_page:
move.l %d2,-(%sp)
| load sfc/dfc
+ moveq #5,%d0
tst.b %d0
jne 1f
moveq #1,%d0
- jra 2f
-1: moveq #5,%d0
-2: movec.l %dfc,%d2
+1: movec.l %dfc,%d2
movec.l %d0,%dfc
movec.l %d0,%sfc
}
#endif
- if (CPU_IS_060) {
- u32 pcr;
-
- asm (".chip 68060; movec %%pcr,%0; .chip 68k"
- : "=d" (pcr));
- if (((pcr >> 8) & 0xff) <= 5) {
- printk("Enabling workaround for errata I14\n");
- asm (".chip 68060; movec %0,%%pcr; .chip 68k"
- : : "d" (pcr | 0x20));
- }
- }
-
init_mm.start_code = PAGE_OFFSET;
init_mm.end_code = (unsigned long) &_etext;
init_mm.end_data = (unsigned long) &_edata;
/*
* user process trying to return with weird frame format
*/
-#ifdef DEBUG
+#if DEBUG
printk("user process returning with weird frame format\n");
#endif
goto badframe;
/*
* user process trying to return with weird frame format
*/
-#ifdef DEBUG
+#if DEBUG
printk("user process returning with weird frame format\n");
#endif
goto badframe;
if (regs->stkadj) {
struct pt_regs *tregs =
(struct pt_regs *)((ulong)regs + regs->stkadj);
-#ifdef DEBUG
+#if DEBUG
printk("Performing stackadjust=%04x\n", regs->stkadj);
#endif
/* This must be copied with decreasing addresses to
if (regs->stkadj) {
struct pt_regs *tregs =
(struct pt_regs *)((ulong)regs + regs->stkadj);
-#ifdef DEBUG
+#if DEBUG
printk("Performing stackadjust=%04x\n", regs->stkadj);
#endif
/* This must be copied with decreasing addresses to
unsigned short ssw = fp->un.fmtb.ssw;
extern unsigned long _sun3_map_test_start, _sun3_map_test_end;
-#ifdef DEBUG
+#if DEBUG
if (ssw & (FC | FB))
printk ("Instruction fault at %#010lx\n",
ssw & FC ?
unsigned short mmusr;
unsigned long addr, errorcode;
unsigned short ssw = fp->un.fmtb.ssw;
-#ifdef DEBUG
+#if DEBUG
unsigned long desc;
printk ("pid = %x ", current->pid);
if (ssw & DF) {
addr = fp->un.fmtb.daddr;
-#ifdef DEBUG
+#if DEBUG
asm volatile ("ptestr %3,%2@,#7,%0\n\t"
"pmove %%psr,%1@"
: "=a&" (desc)
#endif
mmusr = temp;
-#ifdef DEBUG
+#if DEBUG
printk("mmusr is %#x for addr %#lx in task %p\n",
mmusr, addr, current);
printk("descriptor address is %#lx, contents %#lx\n",
: "a" (&tlong));
printk("tt1 is %#lx\n", tlong);
#endif
-#ifdef DEBUG
+#if DEBUG
printk("Unknown SIGSEGV - 1\n");
#endif
die_if_kernel("Oops",&fp->ptregs,mmusr);
should still create the ATC entry. */
goto create_atc_entry;
-#ifdef DEBUG
+#if DEBUG
asm volatile ("ptestr #1,%2@,#7,%0\n\t"
"pmove %%psr,%1@"
: "=a&" (desc)
else if (mmusr & (MMU_B|MMU_L|MMU_S)) {
printk ("invalid insn access at %#lx from pc %#lx\n",
addr, fp->ptregs.pc);
-#ifdef DEBUG
+#if DEBUG
printk("Unknown SIGSEGV - 2\n");
#endif
die_if_kernel("Oops",&fp->ptregs,mmusr);
if (user_mode(&fp->ptregs))
current->thread.esp0 = (unsigned long) fp;
-#ifdef DEBUG
+#if DEBUG
printk ("*** Bus Error *** Format is %x\n", fp->ptregs.format);
#endif
#endif
default:
die_if_kernel("bad frame format",&fp->ptregs,0);
-#ifdef DEBUG
+#if DEBUG
printk("Unknown SIGSEGV - 4\n");
#endif
force_sig(SIGSEGV, current);
printk ("\n");
}
-void show_stack(struct task_struct *task, unsigned long *stack)
+extern void show_stack(struct task_struct *task, unsigned long *stack)
{
unsigned long *endstack;
int i;
* csum_partial_copy_from_user.
*/
-#include <linux/module.h>
#include <net/checksum.h>
/*
#endif
if (irq < VIA1_SOURCE_BASE) {
- cpu_free_irq(irq, dev_id);
- return;
+ return cpu_free_irq(irq, dev_id);
}
if (irq >= NUM_MAC_SOURCES) {
static inline void free_io_area(void *addr)
{
- vfree((void *)(PAGE_MASK & (unsigned long)addr));
+ return vfree((void *)(PAGE_MASK & (unsigned long)addr));
}
#else
return 0;
}
-#ifdef DEBUG_INVALID_PTOV
+#if DEBUG_INVALID_PTOV
int mm_inv_cnt = 5;
#endif
voff += m68k_memory[i].size;
} while (++i < m68k_num_memory);
-#ifdef DEBUG_INVALID_PTOV
+#if DEBUG_INVALID_PTOV
if (mm_inv_cnt > 0) {
mm_inv_cnt--;
printk("Invalid use of phys_to_virt(0x%lx) at 0x%p!\n",
return 1;
}
-void mvme147_reset(void)
+void mvme147_reset()
{
printk ("\r\n\nCalled mvme147_reset\r\n");
m147_pcc->watchdog = 0x0a; /* Clear timer */
return 1;
}
-void mvme16x_reset(void)
+void mvme16x_reset()
{
printk ("\r\n\nCalled mvme16x_reset\r\n"
"\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r");
}
#endif
-void q40_reset(void)
+void q40_reset()
{
halted=1;
printk ("\n\n*******************************************\n"
Q40_LED_ON();
while(1) ;
}
-void q40_halt(void)
+void q40_halt()
{
halted=1;
printk ("\n\n*******************\n"
return 0;
}
-unsigned int q40_get_ss(void)
+unsigned int q40_get_ss()
{
return bcd2bin(Q40_RTC_SECS);
}
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
CONFIG_NET_DIVERT=y
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
CONFIG_NET_DIVERT=y
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
goto out;
if (pos < 0)
goto out;
- ret = -ESPIPE;
- if (!(file->f_mode & FMODE_PREAD))
- goto out;
ret = read(file, buf, count, &pos);
if (ret > 0)
dnotify_parent(file->f_dentry, DN_ACCESS);
if (pos < 0)
goto out;
- ret = -ESPIPE;
- if (!(file->f_mode & FMODE_PWRITE))
- goto out;
-
ret = write(file, buf, count, &pos);
if (ret > 0)
dnotify_parent(file->f_dentry, DN_MODIFY);
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
-#include <linux/vs_cvirt.h>
#include <asm/branch.h>
#include <asm/cachectl.h>
#include <linux/socket.h>
#include <linux/security.h>
#include <linux/syscalls.h>
-#include <linux/vs_cvirt.h>
#include <asm/ptrace.h>
#include <asm/page.h>
/* And the same for proc */
int proc_dolasatstring(ctl_table *table, int write, struct file *filp,
- void *buffer, size_t *lenp, loff_t *ppos)
+ void *buffer, size_t *lenp)
{
int r;
down(&lasat_info_sem);
- r = proc_dostring(table, write, filp, buffer, lenp, ppos);
+ r = proc_dostring(table, write, filp, buffer, lenp);
if ( (!write) || r) {
up(&lasat_info_sem);
return r;
/* proc function to write EEPROM after changing int entry */
int proc_dolasatint(ctl_table *table, int write, struct file *filp,
- void *buffer, size_t *lenp, loff_t *ppos)
+ void *buffer, size_t *lenp)
{
int r;
down(&lasat_info_sem);
- r = proc_dointvec(table, write, filp, buffer, lenp, ppos);
+ r = proc_dointvec(table, write, filp, buffer, lenp);
if ( (!write) || r) {
up(&lasat_info_sem);
return r;
#ifdef CONFIG_DS1603
/* proc function to read/write RealTime Clock */
int proc_dolasatrtc(ctl_table *table, int write, struct file *filp,
- void *buffer, size_t *lenp, loff_t *ppos)
+ void *buffer, size_t *lenp)
{
int r;
down(&lasat_info_sem);
if (rtctmp < 0)
rtctmp = 0;
}
- r = proc_dointvec(table, write, filp, buffer, lenp, ppos);
+ r = proc_dointvec(table, write, filp, buffer, lenp);
if ( (!write) || r) {
up(&lasat_info_sem);
return r;
static char proc_lasat_ipbuf[32];
/* Parsing of IP address */
int proc_lasat_ip(ctl_table *table, int write, struct file *filp,
- void *buffer, size_t *lenp, loff_t *ppos)
+ void *buffer, size_t *lenp)
{
int len;
unsigned int ip;
char *p, c;
if (!table->data || !table->maxlen || !*lenp ||
- (*ppos && !write)) {
+ (filp->f_pos && !write)) {
*lenp = 0;
return 0;
}
return -EFAULT;
}
proc_lasat_ipbuf[len] = 0;
- *ppos += *lenp;
+ filp->f_pos += *lenp;
/* Now see if we can convert it to a valid IP */
ip = in_aton(proc_lasat_ipbuf);
*(unsigned int *)(table->data) = ip;
len++;
}
*lenp = len;
- *ppos += len;
+ filp->f_pos += len;
}
update_bcastaddr();
up(&lasat_info_sem);
}
int proc_lasat_eeprom_value(ctl_table *table, int write, struct file *filp,
- void *buffer, size_t *lenp, loff_t *ppos)
+ void *buffer, size_t *lenp)
{
int r;
down(&lasat_info_sem);
- r = proc_dointvec(table, write, filp, buffer, lenp, ppos);
+ r = proc_dointvec(table, write, filp, buffer, lenp);
if ( (!write) || r) {
up(&lasat_info_sem);
return r;
If you don't know what to do here, say N.
-config HOTPLUG_CPU
- bool
- default y if SMP
- select HOTPLUG
-
-config DISCONTIGMEM
- bool "Discontiguous memory support (EXPERIMENTAL)"
- depends on EXPERIMENTAL
- help
- Say Y to support efficient handling of discontiguous physical memory,
- for architectures which are either NUMA (Non-Uniform Memory Access)
- or have huge holes in the physical address space for other reasons.
- See <file:Documentation/vm/numa> for more.
-
config PREEMPT
bool
# bool "Preemptible Kernel"
keys are documented in <file:Documentation/sysrq.txt>. Don't say Y
unless you really know what this hack does.
-config DEBUG_SPINLOCK
- bool "Spinlock debugging"
- depends on DEBUG_KERNEL
- help
- Say Y here and build SMP to catch missing spinlock initialization
- and certain other kinds of spinlock errors commonly made. This is
- best used in conjunction with the NMI watchdog so that spinlock
- deadlocks are also debuggable.
-
-config DEBUG_RWLOCK
- bool "Read-write spinlock debugging"
- depends on DEBUG_KERNEL && SMP
- help
- If you say Y here then read-write lock processing will count how many
- times it has tried to get the lock and issue an error message after
- too many attempts. If you suspect a rwlock problem or a kernel
- hacker asks for this option then say Y. Otherwise say N.
-
config FRAME_POINTER
bool "Compile the kernel with frame pointers"
help
#
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
-CONFIG_POSIX_MQUEUE=y
# CONFIG_BSD_PROCESS_ACCT is not set
CONFIG_SYSCTL=y
-# CONFIG_AUDIT is not set
CONFIG_LOG_BUF_SHIFT=16
CONFIG_HOTPLUG=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_EMBEDDED=y
CONFIG_KALLSYMS=y
-CONFIG_KALLSYMS_ALL=y
CONFIG_FUTEX=y
CONFIG_EPOLL=y
CONFIG_IOSCHED_NOOP=y
CONFIG_IOSCHED_AS=y
CONFIG_IOSCHED_DEADLINE=y
-CONFIG_IOSCHED_CFQ=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
#
# CONFIG_PA7000 is not set
# CONFIG_PA7100LC is not set
# CONFIG_PA7200 is not set
-# CONFIG_PA7300LC is not set
CONFIG_PA8X00=y
CONFIG_PA20=y
-CONFIG_PREFETCH=y
CONFIG_PARISC64=y
CONFIG_64BIT=y
+# CONFIG_PDC_NARROW is not set
# CONFIG_SMP is not set
# CONFIG_PREEMPT is not set
CONFIG_COMPAT=y
CONFIG_IOSAPIC=y
CONFIG_IOMMU_SBA=y
# CONFIG_SUPERIO is not set
-# CONFIG_CHASSIS_LCD_LED is not set
-CONFIG_PDC_CHASSIS=y
+CONFIG_CHASSIS_LCD_LED=y
+# CONFIG_PDC_CHASSIS is not set
#
# PCMCIA/CardBus support
# SCSI Transport Attributes
#
CONFIG_SCSI_SPI_ATTRS=y
-CONFIG_SCSI_FC_ATTRS=m
+# CONFIG_SCSI_FC_ATTRS is not set
#
# SCSI low-level drivers
CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
CONFIG_SCSI_SYM53C8XX_IOMAPPED=y
-# CONFIG_SCSI_IPR is not set
# CONFIG_SCSI_PCI2000 is not set
# CONFIG_SCSI_PCI2220I is not set
# CONFIG_SCSI_QLOGIC_ISP is not set
#
# CONFIG_PCMCIA_FDOMAIN is not set
# CONFIG_PCMCIA_QLOGIC is not set
-# CONFIG_PCMCIA_SYM53C500 is not set
#
# Multi-device support (RAID and LVM)
#
# I2O device support
#
-# CONFIG_I2O is not set
#
# Networking support
#
# CONFIG_IP_VS is not set
# CONFIG_IPV6 is not set
+# CONFIG_DECNET is not set
+# CONFIG_BRIDGE is not set
CONFIG_NETFILTER=y
# CONFIG_NETFILTER_DEBUG is not set
CONFIG_IP_NF_ARP_MANGLE=m
# CONFIG_IP_NF_COMPAT_IPCHAINS is not set
# CONFIG_IP_NF_COMPAT_IPFWADM is not set
-CONFIG_IP_NF_TARGET_NOTRACK=m
-CONFIG_IP_NF_RAW=m
CONFIG_XFRM=y
CONFIG_XFRM_USER=m
#
# CONFIG_IP_SCTP is not set
# CONFIG_ATM is not set
-# CONFIG_BRIDGE is not set
# CONFIG_VLAN_8021Q is not set
-# CONFIG_DECNET is not set
CONFIG_LLC=m
CONFIG_LLC2=m
# CONFIG_IPX is not set
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# Network testing
#
CONFIG_NET_PKTGEN=m
-# CONFIG_NETPOLL is not set
-# CONFIG_NET_POLL_CONTROLLER is not set
-# CONFIG_HAMRADIO is not set
-# CONFIG_IRDA is not set
-# CONFIG_BT is not set
CONFIG_NETDEVICES=y
-CONFIG_DUMMY=m
-CONFIG_BONDING=m
-# CONFIG_EQUALIZER is not set
-CONFIG_TUN=m
-# CONFIG_ETHERTAP is not set
#
# ARCnet devices
#
# CONFIG_ARCNET is not set
+CONFIG_DUMMY=m
+CONFIG_BONDING=m
+# CONFIG_EQUALIZER is not set
+CONFIG_TUN=m
+# CONFIG_ETHERTAP is not set
#
# Ethernet (10 or 100Mbit)
# CONFIG_8139TOO_TUNE_TWISTER is not set
# CONFIG_8139TOO_8129 is not set
# CONFIG_8139_OLD_RX_RESET is not set
+CONFIG_8139_RXBUF_IDX=1
# CONFIG_SIS900 is not set
CONFIG_EPIC100=m
# CONFIG_SUNDANCE is not set
# CONFIG_HAMACHI is not set
# CONFIG_YELLOWFIN is not set
# CONFIG_R8169 is not set
+# CONFIG_SIS190 is not set
# CONFIG_SK98LIN is not set
CONFIG_TIGON3=m
#
CONFIG_IXGB=m
CONFIG_IXGB_NAPI=y
-CONFIG_S2IO=m
-CONFIG_S2IO_NAPI=y
-
-#
-# Token Ring devices
-#
-# CONFIG_TR is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+CONFIG_PPP=m
+# CONFIG_PPP_MULTILINK is not set
+# CONFIG_PPP_FILTER is not set
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_BSDCOMP=m
+# CONFIG_PPPOE is not set
+# CONFIG_SLIP is not set
#
# Wireless LAN (non-hamradio)
# CONFIG_PRISM54 is not set
CONFIG_NET_WIRELESS=y
+#
+# Token Ring devices
+#
+# CONFIG_TR is not set
+# CONFIG_NET_FC is not set
+# CONFIG_SHAPER is not set
+# CONFIG_NETCONSOLE is not set
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+
#
# PCMCIA network device support
#
# CONFIG_PCMCIA_AXNET is not set
#
-# Wan interfaces
+# Amateur Radio support
#
-# CONFIG_WAN is not set
-# CONFIG_FDDI is not set
-# CONFIG_HIPPI is not set
-CONFIG_PPP=m
-# CONFIG_PPP_MULTILINK is not set
-# CONFIG_PPP_FILTER is not set
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
-CONFIG_PPP_BSDCOMP=m
-# CONFIG_PPPOE is not set
-# CONFIG_SLIP is not set
-# CONFIG_NET_FC is not set
-# CONFIG_SHAPER is not set
-# CONFIG_NETCONSOLE is not set
+# CONFIG_HAMRADIO is not set
+
+#
+# IrDA (infrared) support
+#
+# CONFIG_IRDA is not set
+
+#
+# Bluetooth support
+#
+# CONFIG_BT is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
#
# ISDN subsystem
#
CONFIG_PROC_FS=y
CONFIG_PROC_KCORE=y
-CONFIG_SYSFS=y
# CONFIG_DEVFS_FS is not set
# CONFIG_DEVPTS_FS_XATTR is not set
# CONFIG_TMPFS is not set
CONFIG_CIFS=m
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
+# CONFIG_INTERMEZZO_FS is not set
# CONFIG_AFS_FS is not set
#
# CONFIG_CRYPTO_ARC4 is not set
CONFIG_CRYPTO_DEFLATE=m
# CONFIG_CRYPTO_MICHAEL_MIC is not set
-CONFIG_CRYPTO_CRC32C=m
CONFIG_CRYPTO_TEST=m
#
# Library routines
#
CONFIG_CRC32=y
-CONFIG_LIBCRC32C=m
CONFIG_ZLIB_INFLATE=m
CONFIG_ZLIB_DEFLATE=m
CONFIG_SYSVIPC=y
# CONFIG_BSD_PROCESS_ACCT is not set
CONFIG_SYSCTL=y
-# CONFIG_AUDIT is not set
CONFIG_LOG_BUF_SHIFT=15
# CONFIG_HOTPLUG is not set
# CONFIG_IKCONFIG is not set
# CONFIG_EMBEDDED is not set
CONFIG_KALLSYMS=y
-# CONFIG_KALLSYMS_ALL is not set
-# CONFIG_KALLSYMS_EXTRA_PASS is not set
CONFIG_FUTEX=y
CONFIG_EPOLL=y
CONFIG_IOSCHED_NOOP=y
CONFIG_IOSCHED_AS=y
CONFIG_IOSCHED_DEADLINE=y
-CONFIG_IOSCHED_CFQ=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
#
# Processor type and features
#
# CONFIG_PA7000 is not set
-CONFIG_PA7100LC=y
-# CONFIG_PA7200 is not set
-# CONFIG_PA7300LC is not set
+# CONFIG_PA7100LC is not set
+CONFIG_PA7200=y
# CONFIG_PA8X00 is not set
CONFIG_PA11=y
# CONFIG_64BIT is not set
CONFIG_PCI_NAMES=y
CONFIG_GSC_DINO=y
# CONFIG_PCI_LBA is not set
-CONFIG_CHASSIS_LCD_LED=y
+# CONFIG_CHASSIS_LCD_LED is not set
# CONFIG_PDC_CHASSIS is not set
#
#
# Generic Driver Options
#
-CONFIG_PREVENT_FIRMWARE_BUILD=y
CONFIG_DEBUG_DRIVER=y
#
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_CRYPTOLOOP=y
# CONFIG_BLK_DEV_NBD is not set
-# CONFIG_BLK_DEV_SX8 is not set
+CONFIG_BLK_DEV_CARMEL=y
# CONFIG_BLK_DEV_RAM is not set
#
# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
#
# CONFIG_SCSI_MULTI_LUN is not set
+# CONFIG_SCSI_REPORT_LUNS is not set
# CONFIG_SCSI_CONSTANTS is not set
# CONFIG_SCSI_LOGGING is not set
# SCSI low-level drivers
#
# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
-# CONFIG_SCSI_3W_9XXX is not set
# CONFIG_SCSI_7000FASST is not set
# CONFIG_SCSI_ACARD is not set
# CONFIG_SCSI_AHA152X is not set
# CONFIG_SCSI_AIC7XXX is not set
# CONFIG_SCSI_AIC7XXX_OLD is not set
# CONFIG_SCSI_AIC79XX is not set
-# CONFIG_SCSI_DPT_I2O is not set
+# CONFIG_SCSI_ADVANSYS is not set
# CONFIG_SCSI_IN2000 is not set
# CONFIG_SCSI_MEGARAID is not set
# CONFIG_SCSI_BUSLOGIC is not set
+# CONFIG_SCSI_CPQFCTS is not set
# CONFIG_SCSI_DMX3191D is not set
# CONFIG_SCSI_DTC3280 is not set
# CONFIG_SCSI_EATA is not set
CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
# CONFIG_SCSI_SYM53C8XX_IOMAPPED is not set
-# CONFIG_SCSI_IPR is not set
# CONFIG_SCSI_ZALON is not set
# CONFIG_SCSI_PAS16 is not set
# CONFIG_SCSI_PSI240I is not set
# CONFIG_INET_AH is not set
# CONFIG_INET_ESP is not set
# CONFIG_INET_IPCOMP is not set
-# CONFIG_NETFILTER is not set
+# CONFIG_DECNET is not set
# CONFIG_BRIDGE is not set
+# CONFIG_NETFILTER is not set
# CONFIG_VLAN_8021Q is not set
-# CONFIG_DECNET is not set
# CONFIG_LLC2 is not set
# CONFIG_IPX is not set
# CONFIG_ATALK is not set
# QoS and/or fair queueing
#
# CONFIG_NET_SCHED is not set
-# CONFIG_NET_CLS_ROUTE is not set
#
# Network testing
#
# CONFIG_NET_PKTGEN is not set
-# CONFIG_NETPOLL is not set
-# CONFIG_NET_POLL_CONTROLLER is not set
-# CONFIG_HAMRADIO is not set
-# CONFIG_IRDA is not set
-# CONFIG_BT is not set
CONFIG_NETDEVICES=y
-# CONFIG_DUMMY is not set
-# CONFIG_BONDING is not set
-# CONFIG_EQUALIZER is not set
-# CONFIG_TUN is not set
#
# ARCnet devices
#
# CONFIG_ARCNET is not set
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
#
# Ethernet (10 or 100Mbit)
# Ethernet (10000 Mbit)
#
# CONFIG_IXGB is not set
-# CONFIG_S2IO is not set
-
-#
-# Token Ring devices
-#
-# CONFIG_TR is not set
+# CONFIG_FDDI is not set
+# CONFIG_PLIP is not set
+CONFIG_PPP=y
+# CONFIG_PPP_FILTER is not set
+# CONFIG_PPP_ASYNC is not set
+# CONFIG_PPP_SYNC_TTY is not set
+# CONFIG_PPP_DEFLATE is not set
+# CONFIG_PPP_BSDCOMP is not set
+# CONFIG_SLIP is not set
#
# Wireless LAN (non-hamradio)
#
CONFIG_NET_WIRELESS=y
+#
+# Token Ring devices
+#
+# CONFIG_TR is not set
+# CONFIG_NET_FC is not set
+
#
# Wan interfaces
#
# CONFIG_WAN is not set
-# CONFIG_FDDI is not set
-# CONFIG_PLIP is not set
-CONFIG_PPP=y
-# CONFIG_PPP_FILTER is not set
-# CONFIG_PPP_ASYNC is not set
-# CONFIG_PPP_SYNC_TTY is not set
-# CONFIG_PPP_DEFLATE is not set
-# CONFIG_PPP_BSDCOMP is not set
-# CONFIG_SLIP is not set
-# CONFIG_NET_FC is not set
+
+#
+# Amateur Radio support
+#
+# CONFIG_HAMRADIO is not set
+
+#
+# IrDA (infrared) support
+#
+# CONFIG_IRDA is not set
+
+#
+# Bluetooth support
+#
+# CONFIG_BT is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
#
# ISDN subsystem
# CONFIG_INPUT_JOYSTICK is not set
# CONFIG_INPUT_TOUCHSCREEN is not set
CONFIG_INPUT_MISC=y
+# CONFIG_INPUT_PCSPKR is not set
# CONFIG_INPUT_UINPUT is not set
# CONFIG_HP_SDC_RTC is not set
#
# CONFIG_I2C is not set
-#
-# Dallas's 1-wire bus
-#
-# CONFIG_W1 is not set
-
#
# Misc devices
#
# Graphics support
#
CONFIG_FB=y
-# CONFIG_FB_CIRRUS is not set
# CONFIG_FB_PM2 is not set
# CONFIG_FB_CYBER2000 is not set
-# CONFIG_FB_ASILIANT is not set
# CONFIG_FB_IMSTT is not set
CONFIG_FB_STI=y
# CONFIG_FB_RIVA is not set
CONFIG_DUMMY_CONSOLE_ROWS=64
CONFIG_DUMMY_CONSOLE=y
CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_PCI_CONSOLE=y
# CONFIG_FONTS is not set
CONFIG_FONT_8x8=y
CONFIG_FONT_8x16=y
#
# DOS/FAT/NT Filesystems
#
-# CONFIG_MSDOS_FS is not set
-# CONFIG_VFAT_FS is not set
+# CONFIG_FAT_FS is not set
# CONFIG_NTFS_FS is not set
#
#
CONFIG_PROC_FS=y
CONFIG_PROC_KCORE=y
-CONFIG_SYSFS=y
# CONFIG_DEVPTS_FS_XATTR is not set
CONFIG_TMPFS=y
# CONFIG_HUGETLB_PAGE is not set
# Miscellaneous filesystems
#
# CONFIG_HFSPLUS_FS is not set
-# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
# CONFIG_CRAMFS is not set
# CONFIG_VXFS_FS is not set
# CONFIG_HPFS_FS is not set
CONFIG_NFS_V3=y
CONFIG_NFSD=y
CONFIG_NFSD_V3=y
-CONFIG_NFSD_TCP=y
CONFIG_ROOT_NFS=y
CONFIG_LOCKD=y
CONFIG_LOCKD_V4=y
# CONFIG_NLS_ISO8859_8 is not set
# CONFIG_NLS_CODEPAGE_1250 is not set
# CONFIG_NLS_CODEPAGE_1251 is not set
-# CONFIG_NLS_ASCII is not set
# CONFIG_NLS_ISO8859_1 is not set
# CONFIG_NLS_ISO8859_2 is not set
# CONFIG_NLS_ISO8859_3 is not set
CONFIG_DEBUG_KERNEL=y
# CONFIG_DEBUG_SLAB is not set
CONFIG_MAGIC_SYSRQ=y
-# CONFIG_DEBUG_SPINLOCK is not set
CONFIG_FRAME_POINTER=y
# CONFIG_DEBUG_INFO is not set
# CONFIG_CRYPTO_AES is not set
# CONFIG_CRYPTO_CAST5 is not set
# CONFIG_CRYPTO_CAST6 is not set
-# CONFIG_CRYPTO_TEA is not set
# CONFIG_CRYPTO_ARC4 is not set
# CONFIG_CRYPTO_DEFLATE is not set
# CONFIG_CRYPTO_MICHAEL_MIC is not set
-# CONFIG_CRYPTO_CRC32C is not set
# CONFIG_CRYPTO_TEST is not set
#
# Library routines
#
-# CONFIG_CRC_CCITT is not set
CONFIG_CRC32=y
-# CONFIG_LIBCRC32C is not set
#
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
-# CONFIG_POSIX_MQUEUE is not set
# CONFIG_BSD_PROCESS_ACCT is not set
CONFIG_SYSCTL=y
-# CONFIG_AUDIT is not set
CONFIG_LOG_BUF_SHIFT=16
CONFIG_HOTPLUG=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_EMBEDDED=y
CONFIG_KALLSYMS=y
-CONFIG_KALLSYMS_ALL=y
CONFIG_FUTEX=y
CONFIG_EPOLL=y
CONFIG_IOSCHED_NOOP=y
CONFIG_IOSCHED_AS=y
CONFIG_IOSCHED_DEADLINE=y
-CONFIG_IOSCHED_CFQ=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
#
# CONFIG_PA7000 is not set
# CONFIG_PA7100LC is not set
# CONFIG_PA7200 is not set
-# CONFIG_PA7300LC is not set
CONFIG_PA8X00=y
CONFIG_PA20=y
-CONFIG_PREFETCH=y
# CONFIG_PARISC64 is not set
# CONFIG_64BIT is not set
# CONFIG_SMP is not set
CONFIG_IOSAPIC=y
CONFIG_IOMMU_SBA=y
CONFIG_SUPERIO=y
-CONFIG_CHASSIS_LCD_LED=y
+# CONFIG_CHASSIS_LCD_LED is not set
# CONFIG_PDC_CHASSIS is not set
#
# CONFIG_BLK_DEV_SLC90E66 is not set
# CONFIG_BLK_DEV_TRM290 is not set
# CONFIG_BLK_DEV_VIA82CXXX is not set
-# CONFIG_IDE_ARM is not set
CONFIG_BLK_DEV_IDEDMA=y
# CONFIG_IDEDMA_IVB is not set
# CONFIG_IDEDMA_AUTO is not set
# SCSI Transport Attributes
#
CONFIG_SCSI_SPI_ATTRS=y
-CONFIG_SCSI_FC_ATTRS=m
+# CONFIG_SCSI_FC_ATTRS is not set
#
# SCSI low-level drivers
# CONFIG_SCSI_SATA_SVW is not set
CONFIG_SCSI_ATA_PIIX=m
CONFIG_SCSI_SATA_PROMISE=m
-# CONFIG_SCSI_SATA_SX4 is not set
CONFIG_SCSI_SATA_SIL=m
-# CONFIG_SCSI_SATA_SIS is not set
CONFIG_SCSI_SATA_VIA=m
# CONFIG_SCSI_SATA_VITESSE is not set
# CONFIG_SCSI_BUSLOGIC is not set
CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
# CONFIG_SCSI_SYM53C8XX_IOMAPPED is not set
-# CONFIG_SCSI_IPR is not set
# CONFIG_SCSI_PCI2000 is not set
# CONFIG_SCSI_PCI2220I is not set
# CONFIG_SCSI_QLOGIC_ISP is not set
# CONFIG_PCMCIA_FDOMAIN is not set
# CONFIG_PCMCIA_NINJA_SCSI is not set
CONFIG_PCMCIA_QLOGIC=m
-# CONFIG_PCMCIA_SYM53C500 is not set
#
# Multi-device support (RAID and LVM)
#
# CONFIG_IP_VS is not set
# CONFIG_IPV6 is not set
+# CONFIG_DECNET is not set
+# CONFIG_BRIDGE is not set
CONFIG_NETFILTER=y
CONFIG_NETFILTER_DEBUG=y
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_IP_NF_COMPAT_IPCHAINS=m
CONFIG_IP_NF_COMPAT_IPFWADM=m
-# CONFIG_IP_NF_RAW is not set
CONFIG_XFRM=y
CONFIG_XFRM_USER=m
#
# CONFIG_IP_SCTP is not set
# CONFIG_ATM is not set
-# CONFIG_BRIDGE is not set
# CONFIG_VLAN_8021Q is not set
-# CONFIG_DECNET is not set
CONFIG_LLC=m
CONFIG_LLC2=m
# CONFIG_IPX is not set
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# Network testing
#
# CONFIG_NET_PKTGEN is not set
-# CONFIG_NETPOLL is not set
-# CONFIG_NET_POLL_CONTROLLER is not set
-# CONFIG_HAMRADIO is not set
-# CONFIG_IRDA is not set
-# CONFIG_BT is not set
CONFIG_NETDEVICES=y
-CONFIG_DUMMY=m
-CONFIG_BONDING=m
-# CONFIG_EQUALIZER is not set
-CONFIG_TUN=m
-# CONFIG_ETHERTAP is not set
#
# ARCnet devices
#
# CONFIG_ARCNET is not set
+CONFIG_DUMMY=m
+CONFIG_BONDING=m
+# CONFIG_EQUALIZER is not set
+CONFIG_TUN=m
+# CONFIG_ETHERTAP is not set
#
# Ethernet (10 or 100Mbit)
# CONFIG_8139TOO_TUNE_TWISTER is not set
# CONFIG_8139TOO_8129 is not set
# CONFIG_8139_OLD_RX_RESET is not set
+CONFIG_8139_RXBUF_IDX=1
# CONFIG_SIS900 is not set
# CONFIG_EPIC100 is not set
# CONFIG_SUNDANCE is not set
# CONFIG_HAMACHI is not set
# CONFIG_YELLOWFIN is not set
# CONFIG_R8169 is not set
+# CONFIG_SIS190 is not set
# CONFIG_SK98LIN is not set
CONFIG_TIGON3=m
#
CONFIG_IXGB=y
CONFIG_IXGB_NAPI=y
-# CONFIG_S2IO is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+CONFIG_PPP=m
+# CONFIG_PPP_MULTILINK is not set
+# CONFIG_PPP_FILTER is not set
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPPOE=m
+# CONFIG_SLIP is not set
+
+#
+# Wireless LAN (non-hamradio)
+#
+# CONFIG_NET_RADIO is not set
#
# Token Ring devices
#
# CONFIG_TR is not set
+# CONFIG_NET_FC is not set
+# CONFIG_RCPCI is not set
+# CONFIG_SHAPER is not set
+# CONFIG_NETCONSOLE is not set
#
-# Wireless LAN (non-hamradio)
+# Wan interfaces
#
-# CONFIG_NET_RADIO is not set
+# CONFIG_WAN is not set
#
# PCMCIA network device support
CONFIG_PCMCIA_AXNET=m
#
-# Wan interfaces
+# Amateur Radio support
#
-# CONFIG_WAN is not set
-# CONFIG_FDDI is not set
-# CONFIG_HIPPI is not set
-CONFIG_PPP=m
-# CONFIG_PPP_MULTILINK is not set
-# CONFIG_PPP_FILTER is not set
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
-CONFIG_PPP_BSDCOMP=m
-CONFIG_PPPOE=m
-# CONFIG_SLIP is not set
-# CONFIG_NET_FC is not set
-# CONFIG_SHAPER is not set
-# CONFIG_NETCONSOLE is not set
+# CONFIG_HAMRADIO is not set
+
+#
+# IrDA (infrared) support
+#
+# CONFIG_IRDA is not set
+
+#
+# Bluetooth support
+#
+# CONFIG_BT is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
#
# ISDN subsystem
# CONFIG_FB_CIRRUS is not set
# CONFIG_FB_PM2 is not set
# CONFIG_FB_CYBER2000 is not set
-# CONFIG_FB_ASILIANT is not set
# CONFIG_FB_IMSTT is not set
CONFIG_FB_STI=y
# CONFIG_FB_RIVA is not set
CONFIG_USB_KBTAB=m
# CONFIG_USB_POWERMATE is not set
# CONFIG_USB_MTOUCH is not set
-# CONFIG_USB_EGALAX is not set
# CONFIG_USB_XPAD is not set
# CONFIG_USB_ATI_REMOTE is not set
CONFIG_USB_LEGOTOWER=m
# CONFIG_USB_LCD is not set
# CONFIG_USB_LED is not set
-# CONFIG_USB_CYTHERM is not set
-# CONFIG_USB_PHIDGETSERVO is not set
# CONFIG_USB_TEST is not set
#
#
CONFIG_PROC_FS=y
CONFIG_PROC_KCORE=y
-CONFIG_SYSFS=y
# CONFIG_DEVFS_FS is not set
# CONFIG_DEVPTS_FS_XATTR is not set
CONFIG_TMPFS=y
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
+# CONFIG_INTERMEZZO_FS is not set
# CONFIG_AFS_FS is not set
#
# CONFIG_CRYPTO_ARC4 is not set
CONFIG_CRYPTO_DEFLATE=m
# CONFIG_CRYPTO_MICHAEL_MIC is not set
-CONFIG_CRYPTO_CRC32C=m
CONFIG_CRYPTO_TEST=m
#
# Library routines
#
CONFIG_CRC32=y
-CONFIG_LIBCRC32C=m
CONFIG_ZLIB_INFLATE=m
CONFIG_ZLIB_DEFLATE=m
+++ /dev/null
-#
-# Automatically generated make config: don't edit
-#
-CONFIG_PARISC=y
-CONFIG_MMU=y
-CONFIG_STACK_GROWSUP=y
-CONFIG_RWSEM_GENERIC_SPINLOCK=y
-
-#
-# Code maturity level options
-#
-CONFIG_EXPERIMENTAL=y
-# CONFIG_CLEAN_COMPILE is not set
-# CONFIG_STANDALONE is not set
-CONFIG_BROKEN=y
-CONFIG_BROKEN_ON_SMP=y
-
-#
-# General setup
-#
-CONFIG_SWAP=y
-CONFIG_SYSVIPC=y
-CONFIG_POSIX_MQUEUE=y
-# CONFIG_BSD_PROCESS_ACCT is not set
-CONFIG_SYSCTL=y
-# CONFIG_AUDIT is not set
-CONFIG_LOG_BUF_SHIFT=16
-CONFIG_HOTPLUG=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_EMBEDDED=y
-CONFIG_KALLSYMS=y
-CONFIG_KALLSYMS_ALL=y
-CONFIG_FUTEX=y
-CONFIG_EPOLL=y
-CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_AS=y
-CONFIG_IOSCHED_DEADLINE=y
-CONFIG_IOSCHED_CFQ=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-
-#
-# Loadable module support
-#
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_OBSOLETE_MODPARM=y
-# CONFIG_MODVERSIONS is not set
-CONFIG_KMOD=y
-
-#
-# Processor type and features
-#
-# CONFIG_PA7000 is not set
-# CONFIG_PA7100LC is not set
-# CONFIG_PA7200 is not set
-# CONFIG_PA7300LC is not set
-CONFIG_PA8X00=y
-CONFIG_PA20=y
-CONFIG_PREFETCH=y
-CONFIG_PARISC64=y
-CONFIG_64BIT=y
-# CONFIG_SMP is not set
-CONFIG_DISCONTIGMEM=y
-# CONFIG_PREEMPT is not set
-CONFIG_COMPAT=y
-
-#
-# Bus options (PCI, PCMCIA, EISA, GSC, ISA)
-#
-# CONFIG_GSC is not set
-CONFIG_PCI=y
-CONFIG_PCI_LEGACY_PROC=y
-CONFIG_PCI_NAMES=y
-CONFIG_PCI_LBA=y
-CONFIG_IOSAPIC=y
-CONFIG_IOMMU_SBA=y
-# CONFIG_SUPERIO is not set
-CONFIG_CHASSIS_LCD_LED=y
-# CONFIG_PDC_CHASSIS is not set
-
-#
-# PCMCIA/CardBus support
-#
-CONFIG_PCMCIA=m
-CONFIG_PCMCIA_DEBUG=y
-CONFIG_YENTA=m
-CONFIG_CARDBUS=y
-# CONFIG_I82092 is not set
-# CONFIG_TCIC is not set
-
-#
-# PCI Hotplug Support
-#
-# CONFIG_HOTPLUG_PCI is not set
-
-#
-# Executable file formats
-#
-CONFIG_BINFMT_ELF=y
-# CONFIG_BINFMT_MISC is not set
-
-#
-# Device Drivers
-#
-
-#
-# Generic Driver Options
-#
-# CONFIG_FW_LOADER is not set
-CONFIG_DEBUG_DRIVER=y
-
-#
-# Memory Technology Devices (MTD)
-#
-# CONFIG_MTD is not set
-
-#
-# Parallel port support
-#
-# CONFIG_PARPORT is not set
-
-#
-# Plug and Play support
-#
-
-#
-# Block devices
-#
-# CONFIG_BLK_DEV_FD is not set
-# CONFIG_BLK_CPQ_DA is not set
-# CONFIG_BLK_CPQ_CISS_DA is not set
-# CONFIG_BLK_DEV_DAC960 is not set
-CONFIG_BLK_DEV_UMEM=m
-CONFIG_BLK_DEV_LOOP=y
-# CONFIG_BLK_DEV_CRYPTOLOOP is not set
-# CONFIG_BLK_DEV_NBD is not set
-# CONFIG_BLK_DEV_CARMEL is not set
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=6144
-CONFIG_BLK_DEV_INITRD=y
-
-#
-# ATA/ATAPI/MFM/RLL support
-#
-# CONFIG_IDE is not set
-
-#
-# SCSI device support
-#
-CONFIG_SCSI=y
-CONFIG_SCSI_PROC_FS=y
-
-#
-# SCSI support type (disk, tape, CD-ROM)
-#
-CONFIG_BLK_DEV_SD=y
-CONFIG_CHR_DEV_ST=y
-# CONFIG_CHR_DEV_OSST is not set
-CONFIG_BLK_DEV_SR=y
-# CONFIG_BLK_DEV_SR_VENDOR is not set
-CONFIG_CHR_DEV_SG=y
-
-#
-# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
-#
-CONFIG_SCSI_MULTI_LUN=y
-# CONFIG_SCSI_CONSTANTS is not set
-# CONFIG_SCSI_LOGGING is not set
-
-#
-# SCSI Transport Attributes
-#
-CONFIG_SCSI_SPI_ATTRS=y
-CONFIG_SCSI_FC_ATTRS=m
-
-#
-# SCSI low-level drivers
-#
-# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
-# CONFIG_SCSI_ACARD is not set
-# CONFIG_SCSI_AACRAID is not set
-# CONFIG_SCSI_AIC7XXX is not set
-# CONFIG_SCSI_AIC7XXX_OLD is not set
-# CONFIG_SCSI_AIC79XX is not set
-# CONFIG_SCSI_ADVANSYS is not set
-# CONFIG_SCSI_MEGARAID is not set
-# CONFIG_SCSI_SATA is not set
-# CONFIG_SCSI_BUSLOGIC is not set
-# CONFIG_SCSI_CPQFCTS is not set
-# CONFIG_SCSI_DMX3191D is not set
-# CONFIG_SCSI_EATA is not set
-# CONFIG_SCSI_EATA_PIO is not set
-# CONFIG_SCSI_FUTURE_DOMAIN is not set
-# CONFIG_SCSI_GDTH is not set
-# CONFIG_SCSI_IPS is not set
-# CONFIG_SCSI_INITIO is not set
-# CONFIG_SCSI_INIA100 is not set
-CONFIG_SCSI_SYM53C8XX_2=y
-CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0
-CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
-CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
-CONFIG_SCSI_SYM53C8XX_IOMAPPED=y
-# CONFIG_SCSI_IPR is not set
-# CONFIG_SCSI_PCI2000 is not set
-# CONFIG_SCSI_PCI2220I is not set
-# CONFIG_SCSI_QLOGIC_ISP is not set
-CONFIG_SCSI_QLOGIC_FC=m
-# CONFIG_SCSI_QLOGIC_FC_FIRMWARE is not set
-CONFIG_SCSI_QLOGIC_1280=m
-CONFIG_SCSI_QLA2XXX=y
-# CONFIG_SCSI_QLA21XX is not set
-# CONFIG_SCSI_QLA22XX is not set
-CONFIG_SCSI_QLA2300=m
-CONFIG_SCSI_QLA2322=m
-CONFIG_SCSI_QLA6312=m
-CONFIG_SCSI_QLA6322=m
-# CONFIG_SCSI_DC395x is not set
-# CONFIG_SCSI_DC390T is not set
-CONFIG_SCSI_DEBUG=m
-
-#
-# PCMCIA SCSI adapter support
-#
-# CONFIG_PCMCIA_FDOMAIN is not set
-# CONFIG_PCMCIA_QLOGIC is not set
-# CONFIG_PCMCIA_SYM53C500 is not set
-
-#
-# Multi-device support (RAID and LVM)
-#
-CONFIG_MD=y
-CONFIG_BLK_DEV_MD=y
-CONFIG_MD_LINEAR=y
-CONFIG_MD_RAID0=y
-CONFIG_MD_RAID1=y
-# CONFIG_MD_RAID5 is not set
-# CONFIG_MD_RAID6 is not set
-# CONFIG_MD_MULTIPATH is not set
-# CONFIG_BLK_DEV_DM is not set
-
-#
-# Fusion MPT device support
-#
-CONFIG_FUSION=m
-CONFIG_FUSION_MAX_SGE=40
-CONFIG_FUSION_ISENSE=m
-CONFIG_FUSION_CTL=m
-
-#
-# IEEE 1394 (FireWire) support
-#
-# CONFIG_IEEE1394 is not set
-
-#
-# I2O device support
-#
-# CONFIG_I2O is not set
-
-#
-# Networking support
-#
-CONFIG_NET=y
-
-#
-# Networking options
-#
-CONFIG_PACKET=y
-CONFIG_PACKET_MMAP=y
-CONFIG_NETLINK_DEV=y
-CONFIG_UNIX=y
-CONFIG_NET_KEY=m
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-# CONFIG_IP_ADVANCED_ROUTER is not set
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-# CONFIG_IP_PNP_RARP is not set
-# CONFIG_NET_IPIP is not set
-# CONFIG_NET_IPGRE is not set
-# CONFIG_IP_MROUTE is not set
-# CONFIG_ARPD is not set
-# CONFIG_SYN_COOKIES is not set
-CONFIG_INET_AH=m
-CONFIG_INET_ESP=m
-# CONFIG_INET_IPCOMP is not set
-
-#
-# IP: Virtual Server Configuration
-#
-# CONFIG_IP_VS is not set
-# CONFIG_IPV6 is not set
-CONFIG_NETFILTER=y
-# CONFIG_NETFILTER_DEBUG is not set
-
-#
-# IP: Netfilter Configuration
-#
-CONFIG_IP_NF_CONNTRACK=m
-CONFIG_IP_NF_FTP=m
-CONFIG_IP_NF_IRC=m
-CONFIG_IP_NF_TFTP=m
-CONFIG_IP_NF_AMANDA=m
-CONFIG_IP_NF_QUEUE=m
-CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_LIMIT=m
-CONFIG_IP_NF_MATCH_IPRANGE=m
-CONFIG_IP_NF_MATCH_MAC=m
-CONFIG_IP_NF_MATCH_PKTTYPE=m
-CONFIG_IP_NF_MATCH_MARK=m
-CONFIG_IP_NF_MATCH_MULTIPORT=m
-CONFIG_IP_NF_MATCH_TOS=m
-CONFIG_IP_NF_MATCH_RECENT=m
-CONFIG_IP_NF_MATCH_ECN=m
-CONFIG_IP_NF_MATCH_DSCP=m
-CONFIG_IP_NF_MATCH_AH_ESP=m
-CONFIG_IP_NF_MATCH_LENGTH=m
-CONFIG_IP_NF_MATCH_TTL=m
-CONFIG_IP_NF_MATCH_TCPMSS=m
-CONFIG_IP_NF_MATCH_HELPER=m
-CONFIG_IP_NF_MATCH_STATE=m
-CONFIG_IP_NF_MATCH_CONNTRACK=m
-CONFIG_IP_NF_MATCH_OWNER=m
-CONFIG_IP_NF_FILTER=m
-CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_NAT=m
-CONFIG_IP_NF_NAT_NEEDED=y
-CONFIG_IP_NF_TARGET_MASQUERADE=m
-CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_IP_NF_TARGET_NETMAP=m
-CONFIG_IP_NF_TARGET_SAME=m
-# CONFIG_IP_NF_NAT_LOCAL is not set
-CONFIG_IP_NF_NAT_SNMP_BASIC=m
-CONFIG_IP_NF_NAT_IRC=m
-CONFIG_IP_NF_NAT_FTP=m
-CONFIG_IP_NF_NAT_TFTP=m
-CONFIG_IP_NF_NAT_AMANDA=m
-CONFIG_IP_NF_MANGLE=m
-CONFIG_IP_NF_TARGET_TOS=m
-CONFIG_IP_NF_TARGET_ECN=m
-CONFIG_IP_NF_TARGET_DSCP=m
-CONFIG_IP_NF_TARGET_MARK=m
-CONFIG_IP_NF_TARGET_CLASSIFY=m
-CONFIG_IP_NF_TARGET_LOG=m
-CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_IP_NF_TARGET_TCPMSS=m
-CONFIG_IP_NF_ARPTABLES=m
-CONFIG_IP_NF_ARPFILTER=m
-CONFIG_IP_NF_ARP_MANGLE=m
-# CONFIG_IP_NF_COMPAT_IPCHAINS is not set
-# CONFIG_IP_NF_COMPAT_IPFWADM is not set
-CONFIG_IP_NF_TARGET_NOTRACK=m
-CONFIG_IP_NF_RAW=m
-CONFIG_XFRM=y
-CONFIG_XFRM_USER=m
-
-#
-# SCTP Configuration (EXPERIMENTAL)
-#
-# CONFIG_IP_SCTP is not set
-# CONFIG_ATM is not set
-# CONFIG_BRIDGE is not set
-# CONFIG_VLAN_8021Q is not set
-# CONFIG_DECNET is not set
-CONFIG_LLC=m
-CONFIG_LLC2=m
-# CONFIG_IPX is not set
-# CONFIG_ATALK is not set
-# CONFIG_X25 is not set
-# CONFIG_LAPB is not set
-# CONFIG_NET_DIVERT is not set
-# CONFIG_ECONET is not set
-# CONFIG_WAN_ROUTER is not set
-# CONFIG_NET_FASTROUTE is not set
-# CONFIG_NET_HW_FLOWCONTROL is not set
-
-#
-# QoS and/or fair queueing
-#
-# CONFIG_NET_SCHED is not set
-
-#
-# Network testing
-#
-CONFIG_NET_PKTGEN=m
-# CONFIG_NETPOLL is not set
-# CONFIG_NET_POLL_CONTROLLER is not set
-# CONFIG_HAMRADIO is not set
-# CONFIG_IRDA is not set
-# CONFIG_BT is not set
-CONFIG_NETDEVICES=y
-CONFIG_DUMMY=m
-CONFIG_BONDING=m
-# CONFIG_EQUALIZER is not set
-CONFIG_TUN=m
-# CONFIG_ETHERTAP is not set
-
-#
-# ARCnet devices
-#
-# CONFIG_ARCNET is not set
-
-#
-# Ethernet (10 or 100Mbit)
-#
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=m
-# CONFIG_HAPPYMEAL is not set
-# CONFIG_SUNGEM is not set
-CONFIG_NET_VENDOR_3COM=y
-CONFIG_VORTEX=m
-CONFIG_TYPHOON=m
-
-#
-# Tulip family network device support
-#
-CONFIG_NET_TULIP=y
-CONFIG_DE2104X=y
-CONFIG_TULIP=y
-# CONFIG_TULIP_MWI is not set
-CONFIG_TULIP_MMIO=y
-# CONFIG_TULIP_NAPI is not set
-# CONFIG_DE4X5 is not set
-# CONFIG_WINBOND_840 is not set
-# CONFIG_DM9102 is not set
-CONFIG_PCMCIA_XIRCOM=m
-CONFIG_PCMCIA_XIRTULIP=m
-CONFIG_HP100=m
-CONFIG_NET_PCI=y
-CONFIG_PCNET32=m
-# CONFIG_AMD8111_ETH is not set
-# CONFIG_ADAPTEC_STARFIRE is not set
-# CONFIG_B44 is not set
-# CONFIG_FORCEDETH is not set
-# CONFIG_DGRS is not set
-CONFIG_EEPRO100=m
-# CONFIG_EEPRO100_PIO is not set
-CONFIG_E100=m
-CONFIG_E100_NAPI=y
-# CONFIG_FEALNX is not set
-CONFIG_NATSEMI=m
-# CONFIG_NE2K_PCI is not set
-# CONFIG_8139CP is not set
-CONFIG_8139TOO=m
-# CONFIG_8139TOO_PIO is not set
-# CONFIG_8139TOO_TUNE_TWISTER is not set
-# CONFIG_8139TOO_8129 is not set
-# CONFIG_8139_OLD_RX_RESET is not set
-# CONFIG_SIS900 is not set
-CONFIG_EPIC100=m
-# CONFIG_SUNDANCE is not set
-CONFIG_VIA_RHINE=m
-CONFIG_VIA_RHINE_MMIO=y
-
-#
-# Ethernet (1000 Mbit)
-#
-CONFIG_ACENIC=m
-CONFIG_ACENIC_OMIT_TIGON_I=y
-CONFIG_DL2K=m
-CONFIG_E1000=m
-CONFIG_E1000_NAPI=y
-# CONFIG_NS83820 is not set
-# CONFIG_HAMACHI is not set
-# CONFIG_YELLOWFIN is not set
-# CONFIG_R8169 is not set
-# CONFIG_SK98LIN is not set
-CONFIG_TIGON3=m
-
-#
-# Ethernet (10000 Mbit)
-#
-CONFIG_IXGB=m
-CONFIG_IXGB_NAPI=y
-CONFIG_S2IO=m
-CONFIG_S2IO_NAPI=y
-
-#
-# Token Ring devices
-#
-# CONFIG_TR is not set
-
-#
-# Wireless LAN (non-hamradio)
-#
-CONFIG_NET_RADIO=y
-
-#
-# Obsolete Wireless cards support (pre-802.11)
-#
-# CONFIG_STRIP is not set
-CONFIG_PCMCIA_WAVELAN=m
-CONFIG_PCMCIA_NETWAVE=m
-
-#
-# Wireless 802.11 Frequency Hopping cards support
-#
-# CONFIG_PCMCIA_RAYCS is not set
-
-#
-# Wireless 802.11b ISA/PCI cards support
-#
-# CONFIG_AIRO is not set
-CONFIG_HERMES=m
-CONFIG_PLX_HERMES=m
-CONFIG_TMD_HERMES=m
-CONFIG_PCI_HERMES=m
-# CONFIG_ATMEL is not set
-
-#
-# Wireless 802.11b Pcmcia/Cardbus cards support
-#
-CONFIG_PCMCIA_HERMES=m
-CONFIG_AIRO_CS=m
-# CONFIG_PCMCIA_WL3501 is not set
-
-#
-# Prism GT/Duette 802.11(a/b/g) PCI/Cardbus support
-#
-# CONFIG_PRISM54 is not set
-CONFIG_NET_WIRELESS=y
-
-#
-# PCMCIA network device support
-#
-CONFIG_NET_PCMCIA=y
-CONFIG_PCMCIA_3C589=m
-CONFIG_PCMCIA_3C574=m
-# CONFIG_PCMCIA_FMVJ18X is not set
-# CONFIG_PCMCIA_PCNET is not set
-# CONFIG_PCMCIA_NMCLAN is not set
-CONFIG_PCMCIA_SMC91C92=m
-CONFIG_PCMCIA_XIRC2PS=m
-# CONFIG_PCMCIA_AXNET is not set
-
-#
-# Wan interfaces
-#
-# CONFIG_WAN is not set
-# CONFIG_FDDI is not set
-# CONFIG_HIPPI is not set
-CONFIG_PPP=m
-# CONFIG_PPP_MULTILINK is not set
-# CONFIG_PPP_FILTER is not set
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
-CONFIG_PPP_BSDCOMP=m
-# CONFIG_PPPOE is not set
-# CONFIG_SLIP is not set
-# CONFIG_NET_FC is not set
-# CONFIG_SHAPER is not set
-# CONFIG_NETCONSOLE is not set
-
-#
-# ISDN subsystem
-#
-# CONFIG_ISDN is not set
-
-#
-# Telephony Support
-#
-# CONFIG_PHONE is not set
-
-#
-# Input device support
-#
-CONFIG_INPUT=y
-
-#
-# Userland interfaces
-#
-# CONFIG_INPUT_MOUSEDEV is not set
-# CONFIG_INPUT_JOYDEV is not set
-# CONFIG_INPUT_TSDEV is not set
-# CONFIG_INPUT_EVDEV is not set
-# CONFIG_INPUT_EVBUG is not set
-
-#
-# Input I/O drivers
-#
-# CONFIG_GAMEPORT is not set
-CONFIG_SOUND_GAMEPORT=y
-# CONFIG_SERIO is not set
-
-#
-# Input Device Drivers
-#
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_INPUT_JOYSTICK is not set
-# CONFIG_INPUT_TOUCHSCREEN is not set
-# CONFIG_INPUT_MISC is not set
-
-#
-# Character devices
-#
-CONFIG_VT=y
-CONFIG_VT_CONSOLE=y
-CONFIG_HW_CONSOLE=y
-# CONFIG_SERIAL_NONSTANDARD is not set
-
-#
-# Serial drivers
-#
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-# CONFIG_SERIAL_8250_CS is not set
-CONFIG_SERIAL_8250_NR_UARTS=8
-CONFIG_SERIAL_8250_EXTENDED=y
-CONFIG_SERIAL_8250_MANY_PORTS=y
-CONFIG_SERIAL_8250_SHARE_IRQ=y
-# CONFIG_SERIAL_8250_DETECT_IRQ is not set
-# CONFIG_SERIAL_8250_MULTIPORT is not set
-# CONFIG_SERIAL_8250_RSA is not set
-
-#
-# Non-8250 serial port support
-#
-# CONFIG_SERIAL_MUX is not set
-CONFIG_PDC_CONSOLE=y
-CONFIG_SERIAL_CORE=y
-CONFIG_SERIAL_CORE_CONSOLE=y
-CONFIG_UNIX98_PTYS=y
-# CONFIG_LEGACY_PTYS is not set
-# CONFIG_QIC02_TAPE is not set
-
-#
-# IPMI
-#
-# CONFIG_IPMI_HANDLER is not set
-
-#
-# Watchdog Cards
-#
-# CONFIG_WATCHDOG is not set
-CONFIG_GEN_RTC=y
-CONFIG_GEN_RTC_X=y
-# CONFIG_DTLK is not set
-# CONFIG_R3964 is not set
-# CONFIG_APPLICOM is not set
-
-#
-# Ftape, the floppy tape device driver
-#
-# CONFIG_FTAPE is not set
-# CONFIG_AGP is not set
-# CONFIG_DRM is not set
-
-#
-# PCMCIA character devices
-#
-# CONFIG_SYNCLINK_CS is not set
-CONFIG_RAW_DRIVER=y
-CONFIG_MAX_RAW_DEVS=256
-
-#
-# I2C support
-#
-# CONFIG_I2C is not set
-
-#
-# Misc devices
-#
-
-#
-# Multimedia devices
-#
-# CONFIG_VIDEO_DEV is not set
-
-#
-# Digital Video Broadcasting Devices
-#
-# CONFIG_DVB is not set
-
-#
-# Graphics support
-#
-# CONFIG_FB is not set
-
-#
-# Console display driver support
-#
-# CONFIG_MDA_CONSOLE is not set
-CONFIG_DUMMY_CONSOLE_COLUMNS=160
-CONFIG_DUMMY_CONSOLE_ROWS=64
-CONFIG_DUMMY_CONSOLE=y
-
-#
-# Sound
-#
-# CONFIG_SOUND is not set
-
-#
-# USB support
-#
-# CONFIG_USB is not set
-
-#
-# USB Gadget Support
-#
-# CONFIG_USB_GADGET is not set
-
-#
-# File systems
-#
-CONFIG_EXT2_FS=y
-# CONFIG_EXT2_FS_XATTR is not set
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_FS_XATTR is not set
-CONFIG_JBD=y
-# CONFIG_JBD_DEBUG is not set
-# CONFIG_REISERFS_FS is not set
-CONFIG_JFS_FS=m
-# CONFIG_JFS_POSIX_ACL is not set
-# CONFIG_JFS_DEBUG is not set
-# CONFIG_JFS_STATISTICS is not set
-CONFIG_XFS_FS=m
-# CONFIG_XFS_RT is not set
-# CONFIG_XFS_QUOTA is not set
-# CONFIG_XFS_SECURITY is not set
-# CONFIG_XFS_POSIX_ACL is not set
-# CONFIG_MINIX_FS is not set
-# CONFIG_ROMFS_FS is not set
-# CONFIG_QUOTA is not set
-# CONFIG_AUTOFS_FS is not set
-# CONFIG_AUTOFS4_FS is not set
-
-#
-# CD-ROM/DVD Filesystems
-#
-CONFIG_ISO9660_FS=y
-CONFIG_JOLIET=y
-# CONFIG_ZISOFS is not set
-CONFIG_UDF_FS=m
-
-#
-# DOS/FAT/NT Filesystems
-#
-CONFIG_FAT_FS=m
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=m
-# CONFIG_NTFS_FS is not set
-
-#
-# Pseudo filesystems
-#
-CONFIG_PROC_FS=y
-CONFIG_PROC_KCORE=y
-CONFIG_SYSFS=y
-# CONFIG_DEVFS_FS is not set
-# CONFIG_DEVPTS_FS_XATTR is not set
-CONFIG_TMPFS=y
-# CONFIG_HUGETLBFS is not set
-# CONFIG_HUGETLB_PAGE is not set
-CONFIG_RAMFS=y
-
-#
-# Miscellaneous filesystems
-#
-# CONFIG_ADFS_FS is not set
-# CONFIG_AFFS_FS is not set
-# CONFIG_HFS_FS is not set
-# CONFIG_HFSPLUS_FS is not set
-# CONFIG_BEFS_FS is not set
-# CONFIG_BFS_FS is not set
-# CONFIG_EFS_FS is not set
-# CONFIG_CRAMFS is not set
-# CONFIG_VXFS_FS is not set
-# CONFIG_HPFS_FS is not set
-# CONFIG_QNX4FS_FS is not set
-# CONFIG_SYSV_FS is not set
-CONFIG_UFS_FS=m
-# CONFIG_UFS_FS_WRITE is not set
-
-#
-# Network File Systems
-#
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-CONFIG_NFS_V4=y
-CONFIG_NFS_DIRECTIO=y
-CONFIG_NFSD=m
-CONFIG_NFSD_V3=y
-CONFIG_NFSD_V4=y
-CONFIG_NFSD_TCP=y
-CONFIG_ROOT_NFS=y
-CONFIG_LOCKD=y
-CONFIG_LOCKD_V4=y
-CONFIG_EXPORTFS=m
-CONFIG_SUNRPC=y
-CONFIG_SUNRPC_GSS=y
-CONFIG_RPCSEC_GSS_KRB5=y
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
-CONFIG_SMB_NLS_REMOTE="cp437"
-CONFIG_CIFS=m
-# CONFIG_CIFS_STATS is not set
-# CONFIG_NCP_FS is not set
-# CONFIG_CODA_FS is not set
-# CONFIG_AFS_FS is not set
-
-#
-# Partition Types
-#
-# CONFIG_PARTITION_ADVANCED is not set
-CONFIG_MSDOS_PARTITION=y
-
-#
-# Native Language Support
-#
-CONFIG_NLS=y
-CONFIG_NLS_DEFAULT="iso8859-1"
-CONFIG_NLS_CODEPAGE_437=m
-# CONFIG_NLS_CODEPAGE_737 is not set
-# CONFIG_NLS_CODEPAGE_775 is not set
-CONFIG_NLS_CODEPAGE_850=m
-CONFIG_NLS_CODEPAGE_852=m
-# CONFIG_NLS_CODEPAGE_855 is not set
-# CONFIG_NLS_CODEPAGE_857 is not set
-# CONFIG_NLS_CODEPAGE_860 is not set
-# CONFIG_NLS_CODEPAGE_861 is not set
-# CONFIG_NLS_CODEPAGE_862 is not set
-CONFIG_NLS_CODEPAGE_863=m
-# CONFIG_NLS_CODEPAGE_864 is not set
-CONFIG_NLS_CODEPAGE_865=m
-# CONFIG_NLS_CODEPAGE_866 is not set
-# CONFIG_NLS_CODEPAGE_869 is not set
-# CONFIG_NLS_CODEPAGE_936 is not set
-# CONFIG_NLS_CODEPAGE_950 is not set
-# CONFIG_NLS_CODEPAGE_932 is not set
-# CONFIG_NLS_CODEPAGE_949 is not set
-# CONFIG_NLS_CODEPAGE_874 is not set
-# CONFIG_NLS_ISO8859_8 is not set
-# CONFIG_NLS_CODEPAGE_1250 is not set
-# CONFIG_NLS_CODEPAGE_1251 is not set
-CONFIG_NLS_ISO8859_1=m
-CONFIG_NLS_ISO8859_2=m
-CONFIG_NLS_ISO8859_3=m
-CONFIG_NLS_ISO8859_4=m
-# CONFIG_NLS_ISO8859_5 is not set
-# CONFIG_NLS_ISO8859_6 is not set
-# CONFIG_NLS_ISO8859_7 is not set
-# CONFIG_NLS_ISO8859_9 is not set
-# CONFIG_NLS_ISO8859_13 is not set
-# CONFIG_NLS_ISO8859_14 is not set
-CONFIG_NLS_ISO8859_15=m
-# CONFIG_NLS_KOI8_R is not set
-# CONFIG_NLS_KOI8_U is not set
-CONFIG_NLS_UTF8=m
-
-#
-# Profiling support
-#
-CONFIG_PROFILING=y
-CONFIG_OPROFILE=m
-
-#
-# Kernel hacking
-#
-CONFIG_DEBUG_KERNEL=y
-# CONFIG_DEBUG_SLAB is not set
-CONFIG_MAGIC_SYSRQ=y
-# CONFIG_DEBUG_SPINLOCK is not set
-# CONFIG_FRAME_POINTER is not set
-# CONFIG_DEBUG_INFO is not set
-
-#
-# Security options
-#
-# CONFIG_SECURITY is not set
-
-#
-# Cryptographic options
-#
-CONFIG_CRYPTO=y
-CONFIG_CRYPTO_HMAC=y
-CONFIG_CRYPTO_NULL=m
-CONFIG_CRYPTO_MD4=m
-CONFIG_CRYPTO_MD5=y
-CONFIG_CRYPTO_SHA1=m
-CONFIG_CRYPTO_SHA256=m
-CONFIG_CRYPTO_SHA512=m
-CONFIG_CRYPTO_DES=y
-CONFIG_CRYPTO_BLOWFISH=m
-CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_AES=m
-CONFIG_CRYPTO_CAST5=m
-CONFIG_CRYPTO_CAST6=m
-# CONFIG_CRYPTO_ARC4 is not set
-CONFIG_CRYPTO_DEFLATE=m
-# CONFIG_CRYPTO_MICHAEL_MIC is not set
-CONFIG_CRYPTO_CRC32C=m
-CONFIG_CRYPTO_TEST=m
-
-#
-# Library routines
-#
-CONFIG_CRC32=y
-CONFIG_LIBCRC32C=m
-CONFIG_ZLIB_INFLATE=m
-CONFIG_ZLIB_DEFLATE=m
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
CONFIG_DEBUG_KERNEL=y
# CONFIG_DEBUG_SLAB is not set
CONFIG_MAGIC_SYSRQ=y
-# CONFIG_DEBUG_SPINLOCK is not set
-# CONFIG_DEBUG_RWLOCK is not set
CONFIG_FRAME_POINTER=y
# CONFIG_DEBUG_INFO is not set
{
struct page *page = pte_page(pte);
- if (pfn_valid(page_to_pfn(page)) && page_mapping(page) &&
+ if (VALID_PAGE(page) && page_mapping(page) &&
test_bit(PG_dcache_dirty, &page->flags)) {
flush_kernel_dcache_page(page_address(page));
{
seq_printf(m, "I-cache\t\t: %ld KB\n",
cache_info.ic_size/1024 );
- seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %d-way associative)\n",
+ seq_printf(m, "D-cache\t\t: %ld KB (%s)%s\n",
cache_info.dc_size/1024,
(cache_info.dc_conf.cc_wt ? "WT":"WB"),
- (cache_info.dc_conf.cc_sh ? ", shared I/D":""),
- (cache_info.dc_conf.cc_assoc)
+ (cache_info.dc_conf.cc_sh ? " - shared I/D":"")
);
seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
panic("parisc_cache_init: pdc_cache_info failed");
#if 0
- printk("ic_size %lx dc_size %lx it_size %lx\n",
- cache_info.ic_size,
- cache_info.dc_size,
- cache_info.it_size);
-
- printk("DC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
- cache_info.dc_base,
- cache_info.dc_stride,
- cache_info.dc_count,
- cache_info.dc_loop);
-
- printk("dc_conf = 0x%lx alias %d blk %d line %d shift %d\n",
- *(unsigned long *) (&cache_info.dc_conf),
- cache_info.dc_conf.cc_alias,
- cache_info.dc_conf.cc_block,
- cache_info.dc_conf.cc_line,
- cache_info.dc_conf.cc_shift);
- printk(" wt %d sh %d cst %d assoc %d\n",
- cache_info.dc_conf.cc_wt,
- cache_info.dc_conf.cc_sh,
- cache_info.dc_conf.cc_cst,
- cache_info.dc_conf.cc_assoc);
-
- printk("IC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
- cache_info.ic_base,
- cache_info.ic_stride,
- cache_info.ic_count,
- cache_info.ic_loop);
-
- printk("ic_conf = 0x%lx alias %d blk %d line %d shift %d\n",
- *(unsigned long *) (&cache_info.ic_conf),
- cache_info.ic_conf.cc_alias,
- cache_info.ic_conf.cc_block,
- cache_info.ic_conf.cc_line,
- cache_info.ic_conf.cc_shift);
- printk(" wt %d sh %d cst %d assoc %d\n",
- cache_info.ic_conf.cc_wt,
- cache_info.ic_conf.cc_sh,
- cache_info.ic_conf.cc_cst,
- cache_info.ic_conf.cc_assoc);
-
- printk("D-TLB conf: sh %d page %d cst %d aid %d pad1 %d \n",
- cache_info.dt_conf.tc_sh,
- cache_info.dt_conf.tc_page,
- cache_info.dt_conf.tc_cst,
- cache_info.dt_conf.tc_aid,
- cache_info.dt_conf.tc_pad1);
-
- printk("I-TLB conf: sh %d page %d cst %d aid %d pad1 %d \n",
- cache_info.it_conf.tc_sh,
- cache_info.it_conf.tc_page,
- cache_info.it_conf.tc_cst,
- cache_info.it_conf.tc_aid,
- cache_info.it_conf.tc_pad1);
+ printk(KERN_DEBUG "ic_size %lx dc_size %lx it_size %lx pdc_cache_info %d*long pdc_cache_cf %d\n",
+ cache_info.ic_size,
+ cache_info.dc_size,
+ cache_info.it_size,
+ sizeof (struct pdc_cache_info) / sizeof (long),
+ sizeof (struct pdc_cache_cf)
+ );
+
+ printk(KERN_DEBUG "dc base %x dc stride %x dc count %x dc loop %d\n",
+ cache_info.dc_base,
+ cache_info.dc_stride,
+ cache_info.dc_count,
+ cache_info.dc_loop);
+
+ printk(KERN_DEBUG "dc conf: alias %d block %d line %d wt %d sh %d cst %d assoc %d\n",
+ cache_info.dc_conf.cc_alias,
+ cache_info.dc_conf.cc_block,
+ cache_info.dc_conf.cc_line,
+ cache_info.dc_conf.cc_wt,
+ cache_info.dc_conf.cc_sh,
+ cache_info.dc_conf.cc_cst,
+ cache_info.dc_conf.cc_assoc);
+
+ printk(KERN_DEBUG "ic conf: alias %d block %d line %d wt %d sh %d cst %d assoc %d\n",
+ cache_info.ic_conf.cc_alias,
+ cache_info.ic_conf.cc_block,
+ cache_info.ic_conf.cc_line,
+ cache_info.ic_conf.cc_wt,
+ cache_info.ic_conf.cc_sh,
+ cache_info.ic_conf.cc_cst,
+ cache_info.ic_conf.cc_assoc);
+
+ printk(KERN_DEBUG "dt conf: sh %d page %d cst %d aid %d pad1 %d \n",
+ cache_info.dt_conf.tc_sh,
+ cache_info.dt_conf.tc_page,
+ cache_info.dt_conf.tc_cst,
+ cache_info.dt_conf.tc_aid,
+ cache_info.dt_conf.tc_pad1);
+
+ printk(KERN_DEBUG "it conf: sh %d page %d cst %d aid %d pad1 %d \n",
+ cache_info.it_conf.tc_sh,
+ cache_info.it_conf.tc_page,
+ cache_info.it_conf.tc_cst,
+ cache_info.it_conf.tc_aid,
+ cache_info.it_conf.tc_pad1);
#endif
split_tlb = 0;
split_tlb = 1;
}
- /* "New and Improved" version from Jim Hull
- * (1 << (cc_block-1)) * (cc_line << (4 + cnf.cc_shift))
- */
-#define CAFL_STRIDE(cnf) (cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift))
- dcache_stride = CAFL_STRIDE(cache_info.dc_conf);
- icache_stride = CAFL_STRIDE(cache_info.ic_conf);
-#undef CAFL_STRIDE
-
+ dcache_stride = (1 << (cache_info.dc_conf.cc_block + 3)) *
+ cache_info.dc_conf.cc_line;
+ icache_stride = (1 << (cache_info.ic_conf.cc_block + 3)) *
+ cache_info.ic_conf.cc_line;
#ifndef CONFIG_PA20
if (pdc_btlb_info(&btlb_info) < 0) {
memset(&btlb_info, 0, sizeof btlb_info);
if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
PDC_MODEL_NVA_UNSUPPORTED) {
- printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
-#if 0
+ printk(KERN_WARNING "Only equivalent aliasing supported\n");
+#ifndef CONFIG_SMP
panic("SMP kernel required to avoid non-equivalent aliasing");
#endif
}
disable_sr_hashing_asm(srhash_type);
}
-void flush_dcache_page(struct page *page)
+void __flush_dcache_page(struct page *page)
{
struct address_space *mapping = page_mapping(page);
struct vm_area_struct *mpnt = NULL;
unsigned long offset;
unsigned long addr;
pgoff_t pgoff;
- pte_t *pte;
- unsigned long pfn = page_to_pfn(page);
-
-
- if (mapping && !mapping_mapped(mapping)) {
- set_bit(PG_dcache_dirty, &page->flags);
- return;
- }
flush_kernel_dcache_page(page_address(page));
* isn't there, there's no point exciting the
* nadtlb handler into a nullification frenzy */
-
- if(!(pte = translation_exists(mpnt, addr)))
+ if (!translation_exists(mpnt, addr))
continue;
- /* make sure we really have this page: the private
- * mappings may cover this area but have COW'd this
- * particular page */
- if(pte_pfn(*pte) != pfn)
- continue;
-
__flush_cache_page(mpnt, addr);
break;
}
flush_dcache_mmap_unlock(mapping);
}
-EXPORT_SYMBOL(flush_dcache_page);
+EXPORT_SYMBOL(__flush_dcache_page);
/* Defined in arch/parisc/kernel/pacache.S */
EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
/* Look up a PTE in a 2-Level scheme (faulting at each
* level if the entry isn't present
*
- * NOTE: we use ldw even for LP64, since the short pointers
- * can address up to 1TB
- */
+ * NOTE: we use ldw even for LP64 because our pte
+ * and pmd are allocated <4GB */
.macro L2_ptep pmd,pte,index,va,fault
#if PT_NLEVELS == 3
EXTR \va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
#else
EXTR \va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
#endif
- DEP %r0,31,PAGE_SHIFT,\pmd /* clear offset */
+ DEP %r0,31,PAGE_SHIFT,\pmd /* clear offset */
copy %r0,\pte
ldw,s \index(\pmd),\pmd
- bb,>=,n \pmd,_PxD_PRESENT_BIT,\fault
- DEP %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
- copy \pmd,%r9
-#ifdef __LP64__
- shld %r9,PxD_VALUE_SHIFT,\pmd
-#else
- shlw %r9,PxD_VALUE_SHIFT,\pmd
-#endif
EXTR \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
- DEP %r0,31,PAGE_SHIFT,\pmd /* clear offset */
+ bb,>=,n \pmd,_PAGE_PRESENT_BIT,\fault
+ DEP %r0,31,PAGE_SHIFT,\pmd /* clear offset */
shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd
LDREG %r0(\pmd),\pte /* pmd is now pte */
bb,>=,n \pte,_PAGE_PRESENT_BIT,\fault
copy %r0,\pte
extrd,u,*= \va,31,32,%r0
ldw,s \index(\pgd),\pgd
- extrd,u,*= \va,31,32,%r0
- bb,>=,n \pgd,_PxD_PRESENT_BIT,\fault
- extrd,u,*= \va,31,32,%r0
- shld \pgd,PxD_VALUE_SHIFT,\index
- extrd,u,*= \va,31,32,%r0
- copy \index,\pgd
extrd,u,*<> \va,31,32,%r0
ldo ASM_PGD_PMD_OFFSET(\pgd),\pgd
+ extrd,u,*= \va,31,32,%r0
+ bb,>=,n \pgd,_PAGE_PRESENT_BIT,\fault
L2_ptep \pgd,\pte,\index,\va,\fault
.endm
/* Set the dirty bit (and accessed bit). No need to be
* clever, this is only used from the dirty fault */
- .macro update_dirty ptep,pte,tmp
+ .macro update_dirty ptep,pte,tmp,tmp1
ldi _PAGE_ACCESSED|_PAGE_DIRTY,\tmp
or \tmp,\pte,\pte
STREG \pte,0(\ptep)
ret_from_kernel_thread:
/* Call schedule_tail first though */
- BL schedule_tail, %r2
+ bl schedule_tail, %r2
nop
LDREG TI_TASK-THREAD_SZ_ALGN(%r30), %r1
and %r9,%r16,%r17
cmpb,<>,n %r16,%r17,nadtlb_fault /* Not fdc,fic,pdc */
bb,>=,n %r9,26,nadtlb_nullify /* m bit not set, just nullify */
- BL get_register,%r25
+ b,l get_register,%r25
extrw,u %r9,15,5,%r8 /* Get index register # */
CMPIB=,n -1,%r1,nadtlb_fault /* have to use slow path */
copy %r1,%r24
- BL get_register,%r25
+ b,l get_register,%r25
extrw,u %r9,10,5,%r8 /* Get base register # */
CMPIB=,n -1,%r1,nadtlb_fault /* have to use slow path */
- BL set_register,%r25
+ b,l set_register,%r25
add,l %r1,%r24,%r1 /* doesn't affect c/b bits */
nadtlb_nullify:
dbit_nolock_20w:
#endif
- update_dirty ptp,pte,t1
+ update_dirty ptp,pte,t0,t1
make_insert_tlb spc,pte,prot
dbit_nolock_11:
#endif
- update_dirty ptp,pte,t1
+ update_dirty ptp,pte,t0,t1
make_insert_tlb_11 spc,pte,prot
dbit_nolock_20:
#endif
- update_dirty ptp,pte,t1
+ update_dirty ptp,pte,t0,t1
make_insert_tlb spc,pte,prot
- f_extend pte,t1
+ f_extend pte,t0
idtlbt pte,prot
#include <asm/page.h>
#include <asm/pdc.h>
-#include <asm/pdcpat.h>
#include <asm/system.h>
#include <asm/processor.h> /* for boot_cpu_data */
*/
void pdc_emergency_unlock(void)
{
- /* Spinlock DEBUG code freaks out if we unconditionally unlock */
- if (spin_is_locked(&pdc_lock))
- spin_unlock(&pdc_lock);
+ spin_unlock(&pdc_lock);
}
#ifdef __LP64__
int pdc_pat_chassis_send_log(unsigned long state, unsigned long data)
{
- int retval = 0;
-
if (!is_pdc_pat())
return -1;
+ int retval = 0;
+
spin_lock_irq(&pdc_lock);
retval = mem_pdc_call(PDC_PAT_CHASSIS_LOG, PDC_PAT_CHASSIS_WRITE_LOG, __pa(&state), __pa(&data));
spin_unlock_irq(&pdc_lock);
return retval;
}
-
-/**
- * pdc_pat_io_pci_cfg_read - Read PCI configuration space.
- * @pci_addr: PCI configuration space address for which the read request is being made.
- * @pci_size: Size of read in bytes. Valid values are 1, 2, and 4.
- * @mem_addr: Pointer to return memory buffer.
- *
- */
-int pdc_pat_io_pci_cfg_read(unsigned long pci_addr, int pci_size, u32 *mem_addr)
-{
- int retval;
- spin_lock_irq(&pdc_lock);
- retval = mem_pdc_call(PDC_PAT_IO, PDC_PAT_IO_PCI_CONFIG_READ,
- __pa(pdc_result), pci_addr, pci_size);
- switch(pci_size) {
- case 1: *(u8 *) mem_addr = (u8) pdc_result[0];
- case 2: *(u16 *)mem_addr = (u16) pdc_result[0];
- case 4: *(u32 *)mem_addr = (u32) pdc_result[0];
- }
- spin_unlock_irq(&pdc_lock);
-
- return retval;
-}
-
-/**
- * pdc_pat_io_pci_cfg_write - Retrieve information about memory address ranges.
- * @pci_addr: PCI configuration space address for which the write request is being made.
- * @pci_size: Size of write in bytes. Valid values are 1, 2, and 4.
- * @value: Pointer to 1, 2, or 4 byte value in low order end of argument to be
- * written to PCI Config space.
- *
- */
-int pdc_pat_io_pci_cfg_write(unsigned long pci_addr, int pci_size, u32 val)
-{
- int retval;
-
- spin_lock_irq(&pdc_lock);
- retval = mem_pdc_call(PDC_PAT_IO, PDC_PAT_IO_PCI_CONFIG_WRITE,
- pci_addr, pci_size, val);
- spin_unlock_irq(&pdc_lock);
-
- return retval;
-}
#endif /* __LP64__ */
long real64_call(unsigned long fn, ...)
{
va_list args;
- extern struct wide_stack real64_stack __attribute__ ((alias ("real_stack")));
+ extern struct wide_stack real_stack;
extern unsigned long real64_call_asm(unsigned long *,
unsigned long *,
unsigned long);
va_start(args, fn);
- real64_stack.arg0 = va_arg(args, unsigned long);
- real64_stack.arg1 = va_arg(args, unsigned long);
- real64_stack.arg2 = va_arg(args, unsigned long);
- real64_stack.arg3 = va_arg(args, unsigned long);
- real64_stack.arg4 = va_arg(args, unsigned long);
- real64_stack.arg5 = va_arg(args, unsigned long);
- real64_stack.arg6 = va_arg(args, unsigned long);
- real64_stack.arg7 = va_arg(args, unsigned long);
- real64_stack.arg8 = va_arg(args, unsigned long);
- real64_stack.arg9 = va_arg(args, unsigned long);
- real64_stack.arg10 = va_arg(args, unsigned long);
- real64_stack.arg11 = va_arg(args, unsigned long);
- real64_stack.arg12 = va_arg(args, unsigned long);
- real64_stack.arg13 = va_arg(args, unsigned long);
+ real_stack.arg0 = va_arg(args, unsigned long);
+ real_stack.arg1 = va_arg(args, unsigned long);
+ real_stack.arg2 = va_arg(args, unsigned long);
+ real_stack.arg3 = va_arg(args, unsigned long);
+ real_stack.arg4 = va_arg(args, unsigned long);
+ real_stack.arg5 = va_arg(args, unsigned long);
+ real_stack.arg6 = va_arg(args, unsigned long);
+ real_stack.arg7 = va_arg(args, unsigned long);
+ real_stack.arg8 = va_arg(args, unsigned long);
+ real_stack.arg9 = va_arg(args, unsigned long);
+ real_stack.arg10 = va_arg(args, unsigned long);
+ real_stack.arg11 = va_arg(args, unsigned long);
+ real_stack.arg12 = va_arg(args, unsigned long);
+ real_stack.arg13 = va_arg(args, unsigned long);
va_end(args);
- return real64_call_asm(&real64_stack.sp, &real64_stack.arg0, fn);
+ return real64_call_asm(&real_stack.sp, &real_stack.arg0, fn);
}
#endif /* __LP64__ */
{HPHW_NPROC,0x67E,0x4,0x81,"Hitachi Tiny 80"},
{HPHW_NPROC,0x67F,0x4,0x81,"Hitachi Tiny 64"},
{HPHW_NPROC,0x700,0x4,0x91,"NEC Aska Processor"},
- {HPHW_NPROC,0x880,0x4,0x91,"Orca Mako"},
- {HPHW_NPROC,0x881,0x4,0x91,"Everest Mako"},
- {HPHW_NPROC,0x882,0x4,0x91,"Rainier/Medel Mako Slow"},
- {HPHW_NPROC,0x883,0x4,0x91,"Rainier/Medel Mako Fast"},
- {HPHW_NPROC,0x884,0x4,0x91,"Mt. Hamilton"},
- {HPHW_NPROC,0x885,0x4,0x91,"Mt. Hamilton DC-"},
- {HPHW_NPROC,0x886,0x4,0x91,"Storm Peak Slow DC-"},
- {HPHW_NPROC,0x887,0x4,0x91,"Storm Peak Slow"},
- {HPHW_NPROC,0x888,0x4,0x91,"Storm Peak Fast DC-"},
- {HPHW_NPROC,0x889,0x4,0x91,"Storm Peak Fast"},
- {HPHW_NPROC,0x88A,0x4,0x91,"Crestone Peak"},
{HPHW_A_DIRECT, 0x004, 0x0000D, 0x00, "Arrakis MUX"},
{HPHW_A_DIRECT, 0x005, 0x0000D, 0x00, "Dyun Kiuh MUX"},
{HPHW_A_DIRECT, 0x006, 0x0000D, 0x00, "Baat Kiuh AP/MUX (40299B)"},
{HPHW_BCPORT, 0x800, 0x0000C, 0x10, "DEW BC Merced Port"},
{HPHW_BCPORT, 0x801, 0x0000C, 0x10, "SMC Bus Interface Merced Bus0"},
{HPHW_BCPORT, 0x802, 0x0000C, 0x10, "SMC Bus INterface Merced Bus1"},
- {HPHW_BCPORT, 0x803, 0x0000C, 0x10, "IKE I/O BC Merced Port"},
- {HPHW_BCPORT, 0x781, 0x0000C, 0x00, "IKE I/O BC Ropes Port"},
- {HPHW_BCPORT, 0x804, 0x0000C, 0x10, "REO I/O BC Merced Port"},
- {HPHW_BCPORT, 0x782, 0x0000C, 0x00, "REO I/O BC Ropes Port"},
- {HPHW_BCPORT, 0x784, 0x0000C, 0x00, "Pluto I/O BC Ropes Port"},
+ {HPHW_BCPORT, 0x803, 0x0000C, 0x10, "IKE I/O Bus Converter Merced Port"},
+ {HPHW_BCPORT, 0x781, 0x0000C, 0x00, "IKE I/O Bus Converter Ropes Port"},
+ {HPHW_BCPORT, 0x804, 0x0000C, 0x10, "REO I/O Bus Converter Merced Port"},
+ {HPHW_BCPORT, 0x782, 0x0000C, 0x00, "REO I/O Bus Converter Ropes Port"},
{HPHW_BRIDGE, 0x680, 0x0000A, 0x00, "Dino PCI Bridge"},
{HPHW_BRIDGE, 0x682, 0x0000A, 0x00, "Cujo PCI Bridge"},
{HPHW_BRIDGE, 0x782, 0x0000A, 0x00, "Elroy PCI Bridge"},
{HPHW_BRIDGE, 0x583, 0x000A5, 0x00, "Saga PCI Bridge"},
- {HPHW_BRIDGE, 0x783, 0x0000A, 0x00, "Mercury PCI Bridge"},
- {HPHW_BRIDGE, 0x784, 0x0000A, 0x00, "Quicksilver AGP Bridge"},
{HPHW_B_DMA, 0x004, 0x00018, 0x00, "Parallel I/O"},
{HPHW_B_DMA, 0x004, 0x00019, 0x00, "Parallel RDB"},
{HPHW_B_DMA, 0x004, 0x00020, 0x80, "MID_BUS PSI"},
{HPHW_IOA, 0x581, 0x0000B, 0x10, "Uturn-IOA BC Runway Port"},
{HPHW_IOA, 0x582, 0x0000B, 0x10, "Astro BC Runway Port"},
{HPHW_IOA, 0x700, 0x0000B, 0x00, "NEC-IOS BC System Bus Port"},
- {HPHW_IOA, 0x880, 0x0000C, 0x10, "Pluto BC McKinley Port"},
{HPHW_MEMORY, 0x002, 0x00008, 0x00, "MID_BUS"},
{HPHW_MEMORY, 0x063, 0x00009, 0x00, "712/132 L2 Upgrade"},
{HPHW_MEMORY, 0x064, 0x00009, 0x00, "712/160 L2 Upgrade"},
{HPHW_MEMORY, 0x065, 0x00009, 0x00, "715/132 L2 Upgrade"},
{HPHW_MEMORY, 0x066, 0x00009, 0x00, "715/160 L2 Upgrade"},
- {HPHW_MEMORY, 0x0AF, 0x00009, 0x00, "Everest Mako Memory"},
{HPHW_OTHER, 0x004, 0x00030, 0x00, "Master"},
{HPHW_OTHER, 0x004, 0x00034, 0x00, "Slave"},
{HPHW_OTHER, 0x004, 0x00038, 0x00, "EDU"},
{HPHW_OTHER, 0x004, 0x00049, 0x00, "LGB Control"},
- {HPHW_MC, 0x004, 0x000C0, 0x00, "BMC IPMI Mgmt Ctlr"},
{HPHW_FAULTY, 0, } /* Special Marker for last entry */
};
[pcxw] { "PA8500 (PCX-W)", "2.0" },
[pcxw_] { "PA8600 (PCX-W+)", "2.0" },
[pcxw2] { "PA8700 (PCX-W2)", "2.0" },
- [mako] { "PA8800 (Mako)", "2.0" }
+ [mako] { "PA8800 (MAKO)", "2.0" }
};
const char * __init
/* Initialize startup VM. Just map first 8 MB of memory */
ldil L%PA(pg0),%r1
ldo R%PA(pg0)(%r1),%r1
- shr %r1,PxD_VALUE_SHIFT,%r3
- ldo (PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3
+ ldo _PAGE_TABLE(%r1),%r3
ldil L%PA(swapper_pg_dir),%r4
ldo R%PA(swapper_pg_dir)(%r4),%r4
ldo ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4),%r4
1:
stw %r3,0(%r4)
- ldo (ASM_PAGE_SIZE >> PxD_VALUE_SHIFT)(%r3),%r3
+ ldo ASM_PAGE_SIZE(%r3),%r3
addib,> -1,%r1,1b
ldo ASM_PGD_ENTRY_SIZE(%r4),%r4
ldil L%PA(smp_init_current_idle_task),%sp
ldo R%PA(smp_init_current_idle_task)(%sp),%sp
ldw 0(%sp),%sp /* load task address */
- tophys_r1 %sp
- ldw TASK_THREAD_INFO(%sp), %sp
mtctl %sp,%cr30 /* store in cr30 */
addil L%THREAD_SZ_ALGN,%sp /* stack is above task */
ldo R%THREAD_SZ_ALGN(%r1),%sp
ldil L%PA(pmd0),%r5
ldo R%PA(pmd0)(%r5),%r5
- shrd %r5,PxD_VALUE_SHIFT,%r3
- ldo (PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3
+ ldo _PAGE_TABLE(%r5),%r3
ldil L%PA(swapper_pg_dir),%r4
ldo R%PA(swapper_pg_dir)(%r4),%r4
stw %r3,ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4)
- shrd %r1,PxD_VALUE_SHIFT,%r3
- ldo (PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3
+ ldo _PAGE_TABLE(%r1),%r3
ldo ASM_PMD_ENTRY*ASM_PMD_ENTRY_SIZE(%r5),%r5
ldi ASM_PT_INITIAL,%r1
1:
stw %r3,0(%r5)
- ldo (ASM_PAGE_SIZE >> PxD_VALUE_SHIFT)(%r3),%r3
+ ldo ASM_PAGE_SIZE(%r3),%r3
addib,> -1,%r1,1b
ldo ASM_PMD_ENTRY_SIZE(%r5),%r5
- ldo _PAGE_KERNEL(%r0),%r3 /* Hardwired 0 phys addr start */
+ ldo _PAGE_KERNEL(%r0),%r3 /* Hardwired 0 phys addr start */
ldil L%PA(pg0),%r1
ldo R%PA(pg0)(%r1),%r1
/* Initialize the SP - monarch sets up smp_init_current_idle_task */
load32 PA(smp_init_current_idle_task),%sp
ldd 0(%sp),%sp /* load task address */
- tophys_r1 %sp
ldd TASK_THREAD_INFO(%sp), %sp
mtctl %sp,%cr30 /* store in cr30 */
ldo THREAD_SZ_ALGN(%sp),%sp
#include <linux/mm.h>
#include <asm/hardware.h>
#include <asm/io.h>
-#include <asm/mmzone.h>
#include <asm/pdc.h>
-#include <asm/pdcpat.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/parisc-device.h>
#endif
};
+struct irq_region_ops cpu_irq_ops = {
+ .disable_irq = disable_cpu_irq,
+ .enable_irq = enable_cpu_irq,
+ .mask_irq = unmask_cpu_irq,
+ .unmask_irq = unmask_cpu_irq
+};
struct irq_region cpu0_irq_region = {
.ops = {
{
struct irq_region *region;
- DBG_IRQ(irq, ("enable_irq(%d) %d+%d EIRR 0x%lx EIEM 0x%lx\n", irq,
- IRQ_REGION(irq), IRQ_OFFSET(irq), mfctl(23), mfctl(15)));
+ DBG_IRQ(irq, ("enable_irq(%d) %d+%d eiem 0x%lx\n", irq,
+ IRQ_REGION(irq), IRQ_OFFSET(irq), cpu_eiem));
irq = irq_canonicalize(irq);
region = irq_region[IRQ_REGION(irq)];
seq_puts(p, " ");
#ifdef CONFIG_SMP
for (i = 0; i < NR_CPUS; i++)
- if (cpu_online(i))
#endif
- seq_printf(p, " CPU%02d ", i);
+ seq_printf(p, " CPU%02d ", i);
#ifdef PARISC_IRQ_CR16_COUNTS
seq_printf(p, "[min/avg/max] (CPU cycle counts)");
seq_printf(p, "%3d: ", irq_no);
#ifdef CONFIG_SMP
for (; j < NR_CPUS; j++)
- if (cpu_online(j))
#endif
seq_printf(p, "%10u ", kstat_cpu(j).irqs[irq_no]);
next_cpu++; /* assign to "next" CPU we want this bugger on */
/* validate entry */
- while ((next_cpu < NR_CPUS) && (!cpu_data[next_cpu].txn_addr ||
- !cpu_online(next_cpu)))
+ while ((next_cpu < NR_CPUS) && !cpu_data[next_cpu].txn_addr)
next_cpu++;
if (next_cpu >= NR_CPUS)
irq_enter();
++kstat_cpu(cpu).irqs[irq];
- DBG_IRQ(irq, ("do_irq(%d) %d+%d eiem 0x%lx\n", irq, IRQ_REGION(irq), IRQ_OFFSET(irq), cpu_eiem));
+ DBG_IRQ(irq, ("do_irq(%d) %d+%d\n", irq, IRQ_REGION(irq), IRQ_OFFSET(irq)));
for (; action; action = action->next) {
#ifdef PARISC_IRQ_CR16_COUNTS
#ifdef DEBUG_IRQ
if (eirr_val != (1UL << MAX_CPU_IRQ))
- printk(KERN_DEBUG "do_cpu_irq_mask 0x%x & 0x%x\n", eirr_val, cpu_eiem);
+ printk(KERN_DEBUG "do_cpu_irq_mask %x\n", eirr_val);
#endif
/* Work our way from MSb to LSb...same order we alloc EIRs */
void __init init_IRQ(void)
{
local_irq_disable(); /* PARANOID - should already be disabled */
- mtctl(~0UL, 23); /* EIRR : clear all pending external intr */
+ mtctl(-1L, 23); /* EIRR : clear all pending external intr */
#ifdef CONFIG_SMP
if (!cpu_eiem)
cpu_eiem = EIEM_MASK(IPI_IRQ) | EIEM_MASK(TIMER_IRQ);
extern void $$dyncall(void);
EXPORT_SYMBOL($$dyncall);
#endif
-
-#ifdef CONFIG_DISCONTIGMEM
-#include <asm/mmzone.h>
-EXPORT_SYMBOL(node_data);
-EXPORT_SYMBOL(pfnnid_map);
-#endif
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
-#ifdef DEBUG_PCI
-#undef ASSERT
-#define ASSERT(expr) \
- if(!(expr)) { \
- printk("\n%s:%d: Assertion " #expr " failed!\n", \
- __FILE__, __LINE__); \
- panic(#expr); \
- }
-#else
-#define ASSERT(expr)
-#endif
-
-
static struct proc_dir_entry * proc_gsc_root = NULL;
static int pcxl_proc_info(char *buffer, char **start, off_t offset, int length);
static unsigned long pcxl_used_bytes = 0;
* interfaces to log Chassis Codes via PDC (firmware)
*
* Copyright (C) 2002 Laurent Canet <canetl@esiee.fr>
- * Copyright (C) 2002-2004 Thibaut VARENE <varenet@esiee.fr>
+ * Copyright (C) 2002-2003 Thibaut Varene <varenet@esiee.fr>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
#include <asm/pdc_chassis.h>
#include <asm/processor.h>
-#include <asm/pdc.h>
-#include <asm/pdcpat.h>
#ifdef CONFIG_PDC_CHASSIS
static int pdc_chassis_old = 0;
-static unsigned int pdc_chassis_enabled = 1;
-
-
-/**
- * pdc_chassis_setup() - Enable/disable pdc_chassis code at boot time.
- * @str configuration param: 0 to disable chassis log
- * @return 1
- */
-
-static int __init pdc_chassis_setup(char *str)
-{
- /*panic_timeout = simple_strtoul(str, NULL, 0);*/
- get_option(&str, &pdc_chassis_enabled);
- return 1;
-}
-__setup("pdcchassis=", pdc_chassis_setup);
/**
{
#ifdef CONFIG_PDC_CHASSIS
int handle = 0;
- if (pdc_chassis_enabled) {
- DPRINTK(KERN_DEBUG "%s: parisc_pdc_chassis_init()\n", __FILE__);
-
- /* Let see if we have something to handle... */
- /* Check for PDC_PAT or old LED Panel */
- pdc_chassis_checkold();
- if (is_pdc_pat()) {
- printk(KERN_INFO "Enabling PDC_PAT chassis codes support.\n");
- handle = 1;
- }
- else if (pdc_chassis_old) {
- printk(KERN_INFO "Enabling old style chassis LED panel support.\n");
- handle = 1;
- }
- if (handle) {
- /* initialize panic notifier chain */
- notifier_chain_register(&panic_notifier_list, &pdc_chassis_panic_block);
+ DPRINTK(KERN_DEBUG "%s: parisc_pdc_chassis_init()\n", __FILE__);
- /* initialize reboot notifier chain */
- register_reboot_notifier(&pdc_chassis_reboot_block);
- }
+ /* Let see if we have something to handle... */
+ /* Check for PDC_PAT or old LED Panel */
+ pdc_chassis_checkold();
+ if (is_pdc_pat()) {
+#ifdef __LP64__ /* see pdc_chassis_send_status() */
+ printk(KERN_INFO "Enabling PDC_PAT chassis codes support.\n");
+ handle = 1;
+#endif /* __LP64__ */
+ }
+ else if (pdc_chassis_old) {
+ printk(KERN_INFO "Enabling old style chassis LED panel support.\n");
+ handle = 1;
+ }
+
+ if (handle) {
+ /* initialize panic notifier chain */
+ notifier_chain_register(&panic_notifier_list, &pdc_chassis_panic_block);
+
+ /* initialize reboot notifier chain */
+ register_reboot_notifier(&pdc_chassis_reboot_block);
}
#endif /* CONFIG_PDC_CHASSIS */
}
/* Maybe we should do that in an other way ? */
int retval = 0;
#ifdef CONFIG_PDC_CHASSIS
- if (pdc_chassis_enabled) {
-
- DPRINTK(KERN_DEBUG "%s: pdc_chassis_send_status(%d)\n", __FILE__, message);
-
-#ifdef CONFIG_PARISC64
- if (is_pdc_pat()) {
- switch(message) {
- case PDC_CHASSIS_DIRECT_BSTART:
- retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_BSTART, PDC_CHASSIS_LSTATE_RUN_NORMAL);
- break;
-
- case PDC_CHASSIS_DIRECT_BCOMPLETE:
- retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_BCOMPLETE, PDC_CHASSIS_LSTATE_RUN_NORMAL);
- break;
-
- case PDC_CHASSIS_DIRECT_SHUTDOWN:
- retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_SHUTDOWN, PDC_CHASSIS_LSTATE_NONOS);
- break;
-
- case PDC_CHASSIS_DIRECT_PANIC:
- retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_PANIC, PDC_CHASSIS_LSTATE_RUN_CRASHREC);
- break;
-
- case PDC_CHASSIS_DIRECT_LPMC:
- retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_LPMC, PDC_CHASSIS_LSTATE_RUN_SYSINT);
- break;
-
- case PDC_CHASSIS_DIRECT_HPMC:
- retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_HPMC, PDC_CHASSIS_LSTATE_RUN_NCRIT);
- break;
-
- default:
- retval = -1;
- }
- } else retval = -1;
+ DPRINTK(KERN_DEBUG "%s: pdc_chassis_send_status(%d)\n", __FILE__, message);
+
+#ifdef __LP64__ /* pdc_pat_chassis_send_log is defined only when #ifdef __LP64__ */
+ if (is_pdc_pat()) {
+ switch(message) {
+ case PDC_CHASSIS_DIRECT_BSTART:
+ retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_BSTART, PDC_CHASSIS_LSTATE_RUN_NORMAL);
+ break;
+
+ case PDC_CHASSIS_DIRECT_BCOMPLETE:
+ retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_BCOMPLETE, PDC_CHASSIS_LSTATE_RUN_NORMAL);
+ break;
+
+ case PDC_CHASSIS_DIRECT_SHUTDOWN:
+ retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_SHUTDOWN, PDC_CHASSIS_LSTATE_NONOS);
+ break;
+
+ case PDC_CHASSIS_DIRECT_PANIC:
+ retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_PANIC, PDC_CHASSIS_LSTATE_RUN_CRASHREC);
+ break;
+
+ case PDC_CHASSIS_DIRECT_LPMC:
+ retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_LPMC, PDC_CHASSIS_LSTATE_RUN_SYSINT);
+ break;
+
+ case PDC_CHASSIS_DIRECT_HPMC:
+ retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_HPMC, PDC_CHASSIS_LSTATE_RUN_NCRIT);
+ break;
+
+ default:
+ retval = -1;
+ }
+ } else retval = -1;
#else
- if (pdc_chassis_old) {
- switch (message) {
- case PDC_CHASSIS_DIRECT_BSTART:
- case PDC_CHASSIS_DIRECT_BCOMPLETE:
- retval = pdc_chassis_disp(PDC_CHASSIS_DISP_DATA(OSTAT_RUN));
- break;
-
- case PDC_CHASSIS_DIRECT_SHUTDOWN:
- retval = pdc_chassis_disp(PDC_CHASSIS_DISP_DATA(OSTAT_SHUT));
- break;
-
- case PDC_CHASSIS_DIRECT_HPMC:
- case PDC_CHASSIS_DIRECT_PANIC:
- retval = pdc_chassis_disp(PDC_CHASSIS_DISP_DATA(OSTAT_FLT));
- break;
-
- case PDC_CHASSIS_DIRECT_LPMC:
- retval = pdc_chassis_disp(PDC_CHASSIS_DISP_DATA(OSTAT_WARN));
- break;
-
- default:
- retval = -1;
- }
- } else retval = -1;
-#endif /* CONFIG_PARISC64 */
- } /* if (pdc_chassis_enabled) */
+ if (pdc_chassis_old) {
+ switch (message) {
+ case PDC_CHASSIS_DIRECT_BSTART:
+ case PDC_CHASSIS_DIRECT_BCOMPLETE:
+ retval = pdc_chassis_disp(PDC_CHASSIS_DISP_DATA(OSTAT_RUN));
+ break;
+
+ case PDC_CHASSIS_DIRECT_SHUTDOWN:
+ retval = pdc_chassis_disp(PDC_CHASSIS_DISP_DATA(OSTAT_SHUT));
+ break;
+
+ case PDC_CHASSIS_DIRECT_HPMC:
+ case PDC_CHASSIS_DIRECT_PANIC:
+ retval = pdc_chassis_disp(PDC_CHASSIS_DISP_DATA(OSTAT_FLT));
+ break;
+
+ case PDC_CHASSIS_DIRECT_LPMC:
+ retval = pdc_chassis_disp(PDC_CHASSIS_DISP_DATA(OSTAT_WARN));
+ break;
+
+ default:
+ retval = -1;
+ }
+ } else retval = -1;
+#endif /* __LP64__ */
#endif /* CONFIG_PDC_CHASSIS */
return retval;
}
/*
* These bracket the sleeping functions..
*/
+# define first_sched ((unsigned long) scheduling_functions_start_here)
+# define last_sched ((unsigned long) scheduling_functions_end_here)
unwind_frame_init_from_blocked_task(&info, p);
do {
if (unwind_once(&info) < 0)
return 0;
ip = info.ip;
- if (!in_sched_functions(ip))
+ if (ip < first_sched || ip >= last_sched)
return ip;
} while (count++ < 16);
return 0;
+# undef first_sched
+# undef last_sched
}
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
-#include <linux/cpu.h>
#include <asm/cache.h>
#include <asm/hardware.h> /* for register_parisc_driver() stuff */
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/pdc.h>
-#include <asm/pdcpat.h>
#include <asm/irq.h> /* for struct irq_region */
#include <asm/parisc-device.h>
cpu_irq_actions[cpuid] = actions;
}
#endif
-
- /*
- * Bring this CPU up now! (ignore bootstrap cpuid == 0)
- */
-#ifdef CONFIG_SMP
- if (cpuid) {
- cpu_set(cpuid, cpu_present_map);
- cpu_up(cpuid);
- }
-#endif
-
return 0;
}
.section .bss
.export real_stack
- .export real32_stack
- .export real64_stack
.align 64
real_stack:
-real32_stack:
-real64_stack:
.block 8192
#ifdef __LP64__
/* Intended for ccio/sba/cpu statistics under /proc/bus/{runway|gsc} */
struct proc_dir_entry * proc_runway_root = NULL;
struct proc_dir_entry * proc_gsc_root = NULL;
-struct proc_dir_entry * proc_mckinley_root = NULL;
-
void __init setup_cmdline(char **cmdline_p)
{
case pcxw:
case pcxw_:
case pcxw2:
+ case mako: /* XXX : this is really mckinley bus */
if (NULL == proc_runway_root)
{
proc_runway_root = proc_mkdir("bus/runway", 0);
}
break;
- case mako:
- if (NULL == proc_mckinley_root)
- {
- proc_mckinley_root = proc_mkdir("bus/mckinley", 0);
- }
- break;
default:
/* FIXME: this was added to prevent the compiler
* complaining about missing pcx, pcxs and pcxt
#define kDEBUG 0
+spinlock_t pa_dbit_lock = SPIN_LOCK_UNLOCKED;
+
spinlock_t smp_lock = SPIN_LOCK_UNLOCKED;
volatile struct task_struct *smp_init_current_idle_task;
static volatile int cpu_now_booting = 0; /* track which CPU is booting */
+static int parisc_max_cpus = -1; /* Command line */
unsigned long cache_decay_ticks; /* declared by include/linux/sched.h */
-
-static int parisc_max_cpus = 1;
-
-/* online cpus are ones that we've managed to bring up completely
- * possible cpus are all valid cpu
- * present cpus are all detected cpu
- *
- * On startup we bring up the "possible" cpus. Since we discover
- * CPUs later, we add them as hotplug, so the possible cpu mask is
- * empty in the beginning.
- */
-
cpumask_t cpu_online_map = CPU_MASK_NONE; /* Bitmap of online CPUs */
-cpumask_t cpu_possible_map = CPU_MASK_ALL; /* Bitmap of Present CPUs */
+cpumask_t cpu_possible_map = CPU_MASK_NONE; /* Bitmap of Present CPUs */
EXPORT_SYMBOL(cpu_online_map);
EXPORT_SYMBOL(cpu_possible_map);
{
int i;
- for (i = 0; i < NR_CPUS; i++) {
+ for (i = 0; i < parisc_max_cpus; i++) {
if (cpu_online(i) && i != smp_processor_id())
send_IPI_single(i, op);
}
unsigned long timeout;
static spinlock_t lock = SPIN_LOCK_UNLOCKED;
- if (num_online_cpus() < 2)
- return 0;
-
/* Can deadlock when called with interrupts disabled */
WARN_ON(irqs_disabled());
EXPORT_SYMBOL(smp_call_function);
+
+
+/*
+ * Setup routine for controlling SMP activation
+ *
+ * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
+ * activation entirely (the MPS table probe still happens, though).
+ *
+ * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
+ * greater than 0, limits the maximum number of CPUs activated in
+ * SMP mode to <NUM>.
+ */
+
+static int __init nosmp(char *str)
+{
+ parisc_max_cpus = 0;
+ return 1;
+}
+
+__setup("nosmp", nosmp);
+
+static int __init maxcpus(char *str)
+{
+ get_option(&str, &parisc_max_cpus);
+ return 1;
+}
+
+__setup("maxcpus=", maxcpus);
+
/*
* Flush all other CPU's tlb and then mine. Do this with on_each_cpu()
* as we want to ensure all TLB's flushed before proceeding.
panic("smp_callin() AAAAaaaaahhhh....\n");
}
+#if 0
/*
* Create the idle task for a new Slave CPU. DO NOT use kernel_thread()
* because that could end up calling schedule(). If it did, the new idle
/*
* Bring one cpu online.
*/
-int __init smp_boot_one_cpu(int cpuid)
+int __init smp_boot_one_cpu(int cpuid, int cpunum)
{
struct task_struct *idle;
long timeout;
panic("SMP: fork failed for CPU:%d", cpuid);
wake_up_forked_process(idle);
- init_idle(idle, cpuid);
+ init_idle(idle, cpunum);
unhash_process(idle);
- idle->thread_info->cpu = cpuid;
+ idle->thread_info->cpu = cpunum;
/* Let _start know what logical CPU we're booting
** (offset into init_tasks[],cpu_data[])
*/
- cpu_now_booting = cpuid;
+ cpu_now_booting = cpunum;
/*
** boot strap code needs to know the task address since
smp_init_current_idle_task = idle ;
mb();
- printk("Releasing cpu %d now, hpa=%lx\n", cpuid, cpu_data[cpuid].hpa);
-
/*
** This gets PDC to release the CPU from a very tight loop.
- **
- ** From the PA-RISC 2.0 Firmware Architecture Reference Specification:
- ** "The MEM_RENDEZ vector specifies the location of OS_RENDEZ which
- ** is executed after receiving the rendezvous signal (an interrupt to
- ** EIR{0}). MEM_RENDEZ is valid only when it is nonzero and the
- ** contents of memory are valid."
+ ** See MEM_RENDEZ comments in head.S.
*/
- __raw_writel(IRQ_OFFSET(TIMER_IRQ), cpu_data[cpuid].hpa);
+ __raw_writel(IRQ_OFFSET(TIMER_IRQ), cpu_data[cpunum].hpa);
mb();
/*
* Once the "monarch CPU" sees the bit change, it can move on.
*/
for (timeout = 0; timeout < 10000; timeout++) {
- if(cpu_online(cpuid)) {
+ if(cpu_online(cpunum)) {
/* Which implies Slave has started up */
cpu_now_booting = 0;
smp_init_current_idle_task = NULL;
alive:
/* Remember the Slave data */
#if (kDEBUG>=100)
- printk(KERN_DEBUG "SMP: CPU:%d came alive after %ld _us\n",
- cpuid, timeout * 100);
+ printk(KERN_DEBUG "SMP: CPU:%d (num %d) came alive after %ld _us\n",
+ cpuid, cpunum, timeout * 100);
#endif /* kDEBUG */
#ifdef ENTRY_SYS_CPUS
- cpu_data[cpuid].state = STATE_RUNNING;
+ cpu_data[cpunum].state = STATE_RUNNING;
#endif
return 0;
}
+#endif
+
void __devinit smp_prepare_boot_cpu(void)
{
#endif
/* Setup BSP mappings */
- printk("SMP: bootstrap CPU ID is %d\n",bootstrap_processor);
+ printk(KERN_DEBUG "SMP: bootstrap CPU ID is %d\n",bootstrap_processor);
+ init_task.thread_info->cpu = bootstrap_processor;
+ current->thread_info->cpu = bootstrap_processor;
cpu_set(bootstrap_processor, cpu_online_map);
- cpu_set(bootstrap_processor, cpu_present_map);
+ cpu_set(bootstrap_processor, cpu_possible_map);
+
+ /* Mark Bootstrap processor as present */
+ current->active_mm = &init_mm;
cache_decay_ticks = HZ/100; /* FIXME very rough. */
}
*/
void __init smp_prepare_cpus(unsigned int max_cpus)
{
- cpus_clear(cpu_present_map);
- cpu_set(0, cpu_present_map);
- parisc_max_cpus = max_cpus;
- if (!max_cpus)
- printk(KERN_INFO "SMP mode deactivated.\n");
+ if (max_cpus != -1)
+ printk(KERN_INFO "SMP: Limited to %d CPUs\n", max_cpus);
+
+ printk(KERN_INFO "SMP: Monarch CPU activated (%lu.%02lu BogoMIPS)\n",
+ (cpu_data[0].loops_per_jiffy + 25) / 5000,
+ ((cpu_data[0].loops_per_jiffy + 25) / 50) % 100);
+
+ return;
}
int __devinit __cpu_up(unsigned int cpu)
{
- if (cpu != 0 && cpu < parisc_max_cpus)
- smp_boot_one_cpu(cpu);
-
return cpu_online(cpu) ? 0 : -ENOSYS;
}
asmlinkage long sys32_time(compat_time_t *tloc)
{
- struct timeval tv;
- compat_time_t now32;
+ struct timeval tv;
do_gettimeofday(&tv);
- now32 = tv.tv_sec;
+ compat_time_t now32 = tv.tv_sec;
if (tloc)
if (put_user(now32, tloc))
put_user(reclen, &dirent->d_reclen);
copy_to_user(dirent->d_name, name, namlen);
put_user(0, dirent->d_name + namlen);
- dirent = (struct linux32_dirent *)((char *)dirent + reclen);
+ ((char *) dirent) += reclen;
buf->current_dir = dirent;
buf->count -= reclen;
return 0;
#include <asm/smp.h>
#include <asm/pdc.h>
#include <asm/pdc_chassis.h>
-#include <asm/unwind.h>
#include "../math-emu/math-emu.h" /* for handle_fpe() */
#define PRINT_USER_FAULTS /* (turn this on if you want user faults to be */
/* dumped to the console via printk) */
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-spinlock_t pa_dbit_lock = SPIN_LOCK_UNLOCKED;
-#endif
-
int printbinary(char *buf, unsigned long x, int nbits)
{
unsigned long mask = 1UL << (nbits - 1);
void dump_stack(void)
{
- show_stack(NULL, NULL);
+ unsigned long stack;
+ show_trace(current, &stack);
}
EXPORT_SYMBOL(dump_stack);
-void show_stack(struct task_struct *task, unsigned long *s)
+#ifndef __LP64__
+static int kstack_depth_to_print = 64 * 4;
+#else
+static int kstack_depth_to_print = 128 * 4;
+#endif
+
+void show_stack(struct task_struct *task, unsigned long *sp)
{
- int i = 1;
- struct unwind_frame_info info;
-
- if (!task) {
- unsigned long sp, ip, rp;
-
-HERE:
- asm volatile ("copy %%r30, %0" : "=r"(sp));
- ip = (unsigned long)&&HERE;
- rp = (unsigned long)__builtin_return_address(0);
- unwind_frame_init(&info, current, sp, ip, rp);
- } else {
- unwind_frame_init_from_blocked_task(&info, task);
- }
+ unsigned long *stack;
+ int i;
- printk("Backtrace:\n");
- while (i <= 16) {
- if (unwind_once(&info) < 0 || info.ip == 0)
+ /*
+ * debugging aid: "show_stack(NULL);" prints the
+ * back trace for this cpu.
+ */
+ if (task==NULL)
+ sp = (unsigned long*)&sp;
+ else if(sp == NULL)
+ sp = (unsigned long*)task->thread.regs.ksp;
+
+ stack = sp;
+ printk("\n" KERN_CRIT "Stack Dump:\n");
+ printk(KERN_CRIT " " RFMT ": ", (unsigned long) stack);
+ for (i=0; i < kstack_depth_to_print; i++) {
+ if (((long) stack & (THREAD_SIZE-1)) == 0)
break;
+ if (i && ((i & 0x03) == 0))
+ printk("\n" KERN_CRIT " " RFMT ": ",
+ (unsigned long) stack);
+ printk(RFMT " ", *stack--);
+ }
+ printk("\n" KERN_CRIT "\n");
+ show_trace(task, sp);
+}
- if (__kernel_text_address(info.ip)) {
- printk(" [<" RFMT ">] ", info.ip);
+
+void show_trace(struct task_struct *task, unsigned long *stack)
+{
+ unsigned long *startstack;
+ unsigned long addr;
+ int i;
+
+ startstack = (unsigned long *)((unsigned long)stack & ~(THREAD_SIZE - 1));
+ i = 1;
+ stack = (long *)((long)(stack + 32) &~ (FRAME_SIZE-1)); /* Align */
+ printk("Kernel addresses on the stack:\n");
+ while (stack > startstack) {
+ stack -= 16; /* Stack frames are a multiple of 16 words */
+ addr = stack[16 - RP_OFFSET / sizeof(long)];
+ /*
+ * If the address is either in the text segment of the
+ * kernel, or in the region which contains vmalloc'ed
+ * memory, it *may* be the address of a calling
+ * routine; if so, print it so that someone tracing
+ * down the cause of the crash will be able to figure
+ * out the call path that was taken.
+ */
+ if (__kernel_text_address(addr)) {
+ printk(" [<" RFMT ">] ", addr);
#ifdef CONFIG_KALLSYMS
- print_symbol("%s\n", info.ip);
+ print_symbol("%s\n", addr);
#else
if ((i & 0x03) == 0)
printk("\n");
* understand what is happening here
*/
+/*
+ * J. David Anglin writes:
+ *
+ * "You have to adjust the current sp to that at the beginning of the function.
+ * There can be up to two stack additions to allocate the frame in the
+ * prologue. Similar things happen in the epilogue. In the presence of
+ * interrupts, you have to be concerned about where you are in the function
+ * and what stack adjustments have taken place."
+ *
+ * For now these cases are not handled, but they should be!
+ */
+
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
#define dbg(x...)
#endif
-extern struct unwind_table_entry __start___unwind[];
-extern struct unwind_table_entry __stop___unwind[];
+extern const struct unwind_table_entry __start___unwind[];
+extern const struct unwind_table_entry __stop___unwind[];
static spinlock_t unwind_lock;
/*
const struct unwind_table_entry *e = 0;
unsigned long lo, hi, mid;
+ addr -= table->base_addr;
+
for (lo = 0, hi = table->length; lo < hi; )
{
mid = (lo + hi) / 2;
static void
unwind_table_init(struct unwind_table *table, const char *name,
unsigned long base_addr, unsigned long gp,
- void *table_start, void *table_end)
+ const void *table_start, const void *table_end)
{
- struct unwind_table_entry *start = table_start;
- struct unwind_table_entry *end =
- (struct unwind_table_entry *)table_end - 1;
+ const struct unwind_table_entry *start = table_start;
+ const struct unwind_table_entry *end = table_end - 1;
table->name = name;
table->base_addr = base_addr;
table->start = base_addr + start->region_start;
table->end = base_addr + end->region_end;
table->table = (struct unwind_table_entry *)table_start;
- table->length = end - start + 1;
+ table->length = end - start;
table->next = NULL;
-
- for (; start <= end; start++) {
- start->region_start += base_addr;
- start->region_end += base_addr;
- }
}
void *
unwind_table_add(const char *name, unsigned long base_addr,
unsigned long gp,
- void *start, void *end)
+ const void *start, const void *end)
{
struct unwind_table *table;
unsigned long flags;
sp = info->prev_sp;
} while (info->prev_ip < (unsigned long)_stext ||
info->prev_ip > (unsigned long)_etext);
-
- dbg("analyzing func @ %lx with no unwind info, setting prev_sp=%lx prev_ip=%lx\n", info->ip, info->prev_sp, info->prev_ip);
} else {
dbg("e->start = 0x%x, e->end = 0x%x, Save_SP = %d, Save_RP = %d size = %u\n",
/* ldo X(sp), sp, or stwm X,D(sp) */
frame_size += (insn & 0x1 ? -1 << 13 : 0) |
((insn & 0x3fff) >> 1);
- dbg("analyzing func @ %lx, insn=%08x @ %lx, frame_size = %ld\n", info->ip, insn, npc, frame_size);
} else if ((insn & 0xffe00008) == 0x7ec00008) {
/* std,ma X,D(sp) */
frame_size += (insn & 0x1 ? -1 << 13 : 0) |
(((insn >> 4) & 0x3ff) << 3);
- dbg("analyzing func @ %lx, insn=%08x @ %lx, frame_size = %ld\n", info->ip, insn, npc, frame_size);
} else if (insn == 0x6bc23fd9) {
/* stw rp,-20(sp) */
rpoffset = 20;
looking_for_rp = 0;
- dbg("analyzing func @ %lx, insn=stw rp,-20(sp) @ %lx\n", info->ip, npc);
} else if (insn == 0x0fc212c1) {
/* std rp,-16(sr0,sp) */
rpoffset = 16;
looking_for_rp = 0;
- dbg("analyzing func @ %lx, insn=std rp,-16(sp) @ %lx\n", info->ip, npc);
}
}
info->prev_sp = info->sp - frame_size;
if (rpoffset)
- info->rp = *(unsigned long *)(info->prev_sp - rpoffset);
- info->prev_ip = info->rp;
- info->rp = 0;
-
- dbg("analyzing func @ %lx, setting prev_sp=%lx prev_ip=%lx\n", info->ip, info->prev_sp, info->prev_ip);
+ info->prev_ip = *(unsigned long *)(info->prev_sp - rpoffset);
}
}
void unwind_frame_init(struct unwind_frame_info *info, struct task_struct *t,
- unsigned long sp, unsigned long ip, unsigned long rp)
+ struct pt_regs *regs)
{
memset(info, 0, sizeof(struct unwind_frame_info));
info->t = t;
- info->sp = sp;
- info->ip = ip;
- info->rp = rp;
+ info->sp = regs->ksp;
+ info->ip = regs->kpc;
- dbg("(%d) Start unwind from sp=%08lx ip=%08lx\n", t ? (int)t->pid : 0, info->sp, info->ip);
+ dbg("(%d) Start unwind from sp=%08lx ip=%08lx\n", (int)t->pid, info->sp, info->ip);
}
void unwind_frame_init_from_blocked_task(struct unwind_frame_info *info, struct task_struct *t)
{
struct pt_regs *regs = &t->thread.regs;
- unwind_frame_init(info, t, regs->ksp, regs->kpc, 0);
-}
-
-void unwind_frame_init_running(struct unwind_frame_info *info, struct pt_regs *regs)
-{
- unwind_frame_init(info, current, regs->gr[30], regs->iaoq[0],
- regs->gr[2]);
+ unwind_frame_init(info, t, regs);
}
int unwind_once(struct unwind_frame_info *next_frame)
#
lib-y := lusercopy.o bitops.o checksum.o io.o memset.o
-
-lib-$(CONFIG_SMP) += debuglocks.o
unsigned long __xchg32(int x, int *ptr)
{
unsigned long flags;
- long temp;
+ unsigned long temp;
atomic_spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
- temp = (long) *ptr; /* XXX - sign extension wanted? */
+ (long) temp = (long) *ptr; /* XXX - sign extension wanted? */
*ptr = x;
atomic_spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);
- return (unsigned long)temp;
+ return temp;
}
unsigned long __xchg8(char x, char *ptr)
{
unsigned long flags;
- long temp;
+ unsigned long temp;
atomic_spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
- temp = (long) *ptr; /* XXX - sign extension wanted? */
+ (long) temp = (long) *ptr; /* XXX - sign extension wanted? */
*ptr = x;
atomic_spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);
- return (unsigned long)temp;
+ return temp;
}
+++ /dev/null
-/*
- * Debugging versions of SMP locking primitives.
- *
- * Copyright (C) 2004 Thibaut VARENE <varenet@esiee.fr>
- *
- * Some code stollen from alpha & sparc64 ;)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-
-#include <linux/config.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/spinlock.h>
-#include <asm/system.h>
-#include <asm/hardirq.h> /* in_interrupt() */
-
-#undef INIT_STUCK
-#define INIT_STUCK 1L << 30
-
-#ifdef CONFIG_DEBUG_SPINLOCK
-
-void _dbg_spin_lock(spinlock_t * lock, const char *base_file, int line_no)
-{
- volatile unsigned int *a;
- long stuck = INIT_STUCK;
- void *inline_pc = __builtin_return_address(0);
- unsigned long started = jiffies;
- int printed = 0;
- int cpu = smp_processor_id();
-
-try_again:
-
- /* Do the actual locking */
- /* <T-Bone> ggg: we can't get stuck on the outter loop?
- * <ggg> T-Bone: We can hit the outer loop
- * alot if multiple CPUs are constantly racing for a lock
- * and the backplane is NOT fair about which CPU sees
- * the update first. But it won't hang since every failed
- * attempt will drop us back into the inner loop and
- * decrement `stuck'.
- * <ggg> K-class and some of the others are NOT fair in the HW
- * implementation so we could see false positives.
- * But fixing the lock contention is easier than
- * fixing the HW to be fair.
- * <tausq> __ldcw() returns 1 if we get the lock; otherwise we
- * spin until the value of the lock changes, or we time out.
- */
- a = __ldcw_align(lock);
- while (stuck && (__ldcw(a) == 0))
- while ((*a == 0) && --stuck);
-
- if (unlikely(stuck <= 0)) {
- printk(KERN_WARNING
- "%s:%d: spin_lock(%s/%p) stuck in %s at %p(%d)"
- " owned by %s:%d in %s at %p(%d)\n",
- base_file, line_no, lock->module, lock,
- current->comm, inline_pc, cpu,
- lock->bfile, lock->bline, lock->task->comm,
- lock->previous, lock->oncpu);
- stuck = INIT_STUCK;
- printed = 1;
- goto try_again;
- }
-
- /* Exiting. Got the lock. */
- lock->oncpu = cpu;
- lock->previous = inline_pc;
- lock->task = current;
- lock->bfile = (char *)base_file;
- lock->bline = line_no;
-
- if (unlikely(printed)) {
- printk(KERN_WARNING
- "%s:%d: spin_lock grabbed in %s at %p(%d) %ld ticks\n",
- base_file, line_no, current->comm, inline_pc,
- cpu, jiffies - started);
- }
-}
-
-void _dbg_spin_unlock(spinlock_t * lock, const char *base_file, int line_no)
-{
- CHECK_LOCK(lock);
- volatile unsigned int *a = __ldcw_align(lock);
- if (unlikely((*a != 0) && lock->babble)) {
- lock->babble--;
- printk(KERN_WARNING
- "%s:%d: spin_unlock(%s:%p) not locked\n",
- base_file, line_no, lock->module, lock);
- }
- *a = 1;
-}
-
-int _dbg_spin_trylock(spinlock_t * lock, const char *base_file, int line_no)
-{
- int ret;
- volatile unsigned int *a = __ldcw_align(lock);
- if ((ret = (__ldcw(a) != 0))) {
- lock->oncpu = smp_processor_id();
- lock->previous = __builtin_return_address(0);
- lock->task = current;
- } else {
- lock->bfile = (char *)base_file;
- lock->bline = line_no;
- }
- return ret;
-}
-
-#endif /* CONFIG_DEBUG_SPINLOCK */
-
-#ifdef CONFIG_DEBUG_RWLOCK
-
-/* Interrupts trouble detailed explanation, thx Grant:
- *
- * o writer (wants to modify data) attempts to acquire the rwlock
- * o He gets the write lock.
- * o Interupts are still enabled, we take an interrupt with the
- * write still holding the lock.
- * o interrupt handler tries to acquire the rwlock for read.
- * o deadlock since the writer can't release it at this point.
- *
- * In general, any use of spinlocks that competes between "base"
- * level and interrupt level code will risk deadlock. Interrupts
- * need to be disabled in the base level routines to avoid it.
- * Or more precisely, only the IRQ the base level routine
- * is competing with for the lock. But it's more efficient/faster
- * to just disable all interrupts on that CPU to guarantee
- * once it gets the lock it can release it quickly too.
- */
-
-void _dbg_write_lock(rwlock_t *rw, const char *bfile, int bline)
-{
- void *inline_pc = __builtin_return_address(0);
- unsigned long started = jiffies;
- long stuck = INIT_STUCK;
- int printed = 0;
- int cpu = smp_processor_id();
-
- if(unlikely(in_interrupt())) { /* acquiring write lock in interrupt context, bad idea */
- printk(KERN_WARNING "write_lock caller: %s:%d, IRQs enabled,\n", bfile, bline);
- BUG();
- }
-
- /* Note: if interrupts are disabled (which is most likely), the printk
- will never show on the console. We might need a polling method to flush
- the dmesg buffer anyhow. */
-
-retry:
- _raw_spin_lock(&rw->lock);
-
- if(rw->counter != 0) {
- /* this basically never happens */
- _raw_spin_unlock(&rw->lock);
-
- stuck--;
- if ((unlikely(stuck <= 0)) && (rw->counter < 0)) {
- printk(KERN_WARNING
- "%s:%d: write_lock stuck on writer"
- " in %s at %p(%d) %ld ticks\n",
- bfile, bline, current->comm, inline_pc,
- cpu, jiffies - started);
- stuck = INIT_STUCK;
- printed = 1;
- }
- else if (unlikely(stuck <= 0)) {
- printk(KERN_WARNING
- "%s:%d: write_lock stuck on reader"
- " in %s at %p(%d) %ld ticks\n",
- bfile, bline, current->comm, inline_pc,
- cpu, jiffies - started);
- stuck = INIT_STUCK;
- printed = 1;
- }
-
- while(rw->counter != 0);
-
- goto retry;
- }
-
- /* got it. now leave without unlocking */
- rw->counter = -1; /* remember we are locked */
-
- if (unlikely(printed)) {
- printk(KERN_WARNING
- "%s:%d: write_lock grabbed in %s at %p(%d) %ld ticks\n",
- bfile, bline, current->comm, inline_pc,
- cpu, jiffies - started);
- }
-}
-
-void _dbg_read_lock(rwlock_t * rw, const char *bfile, int bline)
-{
-#if 0
- void *inline_pc = __builtin_return_address(0);
- unsigned long started = jiffies;
- int cpu = smp_processor_id();
-#endif
- unsigned long flags;
-
- local_irq_save(flags);
- _raw_spin_lock(&rw->lock);
-
- rw->counter++;
-#if 0
- printk(KERN_WARNING
- "%s:%d: read_lock grabbed in %s at %p(%d) %ld ticks\n",
- bfile, bline, current->comm, inline_pc,
- cpu, jiffies - started);
-#endif
- _raw_spin_unlock(&rw->lock);
- local_irq_restore(flags);
-}
-
-#endif /* CONFIG_DEBUG_RWLOCK */
goto bytecopy;
while (dest & 3) {
writeb(*(char *)src, dest++);
- src++;
+ ((char *)src)++;
count--;
}
while (count > 3) {
bytecopy:
while (count--) {
writeb(*(char *)src, dest++);
- src++;
+ ((char *)src)++;
}
}
/* Then check for misaligned start address */
if (src & 1) {
*(u8 *)dest = readb(src);
- src++;
- dest++;
+ ((u8 *)src)++;
+ ((u8 *)dest)++;
count--;
if (count < 2) goto bytecopy;
}
if (src & 2) {
*(u16 *)dest = __raw_readw(src);
- src += 2;
- dest += 2;
- count -= 2;
+ ((u16 *)src)++;
+ ((u16 *)dest)++;
+ count-=2;
}
while (count > 3) {
shortcopy:
while (count > 1) {
*(u16 *)dest = __raw_readw(src);
- src += 2;
- dest += 2;
- count -= 2;
+ ((u16 *)src)++;
+ ((u16 *)dest)++;
+ count-=2;
}
bytecopy:
while (count--) {
*(char *)dest = readb(src);
- src++;
- dest++;
+ ((char *)src)++;
+ ((char *)dest)++;
}
}
*/
void insb (unsigned long port, void *dst, unsigned long count)
{
- unsigned char *p;
-
- p = (unsigned char *)dst;
-
- while (((unsigned long)p) & 0x3) {
+ while (((unsigned long)dst) & 0x3) {
if (!count)
return;
count--;
- *p = inb(port);
- p++;
+ *(unsigned char *) dst = inb(port);
+ ((unsigned char *) dst)++;
}
while (count >= 4) {
w |= inb(port) << 16;
w |= inb(port) << 8;
w |= inb(port);
- *(unsigned int *) p = w;
- p += 4;
+ *(unsigned int *) dst = w;
+ ((unsigned int *) dst)++;
}
while (count) {
--count;
- *p = inb(port);
- p++;
+ *(unsigned char *) dst = inb(port);
+ ((unsigned char *) dst)++;
}
}
void insw (unsigned long port, void *dst, unsigned long count)
{
unsigned int l = 0, l2;
- unsigned char *p;
-
- p = (unsigned char *)dst;
if (!count)
return;
- switch (((unsigned long)p) & 0x3)
+ switch (((unsigned long) dst) & 0x3)
{
case 0x00: /* Buffer 32-bit aligned */
while (count>=2) {
count -= 2;
l = cpu_to_le16(inw(port)) << 16;
l |= cpu_to_le16(inw(port));
- *(unsigned int *)p = l;
- p += 4;
+ *(unsigned int *) dst = l;
+ ((unsigned int *) dst)++;
}
if (count) {
- *(unsigned short *)p = cpu_to_le16(inw(port));
+ *(unsigned short *) dst = cpu_to_le16(inw(port));
}
break;
case 0x02: /* Buffer 16-bit aligned */
- *(unsigned short *)p = cpu_to_le16(inw(port));
- p += 2;
+ *(unsigned short *) dst = cpu_to_le16(inw(port));
+ ((unsigned short *) dst)++;
count--;
while (count>=2) {
count -= 2;
l = cpu_to_le16(inw(port)) << 16;
l |= cpu_to_le16(inw(port));
- *(unsigned int *)p = l;
- p += 4;
+ *(unsigned int *) dst = l;
+ ((unsigned int *) dst)++;
}
if (count) {
- *(unsigned short *)p = cpu_to_le16(inw(port));
+ *(unsigned short *) dst = cpu_to_le16(inw(port));
}
break;
--count;
l = cpu_to_le16(inw(port));
- *p = l >> 8;
- p++;
+ *(unsigned char *) dst = l >> 8;
+ ((unsigned char *) dst)++;
while (count--)
{
l2 = cpu_to_le16(inw(port));
- *(unsigned short *)p = (l & 0xff) << 8 | (l2 >> 8);
- p += 2;
+ *(unsigned short *) dst = (l & 0xff) << 8 | (l2 >> 8);
+ ((unsigned short *) dst)++;
l = l2;
}
- *p = l & 0xff;
+ *(unsigned char *) dst = l & 0xff;
break;
}
}
void insl (unsigned long port, void *dst, unsigned long count)
{
unsigned int l = 0, l2;
- unsigned char *p;
-
- p = (unsigned char *)dst;
if (!count)
return;
case 0x00: /* Buffer 32-bit aligned */
while (count--)
{
- *(unsigned int *)p = cpu_to_le32(inl(port));
- p += 4;
+ *(unsigned int *) dst = cpu_to_le32(inl(port));
+ ((unsigned int *) dst)++;
}
break;
--count;
l = cpu_to_le32(inl(port));
- *(unsigned short *)p = l >> 16;
- p += 2;
+ *(unsigned short *) dst = l >> 16;
+ ((unsigned short *) dst)++;
while (count--)
{
l2 = cpu_to_le32(inl(port));
- *(unsigned int *)p = (l & 0xffff) << 16 | (l2 >> 16);
- p += 4;
+ *(unsigned int *) dst = (l & 0xffff) << 16 | (l2 >> 16);
+ ((unsigned int *) dst)++;
l = l2;
}
- *(unsigned short *)p = l & 0xffff;
+ *(unsigned short *) dst = l & 0xffff;
break;
case 0x01: /* Buffer 8-bit aligned */
--count;
l = cpu_to_le32(inl(port));
- *(unsigned char *)p = l >> 24;
- p++;
- *(unsigned short *)p = (l >> 8) & 0xffff;
- p += 2;
+ *(unsigned char *) dst = l >> 24;
+ ((unsigned char *) dst)++;
+ *(unsigned short *) dst = (l >> 8) & 0xffff;
+ ((unsigned short *) dst)++;
while (count--)
{
l2 = cpu_to_le32(inl(port));
- *(unsigned int *)p = (l & 0xff) << 24 | (l2 >> 8);
- p += 4;
+ *(unsigned int *) dst = (l & 0xff) << 24 | (l2 >> 8);
+ ((unsigned int *) dst)++;
l = l2;
}
- *p = l & 0xff;
+ *(unsigned char *) dst = l & 0xff;
break;
case 0x03: /* Buffer 8-bit aligned */
--count;
l = cpu_to_le32(inl(port));
- *p = l >> 24;
- p++;
+ *(unsigned char *) dst = l >> 24;
+ ((unsigned char *) dst)++;
while (count--)
{
l2 = cpu_to_le32(inl(port));
- *(unsigned int *)p = (l & 0xffffff) << 8 | l2 >> 24;
- p += 4;
+ *(unsigned int *) dst = (l & 0xffffff) << 8 | l2 >> 24;
+ ((unsigned int *) dst)++;
l = l2;
}
- *(unsigned short *)p = (l >> 8) & 0xffff;
- p += 2;
- *p = l & 0xff;
+ *(unsigned short *) dst = (l >> 8) & 0xffff;
+ ((unsigned short *) dst)++;
+ *(unsigned char *) dst = l & 0xff;
break;
}
}
*/
void outsb(unsigned long port, const void * src, unsigned long count)
{
- const unsigned char *p;
-
- p = (const unsigned char *)src;
while (count) {
count--;
- outb(*p, port);
- p++;
+ outb(*(char *)src, port);
+ ((char *) src)++;
}
}
void outsw (unsigned long port, const void *src, unsigned long count)
{
unsigned int l = 0, l2;
- const unsigned char *p;
-
- p = (const unsigned char *)src;
if (!count)
return;
- switch (((unsigned long)p) & 0x3)
+ switch (((unsigned long) src) & 0x3)
{
case 0x00: /* Buffer 32-bit aligned */
while (count>=2) {
count -= 2;
- l = *(unsigned int *)p;
- p += 4;
+ l = *(unsigned int *) src;
+ ((unsigned int *) src)++;
outw(le16_to_cpu(l >> 16), port);
outw(le16_to_cpu(l & 0xffff), port);
}
if (count) {
- outw(le16_to_cpu(*(unsigned short*)p), port);
+ outw(le16_to_cpu(*(unsigned short*)src), port);
}
break;
case 0x02: /* Buffer 16-bit aligned */
- outw(le16_to_cpu(*(unsigned short*)p), port);
- p += 2;
+ outw(le16_to_cpu(*(unsigned short*)src), port);
+ ((unsigned short *) src)++;
count--;
while (count>=2) {
count -= 2;
- l = *(unsigned int *)p;
- p += 4;
+ l = *(unsigned int *) src;
+ ((unsigned int *) src)++;
outw(le16_to_cpu(l >> 16), port);
outw(le16_to_cpu(l & 0xffff), port);
}
if (count) {
- outw(le16_to_cpu(*(unsigned short *)p), port);
+ outw(le16_to_cpu(*(unsigned short*)src), port);
}
break;
/* I don't bother with 32bit transfers
* in this case, 16bit will have to do -- DE */
- l = *p << 8;
- p++;
+ l = *(unsigned char *) src << 8;
+ ((unsigned char *) src)++;
count--;
while (count)
{
count--;
- l2 = *(unsigned short *)p;
- p += 2;
+ l2 = *(unsigned short *) src;
+ ((unsigned short *) src)++;
outw(le16_to_cpu(l | l2 >> 8), port);
l = l2 << 8;
}
- l2 = *(unsigned char *)p;
+ l2 = *(unsigned char *) src;
outw (le16_to_cpu(l | l2>>8), port);
break;
void outsl (unsigned long port, const void *src, unsigned long count)
{
unsigned int l = 0, l2;
- const unsigned char *p;
-
- p = (const unsigned char *)src;
if (!count)
return;
- switch (((unsigned long)p) & 0x3)
+ switch (((unsigned long) src) & 0x3)
{
case 0x00: /* Buffer 32-bit aligned */
while (count--)
{
- outl(le32_to_cpu(*(unsigned int *)p), port);
- p += 4;
+ outl(le32_to_cpu(*(unsigned int *) src), port);
+ ((unsigned int *) src)++;
}
break;
case 0x02: /* Buffer 16-bit aligned */
--count;
- l = *(unsigned short *)p;
- p += 2;
+ l = *(unsigned short *) src;
+ ((unsigned short *) src)++;
while (count--)
{
- l2 = *(unsigned int *)p;
- p += 4;
+ l2 = *(unsigned int *) src;
+ ((unsigned int *) src)++;
outl (le32_to_cpu(l << 16 | l2 >> 16), port);
l = l2;
}
- l2 = *(unsigned short *)p;
+ l2 = *(unsigned short *) src;
outl (le32_to_cpu(l << 16 | l2), port);
break;
case 0x01: /* Buffer 8-bit aligned */
--count;
-
- l = *p << 24;
- p++;
- l |= *(unsigned short *)p << 8;
- p += 2;
-
+
+ l = *(unsigned char *) src << 24;
+ ((unsigned char *) src)++;
+ l |= *(unsigned short *) src << 8;
+ ((unsigned short *) src)++;
while (count--)
{
- l2 = *(unsigned int *)p;
- p += 4;
+ l2 = *(unsigned int *) src;
+ ((unsigned int *) src)++;
outl (le32_to_cpu(l | l2 >> 24), port);
l = l2 << 8;
}
- l2 = *p;
- outl (le32_to_cpu(l | l2), port);
+ l2 = *(unsigned char *) src;
+ outl (le32_to_cpu(l | l2), port);
break;
case 0x03: /* Buffer 8-bit aligned */
--count;
- l = *p << 24;
- p++;
-
+ l = *(unsigned char *) src << 24;
+ ((unsigned char *) src)++;
while (count--)
{
- l2 = *(unsigned int *)p;
- p += 4;
+ l2 = *(unsigned int *) src;
+ ((unsigned int *) src)++;
outl (le32_to_cpu(l | l2 >> 8), port);
l = l2 << 24;
}
- l2 = *(unsigned short *)p << 16;
- p += 2;
- l2 |= *p;
+ l2 = *(unsigned short *) src << 16;
+ ((unsigned short *) src)++;
+ l2 |= *(unsigned char *) src;
outl (le32_to_cpu(l | l2), port);
break;
}
* Copyright 1999 SuSE GmbH
* changed by Philipp Rumpf
* Copyright 1999 Philipp Rumpf (prumpf@tux.org)
- * Copyright 2004 Randolph Chung (tausq@debian.org)
*
*/
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/pdc_chassis.h>
-#include <asm/mmzone.h>
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
extern char __init_begin, __init_end;
#ifdef CONFIG_DISCONTIGMEM
-struct node_map_data node_data[MAX_NUMNODES];
-bootmem_data_t bmem_data[MAX_NUMNODES];
-unsigned char pfnnid_map[PFNNID_MAP_MAX];
+struct node_map_data node_data[MAX_PHYSMEM_RANGES];
+bootmem_data_t bmem_data[MAX_PHYSMEM_RANGES];
+unsigned char *chunkmap;
+unsigned int maxchunkmap;
#endif
static struct resource data_resource = {
disable_sr_hashing(); /* Turn off space register hashing */
+#ifdef CONFIG_DISCONTIGMEM
+ /*
+ * The below is still true as of 2.4.2. If this is ever fixed,
+ * we can remove this warning!
+ */
+
+ printk(KERN_WARNING "\n\n");
+ printk(KERN_WARNING "CONFIG_DISCONTIGMEM is enabled, which is probably a mistake. This\n");
+ printk(KERN_WARNING "option can lead to heavy swapping, even when there are gigabytes\n");
+ printk(KERN_WARNING "of free memory.\n\n");
+#endif
+
+#ifdef __LP64__
+
+#ifndef CONFIG_DISCONTIGMEM
/*
* Sort the ranges. Since the number of ranges is typically
* small, and performance is not an issue here, just do
}
}
-#ifndef CONFIG_DISCONTIGMEM
/*
* Throw out ranges that are too far apart (controlled by
- * MAX_GAP).
+ * MAX_GAP). If CONFIG_DISCONTIGMEM wasn't implemented so
+ * poorly, we would recommend enabling that option, but,
+ * until it is fixed, this is the best way to go.
*/
for (i = 1; i < npmem_ranges; i++) {
(pmem_ranges[i-1].start_pfn +
pmem_ranges[i-1].pages) > MAX_GAP) {
npmem_ranges = i;
- printk("Large gap in memory detected (%ld pages). "
- "Consider turning on CONFIG_DISCONTIGMEM\n",
- pmem_ranges[i].start_pfn -
- (pmem_ranges[i-1].start_pfn +
- pmem_ranges[i-1].pages));
break;
}
}
}
}
+#endif /* __LP64__ */
+
sysram_resource_count = npmem_ranges;
for (i = 0; i < sysram_resource_count; i++) {
struct resource *res = &sysram_resources[i];
mem_limit_func(); /* check for "mem=" argument */
mem_max = 0;
- num_physpages = 0;
for (i = 0; i < npmem_ranges; i++) {
unsigned long rsize;
npmem_ranges = i + 1;
mem_max = mem_limit;
}
- num_physpages += pmem_ranges[i].pages;
break;
}
- num_physpages += pmem_ranges[i].pages;
mem_max += rsize;
}
printk(KERN_INFO "Total Memory: %ld Mb\n",mem_max >> 20);
#ifndef CONFIG_DISCONTIGMEM
+
/* Merge the ranges, keeping track of the holes */
{
bootmap_start_pfn = PAGE_ALIGN(__pa((unsigned long) &_end)) >> PAGE_SHIFT;
#ifdef CONFIG_DISCONTIGMEM
- for (i = 0; i < MAX_PHYSMEM_RANGES; i++) {
- memset(NODE_DATA(i), 0, sizeof(pg_data_t));
- NODE_DATA(i)->bdata = &bmem_data[i];
- }
- memset(pfnnid_map, 0xff, sizeof(pfnnid_map));
-
- numnodes = npmem_ranges;
-
for (i = 0; i < npmem_ranges; i++)
- node_set_online(i);
+ node_data[i].pg_data.bdata = &bmem_data[i];
#endif
-
/*
* Initialize and free the full range of memory in each range.
* Note that the only writing these routines do are to the bootmap,
void __init mem_init(void)
{
- high_memory = __va((max_pfn << PAGE_SHIFT));
+ int i;
-#ifndef CONFIG_DISCONTIGMEM
- max_mapnr = page_to_pfn(virt_to_page(high_memory - 1)) + 1;
- mem_map = zone_table[ZONE_DMA]->zone_mem_map;
- totalram_pages += free_all_bootmem();
-#else
- {
- int i;
+ high_memory = __va((max_pfn << PAGE_SHIFT));
+ max_mapnr = (virt_to_page(high_memory - 1) - mem_map) + 1;
- for (i = 0; i < npmem_ranges; i++)
- totalram_pages += free_all_bootmem_node(NODE_DATA(i));
- }
-#endif
+ num_physpages = 0;
+ mem_map = zone_table[0]->zone_mem_map;
+ for (i = 0; i < npmem_ranges; i++)
+ num_physpages += free_all_bootmem_node(NODE_DATA(i));
+ totalram_pages = num_physpages;
printk(KERN_INFO "Memory: %luk available\n", num_physpages << (PAGE_SHIFT-10));
show_free_areas();
printk(KERN_INFO "Free swap: %6ldkB\n",
nr_swap_pages<<(PAGE_SHIFT-10));
-#ifndef CONFIG_DISCONTIGMEM
i = max_mapnr;
while (i-- > 0) {
total++;
reserved++;
else if (PageSwapCache(mem_map+i))
cached++;
- else if (!page_count(&mem_map[i]))
+ else if (!atomic_read(&mem_map[i].count))
free++;
else
- shared += page_count(&mem_map[i]) - 1;
- }
-#else
- for (i = 0; i < npmem_ranges; i++) {
- int j;
-
- for (j = node_start_pfn(i); j < node_end_pfn(i); j++) {
- struct page *p;
-
- p = node_mem_map(i) + j - node_start_pfn(i);
-
- total++;
- if (PageReserved(p))
- reserved++;
- else if (PageSwapCache(p))
- cached++;
- else if (!page_count(p))
- free++;
- else
- shared += page_count(p) - 1;
- }
+ shared += atomic_read(&mem_map[i].count) - 1;
}
-#endif
printk(KERN_INFO "%d pages of RAM\n", total);
printk(KERN_INFO "%d reserved pages\n", reserved);
printk(KERN_INFO "%d pages shared\n", shared);
printk(KERN_INFO "%d pages swap cached\n", cached);
-
-
-#ifdef CONFIG_DISCONTIGMEM
- {
- struct zonelist *zl;
- int i, j, k;
-
- for (i = 0; i < npmem_ranges; i++) {
- for (j = 0; j < MAX_NR_ZONES; j++) {
- zl = NODE_DATA(i)->node_zonelists + j;
-
- printk("Zone list for zone %d on node %d: ", j, i);
- for (k = 0; zl->zones[k] != NULL; k++)
- printk("[%d/%s] ", zl->zones[k]->zone_pgdat->node_id, zl->zones[k]->name);
- printk("\n");
- }
- }
- }
-#endif
}
#if PTRS_PER_PMD == 1
pmd = (pmd_t *)__pa(pg_dir);
#else
- pmd = (pmd_t *)pgd_address(*pg_dir);
+ pmd = (pmd_t *) (PAGE_MASK & pgd_val(*pg_dir));
/*
* pmd is physical at this point
pmd = (pmd_t *) __pa(pmd);
}
- pgd_populate(NULL, pg_dir, __va(pmd));
+ pgd_val(*pg_dir) = _PAGE_TABLE | (unsigned long) pmd;
#endif
pg_dir++;
* pg_table is physical at this point
*/
- pg_table = (pte_t *)pmd_address(*pmd);
+ pg_table = (pte_t *) (PAGE_MASK & pmd_val(*pmd));
if (!pg_table) {
pg_table = (pte_t *)
alloc_bootmem_low_pages_node(NODE_DATA(0),PAGE_SIZE);
pg_table = (pte_t *) __pa(pg_table);
}
- pmd_populate_kernel(NULL, pmd, __va(pg_table));
+ pmd_val(*pmd) = _PAGE_TABLE |
+ (unsigned long) pg_table;
/* now change pg_table to kernel virtual addresses */
for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++,pg_table++) {
pte_t pte;
+#if !defined(CONFIG_STI_CONSOLE)
+#warning STI console should explicitly allocate executable pages but does not
/*
* Map the fault vector writable so we can
* write the HPMC checksum.
&& address != gw_addr)
pte = __mk_pte(address, PAGE_KERNEL_RO);
else
+#endif
pte = __mk_pte(address, pgprot);
if (address >= end_paddr)
flush_tlb_all_local();
for (i = 0; i < npmem_ranges; i++) {
- unsigned long zones_size[MAX_NR_ZONES] = { 0, 0, 0 };
+ unsigned long zones_size[MAX_NR_ZONES] = { 0, 0, 0, };
- /* We have an IOMMU, so all memory can go into a single
- ZONE_DMA zone. */
zones_size[ZONE_DMA] = pmem_ranges[i].pages;
-
free_area_init_node(i,NODE_DATA(i),NULL,zones_size,
- pmem_ranges[i].start_pfn, 0);
+ (pmem_ranges[i].start_pfn << PAGE_SHIFT),0);
+ }
#ifdef CONFIG_DISCONTIGMEM
+ /*
+ * Initialize support for virt_to_page() macro.
+ *
+ * Note that MAX_ADDRESS is the largest virtual address that
+ * we can map. However, since we map all physical memory into
+ * the kernel address space, it also has an effect on the maximum
+ * physical address we can map (MAX_ADDRESS - PAGE_OFFSET).
+ */
+
+ maxchunkmap = MAX_ADDRESS >> CHUNKSHIFT;
+ chunkmap = (unsigned char *)alloc_bootmem(maxchunkmap);
+
+ for (i = 0; i < maxchunkmap; i++)
+ chunkmap[i] = BADCHUNK;
+
+ for (i = 0; i < npmem_ranges; i++) {
+
+ ADJ_NODE_MEM_MAP(i) = NODE_MEM_MAP(i) - pmem_ranges[i].start_pfn;
{
- int j;
- for (j = (node_start_pfn(i) >> PFNNID_SHIFT);
- j <= (node_end_pfn(i) >> PFNNID_SHIFT);
- j++) {
- pfnnid_map[j] = i;
- }
+ unsigned long chunk_paddr;
+ unsigned long end_paddr;
+ int chunknum;
+
+ chunk_paddr = (pmem_ranges[i].start_pfn << PAGE_SHIFT);
+ end_paddr = chunk_paddr + (pmem_ranges[i].pages << PAGE_SHIFT);
+ chunk_paddr &= CHUNKMASK;
+
+ chunknum = (int)CHUNKNUM(chunk_paddr);
+ while (chunk_paddr < end_paddr) {
+ if (chunknum >= maxchunkmap)
+ goto badchunkmap1;
+ if (chunkmap[chunknum] != BADCHUNK)
+ goto badchunkmap2;
+ chunkmap[chunknum] = (unsigned char)i;
+ chunk_paddr += CHUNKSZ;
+ chunknum++;
+ }
}
-#endif
}
+
+ return;
+
+badchunkmap1:
+ panic("paging_init: Physical address exceeds maximum address space!\n");
+badchunkmap2:
+ panic("paging_init: Collision in chunk map array. CHUNKSZ needs to be smaller\n");
+#endif
}
#ifdef CONFIG_PA20
struct net_device *dev;
struct scc_enet_private *cep;
int i, j, err;
- uint dp_offset;
+ void * dpaddr;
unsigned char *eap;
unsigned long mem_addr;
bd_t *bd;
* These are relative offsets in the DP ram address space.
* Initialize base addresses for the buffer descriptors.
*/
- dp_offset = cpm_dpalloc(sizeof(cbd_t) * RX_RING_SIZE, 8);
- ep->sen_genscc.scc_rbase = dp_offset;
- cep->rx_bd_base = (cbd_t *)cpm_dpram_addr(dp_offset);
+ dpaddr = cpm2_dpalloc(sizeof(cbd_t) * RX_RING_SIZE, 8);
+ ep->sen_genscc.scc_rbase = cpm2_dpram_offset(dpaddr);
+ cep->rx_bd_base = (cbd_t *)dpaddr;
- dp_offset = cpm_dpalloc(sizeof(cbd_t) * TX_RING_SIZE, 8);
- ep->sen_genscc.scc_tbase = dp_offset;
- cep->tx_bd_base = (cbd_t *)cpm_dpram_addr(dp_offset);
+ dpaddr = cpm2_dpalloc(sizeof(cbd_t) * TX_RING_SIZE, 8);
+ ep->sen_genscc.scc_tbase = cpm2_dpram_offset(dpaddr);
+ cep->tx_bd_base = (cbd_t *)dpaddr;
cep->dirty_tx = cep->cur_tx = cep->tx_bd_base;
cep->cur_rx = cep->rx_bd_base;
#define BRG_UART_CLK_DIV16 (BRG_UART_CLK/16)
void
-cpm_setbrg(uint brg, uint rate)
+m8xx_cpm_setbrg(uint brg, uint rate)
{
volatile uint *bp;
* with the processor and the microcode patches applied / activated.
* But the following should be at least safe.
*/
- rh_attach_region(&cpm_dpmem_info, (void *)CPM_DATAONLY_BASE, CPM_DATAONLY_SIZE);
+ rh_attach_region(&cpm_dpmem_info, cp->cp_dpmem + CPM_DATAONLY_BASE,
+ CPM_DATAONLY_SIZE);
}
/*
* Now it returns the actuall physical address of that area.
* use m8xx_cpm_dpram_offset() to get the index
*/
-uint cpm_dpalloc(uint size, uint align)
+void *m8xx_cpm_dpalloc(int size)
{
void *start;
unsigned long flags;
spin_lock_irqsave(&cpm_dpmem_lock, flags);
- cpm_dpmem_info.alignment = align;
start = rh_alloc(&cpm_dpmem_info, size, "commproc");
spin_unlock_irqrestore(&cpm_dpmem_lock, flags);
- return (uint)start;
+ return start;
}
-EXPORT_SYMBOL(cpm_dpalloc);
+EXPORT_SYMBOL(m8xx_cpm_dpalloc);
-int cpm_dpfree(uint offset)
+int m8xx_cpm_dpfree(void *addr)
{
int ret;
unsigned long flags;
spin_lock_irqsave(&cpm_dpmem_lock, flags);
- ret = rh_free(&cpm_dpmem_info, (void *)offset);
+ ret = rh_free(&cpm_dpmem_info, addr);
spin_unlock_irqrestore(&cpm_dpmem_lock, flags);
return ret;
}
-EXPORT_SYMBOL(cpm_dpfree);
+EXPORT_SYMBOL(m8xx_cpm_dpfree);
-uint cpm_dpalloc_fixed(uint offset, uint size, uint align)
+void *m8xx_cpm_dpalloc_fixed(void *addr, int size)
{
void *start;
unsigned long flags;
spin_lock_irqsave(&cpm_dpmem_lock, flags);
- cpm_dpmem_info.alignment = align;
- start = rh_alloc_fixed(&cpm_dpmem_info, (void *)offset, size, "commproc");
+ start = rh_alloc_fixed(&cpm_dpmem_info, addr, size, "commproc");
spin_unlock_irqrestore(&cpm_dpmem_lock, flags);
- return (uint)start;
+ return start;
}
-EXPORT_SYMBOL(cpm_dpalloc_fixed);
+EXPORT_SYMBOL(m8xx_cpm_dpalloc_fixed);
-void cpm_dpdump(void)
+void m8xx_cpm_dpdump(void)
{
rh_dump(&cpm_dpmem_info);
}
-EXPORT_SYMBOL(cpm_dpdump);
+EXPORT_SYMBOL(m8xx_cpm_dpdump);
-void *cpm_dpram_addr(uint offset)
+int m8xx_cpm_dpram_offset(void *addr)
+{
+ return (u_char *)addr - ((immap_t *)IMAP_ADDR)->im_cpm.cp_dpmem;
+}
+EXPORT_SYMBOL(m8xx_cpm_dpram_offset);
+
+void *m8xx_cpm_dpram_addr(int offset)
{
return ((immap_t *)IMAP_ADDR)->im_cpm.cp_dpmem + offset;
}
-EXPORT_SYMBOL(cpm_dpram_addr);
+EXPORT_SYMBOL(m8xx_cpm_dpram_addr);
static int mixer_open(struct inode *inode, struct file *file)
{
mixer.busy = 1;
- return nonseekable_open(inode, file);
+ return 0;
}
sound_set_format(AFMT_MU_LAW);
}
- return nonseekable_open(inode, file);
+ return 0;
err_out_nobusy:
if (file->f_mode & FMODE_WRITE) {
len += sprintf(buffer+len, "\tsq.active = %d sq.syncing = %d\n",
sq.active, sq.syncing);
state.len = len;
- return nonseekable_open(inode, file);
+ return 0;
}
int __init tdm8xx_sound_init(void)
{
int i, has_sound;
- uint dp_offset;
+ uint dp_addr, dp_mem;
volatile uint *sirp;
volatile cbd_t *bdp;
volatile cpm8xx_t *cp;
/* We need to allocate a transmit and receive buffer
* descriptors from dual port ram.
*/
- dp_addr = cpm_dpalloc(sizeof(cbd_t) * numReadBufs, 8);
+ dp_mem = m8xx_cpm_dpalloc(sizeof(cbd_t) * numReadBufs);
+ dp_addr = m8xx_cpm_dpram_offset(dp_mem);
/* Set the physical address of the host memory
* buffers in the buffer descriptors, and the
* virtual address for us to work with.
*/
bdp = (cbd_t *)&cp->cp_dpmem[dp_addr];
- up->smc_rbase = dp_offset;
+ up->smc_rbase = dp_mem;
rx_cur = rx_base = (cbd_t *)bdp;
for (i=0; i<(numReadBufs-1); i++) {
/* Now, do the same for the transmit buffers.
*/
- dp_offset = cpm_dpalloc(sizeof(cbd_t) * numBufs, 8);
+ dp_mem = m8xx_cpm_dpalloc(sizeof(cbd_t) * numBufs);
+ dp_addr = m8xx_cpm_dpram_offset(dp_mem);
bdp = (cbd_t *)&cp->cp_dpmem[dp_addr];
- up->smc_tbase = dp_offset;
+ up->smc_tbase = dp_mem;
tx_cur = tx_base = (cbd_t *)bdp;
for (i=0; i<(numBufs-1); i++) {
struct net_device *dev;
struct scc_enet_private *cep;
int i, j, k, err;
- uint dp_offset;
+ void *dp_mem;
+ unsigned int dp_addr;
unsigned char *eap, *ba;
dma_addr_t mem_addr;
bd_t *bd;
* These are relative offsets in the DP ram address space.
* Initialize base addresses for the buffer descriptors.
*/
- dp_offset = cpm_dpalloc(sizeof(cbd_t) * RX_RING_SIZE, 8);
- ep->sen_genscc.scc_rbase = dp_offset;
- cep->rx_bd_base = cpm_dpram_addr(dp_offset);
-
- dp_offset = cpm_dpalloc(sizeof(cbd_t) * TX_RING_SIZE, 8);
- ep->sen_genscc.scc_tbase = dp_offset;
- cep->tx_bd_base = cpm_dpram_addr(dp_offset);
+ dp_mem = m8xx_cpm_dpalloc(sizeof(cbd_t) * RX_RING_SIZE);
+ dp_addr = m8xx_cpm_dpram_offset(dp_mem);
+ ep->sen_genscc.scc_rbase = dp_mem;
+ cep->rx_bd_base = (cbd_t *)&cp->cp_dpmem[dp_addr];
+
+ dp_mem = m8xx_cpm_dpalloc(sizeof(cbd_t) * TX_RING_SIZE);
+ dp_addr = m8xx_cpm_dpram_offset(dp_mem);
+ ep->sen_genscc.scc_tbase = dp_mem;
+ cep->tx_bd_base = (cbd_t *)&cp->cp_dpmem[dp_addr];
cep->dirty_tx = cep->cur_tx = cep->tx_bd_base;
cep->cur_rx = cep->rx_bd_base;
{
struct serial_state * state;
ser_info_t *info;
- uint mem_addr, iobits, dp_offset;
+ uint mem_addr, dp_addr, dp_mem, iobits;
int i, j, idx;
ushort chan;
volatile cbd_t *bdp;
* descriptors from dual port ram, and a character
* buffer area from host mem.
*/
- dp_offset = cpm_dpalloc(sizeof(cbd_t) * RX_NUM_FIFO, 8);
+ dp_mem = m8xx_cpm_dpalloc(sizeof(cbd_t) * RX_NUM_FIFO);
+ dp_addr = m8xx_cpm_dpram_offset(dp_mem);
/* Allocate space for FIFOs in the host memory.
*/
* buffers in the buffer descriptors, and the
* virtual address for us to work with.
*/
- bdp = (cbd_t *)&cp->cp_dpmem[dp_offset];
+ bdp = (cbd_t *)&cp->cp_dpmem[dp_addr];
info->rx_cur = info->rx_bd_base = (cbd_t *)bdp;
for (j=0; j<(RX_NUM_FIFO-1); j++) {
if (info->state->smc_scc_num & NUM_IS_SCC) {
scp = &cp->cp_scc[idx];
sup = (scc_uart_t *)&cp->cp_dparam[state->port];
- sup->scc_genscc.scc_rbase = dp_offset;
+ sup->scc_genscc.scc_rbase = dp_mem;
}
else {
sp = &cp->cp_smc[idx];
up = (smc_uart_t *)&cp->cp_dparam[state->port];
- up->smc_rbase = dp_offset;
+ up->smc_rbase = dp_mem;
}
- dp_offset = cpm_dpalloc(sizeof(cbd_t) * TX_NUM_FIFO, 8);
+ dp_mem = m8xx_cpm_dpalloc(sizeof(cbd_t) * TX_NUM_FIFO);
+ dp_addr = m8xx_cpm_dpram_offset(dp_mem);
/* Allocate space for FIFOs in the host memory.
*/
* buffers in the buffer descriptors, and the
* virtual address for us to work with.
*/
- bdp = (cbd_t *)&cp->cp_dpmem[dp_offset];
+ bdp = (cbd_t *)&cp->cp_dpmem[dp_addr];
info->tx_cur = info->tx_bd_base = (cbd_t *)bdp;
for (j=0; j<(TX_NUM_FIFO-1); j++) {
bdp->cbd_sc = (BD_SC_WRAP | BD_SC_INTRPT);
if (info->state->smc_scc_num & NUM_IS_SCC) {
- sup->scc_genscc.scc_tbase = dp_offset;
+ sup->scc_genscc.scc_tbase = dp_mem;
/* Set up the uart parameters in the
* parameter ram.
cp->cp_simode &= ~(0xffff << (idx * 16));
cp->cp_simode |= (i << ((idx * 16) + 12));
- up->smc_tbase = dp_offset;
+ up->smc_tbase = dp_mem;
/* Set up the uart parameters in the
* parameter ram.
static int __init serial_console_setup(struct console *co, char *options)
{
struct serial_state *ser;
- uint mem_addr, bidx, idx, dp_offset;
+ uint mem_addr, dp_addr, dp_mem, bidx, idx;
ushort chan;
volatile cbd_t *bdp;
volatile cpm8xx_t *cp;
* memory yet because vm allocator isn't initialized
* during this early console init.
*/
- dp_offset = cpm_dpalloc(8, 8);
- mem_addr = (uint)(&cpmp->cp_dpmem[dp_offset]);
+ dp_mem = m8xx_cpm_dpalloc(8);
+ dp_addr = m8xx_cpm_dpram_offset(dp_mem);
+ mem_addr = (uint)(&cpmp->cp_dpmem[dp_addr]);
/* Allocate space for two buffer descriptors in the DP ram.
*/
- dp_offset = cpm_dpalloc(sizeof(cbd_t) * 2, 8);
+ dp_mem = m8xx_cpm_dpalloc(sizeof(cbd_t) * 2);
+ dp_addr = m8xx_cpm_dpram_offset(dp_mem);
/* Set the physical address of the host memory buffers in
* the buffer descriptors.
*/
- bdp = (cbd_t *)&cp->cp_dpmem[dp_offset];
+ bdp = (cbd_t *)&cp->cp_dpmem[dp_addr];
bdp->cbd_bufaddr = iopa(mem_addr);
(bdp+1)->cbd_bufaddr = iopa(mem_addr+4);
*/
if (ser->smc_scc_num & NUM_IS_SCC) {
- sup->scc_genscc.scc_rbase = dp_offset;
- sup->scc_genscc.scc_tbase = dp_offset + sizeof(cbd_t);
+ sup->scc_genscc.scc_rbase = dp_mem;
+ sup->scc_genscc.scc_tbase = dp_mem + sizeof(cbd_t);
/* Set up the uart parameters in the
* parameter ram.
}
else {
- up->smc_rbase = dp_offset; /* Base of receive buffer desc. */
- up->smc_tbase = dp_offset+sizeof(cbd_t); /* Base of xmt buffer desc. */
+ up->smc_rbase = dp_mem; /* Base of receive buffer desc. */
+ up->smc_tbase = dp_mem+sizeof(cbd_t); /* Base of xmt buffer desc. */
up->smc_rfcr = SMC_EB;
up->smc_tfcr = SMC_EB;
default 6xx
config 6xx
- bool "6xx/7xx/74xx/52xx/8260"
+ bool "6xx/7xx/74xx/8260"
help
There are four types of PowerPC chips supported. The more common
types (601, 603, 604, 740, 750, 7400), the Motorola embedded
- versions (821, 823, 850, 855, 860, 52xx, 8260), the IBM embedded
- versions (403 and 405) and the high end 64 bit Power processors
- (POWER 3, POWER4, and IBM 970 also known as G5)
+ versions (821, 823, 850, 855, 860, 8260), the IBM embedded versions
+ (403 and 405) and the high end 64 bit Power processors (POWER 3,
+ POWER4, and IBM 970 also known as G5)
Unless you are building a kernel for one of the embedded processor
systems, 64 bit IBM RS/6000 or an Apple G5, choose 6xx.
Note that the kernel runs in 32-bit mode even on 64-bit chips.
- Also note that because the 52xx & 82xx family has a 603e core,
- specific support for that chipset is asked later on.
+ Also note that because the 82xx family has a 603e core, specific
+ support for that chipset is asked later on.
config 40x
bool "40x"
fly. This is a nice method to save battery power on notebooks,
because the lower the clock speed, the less power the CPU consumes.
- For more information, take a look at <file:Documentation/cpu-freq> or
+ For more information, take a look at linux/Documentation/cpu-freq or
at <http://www.brodo.de/cpufreq/>
If in doubt, say N.
config SBS8260
bool "SBS8260"
-config RPX8260
+config RPX6
bool "RPXSUPER"
config TQM8260
config ADS8272
bool "ADS8272"
-config LITE5200
- bool "Freescale LITE5200 / (IceCube)"
- select PPC_MPC52xx
- help
- Support for the LITE5200 dev board for the MPC5200 from Freescale.
- This is for the LITE5200 version 2.0 board. Don't know if it changes
- much but it's only been tested on this board version. I think this
- board is also known as IceCube.
-
endchoice
config PQ2ADS
bool
depends on 8xx || 8260
default y
-
-config PPC_MPC52xx
- bool
config 8260
bool "CPM2 Support" if WILLOW
depends on 6xx
- default y if TQM8260 || RPX8260 || EST8260 || SBS8260 || SBC82xx
+ default y if TQM8260 || RPXSUPER || EST8260 || SBS8260 || SBC82xx
help
The MPC8260 is a typical embedded CPU made by Motorola. Selecting
this option means that you wish to build a kernel for a machine with
config CPM2
bool
- depends on 8260 || MPC8560
+ depends on 8260
default y
help
The CPM2 (Communications Processor Module) is a coprocessor on
config FSL_OCP
bool
- depends on MPC10X_BRIDGE || PPC_MPC52xx
+ depends on MPC10X_BRIDGE
default y
config MPC10X_OPENPIC
an image of the device tree that the kernel copies from Open
Firmware. If unsure, say Y here.
+config PPC_RTAS
+ bool "Support for RTAS (RunTime Abstraction Services) in /proc"
+ depends on PPC_OF && PROC_FS
+ ---help---
+ When you use this option, you will be able to use RTAS from
+ userspace.
+
+ RTAS stands for RunTime Abstraction Services and should
+ provide a portable way to access and set system information. This is
+ commonly used on RS/6000 (pSeries) computers.
+
+ You can access RTAS via the special proc file system entry rtas.
+ Don't confuse this rtas entry with the one in /proc/device-tree/rtas
+ which is readonly.
+
+ If you don't know if you can use RTAS look into
+ /proc/device-tree/rtas. If there are some entries, it is very likely
+ that you will be able to use RTAS.
+
+ You can do cool things with rtas. To print out information about
+ various sensors in the system, just do a
+
+ $ cat /proc/rtas/sensors
+
+ or if you power off your machine at night but want it running when
+ you enter your office at 7:45 am, do a
+
+ # date -d 'tomorrow 7:30' +%s > /proc/rtas/poweron
+
+ and shutdown.
+
+ If unsure, say Y.
+
config PREP_RESIDUAL
bool "Support for PReP Residual Data"
depends on PPC_PREP
config KGDB
bool "Include kgdb kernel debugger"
- depends on DEBUG_KERNEL && (BROKEN || PPC_GEN550 || 4xx)
+ depends on DEBUG_KERNEL
select DEBUG_INFO
help
Include in-kernel hooks for kgdb, the Linux kernel source level
config SERIAL_TEXT_DEBUG
bool "Support for early boot texts over serial port"
- depends on 4xx || GT64260 || LOPEC || PPLUS || PRPMC800 || PPC_GEN550 || PPC_MPC52xx
+ depends on 4xx || GT64260 || LOPEC || PPLUS || PRPMC800 || PPC_GEN550
config PPC_OCP
bool
LDFLAGS_vmlinux := -Ttext $(KERNELLOAD) -Bstatic
CPPFLAGS += -Iarch/$(ARCH)
AFLAGS += -Iarch/$(ARCH)
-CFLAGS += -Iarch/$(ARCH) -msoft-float -pipe \
+cflags-y += -Iarch/$(ARCH) -msoft-float -pipe \
-ffixed-r2 -Wno-uninitialized -mmultiple
CPP = $(CC) -E $(CFLAGS)
-CHECK := $(CHECK) -D__powerpc__=1
-
ifndef CONFIG_E500
-CFLAGS += -mstring
+cflags-y += -mstring
endif
-cpu-as-$(CONFIG_PPC64BRIDGE) += -Wa,-mppc64bridge
-cpu-as-$(CONFIG_4xx) += -Wa,-m405
-cpu-as-$(CONFIG_6xx) += -Wa,-maltivec
-cpu-as-$(CONFIG_POWER4) += -Wa,-maltivec
-cpu-as-$(CONFIG_E500) += -Wa,-me500
+cflags-$(CONFIG_4xx) += -Wa,-m405
+cflags-$(CONFIG_E500) += -Wa,-me500
+cflags-$(CONFIG_PPC64BRIDGE) += -Wa,-mppc64bridge
-AFLAGS += $(cpu-as-y)
-CFLAGS += $(cpu-as-y)
+CFLAGS += $(cflags-y)
head-y := arch/ppc/kernel/head.o
head-$(CONFIG_8xx) := arch/ppc/kernel/head_8xx.o
else
NEW_AS := 0
endif
-# gcc-3.4 and binutils-2.14 are a fatal combination.
-GCC_VERSION := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-version.sh $(CC))
-BAD_GCC_AS := $(shell echo mftb 5 | $(AS) -mppc -many -o /dev/null >/dev/null 2>&1 && echo 0 || echo 1)
-checkbin:
-ifeq ($(GCC_VERSION)$(BAD_GCC_AS),03041)
- @echo -n '*** ${VERSION}.${PATCHLEVEL} kernels no longer build '
- @echo 'correctly with gcc-3.4 and your version of binutils.'
- @echo '*** Please upgrade your binutils or downgrade your gcc'
- @false
-endif
ifneq ($(NEW_AS),0)
+checkbin:
@echo -n '*** ${VERSION}.${PATCHLEVEL} kernels no longer build '
@echo 'correctly with old versions of binutils.'
@echo '*** Please upgrade your binutils to ${GOODVER} or newer'
@false
-endif
+else
+checkbin:
@true
+endif
CLEAN_FILES += include/asm-$(ARCH)/offsets.h \
arch/$(ARCH)/kernel/asm-offsets.s
void _vprintk(void(*putc)(const char), const char *fmt0, va_list ap);
unsigned char *ISA_io = NULL;
-#if defined(CONFIG_SERIAL_CPM_CONSOLE) || defined(CONFIG_SERIAL_8250_CONSOLE) \
- || defined(CONFIG_SERIAL_MPC52xx_CONSOLE)
+#if defined(CONFIG_SERIAL_CONSOLE) || defined(CONFIG_SERIAL_8250_CONSOLE)
extern unsigned long com_port;
extern int serial_tstc(unsigned long com_port);
int tstc(void)
{
-#if defined(CONFIG_SERIAL_CPM_CONSOLE) || defined(CONFIG_SERIAL_8250_CONSOLE) \
- || defined(CONFIG_SERIAL_MPC52xx_CONSOLE)
+#if defined(CONFIG_SERIAL_CONSOLE) || defined(CONFIG_SERIAL_8250_CONSOLE)
if(keyb_present)
return (CRT_tstc() || serial_tstc(com_port));
else
int getc(void)
{
while (1) {
-#if defined(CONFIG_SERIAL_CPM_CONSOLE) || defined(CONFIG_SERIAL_8250_CONSOLE) \
- || defined(CONFIG_SERIAL_MPC52xx_CONSOLE)
+#if defined(CONFIG_SERIAL_CONSOLE) || defined(CONFIG_SERIAL_8250_CONSOLE)
if (serial_tstc(com_port))
return (serial_getc(com_port));
#endif /* serial console */
{
int x,y;
-#if defined(CONFIG_SERIAL_CPM_CONSOLE) || defined(CONFIG_SERIAL_8250_CONSOLE) \
- || defined(CONFIG_SERIAL_MPC52xx_CONSOLE)
+#if defined(CONFIG_SERIAL_CONSOLE) || defined(CONFIG_SERIAL_8250_CONSOLE)
serial_putc(com_port, c);
if ( c == '\n' )
serial_putc(com_port, '\r');
y = orig_y;
while ( ( c = *s++ ) != '\0' ) {
-#if defined(CONFIG_SERIAL_CPM_CONSOLE) || defined(CONFIG_SERIAL_8250_CONSOLE) \
- || defined(CONFIG_SERIAL_MPC52xx_CONSOLE)
+#if defined(CONFIG_SERIAL_CONSOLE) || defined(CONFIG_SERIAL_8250_CONSOLE)
serial_putc(com_port, c);
if ( c == '\n' ) serial_putc(com_port, '\r');
#endif /* serial console */
entrypoint-$(CONFIG_SPRUCE) := 0x00800000
misc-$(CONFIG_SPRUCE) += misc-spruce.o
- zimage-$(CONFIG_LITE5200) := zImage-STRIPELF
-zimageinitrd-$(CONFIG_LITE5200) := zImage.initrd-STRIPELF
- end-$(CONFIG_LITE5200) := lite5200
- cacheflag-$(CONFIG_LITE5200) := -include $(clear_L2_L3)
-
-
# SMP images should have a '.smp' suffix.
end-$(CONFIG_SMP) := $(end-y).smp
boot-$(CONFIG_RPXCLASSIC) += iic.o pci.o qspan_pci.o
boot-$(CONFIG_RPXLITE) += iic.o
# Different boards need different serial implementations.
-ifeq ($(CONFIG_SERIAL_CPM_CONSOLE),y)
+ifeq ($(CONFIG_SERIAL_CONSOLE),y)
boot-$(CONFIG_8xx) += m8xx_tty.o
boot-$(CONFIG_8260) += m8260_tty.o
-endif
-boot-$(CONFIG_SERIAL_MPC52xx_CONSOLE) += mpc52xx_tty.o
boot-$(CONFIG_GT64260_CONSOLE) += gt64260_tty.o
+endif
LIBS := $(common)/lib.a $(bootlib)/lib.a
ifeq ($(CONFIG_PPC_PREP),y)
#endif /* CONFIG_MBX */
#if defined(CONFIG_RPXLITE) || defined(CONFIG_RPXCLASSIC) || \
- defined(CONFIG_RPX8260) || defined(CONFIG_EP405)
+ defined(CONFIG_RPX6) || defined(CONFIG_EP405)
/* Helper functions for Embedded Planet boards.
*/
/* Because I didn't find anything that would do this.......
}
}
-#ifdef CONFIG_RPX8260
+#ifdef CONFIG_RPX6
static uint
rpx_baseten(u_char *cp)
{
}
#endif /* SBS8260 */
-#ifdef CONFIG_RPX8260
+#ifdef CONFIG_RPX6
void
embed_config(bd_t **bdp)
{
isync
#endif
-#if defined(CONFIG_MBX) || defined(CONFIG_RPX8260) || defined(CONFIG_PPC_PREP)
+#if defined(CONFIG_MBX) || defined(CONFIG_RPX6) || defined(CONFIG_PPC_PREP)
mr r29,r3 /* On the MBX860, r3 is the board info pointer.
* On the RPXSUPER, r3 points to the NVRAM
* configuration keys.
mr r3, r29
#endif
-#if defined(CONFIG_MBX) || defined(CONFIG_RPX8260) || defined(CONFIG_PPC_PREP)
+#if defined(CONFIG_MBX) || defined(CONFIG_RPX6) || defined(CONFIG_PPC_PREP)
mr r4,r29 /* put the board info pointer where the relocate
* routine will find it
*/
/* If defined, enables serial console. The value (1 through 4)
* should designate which SCC is used, but this isn't complete. Only
* SCC1 is known to work at this time.
- * We're only linked if SERIAL_CPM_CONSOLE=y, so we only need to test
- * SERIAL_CPM_SCC1.
*/
-#ifdef CONFIG_SERIAL_CPM_SCC1
+#ifdef CONFIG_SCC_CONSOLE
#define SCC_CONSOLE 1
#endif
unsigned long
serial_init(int ignored, bd_t *bd)
{
+ volatile smc_t *sp;
+ volatile smc_uart_t *up;
#ifdef SCC_CONSOLE
volatile scc_t *sccp;
volatile scc_uart_t *sup;
-#else
- volatile smc_t *sp;
- volatile smc_uart_t *up;
#endif
volatile cbd_t *tbdf, *rbdf;
volatile cpm2_map_t *ip;
{
volatile cbd_t *rbdf;
volatile char *buf;
-#ifdef SCC_CONSOLE
- volatile scc_uart_t *sup;
-#else
volatile smc_uart_t *up;
-#endif
+ volatile scc_uart_t *sup;
volatile cpm2_map_t *ip;
int i, nc;
{
volatile cbd_t *tbdf;
volatile char *buf;
-#ifdef SCC_CONSOLE
- volatile scc_uart_t *sup;
-#else
volatile smc_uart_t *up;
-#endif
+ volatile scc_uart_t *sup;
volatile cpm2_map_t *ip;
+ extern bd_t *board_info;
ip = (cpm2_map_t *)CPM_MAP_ADDR;
#ifdef SCC_CONSOLE
serial_tstc(void *ignored)
{
volatile cbd_t *rbdf;
-#ifdef SCC_CONSOLE
- volatile scc_uart_t *sup;
-#else
volatile smc_uart_t *up;
-#endif
+ volatile scc_uart_t *sup;
volatile cpm2_map_t *ip;
ip = (cpm2_map_t *)CPM_MAP_ADDR;
* initialize the serial console port.
*/
embed_config(&bp);
-#if defined(CONFIG_SERIAL_CPM_CONSOLE) || defined(CONFIG_SERIAL_8250_CONSOLE)
+#if defined(CONFIG_SERIAL_CONSOLE) || defined(CONFIG_SERIAL_8250_CONSOLE)
com_port = serial_init(0, bp);
#endif
rec = (struct bi_record *)((unsigned long)rec + rec->size);
}
puts("Now booting the kernel\n");
-#if defined(CONFIG_SERIAL_CPM_CONSOLE) || defined(CONFIG_SERIAL_8250_CONSOLE)
serial_close(com_port);
-#endif
return (unsigned long)hold_residual;
}
puts("\n");
puts("Uncompressing Linux...");
- gunzip(NULL, 0x400000, zimage_start, &zimage_size);
+ gunzip(0, 0x400000, zimage_start, &zimage_size);
puts("done.\n");
/* get the bi_rec address */
+++ /dev/null
-/*
- * arch/ppc/boot/simple/mpc52xx_tty.c
- *
- * Minimal serial functions needed to send messages out a MPC52xx
- * Programmable Serial Controller (PSC).
- *
- * Author: Dale Farnsworth <dfarnsworth@mvista.com>
- *
- * 2003-2004 (c) MontaVista, Software, Inc. This file is licensed under the
- * terms of the GNU General Public License version 2. This program is licensed
- * "as is" without any warranty of any kind, whether express or implied.
- */
-
-#include <linux/config.h>
-#include <linux/types.h>
-#include <asm/uaccess.h>
-#include <asm/mpc52xx.h>
-#include <asm/mpc52xx_psc.h>
-#include <asm/serial.h>
-#include <asm/time.h>
-
-#if MPC52xx_PF_CONSOLE_PORT == 0
-#define MPC52xx_CONSOLE MPC52xx_PSC1
-#define MPC52xx_PSC_CONFIG_SHIFT 0
-#elif MPC52xx_PF_CONSOLE_PORT == 1
-#define MPC52xx_CONSOLE MPC52xx_PSC2
-#define MPC52xx_PSC_CONFIG_SHIFT 4
-#elif MPC52xx_PF_CONSOLE_PORT == 2
-#define MPC52xx_CONSOLE MPC52xx_PSC3
-#define MPC52xx_PSC_CONFIG_SHIFT 8
-#else
-#error "MPC52xx_PF_CONSOLE_PORT not defined"
-#endif
-
-static struct mpc52xx_psc *psc = (struct mpc52xx_psc *)MPC52xx_CONSOLE;
-
-/* The decrementer counts at the system bus clock frequency
- * divided by four. The most accurate time base is connected to the
- * rtc. We read the decrementer change during one rtc tick (one second)
- * and multiply by 4 to get the system bus clock frequency.
- */
-int
-mpc52xx_ipbfreq(void)
-{
- struct mpc52xx_rtc *rtc = (struct mpc52xx_rtc*)MPC52xx_RTC;
- struct mpc52xx_cdm *cdm = (struct mpc52xx_cdm*)MPC52xx_CDM;
- int current_time, previous_time;
- int tbl_start, tbl_end;
- int xlbfreq, ipbfreq;
-
- out_be32(&rtc->dividers, 0x8f1f0000); /* Set RTC 64x faster */
- previous_time = in_be32(&rtc->time);
- while ((current_time = in_be32(&rtc->time)) == previous_time) ;
- tbl_start = get_tbl();
- previous_time = current_time;
- while ((current_time = in_be32(&rtc->time)) == previous_time) ;
- tbl_end = get_tbl();
- out_be32(&rtc->dividers, 0xffff0000); /* Restore RTC */
-
- xlbfreq = (tbl_end - tbl_start) << 8;
- ipbfreq = (in_8(&cdm->ipb_clk_sel) & 1) ? xlbfreq / 2 : xlbfreq;
-
- return ipbfreq;
-}
-
-unsigned long
-serial_init(int ignored, void *ignored2)
-{
- struct mpc52xx_gpio *gpio = (struct mpc52xx_gpio *)MPC52xx_GPIO;
- int divisor;
- int mode1;
- int mode2;
- u32 val32;
-
- static int been_here = 0;
-
- if (been_here)
- return 0;
-
- been_here = 1;
-
- val32 = in_be32(&gpio->port_config);
- val32 &= ~(0x7 << MPC52xx_PSC_CONFIG_SHIFT);
- val32 |= MPC52xx_GPIO_PSC_CONFIG_UART_WITHOUT_CD
- << MPC52xx_PSC_CONFIG_SHIFT;
- out_be32(&gpio->port_config, val32);
-
- out_8(&psc->command, MPC52xx_PSC_RST_TX
- | MPC52xx_PSC_RX_DISABLE | MPC52xx_PSC_TX_ENABLE);
- out_8(&psc->command, MPC52xx_PSC_RST_RX);
-
- out_be32(&psc->sicr, 0x0);
- out_be16(&psc->mpc52xx_psc_clock_select, 0xdd00);
- out_be16(&psc->tfalarm, 0xf8);
-
- out_8(&psc->command, MPC52xx_PSC_SEL_MODE_REG_1
- | MPC52xx_PSC_RX_ENABLE
- | MPC52xx_PSC_TX_ENABLE);
-
- divisor = ((mpc52xx_ipbfreq()
- / (CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD * 16)) + 1) >> 1;
-
- mode1 = MPC52xx_PSC_MODE_8_BITS | MPC52xx_PSC_MODE_PARNONE
- | MPC52xx_PSC_MODE_ERR;
- mode2 = MPC52xx_PSC_MODE_ONE_STOP;
-
- out_8(&psc->ctur, divisor>>8);
- out_8(&psc->ctlr, divisor);
- out_8(&psc->command, MPC52xx_PSC_SEL_MODE_REG_1);
- out_8(&psc->mode, mode1);
- out_8(&psc->mode, mode2);
-
- return 0; /* ignored */
-}
-
-void
-serial_putc(void *ignored, const char c)
-{
- serial_init(0, 0);
-
- while (!(in_be16(&psc->mpc52xx_psc_status) & MPC52xx_PSC_SR_TXEMP)) ;
- out_8(&psc->mpc52xx_psc_buffer_8, c);
- while (!(in_be16(&psc->mpc52xx_psc_status) & MPC52xx_PSC_SR_TXEMP)) ;
-}
-
-char
-serial_getc(void *ignored)
-{
- while (!(in_be16(&psc->mpc52xx_psc_status) & MPC52xx_PSC_SR_RXRDY)) ;
-
- return in_8(&psc->mpc52xx_psc_buffer_8);
-}
-
-int
-serial_tstc(void *ignored)
-{
- return (in_be16(&psc->mpc52xx_psc_status) & MPC52xx_PSC_SR_RXRDY) != 0;
-}
#include <sys/stat.h>
#include <unistd.h>
#include <netinet/in.h>
-#include <stdint.h>
/* This gets tacked on the front of the image. There are also a few
* bytes allocated after the _start label used by the boot rom (see
* head.S for details).
*/
typedef struct boot_block {
- uint32_t bb_magic; /* 0x0052504F */
- uint32_t bb_dest; /* Target address of the image */
- uint32_t bb_num_512blocks; /* Size, rounded-up, in 512 byte blks */
- uint32_t bb_debug_flag; /* Run debugger or image after load */
- uint32_t bb_entry_point; /* The image address to start */
- uint32_t bb_checksum; /* 32 bit checksum including header */
- uint32_t reserved[2];
+ unsigned long bb_magic; /* 0x0052504F */
+ unsigned long bb_dest; /* Target address of the image */
+ unsigned long bb_num_512blocks; /* Size, rounded-up, in 512 byte blks */
+ unsigned long bb_debug_flag; /* Run debugger or image after load */
+ unsigned long bb_entry_point; /* The image address to start */
+ unsigned long bb_checksum; /* 32 bit checksum including header */
+ unsigned long reserved[2];
} boot_block_t;
#define IMGBLK 512
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=4
CONFIG_SERIAL_8250_EXTENDED=y
-# CONFIG_SERIAL_8250_MANY_PORTS is not set
+CONFIG_SERIAL_8250_MANY_PORTS=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
# CONFIG_SERIAL_8250_DETECT_IRQ is not set
# CONFIG_SERIAL_8250_MULTIPORT is not set
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
+++ /dev/null
-#
-# Automatically generated make config: don't edit
-#
-CONFIG_MMU=y
-CONFIG_RWSEM_XCHGADD_ALGORITHM=y
-CONFIG_HAVE_DEC_LOCK=y
-CONFIG_PPC=y
-CONFIG_PPC32=y
-CONFIG_GENERIC_NVRAM=y
-#
-# Code maturity level options
-#
-CONFIG_EXPERIMENTAL=y
-CONFIG_CLEAN_COMPILE=y
-CONFIG_STANDALONE=y
-CONFIG_BROKEN_ON_SMP=y
-#
-# General setup
-#
-CONFIG_SWAP=y
-CONFIG_SYSVIPC=y
-# CONFIG_BSD_PROCESS_ACCT is not set
-CONFIG_SYSCTL=y
-# CONFIG_AUDIT is not set
-CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_HOTPLUG is not set
-# CONFIG_IKCONFIG is not set
-# CONFIG_EMBEDDED is not set
-CONFIG_KALLSYMS=y
-# CONFIG_KALLSYMS_ALL is not set
-# CONFIG_KALLSYMS_EXTRA_PASS is not set
-CONFIG_FUTEX=y
-CONFIG_EPOLL=y
-CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_AS=y
-CONFIG_IOSCHED_DEADLINE=y
-CONFIG_IOSCHED_CFQ=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-#
-# Loadable module support
-#
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_MODULE_FORCE_UNLOAD is not set
-CONFIG_OBSOLETE_MODPARM=y
-CONFIG_MODVERSIONS=y
-CONFIG_KMOD=y
-#
-# Processor
-#
-CONFIG_6xx=y
-# CONFIG_40x is not set
-# CONFIG_44x is not set
-# CONFIG_POWER3 is not set
-# CONFIG_POWER4 is not set
-# CONFIG_8xx is not set
-# CONFIG_E500 is not set
-# CONFIG_ALTIVEC is not set
-# CONFIG_TAU is not set
-# CONFIG_CPU_FREQ is not set
-CONFIG_FSL_OCP=y
-CONFIG_PPC_STD_MMU=y
-#
-# Platform options
-#
-# CONFIG_PPC_MULTIPLATFORM is not set
-# CONFIG_APUS is not set
-# CONFIG_WILLOW is not set
-# CONFIG_PCORE is not set
-# CONFIG_POWERPMC250 is not set
-# CONFIG_EV64260 is not set
-# CONFIG_SPRUCE is not set
-# CONFIG_LOPEC is not set
-# CONFIG_MCPN765 is not set
-# CONFIG_MVME5100 is not set
-# CONFIG_PPLUS is not set
-# CONFIG_PRPMC750 is not set
-# CONFIG_PRPMC800 is not set
-# CONFIG_SANDPOINT is not set
-# CONFIG_ADIR is not set
-# CONFIG_K2 is not set
-# CONFIG_PAL4 is not set
-# CONFIG_GEMINI is not set
-# CONFIG_EST8260 is not set
-# CONFIG_SBC82xx is not set
-# CONFIG_SBS8260 is not set
-# CONFIG_RPX6 is not set
-# CONFIG_TQM8260 is not set
-# CONFIG_ADS8272 is not set
-CONFIG_LITE5200=y
-CONFIG_PPC_MPC52xx=y
-# CONFIG_SMP is not set
-# CONFIG_PREEMPT is not set
-# CONFIG_HIGHMEM is not set
-CONFIG_KERNEL_ELF=y
-CONFIG_BINFMT_ELF=y
-# CONFIG_BINFMT_MISC is not set
-CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE="console=ttyS0 root=/dev/ram0 rw"
-#
-# Bus options
-#
-CONFIG_GENERIC_ISA_DMA=y
-CONFIG_PCI=y
-CONFIG_PCI_DOMAINS=y
-# CONFIG_PCI_LEGACY_PROC is not set
-# CONFIG_PCI_NAMES is not set
-#
-# Advanced setup
-#
-CONFIG_ADVANCED_OPTIONS=y
-CONFIG_HIGHMEM_START=0xfe000000
-# CONFIG_LOWMEM_SIZE_BOOL is not set
-CONFIG_LOWMEM_SIZE=0x30000000
-# CONFIG_KERNEL_START_BOOL is not set
-CONFIG_KERNEL_START=0xc0000000
-# CONFIG_TASK_SIZE_BOOL is not set
-CONFIG_TASK_SIZE=0x80000000
-# CONFIG_BOOT_LOAD_BOOL is not set
-CONFIG_BOOT_LOAD=0x00800000
-#
-# Device Drivers
-#
-#
-# Generic Driver Options
-#
-CONFIG_PREVENT_FIRMWARE_BUILD=y
-# CONFIG_DEBUG_DRIVER is not set
-#
-# Memory Technology Devices (MTD)
-#
-# CONFIG_MTD is not set
-#
-# Parallel port support
-#
-# CONFIG_PARPORT is not set
-#
-# Plug and Play support
-#
-#
-# Block devices
-#
-# CONFIG_BLK_DEV_FD is not set
-# CONFIG_BLK_CPQ_DA is not set
-# CONFIG_BLK_CPQ_CISS_DA is not set
-# CONFIG_BLK_DEV_DAC960 is not set
-# CONFIG_BLK_DEV_UMEM is not set
-# CONFIG_BLK_DEV_LOOP is not set
-# CONFIG_BLK_DEV_SX8 is not set
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=4096
-CONFIG_BLK_DEV_INITRD=y
-# CONFIG_LBD is not set
-#
-# ATA/ATAPI/MFM/RLL support
-#
-# CONFIG_IDE is not set
-#
-# SCSI device support
-#
-# CONFIG_SCSI is not set
-#
-# Multi-device support (RAID and LVM)
-#
-# CONFIG_MD is not set
-#
-# Fusion MPT device support
-#
-#
-# IEEE 1394 (FireWire) support
-#
-# CONFIG_IEEE1394 is not set
-#
-# I2O device support
-#
-# CONFIG_I2O is not set
-#
-# Macintosh device drivers
-#
-#
-# Networking support
-#
-# CONFIG_NET is not set
-# CONFIG_NETPOLL is not set
-# CONFIG_NET_POLL_CONTROLLER is not set
-#
-# ISDN subsystem
-#
-#
-# Telephony Support
-#
-# CONFIG_PHONE is not set
-#
-# Input device support
-#
-CONFIG_INPUT=y
-#
-# Userland interfaces
-#
-CONFIG_INPUT_MOUSEDEV=y
-CONFIG_INPUT_MOUSEDEV_PSAUX=y
-CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
-CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
-# CONFIG_INPUT_JOYDEV is not set
-# CONFIG_INPUT_TSDEV is not set
-CONFIG_INPUT_EVDEV=y
-CONFIG_INPUT_EVBUG=y
-#
-# Input I/O drivers
-#
-# CONFIG_GAMEPORT is not set
-CONFIG_SOUND_GAMEPORT=y
-CONFIG_SERIO=y
-# CONFIG_SERIO_I8042 is not set
-CONFIG_SERIO_SERPORT=y
-# CONFIG_SERIO_CT82C710 is not set
-# CONFIG_SERIO_PCIPS2 is not set
-#
-# Input Device Drivers
-#
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_INPUT_JOYSTICK is not set
-# CONFIG_INPUT_TOUCHSCREEN is not set
-# CONFIG_INPUT_MISC is not set
-#
-# Character devices
-#
-CONFIG_VT=y
-CONFIG_VT_CONSOLE=y
-CONFIG_HW_CONSOLE=y
-# CONFIG_SERIAL_NONSTANDARD is not set
-#
-# Serial drivers
-#
-# CONFIG_SERIAL_8250 is not set
-#
-# Non-8250 serial port support
-#
-CONFIG_SERIAL_CORE=y
-CONFIG_SERIAL_CORE_CONSOLE=y
-CONFIG_SERIAL_MPC52xx=y
-CONFIG_SERIAL_MPC52xx_CONSOLE=y
-CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD=9600
-CONFIG_UNIX98_PTYS=y
-CONFIG_LEGACY_PTYS=y
-CONFIG_LEGACY_PTY_COUNT=256
-# CONFIG_QIC02_TAPE is not set
-#
-# IPMI
-#
-# CONFIG_IPMI_HANDLER is not set
-#
-# Watchdog Cards
-#
-# CONFIG_WATCHDOG is not set
-# CONFIG_NVRAM is not set
-# CONFIG_GEN_RTC is not set
-# CONFIG_DTLK is not set
-# CONFIG_R3964 is not set
-# CONFIG_APPLICOM is not set
-#
-# Ftape, the floppy tape device driver
-#
-# CONFIG_FTAPE is not set
-# CONFIG_AGP is not set
-# CONFIG_DRM is not set
-# CONFIG_RAW_DRIVER is not set
-#
-# I2C support
-#
-# CONFIG_I2C is not set
-#
-# Misc devices
-#
-#
-# Multimedia devices
-#
-# CONFIG_VIDEO_DEV is not set
-#
-# Digital Video Broadcasting Devices
-#
-#
-# Graphics support
-#
-# CONFIG_FB is not set
-#
-# Console display driver support
-#
-CONFIG_VGA_CONSOLE=y
-# CONFIG_MDA_CONSOLE is not set
-CONFIG_DUMMY_CONSOLE=y
-#
-# Sound
-#
-# CONFIG_SOUND is not set
-#
-# USB support
-#
-# CONFIG_USB is not set
-#
-# USB Gadget Support
-#
-# CONFIG_USB_GADGET is not set
-#
-# File systems
-#
-CONFIG_EXT2_FS=y
-# CONFIG_EXT2_FS_XATTR is not set
-# CONFIG_EXT3_FS is not set
-# CONFIG_JBD is not set
-# CONFIG_REISERFS_FS is not set
-# CONFIG_JFS_FS is not set
-# CONFIG_XFS_FS is not set
-# CONFIG_MINIX_FS is not set
-# CONFIG_ROMFS_FS is not set
-# CONFIG_QUOTA is not set
-# CONFIG_AUTOFS_FS is not set
-# CONFIG_AUTOFS4_FS is not set
-#
-# CD-ROM/DVD Filesystems
-#
-# CONFIG_ISO9660_FS is not set
-# CONFIG_UDF_FS is not set
-#
-# DOS/FAT/NT Filesystems
-#
-# CONFIG_FAT_FS is not set
-# CONFIG_NTFS_FS is not set
-#
-# Pseudo filesystems
-#
-CONFIG_PROC_FS=y
-CONFIG_PROC_KCORE=y
-CONFIG_SYSFS=y
-# CONFIG_DEVFS_FS is not set
-# CONFIG_DEVPTS_FS_XATTR is not set
-CONFIG_TMPFS=y
-# CONFIG_HUGETLB_PAGE is not set
-CONFIG_RAMFS=y
-#
-# Miscellaneous filesystems
-#
-# CONFIG_ADFS_FS is not set
-# CONFIG_AFFS_FS is not set
-# CONFIG_HFS_FS is not set
-# CONFIG_HFSPLUS_FS is not set
-# CONFIG_BEFS_FS is not set
-# CONFIG_BFS_FS is not set
-# CONFIG_EFS_FS is not set
-# CONFIG_CRAMFS is not set
-# CONFIG_VXFS_FS is not set
-# CONFIG_HPFS_FS is not set
-# CONFIG_QNX4FS_FS is not set
-# CONFIG_SYSV_FS is not set
-# CONFIG_UFS_FS is not set
-#
-# Partition Types
-#
-# CONFIG_PARTITION_ADVANCED is not set
-CONFIG_MSDOS_PARTITION=y
-#
-# Native Language Support
-#
-CONFIG_NLS=y
-CONFIG_NLS_DEFAULT="iso8859-1"
-# CONFIG_NLS_CODEPAGE_437 is not set
-# CONFIG_NLS_CODEPAGE_737 is not set
-# CONFIG_NLS_CODEPAGE_775 is not set
-# CONFIG_NLS_CODEPAGE_850 is not set
-# CONFIG_NLS_CODEPAGE_852 is not set
-# CONFIG_NLS_CODEPAGE_855 is not set
-# CONFIG_NLS_CODEPAGE_857 is not set
-# CONFIG_NLS_CODEPAGE_860 is not set
-# CONFIG_NLS_CODEPAGE_861 is not set
-# CONFIG_NLS_CODEPAGE_862 is not set
-# CONFIG_NLS_CODEPAGE_863 is not set
-# CONFIG_NLS_CODEPAGE_864 is not set
-# CONFIG_NLS_CODEPAGE_865 is not set
-# CONFIG_NLS_CODEPAGE_866 is not set
-# CONFIG_NLS_CODEPAGE_869 is not set
-# CONFIG_NLS_CODEPAGE_936 is not set
-# CONFIG_NLS_CODEPAGE_950 is not set
-# CONFIG_NLS_CODEPAGE_932 is not set
-# CONFIG_NLS_CODEPAGE_949 is not set
-# CONFIG_NLS_CODEPAGE_874 is not set
-# CONFIG_NLS_ISO8859_8 is not set
-# CONFIG_NLS_CODEPAGE_1250 is not set
-# CONFIG_NLS_CODEPAGE_1251 is not set
-# CONFIG_NLS_ASCII is not set
-CONFIG_NLS_ISO8859_1=m
-# CONFIG_NLS_ISO8859_2 is not set
-# CONFIG_NLS_ISO8859_3 is not set
-# CONFIG_NLS_ISO8859_4 is not set
-# CONFIG_NLS_ISO8859_5 is not set
-# CONFIG_NLS_ISO8859_6 is not set
-# CONFIG_NLS_ISO8859_7 is not set
-# CONFIG_NLS_ISO8859_9 is not set
-# CONFIG_NLS_ISO8859_13 is not set
-# CONFIG_NLS_ISO8859_14 is not set
-# CONFIG_NLS_ISO8859_15 is not set
-# CONFIG_NLS_KOI8_R is not set
-# CONFIG_NLS_KOI8_U is not set
-# CONFIG_NLS_UTF8 is not set
-#
-# Library routines
-#
-# CONFIG_CRC16 is not set
-# CONFIG_CRC32 is not set
-# CONFIG_LIBCRC32C is not set
-#
-# Profiling support
-#
-# CONFIG_PROFILING is not set
-#
-# Kernel hacking
-#
-CONFIG_DEBUG_KERNEL=y
-# CONFIG_DEBUG_SLAB is not set
-CONFIG_MAGIC_SYSRQ=y
-# CONFIG_DEBUG_SPINLOCK is not set
-CONFIG_DEBUG_SPINLOCK_SLEEP=y
-# CONFIG_KGDB is not set
-# CONFIG_XMON is not set
-# CONFIG_BDI_SWITCH is not set
-CONFIG_DEBUG_INFO=y
-CONFIG_SERIAL_TEXT_DEBUG=y
-CONFIG_PPC_OCP=y
-#
-# Security options
-#
-# CONFIG_SECURITY is not set
-#
-# Cryptographic options
-#
-# CONFIG_CRYPTO is not set
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=4
CONFIG_SERIAL_8250_EXTENDED=y
-# CONFIG_SERIAL_8250_MANY_PORTS is not set
+CONFIG_SERIAL_8250_MANY_PORTS=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
# CONFIG_SERIAL_8250_DETECT_IRQ is not set
# CONFIG_SERIAL_8250_MULTIPORT is not set
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
+++ /dev/null
-#
-# Automatically generated make config: don't edit
-#
-CONFIG_MMU=y
-CONFIG_RWSEM_XCHGADD_ALGORITHM=y
-CONFIG_HAVE_DEC_LOCK=y
-CONFIG_PPC=y
-CONFIG_PPC32=y
-CONFIG_GENERIC_NVRAM=y
-
-#
-# Code maturity level options
-#
-CONFIG_EXPERIMENTAL=y
-CONFIG_CLEAN_COMPILE=y
-CONFIG_STANDALONE=y
-CONFIG_BROKEN_ON_SMP=y
-
-#
-# General setup
-#
-# CONFIG_SWAP is not set
-CONFIG_SYSVIPC=y
-# CONFIG_POSIX_MQUEUE is not set
-# CONFIG_BSD_PROCESS_ACCT is not set
-CONFIG_SYSCTL=y
-# CONFIG_AUDIT is not set
-CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_HOTPLUG is not set
-# CONFIG_IKCONFIG is not set
-CONFIG_EMBEDDED=y
-# CONFIG_KALLSYMS is not set
-CONFIG_FUTEX=y
-# CONFIG_EPOLL is not set
-CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_AS=y
-CONFIG_IOSCHED_DEADLINE=y
-CONFIG_IOSCHED_CFQ=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-
-#
-# Loadable module support
-#
-# CONFIG_MODULES is not set
-
-#
-# Processor
-#
-CONFIG_6xx=y
-# CONFIG_40x is not set
-# CONFIG_44x is not set
-# CONFIG_POWER3 is not set
-# CONFIG_POWER4 is not set
-# CONFIG_8xx is not set
-# CONFIG_E500 is not set
-# CONFIG_CPU_FREQ is not set
-CONFIG_EMBEDDEDBOOT=y
-CONFIG_PPC_STD_MMU=y
-
-#
-# Platform options
-#
-# CONFIG_PPC_MULTIPLATFORM is not set
-# CONFIG_APUS is not set
-# CONFIG_WILLOW is not set
-# CONFIG_PCORE is not set
-# CONFIG_POWERPMC250 is not set
-# CONFIG_EV64260 is not set
-# CONFIG_SPRUCE is not set
-# CONFIG_LOPEC is not set
-# CONFIG_MCPN765 is not set
-# CONFIG_MVME5100 is not set
-# CONFIG_PPLUS is not set
-# CONFIG_PRPMC750 is not set
-# CONFIG_PRPMC800 is not set
-# CONFIG_SANDPOINT is not set
-# CONFIG_ADIR is not set
-# CONFIG_K2 is not set
-# CONFIG_PAL4 is not set
-# CONFIG_GEMINI is not set
-# CONFIG_EST8260 is not set
-# CONFIG_SBC82xx is not set
-# CONFIG_SBS8260 is not set
-CONFIG_RPX8260=y
-# CONFIG_TQM8260 is not set
-# CONFIG_ADS8272 is not set
-CONFIG_8260=y
-CONFIG_CPM2=y
-# CONFIG_PC_KEYBOARD is not set
-# CONFIG_SMP is not set
-# CONFIG_PREEMPT is not set
-# CONFIG_HIGHMEM is not set
-CONFIG_KERNEL_ELF=y
-CONFIG_BINFMT_ELF=y
-# CONFIG_BINFMT_MISC is not set
-# CONFIG_CMDLINE_BOOL is not set
-
-#
-# Bus options
-#
-# CONFIG_PCI is not set
-# CONFIG_PCI_DOMAINS is not set
-
-#
-# Advanced setup
-#
-# CONFIG_ADVANCED_OPTIONS is not set
-
-#
-# Default settings for advanced configuration options are used
-#
-CONFIG_HIGHMEM_START=0xfe000000
-CONFIG_LOWMEM_SIZE=0x30000000
-CONFIG_KERNEL_START=0xc0000000
-CONFIG_TASK_SIZE=0x80000000
-CONFIG_BOOT_LOAD=0x00400000
-
-#
-# Device Drivers
-#
-
-#
-# Generic Driver Options
-#
-CONFIG_PREVENT_FIRMWARE_BUILD=y
-
-#
-# Memory Technology Devices (MTD)
-#
-# CONFIG_MTD is not set
-
-#
-# Parallel port support
-#
-# CONFIG_PARPORT is not set
-
-#
-# Plug and Play support
-#
-
-#
-# Block devices
-#
-# CONFIG_BLK_DEV_FD is not set
-CONFIG_BLK_DEV_LOOP=y
-# CONFIG_BLK_DEV_CRYPTOLOOP is not set
-# CONFIG_BLK_DEV_NBD is not set
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=4096
-CONFIG_BLK_DEV_INITRD=y
-# CONFIG_LBD is not set
-
-#
-# ATA/ATAPI/MFM/RLL support
-#
-# CONFIG_IDE is not set
-
-#
-# SCSI device support
-#
-# CONFIG_SCSI is not set
-
-#
-# Multi-device support (RAID and LVM)
-#
-# CONFIG_MD is not set
-
-#
-# Fusion MPT device support
-#
-
-#
-# IEEE 1394 (FireWire) support
-#
-# CONFIG_IEEE1394 is not set
-
-#
-# I2O device support
-#
-
-#
-# Macintosh device drivers
-#
-
-#
-# Networking support
-#
-CONFIG_NET=y
-
-#
-# Networking options
-#
-CONFIG_PACKET=y
-# CONFIG_PACKET_MMAP is not set
-# CONFIG_NETLINK_DEV is not set
-CONFIG_UNIX=y
-# CONFIG_NET_KEY is not set
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-# CONFIG_IP_ADVANCED_ROUTER is not set
-CONFIG_IP_PNP=y
-# CONFIG_IP_PNP_DHCP is not set
-CONFIG_IP_PNP_BOOTP=y
-# CONFIG_IP_PNP_RARP is not set
-# CONFIG_NET_IPIP is not set
-# CONFIG_NET_IPGRE is not set
-# CONFIG_IP_MROUTE is not set
-# CONFIG_ARPD is not set
-CONFIG_SYN_COOKIES=y
-# CONFIG_INET_AH is not set
-# CONFIG_INET_ESP is not set
-# CONFIG_INET_IPCOMP is not set
-# CONFIG_IPV6 is not set
-# CONFIG_NETFILTER is not set
-
-#
-# SCTP Configuration (EXPERIMENTAL)
-#
-# CONFIG_IP_SCTP is not set
-# CONFIG_ATM is not set
-# CONFIG_BRIDGE is not set
-# CONFIG_VLAN_8021Q is not set
-# CONFIG_DECNET is not set
-# CONFIG_LLC2 is not set
-# CONFIG_IPX is not set
-# CONFIG_ATALK is not set
-# CONFIG_X25 is not set
-# CONFIG_LAPB is not set
-# CONFIG_NET_DIVERT is not set
-# CONFIG_ECONET is not set
-# CONFIG_WAN_ROUTER is not set
-# CONFIG_NET_HW_FLOWCONTROL is not set
-
-#
-# QoS and/or fair queueing
-#
-# CONFIG_NET_SCHED is not set
-# CONFIG_NET_CLS_ROUTE is not set
-
-#
-# Network testing
-#
-# CONFIG_NET_PKTGEN is not set
-# CONFIG_NETPOLL is not set
-# CONFIG_NET_POLL_CONTROLLER is not set
-# CONFIG_HAMRADIO is not set
-# CONFIG_IRDA is not set
-# CONFIG_BT is not set
-CONFIG_NETDEVICES=y
-# CONFIG_DUMMY is not set
-# CONFIG_BONDING is not set
-# CONFIG_EQUALIZER is not set
-# CONFIG_TUN is not set
-
-#
-# Ethernet (10 or 100Mbit)
-#
-CONFIG_NET_ETHERNET=y
-# CONFIG_MII is not set
-# CONFIG_OAKNET is not set
-
-#
-# Ethernet (1000 Mbit)
-#
-
-#
-# Ethernet (10000 Mbit)
-#
-
-#
-# Token Ring devices
-#
-
-#
-# Wireless LAN (non-hamradio)
-#
-# CONFIG_NET_RADIO is not set
-
-#
-# Wan interfaces
-#
-# CONFIG_WAN is not set
-# CONFIG_PPP is not set
-# CONFIG_SLIP is not set
-# CONFIG_SHAPER is not set
-# CONFIG_NETCONSOLE is not set
-
-#
-# ISDN subsystem
-#
-# CONFIG_ISDN is not set
-
-#
-# Telephony Support
-#
-# CONFIG_PHONE is not set
-
-#
-# Input device support
-#
-# CONFIG_INPUT is not set
-
-#
-# Userland interfaces
-#
-
-#
-# Input I/O drivers
-#
-# CONFIG_GAMEPORT is not set
-CONFIG_SOUND_GAMEPORT=y
-# CONFIG_SERIO is not set
-# CONFIG_SERIO_I8042 is not set
-
-#
-# Input Device Drivers
-#
-
-#
-# Character devices
-#
-# CONFIG_VT is not set
-# CONFIG_SERIAL_NONSTANDARD is not set
-
-#
-# Serial drivers
-#
-# CONFIG_SERIAL_8250 is not set
-
-#
-# Non-8250 serial port support
-#
-CONFIG_SERIAL_CORE=y
-CONFIG_SERIAL_CORE_CONSOLE=y
-CONFIG_SERIAL_CPM=y
-CONFIG_SERIAL_CPM_CONSOLE=y
-# CONFIG_SERIAL_CPM_SCC1 is not set
-# CONFIG_SERIAL_CPM_SCC2 is not set
-# CONFIG_SERIAL_CPM_SCC3 is not set
-# CONFIG_SERIAL_CPM_SCC4 is not set
-CONFIG_SERIAL_CPM_SMC1=y
-# CONFIG_SERIAL_CPM_SMC2 is not set
-CONFIG_UNIX98_PTYS=y
-CONFIG_LEGACY_PTYS=y
-CONFIG_LEGACY_PTY_COUNT=256
-# CONFIG_QIC02_TAPE is not set
-
-#
-# IPMI
-#
-# CONFIG_IPMI_HANDLER is not set
-
-#
-# Watchdog Cards
-#
-# CONFIG_WATCHDOG is not set
-# CONFIG_NVRAM is not set
-# CONFIG_GEN_RTC is not set
-# CONFIG_DTLK is not set
-# CONFIG_R3964 is not set
-# CONFIG_APPLICOM is not set
-
-#
-# Ftape, the floppy tape device driver
-#
-# CONFIG_FTAPE is not set
-# CONFIG_AGP is not set
-# CONFIG_DRM is not set
-# CONFIG_RAW_DRIVER is not set
-
-#
-# I2C support
-#
-# CONFIG_I2C is not set
-
-#
-# Dallas's 1-wire bus
-#
-# CONFIG_W1 is not set
-
-#
-# Misc devices
-#
-
-#
-# Multimedia devices
-#
-# CONFIG_VIDEO_DEV is not set
-
-#
-# Digital Video Broadcasting Devices
-#
-# CONFIG_DVB is not set
-
-#
-# Graphics support
-#
-# CONFIG_FB is not set
-
-#
-# Sound
-#
-# CONFIG_SOUND is not set
-
-#
-# USB support
-#
-
-#
-# USB Gadget Support
-#
-# CONFIG_USB_GADGET is not set
-
-#
-# File systems
-#
-CONFIG_EXT2_FS=y
-# CONFIG_EXT2_FS_XATTR is not set
-CONFIG_EXT3_FS=y
-CONFIG_EXT3_FS_XATTR=y
-# CONFIG_EXT3_FS_POSIX_ACL is not set
-# CONFIG_EXT3_FS_SECURITY is not set
-CONFIG_JBD=y
-# CONFIG_JBD_DEBUG is not set
-CONFIG_FS_MBCACHE=y
-# CONFIG_REISERFS_FS is not set
-# CONFIG_JFS_FS is not set
-# CONFIG_XFS_FS is not set
-# CONFIG_MINIX_FS is not set
-# CONFIG_ROMFS_FS is not set
-# CONFIG_QUOTA is not set
-# CONFIG_AUTOFS_FS is not set
-# CONFIG_AUTOFS4_FS is not set
-
-#
-# CD-ROM/DVD Filesystems
-#
-# CONFIG_ISO9660_FS is not set
-# CONFIG_UDF_FS is not set
-
-#
-# DOS/FAT/NT Filesystems
-#
-# CONFIG_MSDOS_FS is not set
-# CONFIG_VFAT_FS is not set
-# CONFIG_NTFS_FS is not set
-
-#
-# Pseudo filesystems
-#
-CONFIG_PROC_FS=y
-CONFIG_PROC_KCORE=y
-CONFIG_SYSFS=y
-# CONFIG_DEVFS_FS is not set
-# CONFIG_DEVPTS_FS_XATTR is not set
-CONFIG_TMPFS=y
-# CONFIG_HUGETLB_PAGE is not set
-CONFIG_RAMFS=y
-
-#
-# Miscellaneous filesystems
-#
-# CONFIG_ADFS_FS is not set
-# CONFIG_AFFS_FS is not set
-# CONFIG_HFS_FS is not set
-# CONFIG_HFSPLUS_FS is not set
-# CONFIG_BEFS_FS is not set
-# CONFIG_BFS_FS is not set
-# CONFIG_EFS_FS is not set
-# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
-# CONFIG_CRAMFS is not set
-# CONFIG_VXFS_FS is not set
-# CONFIG_HPFS_FS is not set
-# CONFIG_QNX4FS_FS is not set
-# CONFIG_SYSV_FS is not set
-# CONFIG_UFS_FS is not set
-
-#
-# Network File Systems
-#
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-# CONFIG_NFS_V4 is not set
-# CONFIG_NFS_DIRECTIO is not set
-# CONFIG_NFSD is not set
-CONFIG_ROOT_NFS=y
-CONFIG_LOCKD=y
-CONFIG_LOCKD_V4=y
-# CONFIG_EXPORTFS is not set
-CONFIG_SUNRPC=y
-# CONFIG_RPCSEC_GSS_KRB5 is not set
-# CONFIG_SMB_FS is not set
-# CONFIG_CIFS is not set
-# CONFIG_NCP_FS is not set
-# CONFIG_CODA_FS is not set
-# CONFIG_AFS_FS is not set
-
-#
-# Partition Types
-#
-CONFIG_PARTITION_ADVANCED=y
-# CONFIG_ACORN_PARTITION is not set
-# CONFIG_OSF_PARTITION is not set
-# CONFIG_AMIGA_PARTITION is not set
-# CONFIG_ATARI_PARTITION is not set
-# CONFIG_MAC_PARTITION is not set
-# CONFIG_MSDOS_PARTITION is not set
-# CONFIG_LDM_PARTITION is not set
-# CONFIG_SGI_PARTITION is not set
-# CONFIG_ULTRIX_PARTITION is not set
-# CONFIG_SUN_PARTITION is not set
-# CONFIG_EFI_PARTITION is not set
-
-#
-# Native Language Support
-#
-# CONFIG_NLS is not set
-# CONFIG_SCC_ENET is not set
-CONFIG_FEC_ENET=y
-# CONFIG_USE_MDIO is not set
-
-#
-# CPM2 Options
-#
-# CONFIG_FCC1_ENET is not set
-# CONFIG_FCC2_ENET is not set
-CONFIG_FCC3_ENET=y
-
-#
-# Library routines
-#
-# CONFIG_CRC_CCITT is not set
-# CONFIG_CRC32 is not set
-# CONFIG_LIBCRC32C is not set
-
-#
-# Profiling support
-#
-# CONFIG_PROFILING is not set
-
-#
-# Kernel hacking
-#
-# CONFIG_DEBUG_KERNEL is not set
-# CONFIG_KGDB_CONSOLE is not set
-
-#
-# Security options
-#
-# CONFIG_SECURITY is not set
-
-#
-# Cryptographic options
-#
-# CONFIG_CRYPTO is not set
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# Input Device Drivers
#
CONFIG_INPUT_KEYBOARD=y
-CONFIG_KEYBOARD_ATKBD=y
+# CONFIG_KEYBOARD_ATKBD is not set
# CONFIG_KEYBOARD_SUNKBD is not set
# CONFIG_KEYBOARD_LKKBD is not set
# CONFIG_KEYBOARD_XTKBD is not set
#
# Non-8250 serial port support
#
-CONFIG_SERIAL_CORE=y
-CONFIG_SERIAL_PMACZILOG=y
+# CONFIG_SERIAL_CORE is not set
+# CONFIG_SERIAL_PMACZILOG is not set
# CONFIG_SERIAL_PMACZILOG_CONSOLE is not set
CONFIG_UNIX98_PTYS=y
CONFIG_LEGACY_PTYS=y
# Makefile for the linux kernel.
#
+ifdef CONFIG_PPC64BRIDGE
+EXTRA_AFLAGS := -Wa,-mppc64bridge
+endif
+ifdef CONFIG_4xx
+EXTRA_AFLAGS := -Wa,-m405
+endif
+ifdef CONFIG_E500
+EXTRA_AFLAGS := -Wa,-me500
+endif
+
extra-$(CONFIG_PPC_STD_MMU) := head.o
extra-$(CONFIG_40x) := head_4xx.o
extra-$(CONFIG_44x) := head_44x.o
/* All of the bits we have to set.....
*/
- ori r11,r11,HID0_SGE | HID0_FOLD | HID0_BHTE | HID0_LRSTK
-BEGIN_FTR_SECTION
- ori r11,r11,HID0_BTIC
-END_FTR_SECTION_IFCLR(CPU_FTR_NO_BTIC)
+ ori r11,r11,HID0_SGE | HID0_FOLD | HID0_BHTE | HID0_BTIC | HID0_LRSTK
BEGIN_FTR_SECTION
oris r11,r11,HID0_DPM@h /* enable dynamic power mgmt */
END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM)
#endif
/* We need to mark all pages as being coherent if we're SMP or we
- * have a 754x and an MPC107 host bridge.
- */
+ * have a 754x and an MPC107 host bridge. */
#if defined(CONFIG_SMP) || defined(CONFIG_MPC10X_BRIDGE)
#define CPU_FTR_COMMON CPU_FTR_NEED_COHERENT
#else
CPU_FTR_COMMON |
CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
- CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | CPU_FTR_NEED_COHERENT,
+ CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450,
COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
32, 32,
__setup_cpu_745x
CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_CAN_NAP |
CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR |
- CPU_FTR_L3_DISABLE_NAP | CPU_FTR_NEED_COHERENT,
+ CPU_FTR_L3_DISABLE_NAP,
COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
32, 32,
__setup_cpu_745x
CPU_FTR_COMMON |
CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_CAN_NAP |
CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
- CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR |
- CPU_FTR_NEED_COHERENT,
+ CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR,
COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
32, 32,
__setup_cpu_745x
CPU_FTR_COMMON |
CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
- CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | CPU_FTR_HAS_HIGH_BATS |
- CPU_FTR_NEED_COHERENT,
+ CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | CPU_FTR_HAS_HIGH_BATS,
COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
32, 32,
__setup_cpu_745x
CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_CAN_NAP |
CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR |
- CPU_FTR_L3_DISABLE_NAP | CPU_FTR_NEED_COHERENT | CPU_FTR_HAS_HIGH_BATS,
+ CPU_FTR_L3_DISABLE_NAP | CPU_FTR_HAS_HIGH_BATS,
COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
32, 32,
__setup_cpu_745x
CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_CAN_NAP |
CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR |
- CPU_FTR_HAS_HIGH_BATS | CPU_FTR_NEED_COHERENT,
- COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
- 32, 32,
- __setup_cpu_745x
- },
- { /* 7447/7457 Rev 1.0 */
- 0xffffffff, 0x80020100, "7447/7457",
- CPU_FTR_COMMON |
- CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_CAN_NAP |
- CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
- CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR |
- CPU_FTR_HAS_HIGH_BATS | CPU_FTR_NEED_COHERENT | CPU_FTR_NO_BTIC,
- COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
- 32, 32,
- __setup_cpu_745x
- },
- { /* 7447/7457 Rev 1.1 */
- 0xffffffff, 0x80020101, "7447/7457",
- CPU_FTR_COMMON |
- CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_CAN_NAP |
- CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
- CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR |
- CPU_FTR_HAS_HIGH_BATS | CPU_FTR_NEED_COHERENT | CPU_FTR_NO_BTIC,
+ CPU_FTR_HAS_HIGH_BATS,
COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
32, 32,
__setup_cpu_745x
},
- { /* 7447/7457 Rev 1.2 and later */
- 0xffff0000, 0x80020000, "7447/7457",
+ { /* 7457 */
+ 0xffff0000, 0x80020000, "7457",
CPU_FTR_COMMON |
CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_CAN_NAP |
CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR |
- CPU_FTR_HAS_HIGH_BATS | CPU_FTR_NEED_COHERENT,
+ CPU_FTR_HAS_HIGH_BATS,
COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
32, 32,
__setup_cpu_745x
CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_CAN_NAP |
CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP |
CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR |
- CPU_FTR_HAS_HIGH_BATS | CPU_FTR_NEED_COHERENT,
+ CPU_FTR_HAS_HIGH_BATS,
COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
32, 32,
__setup_cpu_745x
32, 32,
__setup_cpu_603
},
- { /* All G2_LE (603e core, plus some) have the same pvr */
- 0x7fff0000, 0x00820000, "G2_LE",
+ { /* 8280 is a G2_LE (603e core, plus some) */
+ 0x7fff0000, 0x00820000, "8280",
CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_CAN_DOZE | CPU_FTR_USE_TB |
CPU_FTR_CAN_NAP | CPU_FTR_HAS_HIGH_BATS,
COMMON_PPC,
no_page:
return NULL;
}
-EXPORT_SYMBOL(__dma_alloc_coherent);
/*
* free a page as defined by the above mapping.
__func__, vaddr);
dump_stack();
}
-EXPORT_SYMBOL(__dma_free_coherent);
+EXPORT_SYMBOL(dma_free_coherent);
/*
* Initialise the consistent memory allocation.
rlwimi r3,r3,32-1,31,31 /* _PAGE_USER -> PP lsb */
ori r1,r1,0xe14 /* clear out reserved bits and M */
andc r1,r3,r1 /* PP = user? (rw&dirty? 2: 3): 0 */
- mtspr SPRN_RPA,r1
+ mtspr RPA,r1
mfspr r3,IMISS
tlbli r3
mfspr r3,SRR1 /* Need to restore CR0 */
rlwimi r3,r3,32-1,31,31 /* _PAGE_USER -> PP lsb */
ori r1,r1,0xe14 /* clear out reserved bits and M */
andc r1,r3,r1 /* PP = user? (rw&dirty? 2: 3): 0 */
- mtspr SPRN_RPA,r1
+ mtspr RPA,r1
mfspr r3,DMISS
tlbld r3
mfspr r3,SRR1 /* Need to restore CR0 */
rlwimi r3,r3,32-1,30,30 /* _PAGE_USER -> PP msb */
li r1,0xe15 /* clear out reserved bits and M */
andc r1,r3,r1 /* PP = user? 2: 0 */
- mtspr SPRN_RPA,r1
+ mtspr RPA,r1
mfspr r3,DMISS
tlbld r3
mfspr r3,SRR1 /* Need to restore CR0 */
tlbwe r4,r0,PPC44x_TLB_XLAT /* Load the translation fields */
tlbwe r5,r0,PPC44x_TLB_ATTRIB /* Load the attrib/access fields */
+ ori r3,r3,PPC44x_TLB_TS /* Translation state 1 */
+
+ li r0,1 /* TLB slot 1 */
+
+ tlbwe r3,r0,PPC44x_TLB_PAGEID /* Load the pageid fields */
+ tlbwe r4,r0,PPC44x_TLB_XLAT /* Load the translation fields */
+ tlbwe r5,r0,PPC44x_TLB_ATTRIB /* Load the attrib/access fields */
+
/* Force context change */
isync
#endif /* CONFIG_SERIAL_TEXT_DEBUG */
evmwumiaa evr6, evr6, evr6 /* evr6 <- ACC = 0 * 0 + ACC */
li r4,THREAD_ACC
evstddx evr6, r4, r3 /* save off accumulator */
- mfspr r6,SPRN_SPEFSCR
- stw r6,THREAD_SPEFSCR(r3) /* save spefscr register value */
beq 1f
lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
lis r3,MSR_SPE@h
cache_bitmask |= (1<<i);
return (void *)(&malloc_cache[i]);
}
- return NULL;
+ return 0;
}
void irq_kfree(void *ptr)
if (!shared) {
desc->depth = 0;
desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT | IRQ_WAITING);
- if (desc->handler) {
- if (desc->handler->startup)
- desc->handler->startup(irq);
- else if (desc->handler->enable)
- desc->handler->enable(irq);
- }
+ unmask_irq(irq);
}
spin_unlock_irqrestore(&desc->lock,flags);
int i;
/* create /proc/irq */
- root_irq_dir = proc_mkdir("irq", NULL);
+ root_irq_dir = proc_mkdir("irq", 0);
/* create /proc/irq/prof_cpu_mask */
entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);
.long sys_mq_notify
.long sys_mq_getsetattr
.long sys_ni_syscall /* 268 reserved for sys_kexec_load */
- .long sys_ioprio_set
- .long sys_ioprio_get
--- /dev/null
+/*
+ * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
+ *
+ *
+ * Dynamic DMA mapping support.
+ *
+ * swiped from i386
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/pci.h>
+#include <asm/io.h>
+
+void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
+ dma_addr_t *dma_handle)
+{
+ void *ret;
+ int gfp = GFP_ATOMIC;
+
+ if (hwdev == NULL || hwdev->dma_mask != 0xffffffff)
+ gfp |= GFP_DMA;
+
+#ifdef CONFIG_NOT_COHERENT_CACHE
+ ret = consistent_alloc(gfp, size, dma_handle);
+#else
+ ret = (void *)__get_free_pages(gfp, get_order(size));
+#endif
+
+ if (ret != NULL) {
+ memset(ret, 0, size);
+#ifndef CONFIG_NOT_COHERENT_CACHE
+ *dma_handle = virt_to_bus(ret);
+#endif
+ }
+ return ret;
+}
+
+void pci_free_consistent(struct pci_dev *hwdev, size_t size,
+ void *vaddr, dma_addr_t dma_handle)
+{
+#ifdef CONFIG_NOT_COHERENT_CACHE
+ consistent_free(vaddr);
+#else
+ free_pages((unsigned long)vaddr, get_order(size));
+#endif
+}
struct pci_dev* dev;
unsigned int *class_code, *reg;
- class_code = (unsigned int *) get_property(node, "class-code", NULL);
+ class_code = (unsigned int *) get_property(node, "class-code", 0);
if (!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
(*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS))
continue;
- reg = (unsigned int *)get_property(node, "reg", NULL);
+ reg = (unsigned int *)get_property(node, "reg", 0);
if (!reg)
continue;
dev = pci_find_slot(pci_bus, ((reg[0] >> 8) & 0xff));
continue;
make_one_node_map(node, hose->first_busno);
}
- of_prop_map = get_property(find_path_device("/"), "pci-OF-bus-map", NULL);
+ of_prop_map = get_property(find_path_device("/"), "pci-OF-bus-map", 0);
if (of_prop_map)
memcpy(of_prop_map, pci_to_OF_bus_map, pci_bus_count);
#ifdef DEBUG
* a fake root for all functions of a multi-function device,
* we go down them as well.
*/
- class_code = (unsigned int *) get_property(node, "class-code", NULL);
+ class_code = (unsigned int *) get_property(node, "class-code", 0);
if ((!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
(*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS)) &&
strcmp(node->name, "multifunc-device"))
unsigned int *reg;
u8* fdata = (u8*)data;
- reg = (unsigned int *) get_property(node, "reg", NULL);
+ reg = (unsigned int *) get_property(node, "reg", 0);
if (reg && ((reg[0] >> 8) & 0xff) == fdata[1]
&& ((reg[0] >> 16) & 0xff) == fdata[0])
return 1;
if (!scan_OF_pci_childs(((struct device_node*)hose->arch_data)->child,
find_OF_pci_device_filter, (void *)node))
return -ENODEV;
- reg = (unsigned int *) get_property(node, "reg", NULL);
+ reg = (unsigned int *) get_property(node, "reg", 0);
if (!reg)
return -ENODEV;
*bus = (reg[0] >> 16) & 0xff;
} else {
/* error condition */
}
- debugger_fault_handler = NULL;
+ debugger_fault_handler = 0;
*buf = 0;
return buf;
}
} else {
/* error condition */
}
- debugger_fault_handler = NULL;
+ debugger_fault_handler = 0;
return mem;
}
} else {
/* error condition */
}
- debugger_fault_handler = NULL;
+ debugger_fault_handler = 0;
return (numChars);
}
#include <linux/ctype.h>
#include <linux/threads.h>
#include <linux/smp_lock.h>
-#include <linux/seq_file.h>
#include <asm/uaccess.h>
#include <asm/bitops.h>
#include <asm/system.h>
#include <asm/reg.h>
-static int ppc_htab_show(struct seq_file *m, void *v);
+static ssize_t ppc_htab_read(struct file * file, char __user * buf,
+ size_t count, loff_t *ppos);
static ssize_t ppc_htab_write(struct file * file, const char __user * buffer,
size_t count, loff_t *ppos);
+static long long ppc_htab_lseek(struct file * file, loff_t offset, int orig);
int proc_dol2crvec(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos);
+ void __user *buffer, size_t *lenp);
extern PTE *Hash, *Hash_end;
extern unsigned long Hash_size, Hash_mask;
extern unsigned int primary_pteg_full;
extern unsigned int htab_hash_searches;
-static int ppc_htab_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ppc_htab_show, NULL);
-}
-
struct file_operations ppc_htab_operations = {
- .open = ppc_htab_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .write = ppc_htab_write,
- .release = single_release,
+ .llseek = ppc_htab_lseek,
+ .read = ppc_htab_read,
+ .write = ppc_htab_write,
};
static char *pmc1_lookup(unsigned long mmcr0)
* is _REALLY_ slow (see the nested for loops below) but nothing
* in here should be really timing critical. -- Cort
*/
-static int ppc_htab_show(struct seq_file *m, void *v)
+static ssize_t ppc_htab_read(struct file * file, char __user * buf,
+ size_t count, loff_t *ppos)
{
unsigned long mmcr0 = 0, pmc1 = 0, pmc2 = 0;
+ int n = 0;
#if defined(CONFIG_PPC_STD_MMU) && !defined(CONFIG_PPC64BRIDGE)
unsigned int kptes = 0, uptes = 0;
PTE *ptr;
#endif /* CONFIG_PPC_STD_MMU */
+ char buffer[512];
+
+ if (count < 0)
+ return -EINVAL;
if (cur_cpu_spec[0]->cpu_features & CPU_FTR_604_PERF_MON) {
mmcr0 = mfspr(SPRN_MMCR0);
pmc1 = mfspr(SPRN_PMC1);
pmc2 = mfspr(SPRN_PMC2);
- seq_printf(m,
+ n += sprintf( buffer + n,
"604 Performance Monitoring\n"
"MMCR0\t\t: %08lx %s%s ",
mmcr0,
( mmcr0>>28 & 0x2 ) ? "(user mode counted)" : "",
( mmcr0>>28 & 0x4 ) ? "(kernel mode counted)" : "");
- seq_printf(m,
+ n += sprintf( buffer + n,
"\nPMC1\t\t: %08lx (%s)\n"
"PMC2\t\t: %08lx (%s)\n",
pmc1, pmc1_lookup(mmcr0),
#ifdef CONFIG_PPC_STD_MMU
/* if we don't have a htab */
- if ( Hash_size == 0 ) {
- seq_printf(m, "No Hash Table used\n");
- return 0;
+ if ( Hash_size == 0 )
+ {
+ n += sprintf( buffer + n, "No Hash Table used\n");
+ goto return_string;
}
#ifndef CONFIG_PPC64BRIDGE
}
#endif
- seq_printf(m,
+ n += sprintf( buffer + n,
"PTE Hash Table Information\n"
"Size\t\t: %luKb\n"
"Buckets\t\t: %lu\n"
#endif
);
- seq_printf(m,
+ n += sprintf( buffer + n,
"Reloads\t\t: %lu\n"
"Preloads\t: %lu\n"
"Searches\t: %u\n"
"Evicts\t\t: %lu\n",
htab_reloads, htab_preloads, htab_hash_searches,
primary_pteg_full, htab_evicts);
+return_string:
#endif /* CONFIG_PPC_STD_MMU */
- seq_printf(m,
+ n += sprintf( buffer + n,
"Non-error misses: %lu\n"
"Error misses\t: %lu\n",
pte_misses, pte_errors);
- return 0;
+ if (*ppos >= strlen(buffer))
+ return 0;
+ if (n > strlen(buffer) - *ppos)
+ n = strlen(buffer) - *ppos;
+ if (n > count)
+ n = count;
+ if (copy_to_user(buf, buffer + *ppos, n))
+ return -EFAULT;
+ *ppos += n;
+ return n;
}
/*
unsigned long tmp;
char buffer[16];
- if (!capable(CAP_SYS_ADMIN))
+ if ( current->uid != 0 )
return -EACCES;
if (strncpy_from_user(buffer, ubuffer, 15))
return -EFAULT;
#endif /* CONFIG_PPC_STD_MMU */
}
+
+static long long
+ppc_htab_lseek(struct file * file, loff_t offset, int orig)
+{
+ long long ret = -EINVAL;
+
+ lock_kernel();
+ switch (orig) {
+ case 0:
+ file->f_pos = offset;
+ ret = file->f_pos;
+ break;
+ case 1:
+ file->f_pos += offset;
+ ret = file->f_pos;
+ }
+ unlock_kernel();
+ return ret;
+}
+
int proc_dol2crvec(ctl_table *table, int write, struct file *filp,
- void __user *buffer_arg, size_t *lenp, loff_t *ppos)
+ void __user *buffer_arg, size_t *lenp)
{
int vleft, first=1, len, left, val;
char __user *buffer = (char __user *) buffer_arg;
if (!(cur_cpu_spec[0]->cpu_features & CPU_FTR_L2CR))
return -EFAULT;
- if ( /*!table->maxlen ||*/ (*ppos && !write)) {
+ if ( /*!table->maxlen ||*/ (filp->f_pos && !write)) {
*lenp = 0;
return 0;
}
}
if (!write && !first && left) {
- if(put_user('\n', (char __user *) buffer))
+ if(put_user('\n', (char *) buffer))
return -EFAULT;
left--, buffer++;
}
if (write) {
- char __user *s = (char __user *) buffer;
+ p = (char *) buffer;
while (left) {
char c;
- if(get_user(c, s++))
+ if(get_user(c, p++))
return -EFAULT;
if (!isspace(c))
break;
if (write && first)
return -EINVAL;
*lenp -= left;
- *ppos += *lenp;
+ filp->f_pos += *lenp;
return 0;
}
regs->gpr[1] = sp;
regs->msr = MSR_USER;
if (last_task_used_math == current)
- last_task_used_math = NULL;
+ last_task_used_math = 0;
if (last_task_used_altivec == current)
- last_task_used_altivec = NULL;
+ last_task_used_altivec = 0;
memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
current->thread.fpscr = 0;
#ifdef CONFIG_ALTIVEC
#endif
else
val = __unpack_fe01(tsk->thread.fpexc_mode);
- return put_user(val, (unsigned int __user *) adr);
+ return put_user(val, (unsigned int *) adr);
}
int sys_clone(unsigned long clone_flags, unsigned long usp,
/*
* Get contents of AltiVec register state in task TASK
*/
-static inline int get_vrregs(unsigned long __user *data, struct task_struct *task)
+static inline int get_vrregs(unsigned long *data, struct task_struct *task)
{
int i, j;
/*
* Write contents of AltiVec register state into task TASK.
*/
-static inline int set_vrregs(struct task_struct *task, unsigned long __user *data)
+static inline int set_vrregs(struct task_struct *task, unsigned long *data)
{
int i, j;
ret = -EIO;
if (copied != sizeof(tmp))
break;
- ret = put_user(tmp,(unsigned long __user *) data);
+ ret = put_user(tmp,(unsigned long *) data);
break;
}
preempt_enable();
tmp = ((unsigned long *)child->thread.fpr)[index - PT_FPR0];
}
- ret = put_user(tmp,(unsigned long __user *) data);
+ ret = put_user(tmp,(unsigned long *) data);
break;
}
if (child->thread.regs->msr & MSR_VEC)
giveup_altivec(child);
preempt_enable();
- ret = get_vrregs((unsigned long __user *)data, child);
+ ret = get_vrregs((unsigned long *)data, child);
break;
case PTRACE_SETVRREGS:
if (child->thread.regs->msr & MSR_VEC)
giveup_altivec(child);
preempt_enable();
- ret = set_vrregs(child, (unsigned long __user *)data);
+ ret = set_vrregs(child, (unsigned long *)data);
break;
#endif
#ifdef CONFIG_SPE
/* Get the child spe register state. */
if (child->thread.regs->msr & MSR_SPE)
giveup_spe(child);
- ret = get_evrregs((unsigned long __user *)data, child);
+ ret = get_evrregs((unsigned long *)data, child);
break;
case PTRACE_SETEVRREGS:
* of register state from memory */
if (child->thread.regs->msr & MSR_SPE)
giveup_spe(child);
- ret = set_evrregs(child, (unsigned long __user *)data);
+ ret = set_evrregs(child, (unsigned long *)data);
break;
#endif
}
__setup("l2cr=", ppc_setup_l2cr);
-#ifdef CONFIG_GENERIC_NVRAM
+#ifdef CONFIG_NVRAM
/* Generic nvram hooks used by drivers/char/gen_nvram.c */
unsigned char nvram_read_byte(int addr)
#ifdef CONFIG_XMON
xmon_map_scc();
if (strstr(cmd_line, "xmon"))
- xmon(NULL);
+ xmon(0);
#endif /* CONFIG_XMON */
if ( ppc_md.progress ) ppc_md.progress("setup_arch: enter", 0x3eab);
* altivec/spe instructions at some point.
*/
static int
-save_user_regs(struct pt_regs *regs, struct mcontext __user *frame, int sigret)
+save_user_regs(struct pt_regs *regs, struct mcontext *frame, int sigret)
{
/* save general and floating-point registers */
CHECK_FULL_REGS(regs);
* significant bits of a vector, we "cheat" and stuff VRSAVE in the
* most significant bits of that same vector. --BenH
*/
- if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32]))
+ if (__put_user(current->thread.vrsave, (u32 *)&frame->mc_vregs[32]))
return 1;
#endif /* CONFIG_ALTIVEC */
memset(¤t->thread.vr, 0, ELF_NVRREG * sizeof(vector128));
/* Always get VRSAVE back */
- if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
+ if (__get_user(current->thread.vrsave, (u32 *)&sr->mc_vregs[32]))
return 1;
#endif /* CONFIG_ALTIVEC */
static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig)
{
sigset_t set;
- struct mcontext __user *mcp;
+ struct mcontext *mcp;
if (__copy_from_user(&set, &ucp->uc_sigmask, sizeof(set))
|| __get_user(mcp, &ucp->uc_regs))
if (new_ctx == NULL)
return 0;
if (verify_area(VERIFY_READ, new_ctx, sizeof(*new_ctx))
- || __get_user(tmp, (u8 __user *) new_ctx)
- || __get_user(tmp, (u8 __user *) (new_ctx + 1) - 1))
+ || __get_user(tmp, (u8 *) new_ctx)
+ || __get_user(tmp, (u8 *) (new_ctx + 1) - 1))
return -EFAULT;
/*
/* create a stack frame for the caller of the handler */
newsp -= __SIGNAL_FRAMESIZE;
- if (verify_area(VERIFY_WRITE, (void __user *) newsp, origsp - newsp))
+ if (verify_area(VERIFY_WRITE, (void *) newsp, origsp - newsp))
goto badframe;
#if _NSIG != 64
set.sig[1] = sigctx._unused[3];
restore_sigmask(&set);
- sr = (struct mcontext __user *) sigctx.regs;
+ sr = (struct mcontext *) sigctx.regs;
if (verify_area(VERIFY_READ, sr, sizeof(*sr))
|| restore_user_regs(regs, sr, 1))
goto badframe;
#include <linux/utsname.h>
#include <linux/file.h>
#include <linux/unistd.h>
-#include <linux/vs_cvirt.h>
#include <asm/uaccess.h>
#include <asm/ipc.h>
break;
case SEMTIMEDOP:
ret = sys_semtimedop (first, (struct sembuf __user *)ptr,
- second, (const struct timespec __user *) fifth);
+ second, (const struct timespec *) fifth);
break;
case SEMGET:
ret = sys_semget (first, second, third);
if (!ptr)
break;
if ((ret = verify_area (VERIFY_READ, ptr, sizeof(long)))
- || (ret = get_user(fourth.__pad, (void __user *__user *)ptr)))
+ || (ret = get_user(fourth.__pad, (void *__user *)ptr)))
break;
ret = sys_semctl (first, second, third, fourth);
break;
* sys_select() with the appropriate args. -- Cort
*/
int
-ppc_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, struct timeval __user *tvp)
+ppc_select(int n, fd_set *inp, fd_set *outp, fd_set *exp, struct timeval *tvp)
{
if ( (unsigned long)n >= 4096 )
{
unsigned long __user *buffer = (unsigned long __user *)n;
if (verify_area(VERIFY_READ, buffer, 5*sizeof(unsigned long))
|| __get_user(n, buffer)
- || __get_user(inp, ((fd_set __user * __user *)(buffer+1)))
- || __get_user(outp, ((fd_set __user * __user *)(buffer+2)))
- || __get_user(exp, ((fd_set __user * __user *)(buffer+3)))
- || __get_user(tvp, ((struct timeval __user * __user *)(buffer+4))))
+ || __get_user(inp, ((fd_set **)(buffer+1)))
+ || __get_user(outp, ((fd_set **)(buffer+2)))
+ || __get_user(exp, ((fd_set **)(buffer+3)))
+ || __get_user(tvp, ((struct timeval **)(buffer+4))))
return -EFAULT;
}
return sys_select(n, inp, outp, exp, tvp);
info.si_signo = signr;
info.si_errno = 0;
info.si_code = code;
- info.si_addr = (void __user *) addr;
+ info.si_addr = (void *) addr;
force_sig_info(signr, &info, current);
}
unsigned int va, vb, vc, vd;
vector128 *vrs;
- if (get_user(instr, (unsigned int __user *) regs->nip))
+ if (get_user(instr, (unsigned int *) regs->nip))
return -EFAULT;
if ((instr >> 26) != 4)
return -EINVAL; /* not an altivec instruction */
/* Alignment must be a power of two */
if ((alignment & (alignment - 1)) != 0)
- return ERR_PTR(-EINVAL);
+ return NULL;
info = kmalloc(sizeof(*info), GFP_KERNEL);
if (info == NULL)
- return ERR_PTR(-ENOMEM);
+ return NULL;
info->alignment = alignment;
/* Validate size */
if (size <= 0)
- return ERR_PTR(-EINVAL);
+ return NULL;
/* The region must be aligned */
s = (unsigned long)start;
e = e & ~m;
if (assure_empty(info, 1) < 0)
- return ERR_PTR(-ENOMEM);
+ return NULL;
blk = NULL;
list_for_each(l, &info->free_list) {
}
if (blk == NULL)
- return ERR_PTR(-ENOMEM);
+ return NULL;
/* Perfect fit */
if (bs == s && be == e) {
/* Validate size */
if (size <= 0)
- return ERR_PTR(-EINVAL);
+ return NULL;
/* Align to configured alignment */
size = (size + (info->alignment - 1)) & ~(info->alignment - 1);
if (assure_empty(info, 1) < 0)
- return ERR_PTR(-ENOMEM);
+ return NULL;
blk = NULL;
list_for_each(l, &info->free_list) {
}
if (blk == NULL)
- return ERR_PTR(-ENOMEM);
+ return NULL;
/* Just fits */
if (blk->size == size) {
/* Validate size */
if (size <= 0)
- return ERR_PTR(-EINVAL);
+ return NULL;
/* The region must be aligned */
s = (unsigned long)start;
e = e & ~m;
if (assure_empty(info, 2) < 0)
- return ERR_PTR(-ENOMEM);
+ return NULL;
blk = NULL;
list_for_each(l, &info->free_list) {
}
if (blk == NULL)
- return ERR_PTR(-ENOMEM);
+ return NULL;
/* Perfect fit */
if (bs == s && be == e) {
}
/*
- * MMU_init_hw does the chip-specific initialization of the MMU hardware.
+ * Configure PPC44x TLB for AS0 exception processing.
*/
-void __init MMU_init_hw(void)
-{
- flush_instruction_cache();
-}
-
-unsigned long __init mmu_mapin_ram(void)
+static void __init
+ppc44x_tlb_config(void)
{
unsigned int pinned_tlbs = 1;
int i;
unsigned int phys_addr = (PPC44x_LOW_SLOT-i) * PPC44x_PIN_SIZE;
ppc44x_pin_tlb(i, phys_addr+PAGE_OFFSET, phys_addr);
}
+}
+
+/*
+ * MMU_init_hw does the chip-specific initialization of the MMU hardware.
+ */
+void __init MMU_init_hw(void)
+{
+ flush_instruction_cache();
+
+ ppc44x_tlb_config();
+}
+
+/* TODO: Add large page lowmem mapping support */
+unsigned long __init mmu_mapin_ram(void)
+{
+ unsigned long v, s, f = _PAGE_GUARDED;
+ phys_addr_t p;
+
+ v = KERNELBASE;
+ p = PPC_MEMSTART;
+
+ for (s = 0; s < total_lowmem; s += PAGE_SIZE) {
+ if ((char *) v >= _stext && (char *) v < etext)
+ f |= _PAGE_RAM_TEXT;
+ else
+ f |= _PAGE_RAM;
+ map_page(v, p, f);
+ v += PAGE_SIZE;
+ p += PAGE_SIZE;
+ }
+
+ if (ppc_md.progress)
+ ppc_md.progress("MMU:mmu_mapin_ram done", 0x401);
- return total_lowmem;
+ return s;
}
# Makefile for the linux ppc-specific parts of the memory manager.
#
+ifdef CONFIG_PPC64BRIDGE
+EXTRA_AFLAGS := -Wa,-mppc64bridge
+endif
+
obj-y := fault.o init.o mem_pieces.o \
mmu_context.o pgtable.o
{
unsigned int inst;
- if (get_user(inst, (unsigned int __user *)regs->nip))
+ if (get_user(inst, (unsigned int *)regs->nip))
return 0;
/* check for 1 in the rA field */
if (((inst >> 16) & 0x1f) != 1)
info.si_signo = SIGSEGV;
info.si_errno = 0;
info.si_code = code;
- info.si_addr = (void __user *) address;
+ info.si_addr = (void *) address;
force_sig_info(SIGSEGV, &info, current);
return 0;
}
info.si_signo = SIGBUS;
info.si_errno = 0;
info.si_code = BUS_ADRERR;
- info.si_addr = (void __user *)address;
+ info.si_addr = (void *)address;
force_sig_info (SIGBUS, &info, current);
if (!user_mode(regs))
return SIGBUS;
depends on ASH || BUBINGA || REDWOOD_5 || REDWOOD_6 || SYCAMORE || WALNUT
default y
-config PPC4xx_DMA
- bool "PPC4xx DMA controller support"
- depends on 4xx
-
-config PPC4xx_EDMA
- bool
- depends on !STB03xxx && PPC4xx_DMA
- default y
-
config PM
bool "Power Management support (EXPERIMENTAL)"
depends on 4xx && EXPERIMENTAL
#define UART0_IO_BASE (u8 *) 0xE0000200
#define UART1_IO_BASE (u8 *) 0xE0000300
-/* external Epson SG-615P */
-#define BASE_BAUD 691200
+#define BASE_BAUD 33000000/3/16
#define STD_UART_OP(num) \
{ 0, BASE_BAUD, 0, UART##num##_INT, \
default MPC8540_ADS
config MPC8540_ADS
- bool "Freescale MPC8540 ADS"
+ bool "MPC8540ADS"
help
This option enables support for the MPC 8540 ADS evaluation board.
-config MPC8555_CDS
- bool "Freescale MPC8555 CDS"
- help
- This option enablese support for the MPC8555 CDS evaluation board.
-
-config MPC8560_ADS
- bool "Freescale MPC8560 ADS"
- help
- This option enables support for the MPC 8560 ADS evaluation board.
-
config SBC8560
bool "WindRiver PowerQUICC III SBC8560"
help
depends on MPC8540_ADS
default y
-config MPC8555
- bool
- depends on MPC8555_CDS
- default y
-
config MPC8560
bool
- depends on SBC8560 || MPC8560_ADS
- default y
-
-config 85xx_PCI2
- bool "Supprt for 2nd PCI host controller"
- depends on MPC8555_CDS
+ depends on SBC8560
default y
config FSL_OCP
config PPC_GEN550
bool
- depends on MPC8540 || SBC8560 || MPC8555
+ depends on MPC8540 || SBC8560
default y
endmenu
#
obj-$(CONFIG_MPC8540_ADS) += mpc85xx_ads_common.o mpc8540_ads.o
-obj-$(CONFIG_MPC8555_CDS) += mpc85xx_cds_common.o
-obj-$(CONFIG_MPC8560_ADS) += mpc85xx_ads_common.o mpc8560_ads.o
obj-$(CONFIG_SBC8560) += sbc85xx.o sbc8560.o
obj-$(CONFIG_MPC8540) += mpc8540.o
-obj-$(CONFIG_MPC8555) += mpc8555.o
obj-$(CONFIG_MPC8560) += mpc8560.o
#include <linux/serial.h>
#include <linux/tty.h> /* for linux/serial_core.h */
#include <linux/serial_core.h>
-#include <linux/initrd.h>
#include <linux/module.h>
#include <asm/system.h>
#define __MACH_MPC8540ADS_H__
#include <linux/config.h>
+#include <linux/serial.h>
#include <linux/initrd.h>
#include <syslib/ppc85xx_setup.h>
#include <platforms/85xx/mpc85xx_ads_common.h>
+++ /dev/null
-/*
- * arch/ppc/platform/85xx/mpc8555.c
- *
- * MPC8555 I/O descriptions
- *
- * Maintainer: Kumar Gala <kumar.gala@freescale.com>
- *
- * Copyright 2004 Freescale Semiconductor Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <asm/mpc85xx.h>
-#include <asm/ocp.h>
-
-/* These should be defined in platform code */
-extern struct ocp_gfar_data mpc85xx_tsec1_def;
-extern struct ocp_gfar_data mpc85xx_tsec2_def;
-extern struct ocp_mpc_i2c_data mpc85xx_i2c1_def;
-
-/* We use offsets for paddr since we do not know at compile time
- * what CCSRBAR is, platform code should fix this up in
- * setup_arch
- *
- * Only the first IRQ is given even if a device has
- * multiple lines associated with ita
- */
-struct ocp_def core_ocp[] = {
- { .vendor = OCP_VENDOR_FREESCALE,
- .function = OCP_FUNC_IIC,
- .index = 0,
- .paddr = MPC85xx_IIC1_OFFSET,
- .irq = MPC85xx_IRQ_IIC1,
- .pm = OCP_CPM_NA,
- .additions = &mpc85xx_i2c1_def,
- },
- { .vendor = OCP_VENDOR_FREESCALE,
- .function = OCP_FUNC_16550,
- .index = 0,
- .paddr = MPC85xx_UART0_OFFSET,
- .irq = MPC85xx_IRQ_DUART,
- .pm = OCP_CPM_NA,
- },
- { .vendor = OCP_VENDOR_FREESCALE,
- .function = OCP_FUNC_16550,
- .index = 1,
- .paddr = MPC85xx_UART1_OFFSET,
- .irq = MPC85xx_IRQ_DUART,
- .pm = OCP_CPM_NA,
- },
- { .vendor = OCP_VENDOR_FREESCALE,
- .function = OCP_FUNC_GFAR,
- .index = 0,
- .paddr = MPC85xx_ENET1_OFFSET,
- .irq = MPC85xx_IRQ_TSEC1_TX,
- .pm = OCP_CPM_NA,
- .additions = &mpc85xx_tsec1_def,
- },
- { .vendor = OCP_VENDOR_FREESCALE,
- .function = OCP_FUNC_GFAR,
- .index = 1,
- .paddr = MPC85xx_ENET2_OFFSET,
- .irq = MPC85xx_IRQ_TSEC2_TX,
- .pm = OCP_CPM_NA,
- .additions = &mpc85xx_tsec2_def,
- },
- { .vendor = OCP_VENDOR_FREESCALE,
- .function = OCP_FUNC_DMA,
- .index = 0,
- .paddr = MPC85xx_DMA_OFFSET,
- .irq = MPC85xx_IRQ_DMA0,
- .pm = OCP_CPM_NA,
- },
- { .vendor = OCP_VENDOR_FREESCALE,
- .function = OCP_FUNC_PERFMON,
- .index = 0,
- .paddr = MPC85xx_PERFMON_OFFSET,
- .irq = MPC85xx_IRQ_PERFMON,
- .pm = OCP_CPM_NA,
- },
- { .vendor = OCP_VENDOR_INVALID
- }
-};
+++ /dev/null
-/*
- * arch/ppc/platforms/mpc8555_cds.h
- *
- * MPC8555CDS board definitions
- *
- * Maintainer: Kumar Gala <kumar.gala@freescale.com>
- *
- * Copyright 2004 Freescale Semiconductor Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- */
-
-#ifndef __MACH_MPC8555CDS_H__
-#define __MACH_MPC8555CDS_H__
-
-#include <linux/config.h>
-#include <linux/serial.h>
-#include <platforms/85xx/mpc85xx_cds_common.h>
-
-#define CPM_MAP_ADDR (CCSRBAR + MPC85xx_CPM_OFFSET)
-
-#endif /* __MACH_MPC8555CDS_H__ */
+++ /dev/null
-/*
- * arch/ppc/platforms/85xx/mpc8560_ads.c
- *
- * MPC8560ADS board specific routines
- *
- * Maintainer: Kumar Gala <kumar.gala@freescale.com>
- *
- * Copyright 2004 Freescale Semiconductor Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/config.h>
-#include <linux/stddef.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/reboot.h>
-#include <linux/pci.h>
-#include <linux/kdev_t.h>
-#include <linux/major.h>
-#include <linux/console.h>
-#include <linux/delay.h>
-#include <linux/irq.h>
-#include <linux/seq_file.h>
-#include <linux/root_dev.h>
-#include <linux/serial.h>
-#include <linux/tty.h> /* for linux/serial_core.h */
-#include <linux/serial_core.h>
-#include <linux/initrd.h>
-#include <linux/module.h>
-
-#include <asm/system.h>
-#include <asm/pgtable.h>
-#include <asm/page.h>
-#include <asm/atomic.h>
-#include <asm/time.h>
-#include <asm/io.h>
-#include <asm/machdep.h>
-#include <asm/prom.h>
-#include <asm/open_pic.h>
-#include <asm/bootinfo.h>
-#include <asm/pci-bridge.h>
-#include <asm/mpc85xx.h>
-#include <asm/irq.h>
-#include <asm/immap_85xx.h>
-#include <asm/kgdb.h>
-#include <asm/ocp.h>
-#include <asm/cpm2.h>
-#include <mm/mmu_decl.h>
-
-#include <syslib/cpm2_pic.h>
-#include <syslib/ppc85xx_common.h>
-#include <syslib/ppc85xx_setup.h>
-
-extern void cpm2_reset(void);
-
-struct ocp_gfar_data mpc85xx_tsec1_def = {
- .interruptTransmit = MPC85xx_IRQ_TSEC1_TX,
- .interruptError = MPC85xx_IRQ_TSEC1_ERROR,
- .interruptReceive = MPC85xx_IRQ_TSEC1_RX,
- .interruptPHY = MPC85xx_IRQ_EXT5,
- .flags = (GFAR_HAS_GIGABIT | GFAR_HAS_MULTI_INTR
- | GFAR_HAS_RMON | GFAR_HAS_COALESCE
- | GFAR_HAS_PHY_INTR),
- .phyid = 0,
- .phyregidx = 0,
-};
-
-struct ocp_gfar_data mpc85xx_tsec2_def = {
- .interruptTransmit = MPC85xx_IRQ_TSEC2_TX,
- .interruptError = MPC85xx_IRQ_TSEC2_ERROR,
- .interruptReceive = MPC85xx_IRQ_TSEC2_RX,
- .interruptPHY = MPC85xx_IRQ_EXT5,
- .flags = (GFAR_HAS_GIGABIT | GFAR_HAS_MULTI_INTR
- | GFAR_HAS_RMON | GFAR_HAS_COALESCE
- | GFAR_HAS_PHY_INTR),
- .phyid = 1,
- .phyregidx = 0,
-};
-
-struct ocp_fs_i2c_data mpc85xx_i2c1_def = {
- .flags = FS_I2C_SEPARATE_DFSRR,
-};
-
-/* ************************************************************************
- *
- * Setup the architecture
- *
- */
-
-static void __init
-mpc8560ads_setup_arch(void)
-{
- struct ocp_def *def;
- struct ocp_gfar_data *einfo;
- bd_t *binfo = (bd_t *) __res;
- unsigned int freq;
-
- cpm2_reset();
-
- /* get the core frequency */
- freq = binfo->bi_intfreq;
-
- if (ppc_md.progress)
- ppc_md.progress("mpc8560ads_setup_arch()", 0);
-
- /* Set loops_per_jiffy to a half-way reasonable value,
- for use until calibrate_delay gets called. */
- loops_per_jiffy = freq / HZ;
-
-#ifdef CONFIG_PCI
- /* setup PCI host bridges */
- mpc85xx_setup_hose();
-#endif
-
- def = ocp_get_one_device(OCP_VENDOR_FREESCALE, OCP_FUNC_GFAR, 0);
- if (def) {
- einfo = (struct ocp_gfar_data *) def->additions;
- memcpy(einfo->mac_addr, binfo->bi_enetaddr, 6);
- }
-
- def = ocp_get_one_device(OCP_VENDOR_FREESCALE, OCP_FUNC_GFAR, 1);
- if (def) {
- einfo = (struct ocp_gfar_data *) def->additions;
- memcpy(einfo->mac_addr, binfo->bi_enet1addr, 6);
- }
-
-#ifdef CONFIG_BLK_DEV_INITRD
- if (initrd_start)
- ROOT_DEV = Root_RAM0;
- else
-#endif
-#ifdef CONFIG_ROOT_NFS
- ROOT_DEV = Root_NFS;
-#else
- ROOT_DEV = Root_HDA1;
-#endif
-
- ocp_for_each_device(mpc85xx_update_paddr_ocp, &(binfo->bi_immr_base));
-}
-
-static irqreturn_t cpm2_cascade(int irq, void *dev_id, struct pt_regs *regs)
-{
- while ((irq = cpm2_get_irq(regs)) >= 0) {
- ppc_irq_dispatch_handler(regs, irq);
- }
- return IRQ_HANDLED;
-}
-
-static void __init
-mpc8560_ads_init_IRQ(void)
-{
- int i;
- volatile cpm2_map_t *immap = cpm2_immr;
-
- /* Setup OpenPIC */
- mpc85xx_ads_init_IRQ();
-
- /* disable all CPM interupts */
- immap->im_intctl.ic_simrh = 0x0;
- immap->im_intctl.ic_simrl = 0x0;
-
- for (i = CPM_IRQ_OFFSET; i < (NR_CPM_INTS + CPM_IRQ_OFFSET); i++)
- irq_desc[i].handler = &cpm2_pic;
-
- /* Initialize the default interrupt mapping priorities,
- * in case the boot rom changed something on us.
- */
- immap->im_intctl.ic_sicr = 0;
- immap->im_intctl.ic_scprrh = 0x05309770;
- immap->im_intctl.ic_scprrl = 0x05309770;
-
- request_irq(MPC85xx_IRQ_CPM, cpm2_cascade, SA_INTERRUPT, "cpm2_cascade", NULL);
-
- return;
-}
-
-
-
-/* ************************************************************************ */
-void __init
-platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
- unsigned long r6, unsigned long r7)
-{
- /* parse_bootinfo must always be called first */
- parse_bootinfo(find_bootinfo());
-
- /*
- * If we were passed in a board information, copy it into the
- * residual data area.
- */
- if (r3) {
- memcpy((void *) __res, (void *) (r3 + KERNELBASE),
- sizeof (bd_t));
-
- }
-#if defined(CONFIG_BLK_DEV_INITRD)
- /*
- * If the init RAM disk has been configured in, and there's a valid
- * starting address for it, set it up.
- */
- if (r4) {
- initrd_start = r4 + KERNELBASE;
- initrd_end = r5 + KERNELBASE;
- }
-#endif /* CONFIG_BLK_DEV_INITRD */
-
- /* Copy the kernel command line arguments to a safe place. */
-
- if (r6) {
- *(char *) (r7 + KERNELBASE) = 0;
- strcpy(cmd_line, (char *) (r6 + KERNELBASE));
- }
-
- /* setup the PowerPC module struct */
- ppc_md.setup_arch = mpc8560ads_setup_arch;
- ppc_md.show_cpuinfo = mpc85xx_ads_show_cpuinfo;
-
- ppc_md.init_IRQ = mpc8560_ads_init_IRQ;
- ppc_md.get_irq = openpic_get_irq;
-
- ppc_md.restart = mpc85xx_restart;
- ppc_md.power_off = mpc85xx_power_off;
- ppc_md.halt = mpc85xx_halt;
-
- ppc_md.find_end_of_memory = mpc85xx_find_end_of_memory;
-
- ppc_md.time_init = NULL;
- ppc_md.set_rtc_time = NULL;
- ppc_md.get_rtc_time = NULL;
- ppc_md.calibrate_decr = mpc85xx_calibrate_decr;
-
- if (ppc_md.progress)
- ppc_md.progress("mpc8560ads_init(): exit", 0);
-
- return;
-}
+++ /dev/null
-/*
- * arch/ppc/platforms/mpc8560_ads.h
- *
- * MPC8540ADS board definitions
- *
- * Maintainer: Kumar Gala <kumar.gala@freescale.com>
- *
- * Copyright 2004 Freescale Semiconductor Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- */
-
-#ifndef __MACH_MPC8560ADS_H
-#define __MACH_MPC8560ADS_H
-
-#include <linux/config.h>
-#include <syslib/ppc85xx_setup.h>
-#include <platforms/85xx/mpc85xx_ads_common.h>
-
-#define CPM_MAP_ADDR (CCSRBAR + MPC85xx_CPM_OFFSET)
-#define PHY_INTERRUPT MPC85xx_IRQ_EXT7
-
-#endif /* __MACH_MPC8560ADS_H */
+++ /dev/null
-/*
- * arch/ppc/platform/85xx/mpc85xx_cds_common.c
- *
- * MPC85xx CDS board specific routines
- *
- * Maintainer: Kumar Gala <kumar.gala@freescale.com>
- *
- * Copyright 2004 Freescale Semiconductor, Inc
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/config.h>
-#include <linux/stddef.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/reboot.h>
-#include <linux/pci.h>
-#include <linux/kdev_t.h>
-#include <linux/major.h>
-#include <linux/console.h>
-#include <linux/delay.h>
-#include <linux/irq.h>
-#include <linux/seq_file.h>
-#include <linux/serial.h>
-#include <linux/module.h>
-#include <linux/root_dev.h>
-#include <linux/initrd.h>
-#include <linux/tty.h>
-#include <linux/serial_core.h>
-
-#include <asm/system.h>
-#include <asm/pgtable.h>
-#include <asm/page.h>
-#include <asm/atomic.h>
-#include <asm/time.h>
-#include <asm/io.h>
-#include <asm/machdep.h>
-#include <asm/prom.h>
-#include <asm/open_pic.h>
-#include <asm/bootinfo.h>
-#include <asm/pci-bridge.h>
-#include <asm/mpc85xx.h>
-#include <asm/irq.h>
-#include <asm/immap_85xx.h>
-#include <asm/immap_cpm2.h>
-#include <asm/ocp.h>
-#include <asm/kgdb.h>
-
-#include <mm/mmu_decl.h>
-#include <syslib/cpm2_pic.h>
-#include <syslib/ppc85xx_common.h>
-#include <syslib/ppc85xx_setup.h>
-
-
-#ifndef CONFIG_PCI
-unsigned long isa_io_base = 0;
-unsigned long isa_mem_base = 0;
-#endif
-
-extern unsigned long total_memory; /* in mm/init */
-
-unsigned char __res[sizeof (bd_t)];
-
-static int cds_pci_slot = 2;
-static volatile u8 * cadmus;
-
-/* Internal interrupts are all Level Sensitive, and Positive Polarity */
-
-static u_char mpc85xx_cds_openpic_initsenses[] __initdata = {
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 0: L2 Cache */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 1: ECM */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 2: DDR DRAM */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 3: LBIU */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 4: DMA 0 */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 5: DMA 1 */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 6: DMA 2 */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 7: DMA 3 */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 8: PCI/PCI-X */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 9: RIO Inbound Port Write Error */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 10: RIO Doorbell Inbound */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 11: RIO Outbound Message */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 12: RIO Inbound Message */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 13: TSEC 0 Transmit */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 14: TSEC 0 Receive */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 15: Unused */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 16: Unused */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 17: Unused */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 18: TSEC 0 Receive/Transmit Error */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 19: TSEC 1 Transmit */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 20: TSEC 1 Receive */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 21: Unused */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 22: Unused */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 23: Unused */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 24: TSEC 1 Receive/Transmit Error */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 25: Fast Ethernet */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 26: DUART */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 27: I2C */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 28: Performance Monitor */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 29: Unused */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 30: CPM */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 31: Unused */
-#if defined(CONFIG_PCI)
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* External 0: PCI1 slot */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* External 1: PCI1 slot */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* External 2: PCI1 slot */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* External 3: PCI1 slot */
-#else
- 0x0, /* External 0: */
- 0x0, /* External 1: */
- 0x0, /* External 2: */
- 0x0, /* External 3: */
-#endif
- 0x0, /* External 4: */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* External 5: PHY */
- 0x0, /* External 6: */
- 0x0, /* External 7: */
- 0x0, /* External 8: */
- 0x0, /* External 9: */
- 0x0, /* External 10: */
-#if defined(CONFIG_85xx_PCI2) && defined(CONFIG_PCI)
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* External 11: PCI2 slot 0 */
-#else
- 0x0, /* External 11: */
-#endif
-};
-
-struct ocp_gfar_data mpc85xx_tsec1_def = {
- .interruptTransmit = MPC85xx_IRQ_TSEC1_TX,
- .interruptError = MPC85xx_IRQ_TSEC1_ERROR,
- .interruptReceive = MPC85xx_IRQ_TSEC1_RX,
- .interruptPHY = MPC85xx_IRQ_EXT5,
- .flags = (GFAR_HAS_GIGABIT | GFAR_HAS_MULTI_INTR |
- GFAR_HAS_PHY_INTR),
- .phyid = 0,
- .phyregidx = 0,
-};
-
-struct ocp_gfar_data mpc85xx_tsec2_def = {
- .interruptTransmit = MPC85xx_IRQ_TSEC2_TX,
- .interruptError = MPC85xx_IRQ_TSEC2_ERROR,
- .interruptReceive = MPC85xx_IRQ_TSEC2_RX,
- .interruptPHY = MPC85xx_IRQ_EXT5,
- .flags = (GFAR_HAS_GIGABIT | GFAR_HAS_MULTI_INTR |
- GFAR_HAS_PHY_INTR),
- .phyid = 1,
- .phyregidx = 0,
-};
-
-struct ocp_fs_i2c_data mpc85xx_i2c1_def = {
- .flags = FS_I2C_SEPARATE_DFSRR,
-};
-
-/* ************************************************************************ */
-int
-mpc85xx_cds_show_cpuinfo(struct seq_file *m)
-{
- uint pvid, svid, phid1;
- uint memsize = total_memory;
- bd_t *binfo = (bd_t *) __res;
- unsigned int freq;
-
- /* get the core frequency */
- freq = binfo->bi_intfreq;
-
- pvid = mfspr(PVR);
- svid = mfspr(SVR);
-
- seq_printf(m, "Vendor\t\t: Freescale Semiconductor\n");
- seq_printf(m, "Machine\t\t: CDS (%x)\n", cadmus[CM_VER]);
- seq_printf(m, "bus freq\t: %u.%.6u MHz\n", freq / 1000000,
- freq % 1000000);
- seq_printf(m, "PVR\t\t: 0x%x\n", pvid);
- seq_printf(m, "SVR\t\t: 0x%x\n", svid);
-
- /* Display cpu Pll setting */
- phid1 = mfspr(HID1);
- seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
-
- /* Display the amount of memory */
- seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024));
-
- return 0;
-}
-
-#ifdef CONFIG_CPM2
-static void cpm2_cascade(int irq, void *dev_id, struct pt_regs *regs)
-{
- while((irq = cpm2_get_irq(regs)) >= 0)
- {
- ppc_irq_dispatch_handler(regs,irq);
- }
-}
-#endif /* CONFIG_CPM2 */
-
-void __init
-mpc85xx_cds_init_IRQ(void)
-{
- bd_t *binfo = (bd_t *) __res;
-#ifdef CONFIG_CPM2
- volatile cpm2_map_t *immap = cpm2_immr;
- int i;
-#endif
-
- /* Determine the Physical Address of the OpenPIC regs */
- phys_addr_t OpenPIC_PAddr = binfo->bi_immr_base + MPC85xx_OPENPIC_OFFSET;
- OpenPIC_Addr = ioremap(OpenPIC_PAddr, MPC85xx_OPENPIC_SIZE);
- OpenPIC_InitSenses = mpc85xx_cds_openpic_initsenses;
- OpenPIC_NumInitSenses = sizeof (mpc85xx_cds_openpic_initsenses);
-
- /* Skip reserved space and internal sources */
- openpic_set_sources(0, 32, OpenPIC_Addr + 0x10200);
- /* Map PIC IRQs 0-11 */
- openpic_set_sources(32, 12, OpenPIC_Addr + 0x10000);
-
- /* we let openpic interrupts starting from an offset, to
- * leave space for cascading interrupts underneath.
- */
- openpic_init(MPC85xx_OPENPIC_IRQ_OFFSET);
-
-#ifdef CONFIG_CPM2
- /* disable all CPM interupts */
- immap->im_intctl.ic_simrh = 0x0;
- immap->im_intctl.ic_simrl = 0x0;
-
- for (i = CPM_IRQ_OFFSET; i < (NR_CPM_INTS + CPM_IRQ_OFFSET); i++)
- irq_desc[i].handler = &cpm2_pic;
-
- /* Initialize the default interrupt mapping priorities,
- * in case the boot rom changed something on us.
- */
- immap->im_intctl.ic_sicr = 0;
- immap->im_intctl.ic_scprrh = 0x05309770;
- immap->im_intctl.ic_scprrl = 0x05309770;
-
- request_irq(MPC85xx_IRQ_CPM, cpm2_cascade, SA_INTERRUPT, "cpm2_cascade", NULL);
-#endif
-
- return;
-}
-
-#ifdef CONFIG_PCI
-/*
- * interrupt routing
- */
-int
-mpc85xx_map_irq(struct pci_dev *dev, unsigned char idsel, unsigned char pin)
-{
- struct pci_controller *hose = pci_bus_to_hose(dev->bus->number);
-
- if (!hose->index)
- {
- /* Handle PCI1 interrupts */
- char pci_irq_table[][4] =
- /*
- * PCI IDSEL/INTPIN->INTLINE
- * A B C D
- */
-
- /* Note IRQ assignment for slots is based on which slot the elysium is
- * in -- in this setup elysium is in slot #2 (this PIRQA as first
- * interrupt on slot */
- {
- { 0, 1, 2, 3 }, /* 16 - PMC */
- { 3, 0, 0, 0 }, /* 17 P2P (Tsi320) */
- { 0, 1, 2, 3 }, /* 18 - Slot 1 */
- { 1, 2, 3, 0 }, /* 19 - Slot 2 */
- { 2, 3, 0, 1 }, /* 20 - Slot 3 */
- { 3, 0, 1, 2 }, /* 21 - Slot 4 */
- };
-
- const long min_idsel = 16, max_idsel = 21, irqs_per_slot = 4;
- int i, j;
-
- for (i = 0; i < 6; i++)
- for (j = 0; j < 4; j++)
- pci_irq_table[i][j] =
- ((pci_irq_table[i][j] + 5 -
- cds_pci_slot) & 0x3) + PIRQ0A;
-
- return PCI_IRQ_TABLE_LOOKUP;
- } else {
- /* Handle PCI2 interrupts (if we have one) */
- char pci_irq_table[][4] =
- {
- /*
- * We only have one slot and one interrupt
- * going to PIRQA - PIRQD */
- { PIRQ1A, PIRQ1A, PIRQ1A, PIRQ1A }, /* 21 - slot 0 */
- };
-
- const long min_idsel = 21, max_idsel = 21, irqs_per_slot = 4;
-
- return PCI_IRQ_TABLE_LOOKUP;
- }
-}
-
-#define ARCADIA_HOST_BRIDGE_IDSEL 17
-#define ARCADIA_2ND_BRIDGE_IDSEL 3
-
-int
-mpc85xx_exclude_device(u_char bus, u_char devfn)
-{
- if (bus == 0 && PCI_SLOT(devfn) == 0)
- return PCIBIOS_DEVICE_NOT_FOUND;
-#if CONFIG_85xx_PCI2
- /* With the current code we know PCI2 will be bus 2, however this may
- * not be guarnteed */
- if (bus == 2 && PCI_SLOT(devfn) == 0)
- return PCIBIOS_DEVICE_NOT_FOUND;
-#endif
- /* We explicitly do not go past the Tundra 320 Bridge */
- if (bus == 1)
- return PCIBIOS_DEVICE_NOT_FOUND;
- if ((bus == 0) && (PCI_SLOT(devfn) == ARCADIA_2ND_BRIDGE_IDSEL))
- return PCIBIOS_DEVICE_NOT_FOUND;
- else
- return PCIBIOS_SUCCESSFUL;
-}
-#endif /* CONFIG_PCI */
-
-/* ************************************************************************
- *
- * Setup the architecture
- *
- */
-static void __init
-mpc85xx_cds_setup_arch(void)
-{
- struct ocp_def *def;
- struct ocp_gfar_data *einfo;
- bd_t *binfo = (bd_t *) __res;
- unsigned int freq;
-
- /* get the core frequency */
- freq = binfo->bi_intfreq;
-
- printk("mpc85xx_cds_setup_arch\n");
-
-#ifdef CONFIG_CPM2
- cpm2_reset();
-#endif
-
- cadmus = ioremap(CADMUS_BASE, CADMUS_SIZE);
- cds_pci_slot = ((cadmus[CM_CSR] >> 6) & 0x3) + 1;
- printk("CDS Version = %x in PCI slot %d\n", cadmus[CM_VER], cds_pci_slot);
-
- /* Set loops_per_jiffy to a half-way reasonable value,
- for use until calibrate_delay gets called. */
- loops_per_jiffy = freq / HZ;
-
-#ifdef CONFIG_PCI
- /* setup PCI host bridges */
- mpc85xx_setup_hose();
-#endif
-
-#ifdef CONFIG_DUMMY_CONSOLE
- conswitchp = &dummy_con;
-#endif
-
-#ifdef CONFIG_SERIAL_8250
- mpc85xx_early_serial_map();
-#endif
-
-#ifdef CONFIG_SERIAL_TEXT_DEBUG
- /* Invalidate the entry we stole earlier the serial ports
- * should be properly mapped */
- invalidate_tlbcam_entry(NUM_TLBCAMS - 1);
-#endif
-
- def = ocp_get_one_device(OCP_VENDOR_FREESCALE, OCP_FUNC_GFAR, 0);
- if (def) {
- einfo = (struct ocp_gfar_data *) def->additions;
- memcpy(einfo->mac_addr, binfo->bi_enetaddr, 6);
- }
-
- def = ocp_get_one_device(OCP_VENDOR_FREESCALE, OCP_FUNC_GFAR, 1);
- if (def) {
- einfo = (struct ocp_gfar_data *) def->additions;
- memcpy(einfo->mac_addr, binfo->bi_enet1addr, 6);
- }
-
-#ifdef CONFIG_BLK_DEV_INITRD
- if (initrd_start)
- ROOT_DEV = Root_RAM0;
- else
-#endif
-#ifdef CONFIG_ROOT_NFS
- ROOT_DEV = Root_NFS;
-#else
- ROOT_DEV = Root_HDA1;
-#endif
-
- ocp_for_each_device(mpc85xx_update_paddr_ocp, &(binfo->bi_immr_base));
-}
-
-/* ************************************************************************ */
-void __init
-platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
- unsigned long r6, unsigned long r7)
-{
- /* parse_bootinfo must always be called first */
- parse_bootinfo(find_bootinfo());
-
- /*
- * If we were passed in a board information, copy it into the
- * residual data area.
- */
- if (r3) {
- memcpy((void *) __res, (void *) (r3 + KERNELBASE),
- sizeof (bd_t));
-
- }
-#ifdef CONFIG_SERIAL_TEXT_DEBUG
- {
- bd_t *binfo = (bd_t *) __res;
-
- /* Use the last TLB entry to map CCSRBAR to allow access to DUART regs */
- settlbcam(NUM_TLBCAMS - 1, binfo->bi_immr_base,
- binfo->bi_immr_base, MPC85xx_CCSRBAR_SIZE, _PAGE_IO, 0);
-
- }
-#endif
-
-#if defined(CONFIG_BLK_DEV_INITRD)
- /*
- * If the init RAM disk has been configured in, and there's a valid
- * starting address for it, set it up.
- */
- if (r4) {
- initrd_start = r4 + KERNELBASE;
- initrd_end = r5 + KERNELBASE;
- }
-#endif /* CONFIG_BLK_DEV_INITRD */
-
- /* Copy the kernel command line arguments to a safe place. */
-
- if (r6) {
- *(char *) (r7 + KERNELBASE) = 0;
- strcpy(cmd_line, (char *) (r6 + KERNELBASE));
- }
-
- /* setup the PowerPC module struct */
- ppc_md.setup_arch = mpc85xx_cds_setup_arch;
- ppc_md.show_cpuinfo = mpc85xx_cds_show_cpuinfo;
-
- ppc_md.init_IRQ = mpc85xx_cds_init_IRQ;
- ppc_md.get_irq = openpic_get_irq;
-
- ppc_md.restart = mpc85xx_restart;
- ppc_md.power_off = mpc85xx_power_off;
- ppc_md.halt = mpc85xx_halt;
-
- ppc_md.find_end_of_memory = mpc85xx_find_end_of_memory;
-
- ppc_md.time_init = NULL;
- ppc_md.set_rtc_time = NULL;
- ppc_md.get_rtc_time = NULL;
- ppc_md.calibrate_decr = mpc85xx_calibrate_decr;
-
-#if defined(CONFIG_SERIAL_8250) && defined(CONFIG_SERIAL_TEXT_DEBUG)
- ppc_md.progress = gen550_progress;
-#endif /* CONFIG_SERIAL_8250 && CONFIG_SERIAL_TEXT_DEBUG */
-
- if (ppc_md.progress)
- ppc_md.progress("mpc85xx_cds_init(): exit", 0);
-
- return;
-}
+++ /dev/null
-/*
- * arch/ppc/platforms/85xx/mpc85xx_cds_common.h
- *
- * MPC85xx CDS board definitions
- *
- * Maintainer: Kumar Gala <kumar.gala@freescale.com>
- *
- * Copyright 2004 Freescale Semiconductor, Inc
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- */
-
-#ifndef __MACH_MPC85XX_CDS_H__
-#define __MACH_MPC85XX_CDS_H__
-
-#include <linux/config.h>
-#include <linux/serial.h>
-#include <asm/ppcboot.h>
-#include <linux/initrd.h>
-#include <syslib/ppc85xx_setup.h>
-
-#define BOARD_CCSRBAR ((uint)0xe0000000)
-#define CCSRBAR_SIZE ((uint)1024*1024)
-
-/* CADMUS info */
-#define CADMUS_BASE (0xf8004000)
-#define CADMUS_SIZE (256)
-#define CM_VER (0)
-#define CM_CSR (1)
-#define CM_RST (2)
-
-/* PCI config */
-#define PCI1_CFG_ADDR_OFFSET (0x8000)
-#define PCI1_CFG_DATA_OFFSET (0x8004)
-
-#define PCI2_CFG_ADDR_OFFSET (0x9000)
-#define PCI2_CFG_DATA_OFFSET (0x9004)
-
-/* PCI interrupt controller */
-#define PIRQ0A MPC85xx_IRQ_EXT0
-#define PIRQ0B MPC85xx_IRQ_EXT1
-#define PIRQ0C MPC85xx_IRQ_EXT2
-#define PIRQ0D MPC85xx_IRQ_EXT3
-#define PIRQ1A MPC85xx_IRQ_EXT11
-
-/* PCI 1 memory map */
-#define MPC85XX_PCI1_LOWER_IO 0x00000000
-#define MPC85XX_PCI1_UPPER_IO 0x00ffffff
-
-#define MPC85XX_PCI1_LOWER_MEM 0x80000000
-#define MPC85XX_PCI1_UPPER_MEM 0x9fffffff
-
-#define MPC85XX_PCI1_IO_BASE 0xe2000000
-#define MPC85XX_PCI1_MEM_OFFSET 0x00000000
-
-#define MPC85XX_PCI1_IO_SIZE 0x01000000
-
-/* PCI 2 memory map */
-#define MPC85XX_PCI2_LOWER_IO 0x01000000
-#define MPC85XX_PCI2_UPPER_IO 0x01ffffff
-
-#define MPC85XX_PCI2_LOWER_MEM 0xa0000000
-#define MPC85XX_PCI2_UPPER_MEM 0xbfffffff
-
-#define MPC85XX_PCI2_IO_BASE 0xe3000000
-#define MPC85XX_PCI2_MEM_OFFSET 0x00000000
-
-#define MPC85XX_PCI2_IO_SIZE 0x01000000
-
-#define SERIAL_PORT_DFNS \
- STD_UART_OP(0) \
- STD_UART_OP(1)
-
-#endif /* __MACH_MPC85XX_CDS_H__ */
#include <linux/serial.h>
#include <linux/tty.h> /* for linux/serial_core.h */
#include <linux/serial_core.h>
-#include <linux/initrd.h>
#include <linux/module.h>
-#include <linux/initrd.h>
#include <asm/system.h>
#include <asm/pgtable.h>
uart_req.iotype = SERIAL_IO_MEM;
uart_req.mapbase = UARTA_ADDR;
uart_req.membase = ioremap(uart_req.mapbase, MPC85xx_UART0_SIZE);
- uart_req.type = PORT_16650;
#if defined(CONFIG_SERIAL_TEXT_DEBUG) || defined(CONFIG_KGDB)
gen550_init(0, &uart_req);
#define __MACH_SBC8560_H__
#include <linux/config.h>
+#include <linux/serial.h>
#include <platforms/85xx/sbc85xx.h>
-
-#define CPM_MAP_ADDR (CCSRBAR + MPC85xx_CPM_OFFSET)
#ifdef CONFIG_SERIAL_MANY_PORTS
#define RS_TABLE_SIZE 64
#define BASE_BAUD ( 1843200 / 16 )
#ifdef CONFIG_SERIAL_DETECT_IRQ
-#define STD_COM_FLAGS (ASYNC_SKIP_TEST|ASYNC_AUTO_IRQ)
+#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF|ASYNC_SKIP_TEST|ASYNC_AUTO_IRQ)
#else
-#define STD_COM_FLAGS (ASYNC_SKIP_TEST)
+#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF|ASYNC_SKIP_TEST)
#endif
-
+
#define STD_SERIAL_PORT_DFNS \
{ 0, BASE_BAUD, UARTA_ADDR, MPC85xx_IRQ_EXT9, STD_COM_FLAGS, /* ttyS0 */ \
iomem_base: (u8 *)UARTA_ADDR, \
/* Internal interrupts are all Level Sensitive, and Positive Polarity */
static u_char sbc8560_openpic_initsenses[] __initdata = {
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 0: L2 Cache */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 1: ECM */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 2: DDR DRAM */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 3: LBIU */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 4: DMA 0 */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 5: DMA 1 */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 6: DMA 2 */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 7: DMA 3 */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 8: PCI/PCI-X */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 9: RIO Inbound Port Write Error */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 10: RIO Doorbell Inbound */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 11: RIO Outbound Message */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 12: RIO Inbound Message */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 13: TSEC 0 Transmit */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 14: TSEC 0 Receive */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 15: Unused */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 16: Unused */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 17: Unused */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 18: TSEC 0 Receive/Transmit Error */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 19: TSEC 1 Transmit */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 20: TSEC 1 Receive */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 21: Unused */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 22: Unused */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 23: Unused */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 24: TSEC 1 Receive/Transmit Error */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 25: Fast Ethernet */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 26: DUART */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 27: I2C */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 28: Performance Monitor */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 29: Unused */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 30: CPM */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 31: Unused */
+ (IRQ_POLARITY_POSITIVE), /* Internal 0: L2 Cache */
+ (IRQ_POLARITY_POSITIVE), /* Internal 1: ECM */
+ (IRQ_POLARITY_POSITIVE), /* Internal 2: DDR DRAM */
+ (IRQ_POLARITY_POSITIVE), /* Internal 3: LBIU */
+ (IRQ_POLARITY_POSITIVE), /* Internal 4: DMA 0 */
+ (IRQ_POLARITY_POSITIVE), /* Internal 5: DMA 1 */
+ (IRQ_POLARITY_POSITIVE), /* Internal 6: DMA 2 */
+ (IRQ_POLARITY_POSITIVE), /* Internal 7: DMA 3 */
+ (IRQ_POLARITY_POSITIVE), /* Internal 8: PCI/PCI-X */
+ (IRQ_POLARITY_POSITIVE), /* Internal 9: RIO Inbound Port Write Error */
+ (IRQ_POLARITY_POSITIVE), /* Internal 10: RIO Doorbell Inbound */
+ (IRQ_POLARITY_POSITIVE), /* Internal 11: RIO Outbound Message */
+ (IRQ_POLARITY_POSITIVE), /* Internal 12: RIO Inbound Message */
+ (IRQ_POLARITY_POSITIVE), /* Internal 13: TSEC 0 Transmit */
+ (IRQ_POLARITY_POSITIVE), /* Internal 14: TSEC 0 Receive */
+ (IRQ_POLARITY_POSITIVE), /* Internal 15: Unused */
+ (IRQ_POLARITY_POSITIVE), /* Internal 16: Unused */
+ (IRQ_POLARITY_POSITIVE), /* Internal 17: Unused */
+ (IRQ_POLARITY_POSITIVE), /* Internal 18: TSEC 0 Receive/Transmit Error */
+ (IRQ_POLARITY_POSITIVE), /* Internal 19: TSEC 1 Transmit */
+ (IRQ_POLARITY_POSITIVE), /* Internal 20: TSEC 1 Receive */
+ (IRQ_POLARITY_POSITIVE), /* Internal 21: Unused */
+ (IRQ_POLARITY_POSITIVE), /* Internal 22: Unused */
+ (IRQ_POLARITY_POSITIVE), /* Internal 23: Unused */
+ (IRQ_POLARITY_POSITIVE), /* Internal 24: TSEC 1 Receive/Transmit Error */
+ (IRQ_POLARITY_POSITIVE), /* Internal 25: Fast Ethernet */
+ (IRQ_POLARITY_POSITIVE), /* Internal 26: DUART */
+ (IRQ_POLARITY_POSITIVE), /* Internal 27: I2C */
+ (IRQ_POLARITY_POSITIVE), /* Internal 28: Performance Monitor */
+ (IRQ_POLARITY_POSITIVE), /* Internal 29: Unused */
+ (IRQ_POLARITY_POSITIVE), /* Internal 30: CPM */
+ (IRQ_POLARITY_POSITIVE), /* Internal 31: Unused */
0x0, /* External 0: */
0x0, /* External 1: */
#if defined(CONFIG_PCI)
# Makefile for the linux kernel.
#
+ifdef CONFIG_PPC64BRIDGE
+EXTRA_AFLAGS := -Wa,-mppc64bridge
+endif
+ifdef CONFIG_40x
+EXTRA_AFLAGS := -Wa,-m405
+endif
+
# Extra CFLAGS so we don't have to do relative includes
CFLAGS_pmac_setup.o += -Iarch/$(ARCH)/mm
obj-$(CONFIG_CPU_FREQ_PMAC) += pmac_cpufreq.o
endif
obj-$(CONFIG_PMAC_BACKLIGHT) += pmac_backlight.o
+obj-$(CONFIG_PPC_RTAS) += error_log.o proc_rtas.o
obj-$(CONFIG_PREP_RESIDUAL) += residual.o
obj-$(CONFIG_ADIR) += adir_setup.o adir_pic.o adir_pci.o
obj-$(CONFIG_EST8260) += est8260_setup.o
obj-$(CONFIG_PPLUS) += pplus.o
obj-$(CONFIG_PRPMC750) += prpmc750.o
obj-$(CONFIG_PRPMC800) += prpmc800.o
-obj-$(CONFIG_RPX8260) += rpx8260.o
obj-$(CONFIG_SANDPOINT) += sandpoint.o
obj-$(CONFIG_SBC82xx) += sbc82xx.o
obj-$(CONFIG_SPRUCE) += spruce.o
-obj-$(CONFIG_LITE5200) += lite5200.o mpc5200.o
ifeq ($(CONFIG_SMP),y)
obj-$(CONFIG_PPC_PMAC) += pmac_smp.o
--- /dev/null
+/*
+ * arch/ppc/kernel/error_log.c
+ *
+ * Copyright (c) 2000 Tilmann Bitterberg
+ * (tilmann@bitterberg.de)
+ *
+ * Error processing of errors found by rtas event-scan routine
+ * which is done with every heartbeat. (chrp_setup.c)
+ */
+
+#include <linux/sched.h>
+
+#include <asm/prom.h>
+
+#include "error_log.h"
+
+/* ****************************************************************** */
+/*
+ * EVENT-SCAN
+ * The code below here doesn't take any action when it finds
+ * an error; it just prints as much information as possible and
+ * then it's up to the user to decide what to do.
+ *
+ * Returns 0 if no errors were found
+ * Returns 1 if there may be more errors
+ */
+int ppc_rtas_errorlog_scan(void)
+{
+const char *_errlog_severity[] = {
+#ifdef VERBOSE_ERRORS
+ "No Error\n\t\
+Should require no further information",
+ "Event\n\t\
+This is not really an error, it is an event. I use events\n\t\
+to communicate with RTAS back and forth.",
+ "Warning\n\t\
+Indicates a non-state-losing error, either fully recovered\n\t\
+by RTAS or not needing recovery. Ignore it.",
+ "Error sync\n\t\
+May only be fatal to a certain program or thread. Recovery\n\t\
+and continuation is possible, if I only had a handler for\n\t\
+this. Less serious",
+ "Error\n\t\
+Less serious, but still causing a loss of data and state.\n\t\
+I can't tell you exactly what to do, You have to decide\n\t\
+with help from the target and initiator field, what kind\n\t\
+of further actions may take place.",
+ "Fatal\n\t\
+Represent a permanent hardware failure and I believe this\n\t\
+affects my overall performance and behaviour. I would not\n\t\
+attempt to continue normal operation."
+#else
+ "No Error",
+ "Event",
+ "Warning",
+ "Error sync",
+ "Error",
+ "Fatal"
+#endif /* VERBOSE_ERRORS */
+};
+
+#if 0 /* unused?? */
+const char *_errlog_disposition[] = {
+#ifdef VERBOSE_ERRORS
+ "Fully recovered\n\t\
+There was an error, but it is fully recovered by RTAS.",
+ "Limited recovery\n\t\
+RTAS was able to recover the state of the machine, but some\n\t\
+feature of the machine has been disabled or lost (for example\n\t\
+error checking) or performance may suffer.",
+ "Not recovered\n\t\
+Whether RTAS did not try to recover anything or recovery failed:\n\t\
+HOUSTON, WE HAVE A PROBLEM!"
+#else
+ "Fully recovered",
+ "Limited recovery",
+ "Not recovered"
+#endif /* VERBOSE_ERRORS */
+};
+#endif
+
+const char *_errlog_extended[] = {
+#ifdef VERBOSE_ERRORS
+ "Not present\n\t\
+Sad, the RTAS call didn't return an extended error log.",
+ "Present\n\t\
+The extended log is present and hopefully it contains a lot of\n\t\
+useful information, which leads to the solution of the problem."
+#else
+ "Not present",
+ "Present"
+#endif /* VERBOSE_ERRORS */
+};
+
+const char *_errlog_initiator[] = {
+ "Unknown or not applicable",
+ "CPU",
+ "PCI",
+ "ISA",
+ "Memory",
+ "Power management"
+};
+
+const char *_errlog_target[] = {
+ "Unknown or not applicable",
+ "CPU",
+ "PCI",
+ "ISA",
+ "Memory",
+ "Power management"
+};
+ rtas_error_log error_log;
+ char logdata[1024];
+ int error;
+#if 0 /* unused?? */
+ int retries = 0; /* if HW error, try 10 times */
+#endif
+
+ error = call_rtas ("event-scan", 4, 1, (unsigned long *)&error_log,
+ INTERNAL_ERROR | EPOW_WARNING,
+ 0, __pa(logdata), 1024);
+
+ if (error == 1) /* no errors found */
+ return 0;
+
+ if (error == -1) {
+ printk(KERN_ERR "Unable to get errors. Do you a favor and throw this box away\n");
+ return 0;
+ }
+ if (error_log.version != 1)
+ printk(KERN_WARNING "Unknown version (%d), please implement me\n",
+ error_log.version);
+
+ switch (error_log.disposition) {
+ case DISP_FULLY_RECOVERED:
+ /* there was an error, but everything is fine now */
+ return 0;
+ case DISP_NOT_RECOVERED:
+ printk("We have a really serious Problem!\n");
+ case DISP_LIMITED_RECOVERY:
+ printk("Error classification\n");
+ printk("Severity : %s\n",
+ ppc_rtas_errorlog_check_severity (error_log));
+ printk("Initiator : %s\n",
+ ppc_rtas_errorlog_check_initiator (error_log));
+ printk("Target : %s\n",
+ ppc_rtas_errorlog_check_target (error_log));
+ printk("Type : %s\n",
+ ppc_rtas_errorlog_check_type (error_log));
+ printk("Ext. log : %s\n",
+ ppc_rtas_errorlog_check_extended (error_log));
+ if (error_log.extended)
+ ppc_rtas_errorlog_disect_extended (logdata);
+ return 1;
+ default:
+ /* nothing */
+ break;
+ }
+ return 0;
+}
+/* ****************************************************************** */
+const char * ppc_rtas_errorlog_check_type (rtas_error_log error_log)
+{
+ const char *_errlog_type[] = {
+ "unknown type",
+ "too many tries failed",
+ "TCE error",
+ "RTAS device failed",
+ "target timed out",
+ "parity error on data", /* 5 */
+ "parity error on address",
+ "parity error on external cache",
+ "access to invalid address",
+ "uncorrectable ECC error",
+ "corrected ECC error" /* 10 */
+ };
+ if (error_log.type == TYPE_EPOW)
+ return "EPOW";
+ if (error_log.type >= TYPE_PMGM_POWER_SW_ON)
+ return "PowerMGM Event (not handled right now)";
+ return _errlog_type[error_log.type];
+}
+
--- /dev/null
+#ifndef __ERROR_LOG_H__
+#define __ERROR_LOG_H__
+
+#define VERBOSE_ERRORS 1 /* Maybe I enlarge the kernel too much */
+#undef VERBOSE_ERRORS
+
+/* Event classes */
+/* XXX: Endianness correct? NOW */
+#define INTERNAL_ERROR 0x80000000 /* set bit 0 */
+#define EPOW_WARNING 0x40000000 /* set bit 1 */
+#define POWERMGM_EVENTS 0x20000000 /* set bit 2 */
+
+/* event-scan returns */
+#define SEVERITY_FATAL 0x5
+#define SEVERITY_ERROR 0x4
+#define SEVERITY_ERROR_SYNC 0x3
+#define SEVERITY_WARNING 0x2
+#define SEVERITY_EVENT 0x1
+#define SEVERITY_NO_ERROR 0x0
+#define DISP_FULLY_RECOVERED 0x0
+#define DISP_LIMITED_RECOVERY 0x1
+#define DISP_NOT_RECOVERED 0x2
+#define PART_PRESENT 0x0
+#define PART_NOT_PRESENT 0x1
+#define INITIATOR_UNKNOWN 0x0
+#define INITIATOR_CPU 0x1
+#define INITIATOR_PCI 0x2
+#define INITIATOR_ISA 0x3
+#define INITIATOR_MEMORY 0x4
+#define INITIATOR_POWERMGM 0x5
+#define TARGET_UNKNOWN 0x0
+#define TARGET_CPU 0x1
+#define TARGET_PCI 0x2
+#define TARGET_ISA 0x3
+#define TARGET_MEMORY 0x4
+#define TARGET_POWERMGM 0x5
+#define TYPE_RETRY 0x01
+#define TYPE_TCE_ERR 0x02
+#define TYPE_INTERN_DEV_FAIL 0x03
+#define TYPE_TIMEOUT 0x04
+#define TYPE_DATA_PARITY 0x05
+#define TYPE_ADDR_PARITY 0x06
+#define TYPE_CACHE_PARITY 0x07
+#define TYPE_ADDR_INVALID 0x08
+#define TYPE_ECC_UNCORR 0x09
+#define TYPE_ECC_CORR 0x0a
+#define TYPE_EPOW 0x40
+/* I don't add PowerMGM events right now, this is a different topic */
+#define TYPE_PMGM_POWER_SW_ON 0x60
+#define TYPE_PMGM_POWER_SW_OFF 0x61
+#define TYPE_PMGM_LID_OPEN 0x62
+#define TYPE_PMGM_LID_CLOSE 0x63
+#define TYPE_PMGM_SLEEP_BTN 0x64
+#define TYPE_PMGM_WAKE_BTN 0x65
+#define TYPE_PMGM_BATTERY_WARN 0x66
+#define TYPE_PMGM_BATTERY_CRIT 0x67
+#define TYPE_PMGM_SWITCH_TO_BAT 0x68
+#define TYPE_PMGM_SWITCH_TO_AC 0x69
+#define TYPE_PMGM_KBD_OR_MOUSE 0x6a
+#define TYPE_PMGM_ENCLOS_OPEN 0x6b
+#define TYPE_PMGM_ENCLOS_CLOSED 0x6c
+#define TYPE_PMGM_RING_INDICATE 0x6d
+#define TYPE_PMGM_LAN_ATTENTION 0x6e
+#define TYPE_PMGM_TIME_ALARM 0x6f
+#define TYPE_PMGM_CONFIG_CHANGE 0x70
+#define TYPE_PMGM_SERVICE_PROC 0x71
+
+typedef struct _rtas_error_log {
+ unsigned long version:8; /* Architectural version */
+ unsigned long severity:3; /* Severity level of error */
+ unsigned long disposition:2; /* Degree of recovery */
+ unsigned long extended:1; /* extended log present? */
+ unsigned long /* reserved */ :2; /* Reserved for future use */
+ unsigned long initiator:4; /* Initiator of event */
+ unsigned long target:4; /* Target of failed operation */
+ unsigned long type:8; /* General event or error*/
+ unsigned long extended_log_length:32; /* length in bytes */
+} rtas_error_log;
+
+/* ****************************************************************** */
+#define ppc_rtas_errorlog_check_severity(x) \
+ (_errlog_severity[x.severity])
+#define ppc_rtas_errorlog_check_target(x) \
+ (_errlog_target[x.target])
+#define ppc_rtas_errorlog_check_initiator(x) \
+ (_errlog_initiator[x.initiator])
+#define ppc_rtas_errorlog_check_extended(x) \
+ (_errlog_extended[x.extended])
+#define ppc_rtas_errorlog_disect_extended(x) \
+ do { /* implement me */ } while(0)
+extern const char * ppc_rtas_errorlog_check_type (rtas_error_log error_log);
+extern int ppc_rtas_errorlog_scan(void);
+
+
+#endif /* __ERROR_LOG_H__ */
+++ /dev/null
-/*
- * arch/ppc/platforms/lite5200.c
- *
- * Platform support file for the Freescale LITE5200 based on MPC52xx.
- * A maximum of this file should be moved to syslib/mpc52xx_?????
- * so that new platform based on MPC52xx need a minimal platform file
- * ( avoid code duplication )
- *
- *
- * Maintainer : Sylvain Munaut <tnt@246tNt.com>
- *
- * Based on the 2.4 code written by Kent Borg,
- * Dale Farnsworth <dale.farnsworth@mvista.com> and
- * Wolfgang Denk <wd@denx.de>
- *
- * Copyright 2004 Sylvain Munaut <tnt@246tNt.com>
- * Copyright 2003 Motorola Inc.
- * Copyright 2003 MontaVista Software Inc.
- * Copyright 2003 DENX Software Engineering (wd@denx.de)
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#include <linux/config.h>
-#include <linux/initrd.h>
-#include <linux/seq_file.h>
-#include <linux/kdev_t.h>
-#include <linux/root_dev.h>
-#include <linux/console.h>
-
-#include <asm/bootinfo.h>
-#include <asm/io.h>
-#include <asm/ocp.h>
-#include <asm/mpc52xx.h>
-
-
-/* Board data given by U-Boot */
-bd_t __res;
-EXPORT_SYMBOL(__res); /* For modules */
-
-
-/* ======================================================================== */
-/* OCP device definition */
-/* For board/shared resources like PSCs */
-/* ======================================================================== */
-/* Be sure not to load conficting devices : e.g. loading the UART drivers for
- * PSC1 and then also loading a AC97 for this same PSC.
- * For details about how to create an entry, look in the doc of the concerned
- * driver ( eg drivers/serial/mpc52xx_uart.c for the PSC in uart mode )
- */
-
-struct ocp_def board_ocp[] = {
- {
- .vendor = OCP_VENDOR_FREESCALE,
- .function = OCP_FUNC_PSC_UART,
- .index = 0,
- .paddr = MPC52xx_PSC1,
- .irq = MPC52xx_PSC1_IRQ,
- .pm = OCP_CPM_NA,
- },
- { /* Terminating entry */
- .vendor = OCP_VENDOR_INVALID
- }
-};
-
-
-/* ======================================================================== */
-/* Platform specific code */
-/* ======================================================================== */
-
-static int
-icecube_show_cpuinfo(struct seq_file *m)
-{
- seq_printf(m, "machine\t\t: Freescale LITE5200\n");
- return 0;
-}
-
-static void __init
-icecube_setup_arch(void)
-{
-
- /* Add board OCP definitions */
- mpc52xx_add_board_devices(board_ocp);
-}
-
-void __init
-platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
- unsigned long r6, unsigned long r7)
-{
- /* Generic MPC52xx platform initialization */
- /* TODO Create one and move a max of stuff in it.
- Put this init in the syslib */
-
- struct bi_record *bootinfo = find_bootinfo();
-
- if (bootinfo)
- parse_bootinfo(bootinfo);
- else {
- /* Load the bd_t board info structure */
- if (r3)
- memcpy((void*)&__res,(void*)(r3+KERNELBASE),
- sizeof(bd_t));
-
-#ifdef CONFIG_BLK_DEV_INITRD
- /* Load the initrd */
- if (r4) {
- initrd_start = r4 + KERNELBASE;
- initrd_end = r5 + KERNELBASE;
- }
-#endif
-
- /* Load the command line */
- if (r6) {
- *(char *)(r7+KERNELBASE) = 0;
- strcpy(cmd_line, (char *)(r6+KERNELBASE));
- }
- }
-
- /* BAT setup */
- mpc52xx_set_bat();
-
- /* No ISA bus AFAIK */
- isa_io_base = 0;
- isa_mem_base = 0;
-
- /* Setup the ppc_md struct */
- ppc_md.setup_arch = icecube_setup_arch;
- ppc_md.show_cpuinfo = icecube_show_cpuinfo;
- ppc_md.show_percpuinfo = NULL;
- ppc_md.init_IRQ = mpc52xx_init_irq;
- ppc_md.get_irq = mpc52xx_get_irq;
-
- ppc_md.find_end_of_memory = mpc52xx_find_end_of_memory;
- ppc_md.setup_io_mappings = mpc52xx_map_io;
-
- ppc_md.restart = mpc52xx_restart;
- ppc_md.power_off = mpc52xx_power_off;
- ppc_md.halt = mpc52xx_halt;
-
- /* No time keeper on the IceCube */
- ppc_md.time_init = NULL;
- ppc_md.get_rtc_time = NULL;
- ppc_md.set_rtc_time = NULL;
-
- ppc_md.calibrate_decr = mpc52xx_calibrate_decr;
-#ifdef CONFIG_SERIAL_TEXT_DEBUG
- ppc_md.progress = mpc52xx_progress;
-#endif
-}
-
+++ /dev/null
-/*
- * arch/ppc/platforms/lite5200.h
- *
- * Definitions for Freescale LITE5200 : MPC52xx Standard Development
- * Platform board support
- *
- * Maintainer : Sylvain Munaut <tnt@246tNt.com>
- *
- * Copyright (C) 2004 Sylvain Munaut <tnt@246tNt.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#ifndef __PLATFORMS_LITE5200_H__
-#define __PLATFORMS_LITE5200_H__
-
-/* Serial port used for low-level debug */
-#define MPC52xx_PF_CONSOLE_PORT 0 /* PSC1 */
-
-
-#endif /* __PLATFORMS_LITE5200_H__ */
+++ /dev/null
-/*
- * arch/ppc/platforms/mpc5200.c
- *
- * OCP Definitions for the boards based on MPC5200 processor. Contains
- * definitions for every common peripherals. (Mostly all but PSCs)
- *
- * Maintainer : Sylvain Munaut <tnt@246tNt.com>
- *
- * Copyright 2004 Sylvain Munaut <tnt@246tNt.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#include <asm/ocp.h>
-#include <asm/mpc52xx.h>
-
-/* Here is the core_ocp struct.
- * With all the devices common to all board. Even if port multiplexing is
- * not setup for them (if the user don't want them, just don't select the
- * config option). The potentially conflicting devices (like PSCs) goes in
- * board specific file.
- */
-struct ocp_def core_ocp[] = {
- { /* Terminating entry */
- .vendor = OCP_VENDOR_INVALID
- }
-};
static int __pmac pmac_cpufreq_init_7447A(struct device_node *cpunode)
{
struct device_node *volt_gpio_np;
- u32 *reg;
/* OF only reports the high frequency */
hi_freq = cur_freq;
return 1;
}
- reg = (u32 *)get_property(volt_gpio_np, "reg", NULL);
+ u32 *reg = (u32 *)get_property(volt_gpio_np, "reg", NULL);
voltage_gpio = *reg;
set_speed_proc = dfs_set_cpu_speed;
int len;
/* For PCI<->PCI bridges or CardBus bridges, we go down */
- class_code = (unsigned int *) get_property(node, "class-code", NULL);
+ class_code = (unsigned int *) get_property(node, "class-code", 0);
if (!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
(*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS))
continue;
* (iBook, G4, new IMacs, and all the recent Apple machines).
* It contains 3 controllers in one ASIC.
*
- * The U3 is the bridge used on G5 machines. It contains an
+ * The U3 is the bridge used on G5 machines. It contains on
* AGP bus which is dealt with the old UniNorth access routines
- * and a HyperTransport bus which uses its own set of access
+ * and an HyperTransport bus which uses its own set of access
* functions.
*/
continue;
if (0x0035 != *prop)
continue;
- prop = (u32 *)get_property(nec, "reg", NULL);
+ prop = (u32 *)get_property(nec, "reg", 0);
if (prop == NULL)
continue;
devfn = (prop[0] >> 8) & 0xff;
* any of the 0xfxxxxxxx "fine" memory regions to /ht.
* We need to fix that sooner or later by either parsing all child "ranges"
* properties or figuring out the U3 address space decoding logic and
- * then read its configuration register (if any).
+ * then read it's configuration register (if any).
*/
hose->io_base_phys = 0xf4000000 + 0x00400000;
hose->io_base_virt = ioremap(hose->io_base_phys, 0x00400000);
* default, gmac is not powered up, and so will be absent
* from the kernel initial PCI lookup.
*
- * Should be replaced by 2.4 new PCI mechanisms and really
- * register the device.
+ * Should be replaced by 2.4 new PCI mecanisms and really
+ * regiser the device.
*/
pci_read_config_word(dev, PCI_COMMAND, &cmd);
cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE;
spin_unlock_irqrestore(&pmac_pic_lock, flags);
}
-/* When an irq gets requested for the first client, if it's an
- * edge interrupt, we clear any previous one on the controller
- */
-static unsigned int __pmac pmac_startup_irq(unsigned int irq_nr)
-{
- unsigned long bit = 1UL << (irq_nr & 0x1f);
- int i = irq_nr >> 5;
-
- if ((irq_desc[irq_nr].status & IRQ_LEVEL) == 0)
- out_le32(&pmac_irq_hw[i]->ack, bit);
- set_bit(irq_nr, ppc_cached_irq_mask);
- pmac_set_irq_mask(irq_nr, 0);
-
- return 0;
-}
-
static void __pmac pmac_mask_irq(unsigned int irq_nr)
{
clear_bit(irq_nr, ppc_cached_irq_mask);
struct hw_interrupt_type pmac_pic = {
- .typename = " PMAC-PIC ",
- .startup = pmac_startup_irq,
- .enable = pmac_unmask_irq,
- .disable = pmac_mask_irq,
- .ack = pmac_mask_and_ack_irq,
- .end = pmac_end_irq,
+ " PMAC-PIC ",
+ NULL,
+ NULL,
+ pmac_unmask_irq,
+ pmac_mask_irq,
+ pmac_mask_and_ack_irq,
+ pmac_end_irq,
+ NULL
};
struct hw_interrupt_type gatwick_pic = {
- .typename = " GATWICK ",
- .startup = pmac_startup_irq,
- .enable = pmac_unmask_irq,
- .disable = pmac_mask_irq,
- .ack = pmac_mask_and_ack_irq,
- .end = pmac_end_irq,
+ " GATWICK ",
+ NULL,
+ NULL,
+ pmac_unmask_irq,
+ pmac_mask_irq,
+ pmac_mask_and_ack_irq,
+ pmac_end_irq,
+ NULL
};
static irqreturn_t gatwick_action(int cpl, void *dev_id, struct pt_regs *regs)
nmi_irq = pswitch->intrs[0].line;
openpic_init_nmi_irq(nmi_irq);
request_irq(nmi_irq, xmon_irq, 0,
- "NMI - XMON", NULL);
+ "NMI - XMON", 0);
}
}
#endif /* CONFIG_XMON */
for ( i = max_real_irqs ; i < max_irqs ; i++ )
irq_desc[i].handler = &gatwick_pic;
request_irq( irq_cascade, gatwick_action, SA_INTERRUPT,
- "cascade", NULL );
+ "cascade", 0 );
}
printk("System has %d possible interrupts\n", max_irqs);
if (max_irqs != max_real_irqs)
max_real_irqs);
#ifdef CONFIG_XMON
- request_irq(20, xmon_irq, 0, "NMI - XMON", NULL);
+ request_irq(20, xmon_irq, 0, "NMI - XMON", 0);
#endif /* CONFIG_XMON */
}
/* reset the entry point so if we get another intr we won't
* try to startup again */
out_be32(psurge_start, 0x100);
- if (request_irq(30, psurge_primary_intr, SA_INTERRUPT, "primary IPI", NULL))
+ if (request_irq(30, psurge_primary_intr, SA_INTERRUPT, "primary IPI", 0))
printk(KERN_ERR "Couldn't get primary IPI interrupt");
}
}
/* Check the first PCI device to see if it is a Raven. */
- early_read_config_dword(NULL, 0, 0, PCI_VENDOR_ID, &devid);
+ early_read_config_dword(0, 0, 0, PCI_VENDOR_ID, &devid);
switch (devid & 0xffff0000) {
case MPIC_RAVEN_ID:
/* Read the memory base register. */
- early_read_config_dword(NULL, 0, 0, PCI_BASE_ADDRESS_1, &pci_membase);
+ early_read_config_dword(0, 0, 0, PCI_BASE_ADDRESS_1, &pci_membase);
if (pci_membase == 0) {
OpenPIC_Addr = NULL;
irq_desc[i].handler = &i8259_pic;
/* If we have a Raven PCI bridge or a Hawk PCI bridge / Memory
* controller, we poll (as they have a different int-ack address). */
- early_read_config_dword(NULL, 0, 0, PCI_VENDOR_ID, &pci_viddid);
+ early_read_config_dword(0, 0, 0, PCI_VENDOR_ID, &pci_viddid);
pci_did = (pci_viddid & 0xffff0000) >> 16;
if (((pci_viddid & 0xffff) == PCI_VENDOR_ID_MOTOROLA)
&& ((pci_did == PCI_DEVICE_ID_MOTOROLA_RAVEN)
--- /dev/null
+/*
+ * arch/ppc/platforms/proc_rtas.c
+ * Copyright (C) 2000 Tilmann Bitterberg
+ * (tilmann@bitterberg.de)
+ *
+ * RTAS (Runtime Abstraction Services) stuff
+ * Intention is to provide a clean user interface
+ * to use the RTAS.
+ *
+ * TODO:
+ * Split off a header file and maybe move it to a different
+ * location. Write Documentation on what the /proc/rtas/ entries
+ * actually do.
+ */
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/proc_fs.h>
+#include <linux/stat.h>
+#include <linux/ctype.h>
+#include <linux/time.h>
+#include <linux/string.h>
+#include <linux/init.h>
+
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
+#include <asm/processor.h>
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/machdep.h> /* for ppc_md */
+#include <asm/time.h>
+
+/* Token for Sensors */
+#define KEY_SWITCH 0x0001
+#define ENCLOSURE_SWITCH 0x0002
+#define THERMAL_SENSOR 0x0003
+#define LID_STATUS 0x0004
+#define POWER_SOURCE 0x0005
+#define BATTERY_VOLTAGE 0x0006
+#define BATTERY_REMAINING 0x0007
+#define BATTERY_PERCENTAGE 0x0008
+#define EPOW_SENSOR 0x0009
+#define BATTERY_CYCLESTATE 0x000a
+#define BATTERY_CHARGING 0x000b
+
+/* IBM specific sensors */
+#define IBM_SURVEILLANCE 0x2328 /* 9000 */
+#define IBM_FANRPM 0x2329 /* 9001 */
+#define IBM_VOLTAGE 0x232a /* 9002 */
+#define IBM_DRCONNECTOR 0x232b /* 9003 */
+#define IBM_POWERSUPPLY 0x232c /* 9004 */
+#define IBM_INTQUEUE 0x232d /* 9005 */
+
+/* Status return values */
+#define SENSOR_CRITICAL_HIGH 13
+#define SENSOR_WARNING_HIGH 12
+#define SENSOR_NORMAL 11
+#define SENSOR_WARNING_LOW 10
+#define SENSOR_CRITICAL_LOW 9
+#define SENSOR_SUCCESS 0
+#define SENSOR_HW_ERROR -1
+#define SENSOR_BUSY -2
+#define SENSOR_NOT_EXIST -3
+#define SENSOR_DR_ENTITY -9000
+
+/* Location Codes */
+#define LOC_SCSI_DEV_ADDR 'A'
+#define LOC_SCSI_DEV_LOC 'B'
+#define LOC_CPU 'C'
+#define LOC_DISKETTE 'D'
+#define LOC_ETHERNET 'E'
+#define LOC_FAN 'F'
+#define LOC_GRAPHICS 'G'
+/* reserved / not used 'H' */
+#define LOC_IO_ADAPTER 'I'
+/* reserved / not used 'J' */
+#define LOC_KEYBOARD 'K'
+#define LOC_LCD 'L'
+#define LOC_MEMORY 'M'
+#define LOC_NV_MEMORY 'N'
+#define LOC_MOUSE 'O'
+#define LOC_PLANAR 'P'
+#define LOC_OTHER_IO 'Q'
+#define LOC_PARALLEL 'R'
+#define LOC_SERIAL 'S'
+#define LOC_DEAD_RING 'T'
+#define LOC_RACKMOUNTED 'U' /* for _u_nit is rack mounted */
+#define LOC_VOLTAGE 'V'
+#define LOC_SWITCH_ADAPTER 'W'
+#define LOC_OTHER 'X'
+#define LOC_FIRMWARE 'Y'
+#define LOC_SCSI 'Z'
+
+/* Tokens for indicators */
+#define TONE_FREQUENCY 0x0001 /* 0 - 1000 (HZ)*/
+#define TONE_VOLUME 0x0002 /* 0 - 100 (%) */
+#define SYSTEM_POWER_STATE 0x0003
+#define WARNING_LIGHT 0x0004
+#define DISK_ACTIVITY_LIGHT 0x0005
+#define HEX_DISPLAY_UNIT 0x0006
+#define BATTERY_WARNING_TIME 0x0007
+#define CONDITION_CYCLE_REQUEST 0x0008
+#define SURVEILLANCE_INDICATOR 0x2328 /* 9000 */
+#define DR_ACTION 0x2329 /* 9001 */
+#define DR_INDICATOR 0x232a /* 9002 */
+/* 9003 - 9004: Vendor specific */
+#define GLOBAL_INTERRUPT_QUEUE 0x232d /* 9005 */
+/* 9006 - 9999: Vendor specific */
+
+/* other */
+#define MAX_SENSORS 17 /* I only know of 17 sensors */
+#define MAX_LINELENGTH 256
+#define SENSOR_PREFIX "ibm,sensor-"
+#define cel_to_fahr(x) ((x*9/5)+32)
+
+
+/* Globals */
+static struct proc_dir_entry *proc_rtas;
+static struct rtas_sensors sensors;
+static struct device_node *rtas;
+static unsigned long power_on_time = 0; /* Save the time the user set */
+static char progress_led[MAX_LINELENGTH];
+
+static unsigned long rtas_tone_frequency = 1000;
+static unsigned long rtas_tone_volume = 0;
+
+/* ****************STRUCTS******************************************* */
+struct individual_sensor {
+ unsigned int token;
+ unsigned int quant;
+};
+
+struct rtas_sensors {
+ struct individual_sensor sensor[MAX_SENSORS];
+ unsigned int quant;
+};
+
+/* ****************************************************************** */
+/* Declarations */
+static int ppc_rtas_sensor_read(char * buf, char ** start, off_t off,
+ int count, int *eof, void *data);
+static ssize_t ppc_rtas_clock_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos);
+static ssize_t ppc_rtas_clock_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos);
+static ssize_t ppc_rtas_progress_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos);
+static ssize_t ppc_rtas_progress_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos);
+static ssize_t ppc_rtas_poweron_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos);
+static ssize_t ppc_rtas_poweron_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos);
+
+static ssize_t ppc_rtas_tone_freq_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos);
+static ssize_t ppc_rtas_tone_freq_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos);
+static ssize_t ppc_rtas_tone_volume_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos);
+static ssize_t ppc_rtas_tone_volume_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos);
+
+struct file_operations ppc_rtas_poweron_operations = {
+ .read = ppc_rtas_poweron_read,
+ .write = ppc_rtas_poweron_write
+};
+struct file_operations ppc_rtas_progress_operations = {
+ .read = ppc_rtas_progress_read,
+ .write = ppc_rtas_progress_write
+};
+
+struct file_operations ppc_rtas_clock_operations = {
+ .read = ppc_rtas_clock_read,
+ .write = ppc_rtas_clock_write
+};
+
+struct file_operations ppc_rtas_tone_freq_operations = {
+ .read = ppc_rtas_tone_freq_read,
+ .write = ppc_rtas_tone_freq_write
+};
+struct file_operations ppc_rtas_tone_volume_operations = {
+ .read = ppc_rtas_tone_volume_read,
+ .write = ppc_rtas_tone_volume_write
+};
+
+int ppc_rtas_find_all_sensors (void);
+int ppc_rtas_process_sensor(struct individual_sensor s, int state,
+ int error, char * buf);
+char * ppc_rtas_process_error(int error);
+int get_location_code(struct individual_sensor s, char * buf);
+int check_location_string (char *c, char * buf);
+int check_location (char *c, int idx, char * buf);
+
+/* ****************************************************************** */
+/* MAIN */
+/* ****************************************************************** */
+static int __init proc_rtas_init(void)
+{
+ struct proc_dir_entry *entry;
+
+ rtas = find_devices("rtas");
+ if ((rtas == 0) || (_machine != _MACH_chrp)) {
+ return 1;
+ }
+
+ proc_rtas = proc_mkdir("rtas", 0);
+ if (proc_rtas == 0)
+ return 1;
+
+ /* /proc/rtas entries */
+
+ entry = create_proc_entry("progress", S_IRUGO|S_IWUSR, proc_rtas);
+ if (entry) entry->proc_fops = &ppc_rtas_progress_operations;
+
+ entry = create_proc_entry("clock", S_IRUGO|S_IWUSR, proc_rtas);
+ if (entry) entry->proc_fops = &ppc_rtas_clock_operations;
+
+ entry = create_proc_entry("poweron", S_IWUSR|S_IRUGO, proc_rtas);
+ if (entry) entry->proc_fops = &ppc_rtas_poweron_operations;
+
+ create_proc_read_entry("sensors", S_IRUGO, proc_rtas,
+ ppc_rtas_sensor_read, NULL);
+
+ entry = create_proc_entry("frequency", S_IWUSR|S_IRUGO, proc_rtas);
+ if (entry) entry->proc_fops = &ppc_rtas_tone_freq_operations;
+
+ entry = create_proc_entry("volume", S_IWUSR|S_IRUGO, proc_rtas);
+ if (entry) entry->proc_fops = &ppc_rtas_tone_volume_operations;
+
+ return 0;
+}
+__initcall(proc_rtas_init);
+
+/* ****************************************************************** */
+/* POWER-ON-TIME */
+/* ****************************************************************** */
+static ssize_t ppc_rtas_poweron_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos)
+{
+ struct rtc_time tm;
+ unsigned long nowtime;
+ char *dest;
+ int error;
+
+ nowtime = simple_strtoul(buf, &dest, 10);
+ if (*dest != '\0' && *dest != '\n') {
+ printk("ppc_rtas_poweron_write: Invalid time\n");
+ return count;
+ }
+ power_on_time = nowtime; /* save the time */
+
+ to_tm(nowtime, &tm);
+
+ error = call_rtas("set-time-for-power-on", 7, 1, NULL,
+ tm.tm_year, tm.tm_mon, tm.tm_mday,
+ tm.tm_hour, tm.tm_min, tm.tm_sec, 0 /* nano */);
+ if (error != 0)
+ printk(KERN_WARNING "error: setting poweron time returned: %s\n",
+ ppc_rtas_process_error(error));
+ return count;
+}
+/* ****************************************************************** */
+static ssize_t ppc_rtas_poweron_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos)
+{
+ int n;
+ if (power_on_time == 0)
+ n = sprintf(buf, "Power on time not set\n");
+ else
+ n = sprintf(buf, "%lu\n", power_on_time);
+
+ if (*ppos >= strlen(buf))
+ return 0;
+ if (n > strlen(buf) - *ppos)
+ n = strlen(buf) - *ppos;
+ if (n > count)
+ n = count;
+ *ppos += n;
+ return n;
+}
+
+/* ****************************************************************** */
+/* PROGRESS */
+/* ****************************************************************** */
+static ssize_t ppc_rtas_progress_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long hex;
+
+ strcpy(progress_led, buf); /* save the string */
+ /* Lets see if the user passed hexdigits */
+ hex = simple_strtoul(buf, NULL, 10);
+
+ ppc_md.progress ((char *)buf, hex);
+ return count;
+
+ /* clear the line */ /* ppc_md.progress(" ", 0xffff);*/
+}
+/* ****************************************************************** */
+static ssize_t ppc_rtas_progress_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos)
+{
+ int n = 0;
+ if (progress_led != NULL)
+ n = sprintf (buf, "%s\n", progress_led);
+ if (*ppos >= strlen(buf))
+ return 0;
+ if (n > strlen(buf) - *ppos)
+ n = strlen(buf) - *ppos;
+ if (n > count)
+ n = count;
+ *ppos += n;
+ return n;
+}
+
+/* ****************************************************************** */
+/* CLOCK */
+/* ****************************************************************** */
+static ssize_t ppc_rtas_clock_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos)
+{
+ struct rtc_time tm;
+ unsigned long nowtime;
+ char *dest;
+ int error;
+
+ nowtime = simple_strtoul(buf, &dest, 10);
+ if (*dest != '\0' && *dest != '\n') {
+ printk("ppc_rtas_clock_write: Invalid time\n");
+ return count;
+ }
+
+ to_tm(nowtime, &tm);
+ error = call_rtas("set-time-of-day", 7, 1, NULL,
+ tm.tm_year, tm.tm_mon, tm.tm_mday,
+ tm.tm_hour, tm.tm_min, tm.tm_sec, 0);
+ if (error != 0)
+ printk(KERN_WARNING "error: setting the clock returned: %s\n",
+ ppc_rtas_process_error(error));
+ return count;
+}
+/* ****************************************************************** */
+static ssize_t ppc_rtas_clock_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned int year, mon, day, hour, min, sec;
+ unsigned long *ret = kmalloc(4*8, GFP_KERNEL);
+ int n, error;
+
+ error = call_rtas("get-time-of-day", 0, 8, ret);
+
+ year = ret[0]; mon = ret[1]; day = ret[2];
+ hour = ret[3]; min = ret[4]; sec = ret[5];
+
+ if (error != 0){
+ printk(KERN_WARNING "error: reading the clock returned: %s\n",
+ ppc_rtas_process_error(error));
+ n = sprintf (buf, "0");
+ } else {
+ n = sprintf (buf, "%lu\n", mktime(year, mon, day, hour, min, sec));
+ }
+ kfree(ret);
+
+ if (*ppos >= strlen(buf))
+ return 0;
+ if (n > strlen(buf) - *ppos)
+ n = strlen(buf) - *ppos;
+ if (n > count)
+ n = count;
+ *ppos += n;
+ return n;
+}
+
+/* ****************************************************************** */
+/* SENSOR STUFF */
+/* ****************************************************************** */
+static int ppc_rtas_sensor_read(char * buf, char ** start, off_t off,
+ int count, int *eof, void *data)
+{
+ int i,j,n;
+ unsigned long ret;
+ int state, error;
+ char buffer[MAX_LINELENGTH*MAX_SENSORS]; /* May not be enough */
+
+ if (count < 0)
+ return -EINVAL;
+
+ n = sprintf ( buffer , "RTAS (RunTime Abstraction Services) Sensor Information\n");
+ n += sprintf ( buffer+n, "Sensor\t\tValue\t\tCondition\tLocation\n");
+ n += sprintf ( buffer+n, "********************************************************\n");
+
+ if (ppc_rtas_find_all_sensors() != 0) {
+ n += sprintf ( buffer+n, "\nNo sensors are available\n");
+ goto return_string;
+ }
+
+ for (i=0; i<sensors.quant; i++) {
+ j = sensors.sensor[i].quant;
+ /* A sensor may have multiple instances */
+ while (j >= 0) {
+ error = call_rtas("get-sensor-state", 2, 2, &ret,
+ sensors.sensor[i].token, sensors.sensor[i].quant-j);
+ state = (int) ret;
+ n += ppc_rtas_process_sensor(sensors.sensor[i], state, error, buffer+n );
+ n += sprintf (buffer+n, "\n");
+ j--;
+ } /* while */
+ } /* for */
+
+return_string:
+ if (off >= strlen(buffer)) {
+ *eof = 1;
+ return 0;
+ }
+ if (n > strlen(buffer) - off)
+ n = strlen(buffer) - off;
+ if (n > count)
+ n = count;
+ else
+ *eof = 1;
+ memcpy(buf, buffer + off, n);
+ *start = buf;
+ return n;
+}
+
+/* ****************************************************************** */
+
+int ppc_rtas_find_all_sensors (void)
+{
+ unsigned long *utmp;
+ int len, i, j;
+
+ utmp = (unsigned long *) get_property(rtas, "rtas-sensors", &len);
+ if (utmp == NULL) {
+ printk (KERN_ERR "error: could not get rtas-sensors\n");
+ return 1;
+ }
+
+ sensors.quant = len / 8; /* int + int */
+
+ for (i=0, j=0; j<sensors.quant; i+=2, j++) {
+ sensors.sensor[j].token = utmp[i];
+ sensors.sensor[j].quant = utmp[i+1];
+ }
+ return 0;
+}
+
+/* ****************************************************************** */
+/*
+ * Builds a string of what rtas returned
+ */
+char * ppc_rtas_process_error(int error)
+{
+ switch (error) {
+ case SENSOR_CRITICAL_HIGH:
+ return "(critical high)";
+ case SENSOR_WARNING_HIGH:
+ return "(warning high)";
+ case SENSOR_NORMAL:
+ return "(normal)";
+ case SENSOR_WARNING_LOW:
+ return "(warning low)";
+ case SENSOR_CRITICAL_LOW:
+ return "(critical low)";
+ case SENSOR_SUCCESS:
+ return "(read ok)";
+ case SENSOR_HW_ERROR:
+ return "(hardware error)";
+ case SENSOR_BUSY:
+ return "(busy)";
+ case SENSOR_NOT_EXIST:
+ return "(non existant)";
+ case SENSOR_DR_ENTITY:
+ return "(dr entity removed)";
+ default:
+ return "(UNKNOWN)";
+ }
+}
+
+/* ****************************************************************** */
+/*
+ * Builds a string out of what the sensor said
+ */
+
+int ppc_rtas_process_sensor(struct individual_sensor s, int state,
+ int error, char * buf)
+{
+ /* Defined return values */
+ const char * key_switch[] = { "Off\t", "Normal\t", "Secure\t", "Mainenance" };
+ const char * enclosure_switch[] = { "Closed", "Open" };
+ const char * lid_status[] = { " ", "Open", "Closed" };
+ const char * power_source[] = { "AC\t", "Battery", "AC & Battery" };
+ const char * battery_remaining[] = { "Very Low", "Low", "Mid", "High" };
+ const char * epow_sensor[] = {
+ "EPOW Reset", "Cooling warning", "Power warning",
+ "System shutdown", "System halt", "EPOW main enclosure",
+ "EPOW power off" };
+ const char * battery_cyclestate[] = { "None", "In progress", "Requested" };
+ const char * battery_charging[] = { "Charging", "Discharching", "No current flow" };
+ const char * ibm_drconnector[] = { "Empty", "Present" };
+ const char * ibm_intqueue[] = { "Disabled", "Enabled" };
+
+ int have_strings = 0;
+ int temperature = 0;
+ int unknown = 0;
+ int n = 0;
+
+ /* What kind of sensor do we have here? */
+ switch (s.token) {
+ case KEY_SWITCH:
+ n += sprintf(buf+n, "Key switch:\t");
+ n += sprintf(buf+n, "%s\t", key_switch[state]);
+ have_strings = 1;
+ break;
+ case ENCLOSURE_SWITCH:
+ n += sprintf(buf+n, "Enclosure switch:\t");
+ n += sprintf(buf+n, "%s\t", enclosure_switch[state]);
+ have_strings = 1;
+ break;
+ case THERMAL_SENSOR:
+ n += sprintf(buf+n, "Temp. (°C/°F):\t");
+ temperature = 1;
+ break;
+ case LID_STATUS:
+ n += sprintf(buf+n, "Lid status:\t");
+ n += sprintf(buf+n, "%s\t", lid_status[state]);
+ have_strings = 1;
+ break;
+ case POWER_SOURCE:
+ n += sprintf(buf+n, "Power source:\t");
+ n += sprintf(buf+n, "%s\t", power_source[state]);
+ have_strings = 1;
+ break;
+ case BATTERY_VOLTAGE:
+ n += sprintf(buf+n, "Battery voltage:\t");
+ break;
+ case BATTERY_REMAINING:
+ n += sprintf(buf+n, "Battery remaining:\t");
+ n += sprintf(buf+n, "%s\t", battery_remaining[state]);
+ have_strings = 1;
+ break;
+ case BATTERY_PERCENTAGE:
+ n += sprintf(buf+n, "Battery percentage:\t");
+ break;
+ case EPOW_SENSOR:
+ n += sprintf(buf+n, "EPOW Sensor:\t");
+ n += sprintf(buf+n, "%s\t", epow_sensor[state]);
+ have_strings = 1;
+ break;
+ case BATTERY_CYCLESTATE:
+ n += sprintf(buf+n, "Battery cyclestate:\t");
+ n += sprintf(buf+n, "%s\t", battery_cyclestate[state]);
+ have_strings = 1;
+ break;
+ case BATTERY_CHARGING:
+ n += sprintf(buf+n, "Battery Charging:\t");
+ n += sprintf(buf+n, "%s\t", battery_charging[state]);
+ have_strings = 1;
+ break;
+ case IBM_SURVEILLANCE:
+ n += sprintf(buf+n, "Surveillance:\t");
+ break;
+ case IBM_FANRPM:
+ n += sprintf(buf+n, "Fan (rpm):\t");
+ break;
+ case IBM_VOLTAGE:
+ n += sprintf(buf+n, "Voltage (mv):\t");
+ break;
+ case IBM_DRCONNECTOR:
+ n += sprintf(buf+n, "DR connector:\t");
+ n += sprintf(buf+n, "%s\t", ibm_drconnector[state]);
+ have_strings = 1;
+ break;
+ case IBM_POWERSUPPLY:
+ n += sprintf(buf+n, "Powersupply:\t");
+ break;
+ case IBM_INTQUEUE:
+ n += sprintf(buf+n, "Interrupt queue:\t");
+ n += sprintf(buf+n, "%s\t", ibm_intqueue[state]);
+ have_strings = 1;
+ break;
+ default:
+ n += sprintf(buf+n, "Unkown sensor (type %d), ignoring it\n",
+ s.token);
+ unknown = 1;
+ have_strings = 1;
+ break;
+ }
+ if (have_strings == 0) {
+ if (temperature) {
+ n += sprintf(buf+n, "%4d /%4d\t", state, cel_to_fahr(state));
+ } else
+ n += sprintf(buf+n, "%10d\t", state);
+ }
+ if (unknown == 0) {
+ n += sprintf ( buf+n, "%s\t", ppc_rtas_process_error(error));
+ n += get_location_code(s, buf+n);
+ }
+ return n;
+}
+
+/* ****************************************************************** */
+
+int check_location (char *c, int idx, char * buf)
+{
+ int n = 0;
+
+ switch (*(c+idx)) {
+ case LOC_PLANAR:
+ n += sprintf ( buf, "Planar #%c", *(c+idx+1));
+ break;
+ case LOC_CPU:
+ n += sprintf ( buf, "CPU #%c", *(c+idx+1));
+ break;
+ case LOC_FAN:
+ n += sprintf ( buf, "Fan #%c", *(c+idx+1));
+ break;
+ case LOC_RACKMOUNTED:
+ n += sprintf ( buf, "Rack #%c", *(c+idx+1));
+ break;
+ case LOC_VOLTAGE:
+ n += sprintf ( buf, "Voltage #%c", *(c+idx+1));
+ break;
+ case LOC_LCD:
+ n += sprintf ( buf, "LCD #%c", *(c+idx+1));
+ break;
+ case '.':
+ n += sprintf ( buf, "- %c", *(c+idx+1));
+ default:
+ n += sprintf ( buf, "Unknown location");
+ break;
+ }
+ return n;
+}
+
+
+/* ****************************************************************** */
+/*
+ * Format:
+ * ${LETTER}${NUMBER}[[-/]${LETTER}${NUMBER} [ ... ] ]
+ * the '.' may be an abbreviation
+ */
+int check_location_string (char *c, char *buf)
+{
+ int n=0,i=0;
+
+ while (c[i]) {
+ if (isalpha(c[i]) || c[i] == '.') {
+ n += check_location(c, i, buf+n);
+ }
+ else if (c[i] == '/' || c[i] == '-')
+ n += sprintf(buf+n, " at ");
+ i++;
+ }
+ return n;
+}
+
+
+/* ****************************************************************** */
+
+int get_location_code(struct individual_sensor s, char * buffer)
+{
+ char rstr[512], tmp[10], tmp2[10];
+ int n=0, i=0, llen, len;
+ /* char *buf = kmalloc(MAX_LINELENGTH, GFP_KERNEL); */
+ char *ret;
+
+ static int pos = 0; /* remember position where buffer was */
+
+ /* construct the sensor number like 0003 */
+ /* fill with zeros */
+ n = sprintf(tmp, "%d", s.token);
+ len = strlen(tmp);
+ while (strlen(tmp) < 4)
+ n += sprintf (tmp+n, "0");
+
+ /* invert the string */
+ while (tmp[i]) {
+ if (i<len)
+ tmp2[4-len+i] = tmp[i];
+ else
+ tmp2[3-i] = tmp[i];
+ i++;
+ }
+ tmp2[4] = '\0';
+
+ sprintf (rstr, SENSOR_PREFIX"%s", tmp2);
+
+ ret = (char *) get_property(rtas, rstr, &llen);
+
+ n=0;
+ if (ret[0] == '\0')
+ n += sprintf ( buffer+n, "--- ");/* does not have a location */
+ else {
+ char t[50];
+ ret += pos;
+
+ n += check_location_string(ret, buffer + n);
+ n += sprintf ( buffer+n, " ");
+ /* see how many characters we have printed */
+ sprintf ( t, "%s ", ret);
+
+ pos += strlen(t);
+ if (pos >= llen) pos=0;
+ }
+ return n;
+}
+/* ****************************************************************** */
+/* INDICATORS - Tone Frequency */
+/* ****************************************************************** */
+static ssize_t ppc_rtas_tone_freq_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long freq;
+ char *dest;
+ int error;
+ freq = simple_strtoul(buf, &dest, 10);
+ if (*dest != '\0' && *dest != '\n') {
+ printk("ppc_rtas_tone_freq_write: Invalid tone freqency\n");
+ return count;
+ }
+ if (freq < 0) freq = 0;
+ rtas_tone_frequency = freq; /* save it for later */
+ error = call_rtas("set-indicator", 3, 1, NULL,
+ TONE_FREQUENCY, 0, freq);
+ if (error != 0)
+ printk(KERN_WARNING "error: setting tone frequency returned: %s\n",
+ ppc_rtas_process_error(error));
+ return count;
+}
+/* ****************************************************************** */
+static ssize_t ppc_rtas_tone_freq_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos)
+{
+ int n;
+ n = sprintf(buf, "%lu\n", rtas_tone_frequency);
+
+ if (*ppos >= strlen(buf))
+ return 0;
+ if (n > strlen(buf) - *ppos)
+ n = strlen(buf) - *ppos;
+ if (n > count)
+ n = count;
+ *ppos += n;
+ return n;
+}
+/* ****************************************************************** */
+/* INDICATORS - Tone Volume */
+/* ****************************************************************** */
+static ssize_t ppc_rtas_tone_volume_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long volume;
+ char *dest;
+ int error;
+ volume = simple_strtoul(buf, &dest, 10);
+ if (*dest != '\0' && *dest != '\n') {
+ printk("ppc_rtas_tone_volume_write: Invalid tone volume\n");
+ return count;
+ }
+ if (volume < 0) volume = 0;
+ if (volume > 100) volume = 100;
+
+ rtas_tone_volume = volume; /* save it for later */
+ error = call_rtas("set-indicator", 3, 1, NULL,
+ TONE_VOLUME, 0, volume);
+ if (error != 0)
+ printk(KERN_WARNING "error: setting tone volume returned: %s\n",
+ ppc_rtas_process_error(error));
+ return count;
+}
+/* ****************************************************************** */
+static ssize_t ppc_rtas_tone_volume_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos)
+{
+ int n;
+ n = sprintf(buf, "%lu\n", rtas_tone_volume);
+
+ if (*ppos >= strlen(buf))
+ return 0;
+ if (n > strlen(buf) - *ppos)
+ n = strlen(buf) - *ppos;
+ if (n > count)
+ n = count;
+ *ppos += n;
+ return n;
+}
!(n--) ) return res->Devices+i;
#undef Dev
}
- return NULL;
+ return 0;
}
PPC_DEVICE __init *residual_find_device_id(unsigned long BusMask,
!(n--) ) return res->Devices+i;
#undef Dev
}
- return NULL;
+ return 0;
}
PnP_TAG_PACKET *PnP_find_packet(unsigned char *p,
int n)
{
unsigned mask, masked_tag, size;
- if(!p) return NULL;
+ if(!p) return 0;
if (tag_type(packet_tag)) mask=0xff; else mask=0xF8;
masked_tag = packet_tag&mask;
for(; *p != END_TAG; p+=size) {
else
size=tag_small_count(*p)+1;
}
- return NULL; /* not found */
+ return 0; /* not found */
}
PnP_TAG_PACKET __init *PnP_find_small_vendor_packet(unsigned char *p,
return (PnP_TAG_PACKET *) p;
next = 1;
};
- return NULL; /* not found */
+ return 0; /* not found */
}
PnP_TAG_PACKET __init *PnP_find_large_vendor_packet(unsigned char *p,
return (PnP_TAG_PACKET *) p;
next = 1;
};
- return NULL; /* not found */
+ return 0; /* not found */
}
#ifdef CONFIG_PROC_PREPRESIDUAL
+++ /dev/null
-/*
- * arch/ppc/platforms/rpx8260.c
- *
- * RPC EP8260 platform support
- *
- * Author: Dan Malek <dan@embeddededge.com>
- * Derived from: pq2ads_setup.c by Kumar
- *
- * Copyright 2004 Embedded Edge, LLC
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/config.h>
-#include <linux/seq_file.h>
-
-#include <asm/mpc8260.h>
-#include <asm/machdep.h>
-
-static void (*callback_setup_arch)(void);
-
-extern unsigned char __res[sizeof(bd_t)];
-
-extern void m8260_init(unsigned long r3, unsigned long r4,
- unsigned long r5, unsigned long r6, unsigned long r7);
-
-static int
-ep8260_show_cpuinfo(struct seq_file *m)
-{
- bd_t *binfo = (bd_t *)__res;
-
- seq_printf(m, "vendor\t\t: RPC\n"
- "machine\t\t: EP8260 PPC\n"
- "\n"
- "mem size\t\t: 0x%08x\n"
- "console baud\t\t: %d\n"
- "\n",
- binfo->bi_memsize,
- binfo->bi_baudrate);
- return 0;
-}
-
-static void __init
-ep8260_setup_arch(void)
-{
- printk("RPC EP8260 Port\n");
- callback_setup_arch();
-}
-
-void __init
-platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
- unsigned long r6, unsigned long r7)
-{
- /* Generic 8260 platform initialization */
- m8260_init(r3, r4, r5, r6, r7);
-
- /* Anything special for this platform */
- ppc_md.show_cpuinfo = ep8260_show_cpuinfo;
-
- callback_setup_arch = ppc_md.setup_arch;
- ppc_md.setup_arch = ep8260_setup_arch;
-}
* Copyright (c) 2001 Dan Malek <dan@embeddededge.com>
*/
#ifdef __KERNEL__
-#ifndef __ASM_PLATFORMS_RPX8260_H__
-#define __ASM_PLATFORMS_RPX8260_H__
+#ifndef __ASM_PLATFORMS_RPXSUPER_H__
+#define __ASM_PLATFORMS_RPXSUPER_H__
/* A Board Information structure that is given to a program when
* prom starts it up.
#define BCSR4_EN_MII ((u_char)0x40) /* Enable PHY */
#define BCSR4_MII_READ ((u_char)0x04)
#define BCSR4_MII_MDC ((u_char)0x02)
-#define BCSR4_MII_MDIO ((u_char)0x01)
+#define BCSR4_MII_MDIO ((u_char)0x02)
#define BCSR13_FETH_IRQMASK ((u_char)0xf0)
#define BCSR15_FETH_IRQ ((u_char)0x20)
-#define PHY_INTERRUPT SIU_INT_IRQ7
-
-#endif /* __ASM_PLATFORMS_RPX8260_H__ */
+#endif /* __ASM_PLATFORMS_RPXSUPER_H__ */
#endif /* __KERNEL__ */
# Makefile for the linux kernel.
#
+ifdef CONFIG_PPC64BRIDGE
+EXTRA_AFLAGS := -Wa,-mppc64bridge
+endif
+ifdef CONFIG_4xx
+EXTRA_AFLAGS := -Wa,-m405
+endif
+ifdef CONFIG_E500
+EXTRA_AFLAGS := -Wa,-me500
+endif
+
CFLAGS_prom_init.o += -fPIC
CFLAGS_btext.o += -fPIC
obj-$(CONFIG_4xx) += ppc4xx_pic.o
obj-$(CONFIG_40x) += ppc4xx_setup.o
obj-$(CONFIG_GEN_RTC) += todc_time.o
-obj-$(CONFIG_PPC4xx_DMA) += ppc4xx_dma.o
-obj-$(CONFIG_PPC4xx_EDMA) += ppc4xx_sgdma.o
+obj-$(CONFIG_KGDB) += ppc4xx_kgdb.o
ifeq ($(CONFIG_40x),y)
obj-$(CONFIG_KGDB) += ppc4xx_kgdb.o
obj-$(CONFIG_PCI) += indirect_pci.o pci_auto.o ppc405_pci.o
obj-$(CONFIG_SBC82xx) += todc_time.o
obj-$(CONFIG_SPRUCE) += cpc700_pic.o indirect_pci.o pci_auto.o \
todc_time.o
-obj-$(CONFIG_8260) += m8260_setup.o
+obj-$(CONFIG_8260) += m8260_setup.o cpm2_pic.o
obj-$(CONFIG_PCI_8260) += m8260_pci.o indirect_pci.o
obj-$(CONFIG_8260_PCI9) += m8260_pci_erratum9.o
-obj-$(CONFIG_CPM2) += cpm2_common.o cpm2_pic.o
+obj-$(CONFIG_CPM2) += cpm2_common.o
ifeq ($(CONFIG_PPC_GEN550),y)
obj-$(CONFIG_KGDB) += gen550_kgdb.o gen550_dbg.o
obj-$(CONFIG_SERIAL_TEXT_DEBUG) += gen550_dbg.o
ifeq ($(CONFIG_85xx),y)
obj-$(CONFIG_PCI) += indirect_pci.o pci_auto.o
endif
-obj-$(CONFIG_PPC_MPC52xx) += mpc52xx_setup.o mpc52xx_pic.o
*/
cpm2_map_t *cpm2_immr;
-#define CPM_MAP_SIZE (0x40000) /* 256k - the PQ3 reserve this amount
- of space for CPM as it is larger
- than on PQ2 */
-
void
cpm2_reset(void)
{
- cpm2_immr = (cpm2_map_t *)ioremap(CPM_MAP_ADDR, CPM_MAP_SIZE);
+ cpm2_immr = (cpm2_map_t *)CPM_MAP_ADDR;
/* Reclaim the DP memory for our use.
*/
* oversampled clock.
*/
void
-cpm_setbrg(uint brg, uint rate)
+cpm2_setbrg(uint brg, uint rate)
{
volatile uint *bp;
static void cpm2_dpinit(void)
{
+ void *dprambase = &((cpm2_map_t *)CPM_MAP_ADDR)->im_dprambase;
+
spin_lock_init(&cpm_dpmem_lock);
/* initialize the info header */
* varies with the processor and the microcode patches activated.
* But the following should be at least safe.
*/
- rh_attach_region(&cpm_dpmem_info, (void *)CPM_DATAONLY_BASE,
+ rh_attach_region(&cpm_dpmem_info, dprambase + CPM_DATAONLY_BASE,
CPM_DATAONLY_SIZE);
}
-/* This function returns an index into the DPRAM area.
+/* This function used to return an index into the DPRAM area.
+ * Now it returns the actual physical address of that area.
+ * use cpm2_dpram_offset() to get the index
*/
-uint cpm_dpalloc(uint size, uint align)
+void *cpm2_dpalloc(uint size, uint align)
{
void *start;
unsigned long flags;
start = rh_alloc(&cpm_dpmem_info, size, "commproc");
spin_unlock_irqrestore(&cpm_dpmem_lock, flags);
- return (uint)start;
+ return start;
}
-EXPORT_SYMBOL(cpm_dpalloc);
+EXPORT_SYMBOL(cpm2_dpalloc);
-int cpm_dpfree(uint offset)
+int cpm2_dpfree(void *addr)
{
int ret;
unsigned long flags;
spin_lock_irqsave(&cpm_dpmem_lock, flags);
- ret = rh_free(&cpm_dpmem_info, (void *)offset);
+ ret = rh_free(&cpm_dpmem_info, addr);
spin_unlock_irqrestore(&cpm_dpmem_lock, flags);
return ret;
}
-EXPORT_SYMBOL(cpm_dpfree);
+EXPORT_SYMBOL(cpm2_dpfree);
/* not sure if this is ever needed */
-uint cpm_dpalloc_fixed(uint offset, uint size, uint align)
+void *cpm2_dpalloc_fixed(void *addr, uint size, uint align)
{
void *start;
unsigned long flags;
spin_lock_irqsave(&cpm_dpmem_lock, flags);
cpm_dpmem_info.alignment = align;
- start = rh_alloc_fixed(&cpm_dpmem_info, (void *)offset, size, "commproc");
+ start = rh_alloc_fixed(&cpm_dpmem_info, addr, size, "commproc");
spin_unlock_irqrestore(&cpm_dpmem_lock, flags);
- return (uint)start;
+ return start;
}
-EXPORT_SYMBOL(cpm_dpalloc_fixed);
+EXPORT_SYMBOL(cpm2_dpalloc_fixed);
-void cpm_dpdump(void)
+void cpm2_dpdump(void)
{
rh_dump(&cpm_dpmem_info);
}
-EXPORT_SYMBOL(cpm_dpdump);
+EXPORT_SYMBOL(cpm2_dpdump);
+
+uint cpm2_dpram_offset(void *addr)
+{
+ return (uint)((u_char *)addr -
+ ((uint)((cpm2_map_t *)CPM_MAP_ADDR)->im_dprambase));
+}
+EXPORT_SYMBOL(cpm2_dpram_offset);
-void *cpm_dpram_addr(uint offset)
+void *cpm2_dpram_addr(int offset)
{
- return (void *)&cpm2_immr->im_dprambase[offset];
+ return (void *)&((cpm2_map_t *)CPM_MAP_ADDR)->im_dprambase[offset];
}
-EXPORT_SYMBOL(cpm_dpram_addr);
+EXPORT_SYMBOL(cpm2_dpram_addr);
volatile cpm2_map_t *immap = cpm2_immr;
/* allocate IDMA dpram */
- dpram_offset = cpm_dpalloc(sizeof(idma_dpram_t), 64);
- idma_dpram = cpm_dpram_addr(dpram_offset);
+ dpram_offset = cpm2_dpalloc(sizeof(idma_dpram_t), 64);
+ idma_dpram =
+ (volatile idma_dpram_t *)&immap->im_dprambase[dpram_offset];
/* initialize the IDMA parameter RAM */
memset((void *)idma_dpram, 0, sizeof(idma_dpram_t));
+++ /dev/null
-/*
- * arch/ppc/syslib/mpc52xx_pic.c
- *
- * Programmable Interrupt Controller functions for the Freescale MPC52xx
- * embedded CPU.
- *
- *
- * Maintainer : Sylvain Munaut <tnt@246tNt.com>
- *
- * Based on (well, mostly copied from) the code from the 2.4 kernel by
- * Dale Farnsworth <dfarnsworth@mvista.com> and Kent Borg.
- *
- * Copyright (C) 2004 Sylvain Munaut <tnt@246tNt.com>
- * Copyright (C) 2003 Montavista Software, Inc
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#include <linux/stddef.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/signal.h>
-#include <linux/stddef.h>
-#include <linux/delay.h>
-#include <linux/irq.h>
-
-#include <asm/io.h>
-#include <asm/processor.h>
-#include <asm/system.h>
-#include <asm/irq.h>
-#include <asm/mpc52xx.h>
-
-
-static struct mpc52xx_intr *intr;
-static struct mpc52xx_sdma *sdma;
-
-static void
-mpc52xx_ic_disable(unsigned int irq)
-{
- u32 val;
-
- if (irq == MPC52xx_IRQ0) {
- val = in_be32(&intr->ctrl);
- val &= ~(1 << 11);
- out_be32(&intr->ctrl, val);
- }
- else if (irq < MPC52xx_IRQ1) {
- BUG();
- }
- else if (irq <= MPC52xx_IRQ3) {
- val = in_be32(&intr->ctrl);
- val &= ~(1 << (10 - (irq - MPC52xx_IRQ1)));
- out_be32(&intr->ctrl, val);
- }
- else if (irq < MPC52xx_SDMA_IRQ_BASE) {
- val = in_be32(&intr->main_mask);
- val |= 1 << (16 - (irq - MPC52xx_MAIN_IRQ_BASE));
- out_be32(&intr->main_mask, val);
- }
- else if (irq < MPC52xx_PERP_IRQ_BASE) {
- val = in_be32(&sdma->IntMask);
- val |= 1 << (irq - MPC52xx_SDMA_IRQ_BASE);
- out_be32(&sdma->IntMask, val);
- }
- else {
- val = in_be32(&intr->per_mask);
- val |= 1 << (31 - (irq - MPC52xx_PERP_IRQ_BASE));
- out_be32(&intr->per_mask, val);
- }
-}
-
-static void
-mpc52xx_ic_enable(unsigned int irq)
-{
- u32 val;
-
- if (irq == MPC52xx_IRQ0) {
- val = in_be32(&intr->ctrl);
- val |= 1 << 11;
- out_be32(&intr->ctrl, val);
- }
- else if (irq < MPC52xx_IRQ1) {
- BUG();
- }
- else if (irq <= MPC52xx_IRQ3) {
- val = in_be32(&intr->ctrl);
- val |= 1 << (10 - (irq - MPC52xx_IRQ1));
- out_be32(&intr->ctrl, val);
- }
- else if (irq < MPC52xx_SDMA_IRQ_BASE) {
- val = in_be32(&intr->main_mask);
- val &= ~(1 << (16 - (irq - MPC52xx_MAIN_IRQ_BASE)));
- out_be32(&intr->main_mask, val);
- }
- else if (irq < MPC52xx_PERP_IRQ_BASE) {
- val = in_be32(&sdma->IntMask);
- val &= ~(1 << (irq - MPC52xx_SDMA_IRQ_BASE));
- out_be32(&sdma->IntMask, val);
- }
- else {
- val = in_be32(&intr->per_mask);
- val &= ~(1 << (31 - (irq - MPC52xx_PERP_IRQ_BASE)));
- out_be32(&intr->per_mask, val);
- }
-}
-
-static void
-mpc52xx_ic_ack(unsigned int irq)
-{
- u32 val;
-
- /*
- * Only some irqs are reset here, others in interrupting hardware.
- */
-
- switch (irq) {
- case MPC52xx_IRQ0:
- val = in_be32(&intr->ctrl);
- val |= 0x08000000;
- out_be32(&intr->ctrl, val);
- break;
- case MPC52xx_CCS_IRQ:
- val = in_be32(&intr->enc_status);
- val |= 0x00000400;
- out_be32(&intr->enc_status, val);
- break;
- case MPC52xx_IRQ1:
- val = in_be32(&intr->ctrl);
- val |= 0x04000000;
- out_be32(&intr->ctrl, val);
- break;
- case MPC52xx_IRQ2:
- val = in_be32(&intr->ctrl);
- val |= 0x02000000;
- out_be32(&intr->ctrl, val);
- break;
- case MPC52xx_IRQ3:
- val = in_be32(&intr->ctrl);
- val |= 0x01000000;
- out_be32(&intr->ctrl, val);
- break;
- default:
- if (irq >= MPC52xx_SDMA_IRQ_BASE
- && irq < (MPC52xx_SDMA_IRQ_BASE + MPC52xx_SDMA_IRQ_NUM)) {
- out_be32(&sdma->IntPend,
- 1 << (irq - MPC52xx_SDMA_IRQ_BASE));
- }
- break;
- }
-}
-
-static void
-mpc52xx_ic_disable_and_ack(unsigned int irq)
-{
- mpc52xx_ic_disable(irq);
- mpc52xx_ic_ack(irq);
-}
-
-static void
-mpc52xx_ic_end(unsigned int irq)
-{
- if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
- mpc52xx_ic_enable(irq);
-}
-
-static struct hw_interrupt_type mpc52xx_ic = {
- "MPC52xx",
- NULL, /* startup(irq) */
- NULL, /* shutdown(irq) */
- mpc52xx_ic_enable, /* enable(irq) */
- mpc52xx_ic_disable, /* disable(irq) */
- mpc52xx_ic_disable_and_ack, /* disable_and_ack(irq) */
- mpc52xx_ic_end, /* end(irq) */
- 0 /* set_affinity(irq, cpumask) SMP. */
-};
-
-void __init
-mpc52xx_init_irq(void)
-{
- int i;
-
- /* Remap the necessary zones */
- intr = (struct mpc52xx_intr *)
- ioremap(MPC52xx_INTR, sizeof(struct mpc52xx_intr));
- sdma = (struct mpc52xx_sdma *)
- ioremap(MPC52xx_SDMA, sizeof(struct mpc52xx_sdma));
-
- if ((intr==NULL) || (sdma==NULL))
- panic("Can't ioremap PIC/SDMA register for init_irq !");
-
- /* Disable all interrupt sources. */
- out_be32(&sdma->IntPend, 0xffffffff); /* 1 means clear pending */
- out_be32(&sdma->IntMask, 0xffffffff); /* 1 means disabled */
- out_be32(&intr->per_mask, 0x7ffffc00); /* 1 means disabled */
- out_be32(&intr->main_mask, 0x00010fff); /* 1 means disabled */
- out_be32(&intr->ctrl,
- 0x0f000000 | /* clear IRQ 0-3 */
- 0x00c00000 | /* IRQ0: level-sensitive, active low */
- 0x00001000 | /* MEE master external enable */
- 0x00000000 | /* 0 means disable IRQ 0-3 */
- 0x00000001); /* CEb route critical normally */
-
- /* Zero a bunch of the priority settings. */
- out_be32(&intr->per_pri1, 0);
- out_be32(&intr->per_pri2, 0);
- out_be32(&intr->per_pri3, 0);
- out_be32(&intr->main_pri1, 0);
- out_be32(&intr->main_pri2, 0);
-
- /* Initialize irq_desc[i].handler's with mpc52xx_ic. */
- for (i = 0; i < NR_IRQS; i++) {
- irq_desc[i].handler = &mpc52xx_ic;
- irq_desc[i].status = IRQ_LEVEL;
- }
-}
-
-int
-mpc52xx_get_irq(struct pt_regs *regs)
-{
- u32 status;
- int irq = -1;
-
- status = in_be32(&intr->enc_status);
-
- if (status & 0x00000400) { /* critical */
- irq = (status >> 8) & 0x3;
- if (irq == 2) /* high priority peripheral */
- goto peripheral;
- irq += MPC52xx_CRIT_IRQ_BASE;
- }
- else if (status & 0x00200000) { /* main */
- irq = (status >> 16) & 0x1f;
- if (irq == 4) /* low priority peripheral */
- goto peripheral;
- irq += MPC52xx_MAIN_IRQ_BASE;
- }
- else if (status & 0x20000000) { /* peripheral */
-peripheral:
- irq = (status >> 24) & 0x1f;
- if (irq == 0) { /* bestcomm */
- status = in_be32(&sdma->IntPend);
- irq = ffs(status) + MPC52xx_SDMA_IRQ_BASE-1;
- }
- else
- irq += MPC52xx_PERP_IRQ_BASE;
- }
-
- return irq;
-}
-
+++ /dev/null
-/*
- * arch/ppc/syslib/mpc52xx_common.c
- *
- * Common code for the boards based on Freescale MPC52xx embedded CPU.
- *
- *
- * Maintainer : Sylvain Munaut <tnt@246tNt.com>
- *
- * Support for other bootloaders than UBoot by Dale Farnsworth
- * <dfarnsworth@mvista.com>
- *
- * Copyright (C) 2004 Sylvain Munaut <tnt@246tNt.com>
- * Copyright (C) 2003 Montavista Software, Inc
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#include <linux/config.h>
-
-#include <asm/time.h>
-#include <asm/mpc52xx.h>
-#include <asm/mpc52xx_psc.h>
-#include <asm/ocp.h>
-#include <asm/ppcboot.h>
-
-extern bd_t __res;
-
-static int core_mult[] = { /* CPU Frequency multiplier, taken */
- 0, 0, 0, 10, 20, 20, 25, 45, /* from the datasheet used to compute */
- 30, 55, 40, 50, 0, 60, 35, 0, /* CPU frequency from XLB freq and */
- 30, 25, 65, 10, 70, 20, 75, 45, /* external jumper config */
- 0, 55, 40, 50, 80, 60, 35, 0
-};
-
-void
-mpc52xx_restart(char *cmd)
-{
- struct mpc52xx_gpt* gpt0 = (struct mpc52xx_gpt*) MPC52xx_GPTx(0);
-
- local_irq_disable();
-
- /* Turn on the watchdog and wait for it to expire. It effectively
- does a reset */
- if (gpt0 != NULL) {
- out_be32(&gpt0->count, 0x000000ff);
- out_be32(&gpt0->mode, 0x00009004);
- } else
- printk(KERN_ERR "mpc52xx_restart: Unable to ioremap GPT0 registers, -> looping ...");
-
- while (1);
-}
-
-void
-mpc52xx_halt(void)
-{
- local_irq_disable();
-
- while (1);
-}
-
-void
-mpc52xx_power_off(void)
-{
- /* By default we don't have any way of shut down.
- If a specific board wants to, it can set the power down
- code to any hardware implementation dependent code */
- mpc52xx_halt();
-}
-
-
-void __init
-mpc52xx_set_bat(void)
-{
- /* Set BAT 2 to map the 0xf0000000 area */
- /* This mapping is used during mpc52xx_progress,
- * mpc52xx_find_end_of_memory, and UARTs/GPIO access for debug
- */
- mb();
- mtspr(DBAT2U, 0xf0001ffe);
- mtspr(DBAT2L, 0xf000002a);
- mb();
-}
-
-void __init
-mpc52xx_map_io(void)
-{
- /* Here we only map the MBAR */
- io_block_mapping(
- MPC52xx_MBAR_VIRT, MPC52xx_MBAR, MPC52xx_MBAR_SIZE, _PAGE_IO);
-}
-
-
-#ifdef CONFIG_SERIAL_TEXT_DEBUG
-#ifdef MPC52xx_PF_CONSOLE_PORT
-#define MPC52xx_CONSOLE MPC52xx_PSCx(MPC52xx_PF_CONSOLE_PORT)
-#else
-#error "mpc52xx PSC for console not selected"
-#endif
-
-void
-mpc52xx_progress(char *s, unsigned short hex)
-{
- struct mpc52xx_psc *psc = (struct mpc52xx_psc *)MPC52xx_CONSOLE;
- char c;
-
- /* Don't we need to disable serial interrupts ? */
-
- while ((c = *s++) != 0) {
- if (c == '\n') {
- while (!(in_be16(&psc->mpc52xx_psc_status) &
- MPC52xx_PSC_SR_TXRDY)) ;
- out_8(&psc->mpc52xx_psc_buffer_8, '\r');
- }
- while (!(in_be16(&psc->mpc52xx_psc_status) &
- MPC52xx_PSC_SR_TXRDY)) ;
- out_8(&psc->mpc52xx_psc_buffer_8, c);
- }
-}
-
-#endif /* CONFIG_SERIAL_TEXT_DEBUG */
-
-
-unsigned long __init
-mpc52xx_find_end_of_memory(void)
-{
- u32 ramsize = __res.bi_memsize;
-
- /*
- * if bootloader passed a memsize, just use it
- * else get size from sdram config registers
- */
- if (ramsize == 0) {
- struct mpc52xx_mmap_ctl *mmap_ctl;
- u32 sdram_config_0, sdram_config_1;
-
- /* Temp BAT2 mapping active when this is called ! */
- mmap_ctl = (struct mpc52xx_mmap_ctl*) MPC52xx_MMAP_CTL;
-
- sdram_config_0 = in_be32(&mmap_ctl->sdram0);
- sdram_config_1 = in_be32(&mmap_ctl->sdram1);
-
- if ((sdram_config_0 & 0x1f) >= 0x13)
- ramsize = 1 << ((sdram_config_0 & 0xf) + 17);
-
- if (((sdram_config_1 & 0x1f) >= 0x13) &&
- ((sdram_config_1 & 0xfff00000) == ramsize))
- ramsize += 1 << ((sdram_config_1 & 0xf) + 17);
-
- iounmap(mmap_ctl);
- }
-
- return ramsize;
-}
-
-void __init
-mpc52xx_calibrate_decr(void)
-{
- int current_time, previous_time;
- int tbl_start, tbl_end;
- unsigned int xlbfreq, cpufreq, ipbfreq, pcifreq, divisor;
-
- xlbfreq = __res.bi_busfreq;
- /* if bootloader didn't pass bus frequencies, calculate them */
- if (xlbfreq == 0) {
- /* Get RTC & Clock manager modules */
- struct mpc52xx_rtc *rtc;
- struct mpc52xx_cdm *cdm;
-
- rtc = (struct mpc52xx_rtc*)
- ioremap(MPC52xx_RTC, sizeof(struct mpc52xx_rtc));
- cdm = (struct mpc52xx_cdm*)
- ioremap(MPC52xx_CDM, sizeof(struct mpc52xx_cdm));
-
- if ((rtc==NULL) || (cdm==NULL))
- panic("Can't ioremap RTC/CDM while computing bus freq");
-
- /* Count bus clock during 1/64 sec */
- out_be32(&rtc->dividers, 0x8f1f0000); /* Set RTC 64x faster */
- previous_time = in_be32(&rtc->time);
- while ((current_time = in_be32(&rtc->time)) == previous_time) ;
- tbl_start = get_tbl();
- previous_time = current_time;
- while ((current_time = in_be32(&rtc->time)) == previous_time) ;
- tbl_end = get_tbl();
- out_be32(&rtc->dividers, 0xffff0000); /* Restore RTC */
-
- /* Compute all frequency from that & CDM settings */
- xlbfreq = (tbl_end - tbl_start) << 8;
- cpufreq = (xlbfreq * core_mult[in_be32(&cdm->rstcfg)&0x1f])/10;
- ipbfreq = (in_8(&cdm->ipb_clk_sel) & 1) ?
- xlbfreq / 2 : xlbfreq;
- switch (in_8(&cdm->pci_clk_sel) & 3) {
- case 0:
- pcifreq = ipbfreq;
- break;
- case 1:
- pcifreq = ipbfreq / 2;
- break;
- default:
- pcifreq = xlbfreq / 4;
- break;
- }
- __res.bi_busfreq = xlbfreq;
- __res.bi_intfreq = cpufreq;
- __res.bi_ipbfreq = ipbfreq;
- __res.bi_pcifreq = pcifreq;
-
- /* Release mapping */
- iounmap((void*)rtc);
- iounmap((void*)cdm);
- }
-
- divisor = 4;
-
- tb_ticks_per_jiffy = xlbfreq / HZ / divisor;
- tb_to_us = mulhwu_scale_factor(xlbfreq / divisor, 1000000);
-}
-
-
-void __init
-mpc52xx_add_board_devices(struct ocp_def board_ocp[]) {
- while (board_ocp->vendor != OCP_VENDOR_INVALID)
- if(ocp_add_one_device(board_ocp++))
- printk("mpc5200-ocp: Failed to add board device !\n");
-}
-
/* Timer Interrupts */
static void openpic_inittimer(u_int timer, u_int pri, u_int vector);
-static void openpic_maptimer(u_int timer, cpumask_t cpumask);
+static void openpic_maptimer(u_int timer, u_int cpumask);
/* Interrupt Sources */
static void openpic_enable_irq(u_int irq);
static void openpic_disable_irq(u_int irq);
static void openpic_initirq(u_int irq, u_int pri, u_int vector, int polarity,
int is_level);
-static void openpic_mapirq(u_int irq, cpumask_t cpumask, cpumask_t keepmask);
+static void openpic_mapirq(u_int irq, u_int cpumask, u_int keepmask);
/*
* These functions are not used but the code is kept here
*/
static void openpic_end_irq(unsigned int irq_nr);
static void openpic_ack_irq(unsigned int irq_nr);
-static void openpic_set_affinity(unsigned int irq_nr, cpumask_t cpumask);
+static void openpic_set_affinity(unsigned int irq_nr, unsigned long cpumask);
struct hw_interrupt_type open_pic = {
- .typename = " OpenPIC ",
- .enable = openpic_enable_irq,
- .disable = openpic_disable_irq,
- .ack = openpic_ack_irq,
- .end = openpic_end_irq,
- .set_affinity = openpic_set_affinity,
+ " OpenPIC ",
+ NULL,
+ NULL,
+ openpic_enable_irq,
+ openpic_disable_irq,
+ openpic_ack_irq,
+ openpic_end_irq,
+ openpic_set_affinity
};
#ifdef CONFIG_SMP
static void openpic_disable_ipi(unsigned int irq_nr);
struct hw_interrupt_type open_pic_ipi = {
- .typename = " OpenPIC ",
- .enable = openpic_enable_ipi,
- .disable = openpic_disable_ipi,
- .ack = openpic_ack_ipi,
- .end = openpic_end_ipi,
+ " OpenPIC ",
+ NULL,
+ NULL,
+ openpic_enable_ipi,
+ openpic_disable_ipi,
+ openpic_ack_ipi,
+ openpic_end_ipi,
+ 0
};
#endif /* CONFIG_SMP */
/* Disabled, Priority 0 */
openpic_inittimer(i, 0, OPENPIC_VEC_TIMER+i+offset);
/* No processor */
- openpic_maptimer(i, CPU_MASK_NONE);
+ openpic_maptimer(i, 0);
}
#ifdef CONFIG_SMP
openpic_initirq(i, 8, i+offset, (sense & IRQ_POLARITY_MASK),
(sense & IRQ_SENSE_MASK));
/* Processor 0 */
- openpic_mapirq(i, CPU_MASK_CPU0, CPU_MASK_NONE);
+ openpic_mapirq(i, 1<<0, 0);
}
/* Init descriptors */
/*
* Convert a cpu mask from logical to physical cpu numbers.
*/
-static inline cpumask_t physmask(cpumask_t cpumask)
+static inline u32 physmask(u32 cpumask)
{
int i;
- cpumask_t mask = CPU_MASK_NONE;
-
- cpus_and(cpumask, cpu_online_map, cpumask);
-
- for (i = 0; i < NR_CPUS; i++)
- if (cpu_isset(i, cpumask))
- cpu_set(smp_hw_index[i], mask);
+ u32 mask = 0;
+ for (i = 0; i < NR_CPUS; ++i, cpumask >>= 1)
+ if (cpu_online(i))
+ mask |= (cpumask & 1) << smp_hw_index[i];
return mask;
}
#else
* Externally called, however, it takes an IPI number (0...OPENPIC_NUM_IPI)
* and not a system-wide interrupt number
*/
-void openpic_cause_IPI(u_int ipi, cpumask_t cpumask)
+void openpic_cause_IPI(u_int ipi, u_int cpumask)
{
- cpumask_t phys;
DECL_THIS_CPU;
CHECK_THIS_CPU;
check_arg_ipi(ipi);
- phys = physmask(cpumask);
openpic_write(&OpenPIC->THIS_CPU.IPI_Dispatch(ipi),
- cpus_addr(physmask(cpumask))[0]);
+ physmask(cpumask));
}
void openpic_request_IPIs(void)
/* IPIs are marked SA_INTERRUPT as they must run with irqs disabled */
request_irq(OPENPIC_VEC_IPI+open_pic_irq_offset,
openpic_ipi_action, SA_INTERRUPT,
- "IPI0 (call function)", NULL);
+ "IPI0 (call function)", 0);
request_irq(OPENPIC_VEC_IPI+open_pic_irq_offset+1,
openpic_ipi_action, SA_INTERRUPT,
- "IPI1 (reschedule)", NULL);
+ "IPI1 (reschedule)", 0);
request_irq(OPENPIC_VEC_IPI+open_pic_irq_offset+2,
openpic_ipi_action, SA_INTERRUPT,
- "IPI2 (invalidate tlb)", NULL);
+ "IPI2 (invalidate tlb)", 0);
request_irq(OPENPIC_VEC_IPI+open_pic_irq_offset+3,
openpic_ipi_action, SA_INTERRUPT,
- "IPI3 (xmon break)", NULL);
+ "IPI3 (xmon break)", 0);
for ( i = 0; i < OPENPIC_NUM_IPI ; i++ )
openpic_enable_ipi(OPENPIC_VEC_IPI+open_pic_irq_offset+i);
{
#ifdef CONFIG_IRQ_ALL_CPUS
int i;
- cpumask_t msk = CPU_MASK_NONE;
+ u32 msk;
#endif
spin_lock(&openpic_setup_lock);
#ifdef CONFIG_IRQ_ALL_CPUS
- cpu_set(smp_hw_index[smp_processor_id()], msk);
+ msk = 1 << smp_hw_index[smp_processor_id()];
/* let the openpic know we want intrs. default affinity
* is 0xffffffff until changed via /proc
* in irq.c.
*/
for (i = 0; i < NumSources; i++)
- openpic_mapirq(i, msk, CPU_MASK_ALL);
+ openpic_mapirq(i, msk, ~0U);
#endif /* CONFIG_IRQ_ALL_CPUS */
openpic_set_priority(0);
/*
* Map a timer interrupt to one or more CPUs
*/
-static void __init openpic_maptimer(u_int timer, cpumask_t cpumask)
+static void __init openpic_maptimer(u_int timer, u_int cpumask)
{
- cpumask_t phys = physmask(cpumask);
check_arg_timer(timer);
openpic_write(&OpenPIC->Global.Timer[timer].Destination,
- cpus_addr(phys)[0]);
+ physmask(cpumask));
}
/*
/*
* Map an interrupt source to one or more CPUs
*/
-static void openpic_mapirq(u_int irq, cpumask_t physmask, cpumask_t keepmask)
+static void openpic_mapirq(u_int irq, u_int physmask, u_int keepmask)
{
if (ISR[irq] == 0)
return;
- if (!cpus_empty(keepmask)) {
- cpumask_t irqdest = { .bits[0] = openpic_read(&ISR[irq]->Destination) };
- cpus_and(irqdest, irqdest, keepmask);
- cpus_or(physmask, physmask, irqdest);
- }
- openpic_write(&ISR[irq]->Destination, cpus_addr(physmask)[0]);
+ if (keepmask != 0)
+ physmask |= openpic_read(&ISR[irq]->Destination) & keepmask;
+ openpic_write(&ISR[irq]->Destination, physmask);
}
#ifdef notused
#endif
}
-static void openpic_set_affinity(unsigned int irq_nr, cpumask_t cpumask)
+static void openpic_set_affinity(unsigned int irq_nr, unsigned long cpumask)
{
- openpic_mapirq(irq_nr - open_pic_irq_offset, physmask(cpumask), CPU_MASK_NONE);
+ openpic_mapirq(irq_nr - open_pic_irq_offset, physmask(cpumask), 0);
}
#ifdef CONFIG_SMP
void
smp_openpic_message_pass(int target, int msg, unsigned long data, int wait)
{
- cpumask_t mask = CPU_MASK_ALL;
/* make sure we're sending something that translates to an IPI */
if (msg > 0x3) {
printk("SMP %d: smp_message_pass: unknown msg %d\n",
}
switch (target) {
case MSG_ALL:
- openpic_cause_IPI(msg, mask);
+ openpic_cause_IPI(msg, 0xffffffff);
break;
case MSG_ALL_BUT_SELF:
- cpu_clear(smp_processor_id(), mask);
- openpic_cause_IPI(msg, mask);
+ openpic_cause_IPI(msg,
+ 0xffffffff & ~(1 << smp_processor_id()));
break;
default:
- openpic_cause_IPI(msg, cpumask_of_cpu(target));
+ openpic_cause_IPI(msg, 1<<target);
break;
}
}
/*
- * arch/ppc/kernel/ppc4xx_dma.c
+ * Author: Pete Popov <ppopov@mvista.com> or source@mvista.com
*
- * IBM PPC4xx DMA engine core library
+ * arch/ppc/kernel/ppc405_dma.c
*
- * Copyright 2000-2004 MontaVista Software Inc.
+ * 2000 (c) MontaVista, Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
*
- * Cleaned up and converted to new DCR access
- * Matt Porter <mporter@kernel.crashing.org>
- *
- * Original code by Armin Kuster <akuster@mvista.com>
- * and Pete Popov <ppopov@mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
+ * IBM 405 DMA Controller Functions
*/
#include <linux/config.h>
#include <linux/kernel.h>
+#include <asm/system.h>
+#include <asm/io.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/init.h>
#include <linux/module.h>
-#include <asm/system.h>
-#include <asm/io.h>
-#include <asm/ppc4xx_dma.h>
-
-ppc_dma_ch_t dma_channels[MAX_PPC4xx_DMA_CHANNELS];
-
-int
-ppc4xx_get_dma_status(void)
-{
- return (mfdcr(DCRN_DMASR));
-}
-
-void
-ppc4xx_set_src_addr(int dmanr, phys_addr_t src_addr)
-{
- if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
- printk("set_src_addr: bad channel: %d\n", dmanr);
- return;
- }
-
-#ifdef PPC4xx_DMA64BIT
- mtdcr(DCRN_DMASAH0 + dmanr*2, (u32)(src_addr >> 32));
-#else
- mtdcr(DCRN_DMASA0 + dmanr*2, (u32)src_addr);
-#endif
-}
-
-void
-ppc4xx_set_dst_addr(int dmanr, phys_addr_t dst_addr)
-{
- if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
- printk("set_dst_addr: bad channel: %d\n", dmanr);
- return;
- }
-
-#ifdef PPC4xx_DMA64BIT
- mtdcr(DCRN_DMADAH0 + dmanr*2, (u32)(dst_addr >> 32));
-#else
- mtdcr(DCRN_DMADA0 + dmanr*2, (u32)dst_addr);
-#endif
-}
-
-void
-ppc4xx_enable_dma(unsigned int dmanr)
-{
- unsigned int control;
- ppc_dma_ch_t *p_dma_ch = &dma_channels[dmanr];
- unsigned int status_bits[] = { DMA_CS0 | DMA_TS0 | DMA_CH0_ERR,
- DMA_CS1 | DMA_TS1 | DMA_CH1_ERR,
- DMA_CS2 | DMA_TS2 | DMA_CH2_ERR,
- DMA_CS3 | DMA_TS3 | DMA_CH3_ERR};
-
- if (p_dma_ch->in_use) {
- printk("enable_dma: channel %d in use\n", dmanr);
- return;
- }
-
- if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
- printk("enable_dma: bad channel: %d\n", dmanr);
- return;
- }
-
- if (p_dma_ch->mode == DMA_MODE_READ) {
- /* peripheral to memory */
- ppc4xx_set_src_addr(dmanr, 0);
- ppc4xx_set_dst_addr(dmanr, p_dma_ch->addr);
- } else if (p_dma_ch->mode == DMA_MODE_WRITE) {
- /* memory to peripheral */
- ppc4xx_set_src_addr(dmanr, p_dma_ch->addr);
- ppc4xx_set_dst_addr(dmanr, 0);
- }
-
- /* for other xfer modes, the addresses are already set */
- control = mfdcr(DCRN_DMACR0 + (dmanr * 0x8));
-
- control &= ~(DMA_TM_MASK | DMA_TD); /* clear all mode bits */
- if (p_dma_ch->mode == DMA_MODE_MM) {
- /* software initiated memory to memory */
- control |= DMA_ETD_OUTPUT | DMA_TCE_ENABLE;
- }
-
- mtdcr(DCRN_DMACR0 + (dmanr * 0x8), control);
-
- /*
- * Clear the CS, TS, RI bits for the channel from DMASR. This
- * has been observed to happen correctly only after the mode and
- * ETD/DCE bits in DMACRx are set above. Must do this before
- * enabling the channel.
- */
-
- mtdcr(DCRN_DMASR, status_bits[dmanr]);
-
- /*
- * For device-paced transfers, Terminal Count Enable apparently
- * must be on, and this must be turned on after the mode, etc.
- * bits are cleared above (at least on Redwood-6).
- */
-
- if ((p_dma_ch->mode == DMA_MODE_MM_DEVATDST) ||
- (p_dma_ch->mode == DMA_MODE_MM_DEVATSRC))
- control |= DMA_TCE_ENABLE;
-
- /*
- * Now enable the channel.
- */
-
- control |= (p_dma_ch->mode | DMA_CE_ENABLE);
-
- mtdcr(DCRN_DMACR0 + (dmanr * 0x8), control);
-
- p_dma_ch->in_use = 1;
-}
-
-void
-ppc4xx_disable_dma(unsigned int dmanr)
-{
- unsigned int control;
- ppc_dma_ch_t *p_dma_ch = &dma_channels[dmanr];
-
- if (!p_dma_ch->in_use) {
- printk("disable_dma: channel %d not in use\n", dmanr);
- return;
- }
-
- if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
- printk("disable_dma: bad channel: %d\n", dmanr);
- return;
- }
-
- control = mfdcr(DCRN_DMACR0 + (dmanr * 0x8));
- control &= ~DMA_CE_ENABLE;
- mtdcr(DCRN_DMACR0 + (dmanr * 0x8), control);
-
- p_dma_ch->in_use = 0;
-}
-
-/*
- * Sets the dma mode for single DMA transfers only.
- * For scatter/gather transfers, the mode is passed to the
- * alloc_dma_handle() function as one of the parameters.
- *
- * The mode is simply saved and used later. This allows
- * the driver to call set_dma_mode() and set_dma_addr() in
- * any order.
- *
- * Valid mode values are:
- *
- * DMA_MODE_READ peripheral to memory
- * DMA_MODE_WRITE memory to peripheral
- * DMA_MODE_MM memory to memory
- * DMA_MODE_MM_DEVATSRC device-paced memory to memory, device at src
- * DMA_MODE_MM_DEVATDST device-paced memory to memory, device at dst
- */
-int
-ppc4xx_set_dma_mode(unsigned int dmanr, unsigned int mode)
-{
- ppc_dma_ch_t *p_dma_ch = &dma_channels[dmanr];
-
- if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
- printk("set_dma_mode: bad channel 0x%x\n", dmanr);
- return DMA_STATUS_BAD_CHANNEL;
- }
-
- p_dma_ch->mode = mode;
-
- return DMA_STATUS_GOOD;
-}
-
-/*
- * Sets the DMA Count register. Note that 'count' is in bytes.
- * However, the DMA Count register counts the number of "transfers",
- * where each transfer is equal to the bus width. Thus, count
- * MUST be a multiple of the bus width.
- */
-void
-ppc4xx_set_dma_count(unsigned int dmanr, unsigned int count)
-{
- ppc_dma_ch_t *p_dma_ch = &dma_channels[dmanr];
-
-#ifdef DEBUG_4xxDMA
- {
- int error = 0;
- switch (p_dma_ch->pwidth) {
- case PW_8:
- break;
- case PW_16:
- if (count & 0x1)
- error = 1;
- break;
- case PW_32:
- if (count & 0x3)
- error = 1;
- break;
- case PW_64:
- if (count & 0x7)
- error = 1;
- break;
- default:
- printk("set_dma_count: invalid bus width: 0x%x\n",
- p_dma_ch->pwidth);
- return;
- }
- if (error)
- printk
- ("Warning: set_dma_count count 0x%x bus width %d\n",
- count, p_dma_ch->pwidth);
- }
-#endif
+#include <asm/ppc405_dma.h>
- count = count >> p_dma_ch->shift;
-
- mtdcr(DCRN_DMACT0 + (dmanr * 0x8), count);
-}
/*
- * Returns the number of bytes left to be transfered.
- * After a DMA transfer, this should return zero.
- * Reading this while a DMA transfer is still in progress will return
- * unpredictable results.
+ * Function prototypes
*/
-int
-ppc4xx_get_dma_residue(unsigned int dmanr)
-{
- unsigned int count;
- ppc_dma_ch_t *p_dma_ch = &dma_channels[dmanr];
- if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
- printk("ppc4xx_get_dma_residue: bad channel 0x%x\n", dmanr);
- return DMA_STATUS_BAD_CHANNEL;
- }
+int hw_init_dma_channel(unsigned int, ppc_dma_ch_t *);
+int init_dma_channel(unsigned int);
+int get_channel_config(unsigned int, ppc_dma_ch_t *);
+int set_channel_priority(unsigned int, unsigned int);
+unsigned int get_peripheral_width(unsigned int);
+int alloc_dma_handle(sgl_handle_t *, unsigned int, unsigned int);
+void free_dma_handle(sgl_handle_t);
- count = mfdcr(DCRN_DMACT0 + (dmanr * 0x8));
- return (count << p_dma_ch->shift);
-}
-
-/*
- * Sets the DMA address for a memory to peripheral or peripheral
- * to memory transfer. The address is just saved in the channel
- * structure for now and used later in enable_dma().
- */
-void
-ppc4xx_set_dma_addr(unsigned int dmanr, phys_addr_t addr)
-{
- ppc_dma_ch_t *p_dma_ch = &dma_channels[dmanr];
-
- if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
- printk("ppc4xx_set_dma_addr: bad channel: %d\n", dmanr);
- return;
- }
-
-#ifdef DEBUG_4xxDMA
- {
- int error = 0;
- switch (p_dma_ch->pwidth) {
- case PW_8:
- break;
- case PW_16:
- if ((unsigned) addr & 0x1)
- error = 1;
- break;
- case PW_32:
- if ((unsigned) addr & 0x3)
- error = 1;
- break;
- case PW_64:
- if ((unsigned) addr & 0x7)
- error = 1;
- break;
- default:
- printk("ppc4xx_set_dma_addr: invalid bus width: 0x%x\n",
- p_dma_ch->pwidth);
- return;
- }
- if (error)
- printk("Warning: ppc4xx_set_dma_addr addr 0x%x bus width %d\n",
- addr, p_dma_ch->pwidth);
- }
-#endif
-
- /* save dma address and program it later after we know the xfer mode */
- p_dma_ch->addr = addr;
-}
-
-/*
- * Sets both DMA addresses for a memory to memory transfer.
- * For memory to peripheral or peripheral to memory transfers
- * the function set_dma_addr() should be used instead.
- */
-void
-ppc4xx_set_dma_addr2(unsigned int dmanr, phys_addr_t src_dma_addr,
- phys_addr_t dst_dma_addr)
-{
- if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
- printk("ppc4xx_set_dma_addr2: bad channel: %d\n", dmanr);
- return;
- }
-
-#ifdef DEBUG_4xxDMA
- {
- ppc_dma_ch_t *p_dma_ch = &dma_channels[dmanr];
- int error = 0;
- switch (p_dma_ch->pwidth) {
- case PW_8:
- break;
- case PW_16:
- if (((unsigned) src_dma_addr & 0x1) ||
- ((unsigned) dst_dma_addr & 0x1)
- )
- error = 1;
- break;
- case PW_32:
- if (((unsigned) src_dma_addr & 0x3) ||
- ((unsigned) dst_dma_addr & 0x3)
- )
- error = 1;
- break;
- case PW_64:
- if (((unsigned) src_dma_addr & 0x7) ||
- ((unsigned) dst_dma_addr & 0x7)
- )
- error = 1;
- break;
- default:
- printk("ppc4xx_set_dma_addr2: invalid bus width: 0x%x\n",
- p_dma_ch->pwidth);
- return;
- }
- if (error)
- printk
- ("Warning: ppc4xx_set_dma_addr2 src 0x%x dst 0x%x bus width %d\n",
- src_dma_addr, dst_dma_addr, p_dma_ch->pwidth);
- }
-#endif
-
- ppc4xx_set_src_addr(dmanr, src_dma_addr);
- ppc4xx_set_dst_addr(dmanr, dst_dma_addr);
-}
-
-/*
- * Enables the channel interrupt.
- *
- * If performing a scatter/gatter transfer, this function
- * MUST be called before calling alloc_dma_handle() and building
- * the sgl list. Otherwise, interrupts will not be enabled, if
- * they were previously disabled.
- */
-int
-ppc4xx_enable_dma_interrupt(unsigned int dmanr)
-{
- unsigned int control;
- ppc_dma_ch_t *p_dma_ch = &dma_channels[dmanr];
-
- if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
- printk("ppc4xx_enable_dma_interrupt: bad channel: %d\n", dmanr);
- return DMA_STATUS_BAD_CHANNEL;
- }
-
- p_dma_ch->int_enable = 1;
-
- control = mfdcr(DCRN_DMACR0 + (dmanr * 0x8));
- control |= DMA_CIE_ENABLE; /* Channel Interrupt Enable */
- mtdcr(DCRN_DMACR0 + (dmanr * 0x8), control);
-
- return DMA_STATUS_GOOD;
-}
-
-/*
- * Disables the channel interrupt.
- *
- * If performing a scatter/gatter transfer, this function
- * MUST be called before calling alloc_dma_handle() and building
- * the sgl list. Otherwise, interrupts will not be disabled, if
- * they were previously enabled.
- */
-int
-ppc4xx_disable_dma_interrupt(unsigned int dmanr)
-{
- unsigned int control;
- ppc_dma_ch_t *p_dma_ch = &dma_channels[dmanr];
-
- if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
- printk("ppc4xx_disable_dma_interrupt: bad channel: %d\n", dmanr);
- return DMA_STATUS_BAD_CHANNEL;
- }
-
- p_dma_ch->int_enable = 0;
-
- control = mfdcr(DCRN_DMACR0 + (dmanr * 0x8));
- control &= ~DMA_CIE_ENABLE; /* Channel Interrupt Enable */
- mtdcr(DCRN_DMACR0 + (dmanr * 0x8), control);
-
- return DMA_STATUS_GOOD;
-}
+ppc_dma_ch_t dma_channels[MAX_405GP_DMA_CHANNELS];
/*
* Configures a DMA channel, including the peripheral bus width, if a
* called from platform specific init code. The driver should not need to
* call this function.
*/
-int
-ppc4xx_init_dma_channel(unsigned int dmanr, ppc_dma_ch_t * p_init)
+int hw_init_dma_channel(unsigned int dmanr, ppc_dma_ch_t *p_init)
{
- unsigned int polarity;
- uint32_t control = 0;
- ppc_dma_ch_t *p_dma_ch = &dma_channels[dmanr];
-
- DMA_MODE_READ = (unsigned long) DMA_TD; /* Peripheral to Memory */
- DMA_MODE_WRITE = 0; /* Memory to Peripheral */
-
- if (!p_init) {
- printk("ppc4xx_init_dma_channel: NULL p_init\n");
- return DMA_STATUS_NULL_POINTER;
- }
-
- if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
- printk("ppc4xx_init_dma_channel: bad channel %d\n", dmanr);
- return DMA_STATUS_BAD_CHANNEL;
- }
+ unsigned int polarity;
+ uint32_t control = 0;
+ ppc_dma_ch_t *p_dma_ch = &dma_channels[dmanr];
+
+#ifdef DEBUG_405DMA
+ if (!p_init) {
+ printk("hw_init_dma_channel: NULL p_init\n");
+ return DMA_STATUS_NULL_POINTER;
+ }
+ if (dmanr >= MAX_405GP_DMA_CHANNELS) {
+ printk("hw_init_dma_channel: bad channel %d\n", dmanr);
+ return DMA_STATUS_BAD_CHANNEL;
+ }
+#endif
#if DCRN_POL > 0
- polarity = mfdcr(DCRN_POL);
+ polarity = mfdcr(DCRN_POL);
#else
- polarity = 0;
+ polarity = 0;
#endif
- /* Setup the control register based on the values passed to
- * us in p_init. Then, over-write the control register with this
- * new value.
- */
- control |= SET_DMA_CONTROL;
-
- /* clear all polarity signals and then "or" in new signal levels */
- polarity &= ~GET_DMA_POLARITY(dmanr);
- polarity |= p_dma_ch->polarity;
+ /* Setup the control register based on the values passed to
+ * us in p_init. Then, over-write the control register with this
+ * new value.
+ */
+
+ control |= (
+ SET_DMA_CIE_ENABLE(p_init->int_enable) | /* interrupt enable */
+ SET_DMA_BEN(p_init->buffer_enable) | /* buffer enable */
+ SET_DMA_ETD(p_init->etd_output) | /* end of transfer pin */
+ SET_DMA_TCE(p_init->tce_enable) | /* terminal count enable */
+ SET_DMA_PL(p_init->pl) | /* peripheral location */
+ SET_DMA_DAI(p_init->dai) | /* dest addr increment */
+ SET_DMA_SAI(p_init->sai) | /* src addr increment */
+ SET_DMA_PRIORITY(p_init->cp) | /* channel priority */
+ SET_DMA_PW(p_init->pwidth) | /* peripheral/bus width */
+ SET_DMA_PSC(p_init->psc) | /* peripheral setup cycles */
+ SET_DMA_PWC(p_init->pwc) | /* peripheral wait cycles */
+ SET_DMA_PHC(p_init->phc) | /* peripheral hold cycles */
+ SET_DMA_PREFETCH(p_init->pf) /* read prefetch */
+ );
+
+ switch (dmanr) {
+ case 0:
+ /* clear all polarity signals and then "or" in new signal levels */
+ polarity &= ~(DMAReq0_ActiveLow | DMAAck0_ActiveLow | EOT0_ActiveLow);
+ polarity |= p_dma_ch->polarity;
#if DCRN_POL > 0
- mtdcr(DCRN_POL, polarity);
+ mtdcr(DCRN_POL, polarity);
#endif
- mtdcr(DCRN_DMACR0 + (dmanr * 0x8), control);
-
- /* save these values in our dma channel structure */
- memcpy(p_dma_ch, p_init, sizeof (ppc_dma_ch_t));
-
- /*
- * The peripheral width values written in the control register are:
- * PW_8 0
- * PW_16 1
- * PW_32 2
- * PW_64 3
- *
- * Since the DMA count register takes the number of "transfers",
- * we need to divide the count sent to us in certain
- * functions by the appropriate number. It so happens that our
- * right shift value is equal to the peripheral width value.
- */
- p_dma_ch->shift = p_init->pwidth;
-
- /*
- * Save the control word for easy access.
- */
- p_dma_ch->control = control;
-
- mtdcr(DCRN_DMASR, 0xffffffff); /* clear status register */
- return DMA_STATUS_GOOD;
+ mtdcr(DCRN_DMACR0, control);
+ break;
+ case 1:
+ polarity &= ~(DMAReq1_ActiveLow | DMAAck1_ActiveLow | EOT1_ActiveLow);
+ polarity |= p_dma_ch->polarity;
+#if DCRN_POL > 0
+ mtdcr(DCRN_POL, polarity);
+#endif
+ mtdcr(DCRN_DMACR1, control);
+ break;
+ case 2:
+ polarity &= ~(DMAReq2_ActiveLow | DMAAck2_ActiveLow | EOT2_ActiveLow);
+ polarity |= p_dma_ch->polarity;
+#if DCRN_POL > 0
+ mtdcr(DCRN_POL, polarity);
+#endif
+ mtdcr(DCRN_DMACR2, control);
+ break;
+ case 3:
+ polarity &= ~(DMAReq3_ActiveLow | DMAAck3_ActiveLow | EOT3_ActiveLow);
+ polarity |= p_dma_ch->polarity;
+#if DCRN_POL > 0
+ mtdcr(DCRN_POL, polarity);
+#endif
+ mtdcr(DCRN_DMACR3, control);
+ break;
+ default:
+ return DMA_STATUS_BAD_CHANNEL;
+ }
+
+ /* save these values in our dma channel structure */
+ memcpy(p_dma_ch, p_init, sizeof(ppc_dma_ch_t));
+
+ /*
+ * The peripheral width values written in the control register are:
+ * PW_8 0
+ * PW_16 1
+ * PW_32 2
+ * PW_64 3
+ *
+ * Since the DMA count register takes the number of "transfers",
+ * we need to divide the count sent to us in certain
+ * functions by the appropriate number. It so happens that our
+ * right shift value is equal to the peripheral width value.
+ */
+ p_dma_ch->shift = p_init->pwidth;
+
+ /*
+ * Save the control word for easy access.
+ */
+ p_dma_ch->control = control;
+
+ mtdcr(DCRN_DMASR, 0xffffffff); /* clear status register */
+ return DMA_STATUS_GOOD;
}
+
+
+
/*
* This function returns the channel configuration.
*/
-int
-ppc4xx_get_channel_config(unsigned int dmanr, ppc_dma_ch_t * p_dma_ch)
+int get_channel_config(unsigned int dmanr, ppc_dma_ch_t *p_dma_ch)
{
- unsigned int polarity;
- unsigned int control;
-
- if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
- printk("ppc4xx_get_channel_config: bad channel %d\n", dmanr);
- return DMA_STATUS_BAD_CHANNEL;
- }
+ unsigned int polarity;
+ unsigned int control;
#if DCRN_POL > 0
- polarity = mfdcr(DCRN_POL);
+ polarity = mfdcr(DCRN_POL);
#else
- polarity = 0;
+ polarity = 0;
#endif
- p_dma_ch->polarity = polarity & GET_DMA_POLARITY(dmanr);
- control = mfdcr(DCRN_DMACR0 + (dmanr * 0x8));
-
- p_dma_ch->cp = GET_DMA_PRIORITY(control);
- p_dma_ch->pwidth = GET_DMA_PW(control);
- p_dma_ch->psc = GET_DMA_PSC(control);
- p_dma_ch->pwc = GET_DMA_PWC(control);
- p_dma_ch->phc = GET_DMA_PHC(control);
- p_dma_ch->ce = GET_DMA_CE_ENABLE(control);
- p_dma_ch->int_enable = GET_DMA_CIE_ENABLE(control);
- p_dma_ch->shift = GET_DMA_PW(control);
-
-#ifdef CONFIG_PPC4xx_EDMA
- p_dma_ch->pf = GET_DMA_PREFETCH(control);
-#else
- p_dma_ch->ch_enable = GET_DMA_CH(control);
- p_dma_ch->ece_enable = GET_DMA_ECE(control);
- p_dma_ch->tcd_disable = GET_DMA_TCD(control);
-#endif
- return DMA_STATUS_GOOD;
+ switch (dmanr) {
+ case 0:
+ p_dma_ch->polarity =
+ polarity & (DMAReq0_ActiveLow | DMAAck0_ActiveLow | EOT0_ActiveLow);
+ control = mfdcr(DCRN_DMACR0);
+ break;
+ case 1:
+ p_dma_ch->polarity =
+ polarity & (DMAReq1_ActiveLow | DMAAck1_ActiveLow | EOT1_ActiveLow);
+ control = mfdcr(DCRN_DMACR1);
+ break;
+ case 2:
+ p_dma_ch->polarity =
+ polarity & (DMAReq2_ActiveLow | DMAAck2_ActiveLow | EOT2_ActiveLow);
+ control = mfdcr(DCRN_DMACR2);
+ break;
+ case 3:
+ p_dma_ch->polarity =
+ polarity & (DMAReq3_ActiveLow | DMAAck3_ActiveLow | EOT3_ActiveLow);
+ control = mfdcr(DCRN_DMACR3);
+ break;
+ default:
+ return DMA_STATUS_BAD_CHANNEL;
+ }
+
+ p_dma_ch->cp = GET_DMA_PRIORITY(control);
+ p_dma_ch->pwidth = GET_DMA_PW(control);
+ p_dma_ch->psc = GET_DMA_PSC(control);
+ p_dma_ch->pwc = GET_DMA_PWC(control);
+ p_dma_ch->phc = GET_DMA_PHC(control);
+ p_dma_ch->pf = GET_DMA_PREFETCH(control);
+ p_dma_ch->int_enable = GET_DMA_CIE_ENABLE(control);
+ p_dma_ch->shift = GET_DMA_PW(control);
+
+ return DMA_STATUS_GOOD;
}
/*
* PRIORITY_HIGH
*
*/
-int
-ppc4xx_set_channel_priority(unsigned int dmanr, unsigned int priority)
+int set_channel_priority(unsigned int dmanr, unsigned int priority)
{
- unsigned int control;
-
- if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
- printk("ppc4xx_set_channel_priority: bad channel %d\n", dmanr);
- return DMA_STATUS_BAD_CHANNEL;
- }
+ unsigned int control;
+
+#ifdef DEBUG_405DMA
+ if ( (priority != PRIORITY_LOW) &&
+ (priority != PRIORITY_MID_LOW) &&
+ (priority != PRIORITY_MID_HIGH) &&
+ (priority != PRIORITY_HIGH)) {
+ printk("set_channel_priority: bad priority: 0x%x\n", priority);
+ }
+#endif
- if ((priority != PRIORITY_LOW) &&
- (priority != PRIORITY_MID_LOW) &&
- (priority != PRIORITY_MID_HIGH) && (priority != PRIORITY_HIGH)) {
- printk("ppc4xx_set_channel_priority: bad priority: 0x%x\n", priority);
- }
+ switch (dmanr) {
+ case 0:
+ control = mfdcr(DCRN_DMACR0);
+ control|= SET_DMA_PRIORITY(priority);
+ mtdcr(DCRN_DMACR0, control);
+ break;
+ case 1:
+ control = mfdcr(DCRN_DMACR1);
+ control|= SET_DMA_PRIORITY(priority);
+ mtdcr(DCRN_DMACR1, control);
+ break;
+ case 2:
+ control = mfdcr(DCRN_DMACR2);
+ control|= SET_DMA_PRIORITY(priority);
+ mtdcr(DCRN_DMACR2, control);
+ break;
+ case 3:
+ control = mfdcr(DCRN_DMACR3);
+ control|= SET_DMA_PRIORITY(priority);
+ mtdcr(DCRN_DMACR3, control);
+ break;
+ default:
+#ifdef DEBUG_405DMA
+ printk("set_channel_priority: bad channel: %d\n", dmanr);
+#endif
+ return DMA_STATUS_BAD_CHANNEL;
+ }
+ return DMA_STATUS_GOOD;
+}
- control = mfdcr(DCRN_DMACR0 + (dmanr * 0x8));
- control |= SET_DMA_PRIORITY(priority);
- mtdcr(DCRN_DMACR0 + (dmanr * 0x8), control);
- return DMA_STATUS_GOOD;
-}
/*
* Returns the width of the peripheral attached to this channel. This assumes
*
* The function returns 0 on error.
*/
-unsigned int
-ppc4xx_get_peripheral_width(unsigned int dmanr)
+unsigned int get_peripheral_width(unsigned int dmanr)
{
- unsigned int control;
+ unsigned int control;
+
+ switch (dmanr) {
+ case 0:
+ control = mfdcr(DCRN_DMACR0);
+ break;
+ case 1:
+ control = mfdcr(DCRN_DMACR1);
+ break;
+ case 2:
+ control = mfdcr(DCRN_DMACR2);
+ break;
+ case 3:
+ control = mfdcr(DCRN_DMACR3);
+ break;
+ default:
+#ifdef DEBUG_405DMA
+ printk("get_peripheral_width: bad channel: %d\n", dmanr);
+#endif
+ return 0;
+ }
+ return(GET_DMA_PW(control));
+}
+
+
+
+
+/*
+ * Create a scatter/gather list handle. This is simply a structure which
+ * describes a scatter/gather list.
+ *
+ * A handle is returned in "handle" which the driver should save in order to
+ * be able to access this list later. A chunk of memory will be allocated
+ * to be used by the API for internal management purposes, including managing
+ * the sg list and allocating memory for the sgl descriptors. One page should
+ * be more than enough for that purpose. Perhaps it's a bit wasteful to use
+ * a whole page for a single sg list, but most likely there will be only one
+ * sg list per channel.
+ *
+ * Interrupt notes:
+ * Each sgl descriptor has a copy of the DMA control word which the DMA engine
+ * loads in the control register. The control word has a "global" interrupt
+ * enable bit for that channel. Interrupts are further qualified by a few bits
+ * in the sgl descriptor count register. In order to setup an sgl, we have to
+ * know ahead of time whether or not interrupts will be enabled at the completion
+ * of the transfers. Thus, enable_dma_interrupt()/disable_dma_interrupt() MUST
+ * be called before calling alloc_dma_handle(). If the interrupt mode will never
+ * change after powerup, then enable_dma_interrupt()/disable_dma_interrupt()
+ * do not have to be called -- interrupts will be enabled or disabled based
+ * on how the channel was configured after powerup by the hw_init_dma_channel()
+ * function. Each sgl descriptor will be setup to interrupt if an error occurs;
+ * however, only the last descriptor will be setup to interrupt. Thus, an
+ * interrupt will occur (if interrupts are enabled) only after the complete
+ * sgl transfer is done.
+ */
+int alloc_dma_handle(sgl_handle_t *phandle, unsigned int mode, unsigned int dmanr)
+{
+ sgl_list_info_t *psgl;
+ dma_addr_t dma_addr;
+ ppc_dma_ch_t *p_dma_ch = &dma_channels[dmanr];
+ uint32_t sg_command;
+ void *ret;
+
+#ifdef DEBUG_405DMA
+ if (!phandle) {
+ printk("alloc_dma_handle: null handle pointer\n");
+ return DMA_STATUS_NULL_POINTER;
+ }
+ switch (mode) {
+ case DMA_MODE_READ:
+ case DMA_MODE_WRITE:
+ case DMA_MODE_MM:
+ case DMA_MODE_MM_DEVATSRC:
+ case DMA_MODE_MM_DEVATDST:
+ break;
+ default:
+ printk("alloc_dma_handle: bad mode 0x%x\n", mode);
+ return DMA_STATUS_BAD_MODE;
+ }
+ if (dmanr >= MAX_405GP_DMA_CHANNELS) {
+ printk("alloc_dma_handle: invalid channel 0x%x\n", dmanr);
+ return DMA_STATUS_BAD_CHANNEL;
+ }
+#endif
- if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
- printk("ppc4xx_get_peripheral_width: bad channel %d\n", dmanr);
- return DMA_STATUS_BAD_CHANNEL;
+ /* Get a page of memory, which is zeroed out by pci_alloc_consistent() */
+
+/* wrong not a pci device - armin */
+ /* psgl = (sgl_list_info_t *) pci_alloc_consistent(NULL, SGL_LIST_SIZE, &dma_addr);
+*/
+
+ ret = consistent_alloc(GFP_ATOMIC |GFP_DMA, SGL_LIST_SIZE, &dma_addr);
+ if (ret != NULL) {
+ memset(ret, 0,SGL_LIST_SIZE );
+ psgl = (sgl_list_info_t *) ret;
}
- control = mfdcr(DCRN_DMACR0 + (dmanr * 0x8));
- return (GET_DMA_PW(control));
+ if (psgl == NULL) {
+ *phandle = (sgl_handle_t)NULL;
+ return DMA_STATUS_OUT_OF_MEMORY;
+ }
+
+ psgl->dma_addr = dma_addr;
+ psgl->dmanr = dmanr;
+
+ /*
+ * Modify and save the control word. These word will get written to each sgl
+ * descriptor. The DMA engine then loads this control word into the control
+ * register every time it reads a new descriptor.
+ */
+ psgl->control = p_dma_ch->control;
+ psgl->control &= ~(DMA_TM_MASK | DMA_TD); /* clear all "mode" bits first */
+ psgl->control |= (mode | DMA_CH_ENABLE); /* save the control word along with the mode */
+
+ if (p_dma_ch->int_enable) {
+ psgl->control |= DMA_CIE_ENABLE; /* channel interrupt enabled */
+ }
+ else {
+ psgl->control &= ~DMA_CIE_ENABLE;
+ }
+
+#if DCRN_ASGC > 0
+ sg_command = mfdcr(DCRN_ASGC);
+ switch (dmanr) {
+ case 0:
+ sg_command |= SSG0_MASK_ENABLE;
+ break;
+ case 1:
+ sg_command |= SSG1_MASK_ENABLE;
+ break;
+ case 2:
+ sg_command |= SSG2_MASK_ENABLE;
+ break;
+ case 3:
+ sg_command |= SSG3_MASK_ENABLE;
+ break;
+ default:
+#ifdef DEBUG_405DMA
+ printk("alloc_dma_handle: bad channel: %d\n", dmanr);
+#endif
+ free_dma_handle((sgl_handle_t)psgl);
+ *phandle = (sgl_handle_t)NULL;
+ return DMA_STATUS_BAD_CHANNEL;
+ }
+
+ mtdcr(DCRN_ASGC, sg_command); /* enable writing to this channel's sgl control bits */
+#else
+ (void)sg_command;
+#endif
+ psgl->sgl_control = SG_ERI_ENABLE | SG_LINK; /* sgl descriptor control bits */
+
+ if (p_dma_ch->int_enable) {
+ if (p_dma_ch->tce_enable)
+ psgl->sgl_control |= SG_TCI_ENABLE;
+ else
+ psgl->sgl_control |= SG_ETI_ENABLE;
+ }
+
+ *phandle = (sgl_handle_t)psgl;
+ return DMA_STATUS_GOOD;
+}
+
+
+
+/*
+ * Destroy a scatter/gather list handle that was created by alloc_dma_handle().
+ * The list must be empty (contain no elements).
+ */
+void free_dma_handle(sgl_handle_t handle)
+{
+ sgl_list_info_t *psgl = (sgl_list_info_t *)handle;
+
+ if (!handle) {
+#ifdef DEBUG_405DMA
+ printk("free_dma_handle: got NULL\n");
+#endif
+ return;
+ }
+ else if (psgl->phead) {
+#ifdef DEBUG_405DMA
+ printk("free_dma_handle: list not empty\n");
+#endif
+ return;
+ }
+ else if (!psgl->dma_addr) { /* should never happen */
+#ifdef DEBUG_405DMA
+ printk("free_dma_handle: no dma address\n");
+#endif
+ return;
+ }
+
+ /* wrong not a PCI device -armin */
+ /* pci_free_consistent(NULL, SGL_LIST_SIZE, (void *)psgl, psgl->dma_addr); */
+ // free_pages((unsigned long)psgl, get_order(SGL_LIST_SIZE));
+ consistent_free((void *)psgl);
+
+
}
-EXPORT_SYMBOL(ppc4xx_init_dma_channel);
-EXPORT_SYMBOL(ppc4xx_get_channel_config);
-EXPORT_SYMBOL(ppc4xx_set_channel_priority);
-EXPORT_SYMBOL(ppc4xx_get_peripheral_width);
+EXPORT_SYMBOL(hw_init_dma_channel);
+EXPORT_SYMBOL(get_channel_config);
+EXPORT_SYMBOL(set_channel_priority);
+EXPORT_SYMBOL(get_peripheral_width);
+EXPORT_SYMBOL(alloc_dma_handle);
+EXPORT_SYMBOL(free_dma_handle);
EXPORT_SYMBOL(dma_channels);
-EXPORT_SYMBOL(ppc4xx_set_src_addr);
-EXPORT_SYMBOL(ppc4xx_set_dst_addr);
-EXPORT_SYMBOL(ppc4xx_set_dma_addr);
-EXPORT_SYMBOL(ppc4xx_set_dma_addr2);
-EXPORT_SYMBOL(ppc4xx_enable_dma);
-EXPORT_SYMBOL(ppc4xx_disable_dma);
-EXPORT_SYMBOL(ppc4xx_set_dma_mode);
-EXPORT_SYMBOL(ppc4xx_set_dma_count);
-EXPORT_SYMBOL(ppc4xx_get_dma_residue);
-EXPORT_SYMBOL(ppc4xx_enable_dma_interrupt);
-EXPORT_SYMBOL(ppc4xx_disable_dma_interrupt);
-EXPORT_SYMBOL(ppc4xx_get_dma_status);
+++ /dev/null
-/*
- * arch/ppc/kernel/ppc4xx_sgdma.c
- *
- * IBM PPC4xx DMA engine scatter/gather library
- *
- * Copyright 2002-2003 MontaVista Software Inc.
- *
- * Cleaned up and converted to new DCR access
- * Matt Porter <mporter@kernel.crashing.org>
- *
- * Original code by Armin Kuster <akuster@mvista.com>
- * and Pete Popov <ppopov@mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/config.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-
-#include <asm/system.h>
-#include <asm/io.h>
-#include <asm/ppc4xx_dma.h>
-
-void
-ppc4xx_set_sg_addr(int dmanr, phys_addr_t sg_addr)
-{
- if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
- printk("ppc4xx_set_sg_addr: bad channel: %d\n", dmanr);
- return;
- }
-
-#ifdef PPC4xx_DMA_64BIT
- mtdcr(DCRN_ASGH0 + (dmanr * 0x8), (u32)(sg_addr >> 32));
-#endif
- mtdcr(DCRN_ASG0 + (dmanr * 0x8), (u32)sg_addr);
-}
-
-/*
- * Add a new sgl descriptor to the end of a scatter/gather list
- * which was created by alloc_dma_handle().
- *
- * For a memory to memory transfer, both dma addresses must be
- * valid. For a peripheral to memory transfer, one of the addresses
- * must be set to NULL, depending on the direction of the transfer:
- * memory to peripheral: set dst_addr to NULL,
- * peripheral to memory: set src_addr to NULL.
- */
-int
-ppc4xx_add_dma_sgl(sgl_handle_t handle, phys_addr_t src_addr, phys_addr_t dst_addr,
- unsigned int count)
-{
- sgl_list_info_t *psgl = (sgl_list_info_t *) handle;
- ppc_dma_ch_t *p_dma_ch;
-
- if (!handle) {
- printk("ppc4xx_add_dma_sgl: null handle\n");
- return DMA_STATUS_BAD_HANDLE;
- }
-
- if (psgl->dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
- printk("ppc4xx_add_dma_sgl: bad channel: %d\n", psgl->dmanr);
- return DMA_STATUS_BAD_CHANNEL;
- }
-
- p_dma_ch = &dma_channels[psgl->dmanr];
-
-#ifdef DEBUG_4xxDMA
- {
- int error = 0;
- unsigned int aligned =
- (unsigned) src_addr | (unsigned) dst_addr | count;
- switch (p_dma_ch->pwidth) {
- case PW_8:
- break;
- case PW_16:
- if (aligned & 0x1)
- error = 1;
- break;
- case PW_32:
- if (aligned & 0x3)
- error = 1;
- break;
- case PW_64:
- if (aligned & 0x7)
- error = 1;
- break;
- default:
- printk("ppc4xx_add_dma_sgl: invalid bus width: 0x%x\n",
- p_dma_ch->pwidth);
- return DMA_STATUS_GENERAL_ERROR;
- }
- if (error)
- printk
- ("Alignment warning: ppc4xx_add_dma_sgl src 0x%x dst 0x%x count 0x%x bus width var %d\n",
- src_addr, dst_addr, count, p_dma_ch->pwidth);
-
- }
-#endif
-
- if ((unsigned) (psgl->ptail + 1) >= ((unsigned) psgl + SGL_LIST_SIZE)) {
- printk("sgl handle out of memory \n");
- return DMA_STATUS_OUT_OF_MEMORY;
- }
-
- if (!psgl->ptail) {
- psgl->phead = (ppc_sgl_t *)
- ((unsigned) psgl + sizeof (sgl_list_info_t));
- psgl->phead_dma = psgl->dma_addr + sizeof(sgl_list_info_t);
- psgl->ptail = psgl->phead;
- psgl->ptail_dma = psgl->phead_dma;
- } else {
- psgl->ptail->next = psgl->ptail_dma + sizeof(ppc_sgl_t);
- psgl->ptail++;
- psgl->ptail_dma += sizeof(ppc_sgl_t);
- }
-
- psgl->ptail->control = psgl->control;
- psgl->ptail->src_addr = src_addr;
- psgl->ptail->dst_addr = dst_addr;
- psgl->ptail->control_count = (count >> p_dma_ch->shift) |
- psgl->sgl_control;
- psgl->ptail->next = (uint32_t) NULL;
-
- return DMA_STATUS_GOOD;
-}
-
-/*
- * Enable (start) the DMA described by the sgl handle.
- */
-void
-ppc4xx_enable_dma_sgl(sgl_handle_t handle)
-{
- sgl_list_info_t *psgl = (sgl_list_info_t *) handle;
- ppc_dma_ch_t *p_dma_ch;
- uint32_t sg_command;
-
- if (!handle) {
- printk("ppc4xx_enable_dma_sgl: null handle\n");
- return;
- } else if (psgl->dmanr > (MAX_PPC4xx_DMA_CHANNELS - 1)) {
- printk("ppc4xx_enable_dma_sgl: bad channel in handle %d\n",
- psgl->dmanr);
- return;
- } else if (!psgl->phead) {
- printk("ppc4xx_enable_dma_sgl: sg list empty\n");
- return;
- }
-
- p_dma_ch = &dma_channels[psgl->dmanr];
- psgl->ptail->control_count &= ~SG_LINK; /* make this the last dscrptr */
- sg_command = mfdcr(DCRN_ASGC);
-
- ppc4xx_set_sg_addr(psgl->dmanr, psgl->phead_dma);
-
- sg_command |= SSG_ENABLE(psgl->dmanr);
-
- mtdcr(DCRN_ASGC, sg_command); /* start transfer */
-}
-
-/*
- * Halt an active scatter/gather DMA operation.
- */
-void
-ppc4xx_disable_dma_sgl(sgl_handle_t handle)
-{
- sgl_list_info_t *psgl = (sgl_list_info_t *) handle;
- uint32_t sg_command;
-
- if (!handle) {
- printk("ppc4xx_enable_dma_sgl: null handle\n");
- return;
- } else if (psgl->dmanr > (MAX_PPC4xx_DMA_CHANNELS - 1)) {
- printk("ppc4xx_enable_dma_sgl: bad channel in handle %d\n",
- psgl->dmanr);
- return;
- }
-
- sg_command = mfdcr(DCRN_ASGC);
- sg_command &= ~SSG_ENABLE(psgl->dmanr);
- mtdcr(DCRN_ASGC, sg_command); /* stop transfer */
-}
-
-/*
- * Returns number of bytes left to be transferred from the entire sgl list.
- * *src_addr and *dst_addr get set to the source/destination address of
- * the sgl descriptor where the DMA stopped.
- *
- * An sgl transfer must NOT be active when this function is called.
- */
-int
-ppc4xx_get_dma_sgl_residue(sgl_handle_t handle, phys_addr_t * src_addr,
- phys_addr_t * dst_addr)
-{
- sgl_list_info_t *psgl = (sgl_list_info_t *) handle;
- ppc_dma_ch_t *p_dma_ch;
- ppc_sgl_t *pnext, *sgl_addr;
- uint32_t count_left;
-
- if (!handle) {
- printk("ppc4xx_get_dma_sgl_residue: null handle\n");
- return DMA_STATUS_BAD_HANDLE;
- } else if (psgl->dmanr > (MAX_PPC4xx_DMA_CHANNELS - 1)) {
- printk("ppc4xx_get_dma_sgl_residue: bad channel in handle %d\n",
- psgl->dmanr);
- return DMA_STATUS_BAD_CHANNEL;
- }
-
- sgl_addr = (ppc_sgl_t *) __va(mfdcr(DCRN_ASG0 + (psgl->dmanr * 0x8)));
- count_left = mfdcr(DCRN_DMACT0 + (psgl->dmanr * 0x8));
-
- if (!sgl_addr) {
- printk("ppc4xx_get_dma_sgl_residue: sgl addr register is null\n");
- goto error;
- }
-
- pnext = psgl->phead;
- while (pnext &&
- ((unsigned) pnext < ((unsigned) psgl + SGL_LIST_SIZE) &&
- (pnext != sgl_addr))
- ) {
- pnext++;
- }
-
- if (pnext == sgl_addr) { /* found the sgl descriptor */
-
- *src_addr = pnext->src_addr;
- *dst_addr = pnext->dst_addr;
-
- /*
- * Now search the remaining descriptors and add their count.
- * We already have the remaining count from this descriptor in
- * count_left.
- */
- pnext++;
-
- while ((pnext != psgl->ptail) &&
- ((unsigned) pnext < ((unsigned) psgl + SGL_LIST_SIZE))
- ) {
- count_left += pnext->control_count & SG_COUNT_MASK;
- }
-
- if (pnext != psgl->ptail) { /* should never happen */
- printk
- ("ppc4xx_get_dma_sgl_residue error (1) psgl->ptail 0x%x handle 0x%x\n",
- (unsigned int) psgl->ptail, (unsigned int) handle);
- goto error;
- }
-
- /* success */
- p_dma_ch = &dma_channels[psgl->dmanr];
- return (count_left << p_dma_ch->shift); /* count in bytes */
-
- } else {
- /* this shouldn't happen */
- printk
- ("get_dma_sgl_residue, unable to match current address 0x%x, handle 0x%x\n",
- (unsigned int) sgl_addr, (unsigned int) handle);
-
- }
-
- error:
- *src_addr = (phys_addr_t) NULL;
- *dst_addr = (phys_addr_t) NULL;
- return 0;
-}
-
-/*
- * Returns the address(es) of the buffer(s) contained in the head element of
- * the scatter/gather list. The element is removed from the scatter/gather
- * list and the next element becomes the head.
- *
- * This function should only be called when the DMA is not active.
- */
-int
-ppc4xx_delete_dma_sgl_element(sgl_handle_t handle, phys_addr_t * src_dma_addr,
- phys_addr_t * dst_dma_addr)
-{
- sgl_list_info_t *psgl = (sgl_list_info_t *) handle;
-
- if (!handle) {
- printk("ppc4xx_delete_sgl_element: null handle\n");
- return DMA_STATUS_BAD_HANDLE;
- } else if (psgl->dmanr > (MAX_PPC4xx_DMA_CHANNELS - 1)) {
- printk("ppc4xx_delete_sgl_element: bad channel in handle %d\n",
- psgl->dmanr);
- return DMA_STATUS_BAD_CHANNEL;
- }
-
- if (!psgl->phead) {
- printk("ppc4xx_delete_sgl_element: sgl list empty\n");
- *src_dma_addr = (phys_addr_t) NULL;
- *dst_dma_addr = (phys_addr_t) NULL;
- return DMA_STATUS_SGL_LIST_EMPTY;
- }
-
- *src_dma_addr = (phys_addr_t) psgl->phead->src_addr;
- *dst_dma_addr = (phys_addr_t) psgl->phead->dst_addr;
-
- if (psgl->phead == psgl->ptail) {
- /* last descriptor on the list */
- psgl->phead = NULL;
- psgl->ptail = NULL;
- } else {
- psgl->phead++;
- psgl->phead_dma += sizeof(ppc_sgl_t);
- }
-
- return DMA_STATUS_GOOD;
-}
-
-
-/*
- * Create a scatter/gather list handle. This is simply a structure which
- * describes a scatter/gather list.
- *
- * A handle is returned in "handle" which the driver should save in order to
- * be able to access this list later. A chunk of memory will be allocated
- * to be used by the API for internal management purposes, including managing
- * the sg list and allocating memory for the sgl descriptors. One page should
- * be more than enough for that purpose. Perhaps it's a bit wasteful to use
- * a whole page for a single sg list, but most likely there will be only one
- * sg list per channel.
- *
- * Interrupt notes:
- * Each sgl descriptor has a copy of the DMA control word which the DMA engine
- * loads in the control register. The control word has a "global" interrupt
- * enable bit for that channel. Interrupts are further qualified by a few bits
- * in the sgl descriptor count register. In order to setup an sgl, we have to
- * know ahead of time whether or not interrupts will be enabled at the completion
- * of the transfers. Thus, enable_dma_interrupt()/disable_dma_interrupt() MUST
- * be called before calling alloc_dma_handle(). If the interrupt mode will never
- * change after powerup, then enable_dma_interrupt()/disable_dma_interrupt()
- * do not have to be called -- interrupts will be enabled or disabled based
- * on how the channel was configured after powerup by the hw_init_dma_channel()
- * function. Each sgl descriptor will be setup to interrupt if an error occurs;
- * however, only the last descriptor will be setup to interrupt. Thus, an
- * interrupt will occur (if interrupts are enabled) only after the complete
- * sgl transfer is done.
- */
-int
-ppc4xx_alloc_dma_handle(sgl_handle_t * phandle, unsigned int mode, unsigned int dmanr)
-{
- sgl_list_info_t *psgl;
- dma_addr_t dma_addr;
- ppc_dma_ch_t *p_dma_ch = &dma_channels[dmanr];
- uint32_t sg_command;
- void *ret;
-
- if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
- printk("ppc4xx_alloc_dma_handle: invalid channel 0x%x\n", dmanr);
- return DMA_STATUS_BAD_CHANNEL;
- }
-
- if (!phandle) {
- printk("ppc4xx_alloc_dma_handle: null handle pointer\n");
- return DMA_STATUS_NULL_POINTER;
- }
-
- /* Get a page of memory, which is zeroed out by consistent_alloc() */
- ret = dma_alloc_coherent(NULL, DMA_PPC4xx_SIZE, &dma_addr, GFP_KERNEL);
- if (ret != NULL) {
- memset(ret, 0, DMA_PPC4xx_SIZE);
- psgl = (sgl_list_info_t *) ret;
- }
-
- if (psgl == NULL) {
- *phandle = (sgl_handle_t) NULL;
- return DMA_STATUS_OUT_OF_MEMORY;
- }
-
- psgl->dma_addr = dma_addr;
- psgl->dmanr = dmanr;
-
- /*
- * Modify and save the control word. These words will be
- * written to each sgl descriptor. The DMA engine then
- * loads this control word into the control register
- * every time it reads a new descriptor.
- */
- psgl->control = p_dma_ch->control;
- /* Clear all mode bits */
- psgl->control &= ~(DMA_TM_MASK | DMA_TD);
- /* Save control word and mode */
- psgl->control |= (mode | DMA_CE_ENABLE);
-
- /* In MM mode, we must set ETD/TCE */
- if (mode == DMA_MODE_MM)
- psgl->control |= DMA_ETD_OUTPUT | DMA_TCE_ENABLE;
-
- if (p_dma_ch->int_enable) {
- /* Enable channel interrupt */
- psgl->control |= DMA_CIE_ENABLE;
- } else {
- psgl->control &= ~DMA_CIE_ENABLE;
- }
-
- sg_command = mfdcr(DCRN_ASGC);
- sg_command |= SSG_MASK_ENABLE(dmanr);
-
- /* Enable SGL control access */
- mtdcr(DCRN_ASGC, sg_command);
- psgl->sgl_control = SG_ERI_ENABLE | SG_LINK;
-
- if (p_dma_ch->int_enable) {
- if (p_dma_ch->tce_enable)
- psgl->sgl_control |= SG_TCI_ENABLE;
- else
- psgl->sgl_control |= SG_ETI_ENABLE;
- }
-
- *phandle = (sgl_handle_t) psgl;
- return DMA_STATUS_GOOD;
-}
-
-/*
- * Destroy a scatter/gather list handle that was created by alloc_dma_handle().
- * The list must be empty (contain no elements).
- */
-void
-ppc4xx_free_dma_handle(sgl_handle_t handle)
-{
- sgl_list_info_t *psgl = (sgl_list_info_t *) handle;
-
- if (!handle) {
- printk("ppc4xx_free_dma_handle: got NULL\n");
- return;
- } else if (psgl->phead) {
- printk("ppc4xx_free_dma_handle: list not empty\n");
- return;
- } else if (!psgl->dma_addr) { /* should never happen */
- printk("ppc4xx_free_dma_handle: no dma address\n");
- return;
- }
-
- dma_free_coherent(NULL, DMA_PPC4xx_SIZE, (void *) psgl, 0);
-}
-
-EXPORT_SYMBOL(ppc4xx_alloc_dma_handle);
-EXPORT_SYMBOL(ppc4xx_free_dma_handle);
-EXPORT_SYMBOL(ppc4xx_add_dma_sgl);
-EXPORT_SYMBOL(ppc4xx_delete_dma_sgl_element);
-EXPORT_SYMBOL(ppc4xx_enable_dma_sgl);
-EXPORT_SYMBOL(ppc4xx_disable_dma_sgl);
-EXPORT_SYMBOL(ppc4xx_get_dma_sgl_residue);
pci->piwar2 = 0;
pci->piwar3 = 0;
- /* Setup Phys:PCI 1:1 outbound mem window @ MPC85XX_PCI1_LOWER_MEM */
+ /* Setup 512M Phys:PCI 1:1 outbound mem window @ 0x80000000 */
pci->potar1 = (MPC85XX_PCI1_LOWER_MEM >> 12) & 0x000fffff;
pci->potear1 = 0x00000000;
pci->powbar1 = (MPC85XX_PCI1_LOWER_MEM >> 12) & 0x000fffff;
- /* Enable, Mem R/W */
- pci->powar1 = 0x80044000 |
- (__ilog2(MPC85XX_PCI1_UPPER_MEM - MPC85XX_PCI1_LOWER_MEM + 1) - 1);
+ pci->powar1 = 0x8004401c; /* Enable, Mem R/W, 512M */
- /* Setup outboud IO windows @ MPC85XX_PCI1_IO_BASE */
+ /* Setup 16M outbound IO windows @ 0xe2000000 */
pci->potar2 = 0x00000000;
pci->potear2 = 0x00000000;
pci->powbar2 = (MPC85XX_PCI1_IO_BASE >> 12) & 0x000fffff;
- /* Enable, IO R/W */
- pci->powar2 = 0x80088000 | (__ilog2(MPC85XX_PCI1_IO_SIZE) - 1);
+ pci->powar2 = 0x80088017; /* Enable, IO R/W, 16M */
/* Setup 2G inbound Memory Window @ 0 */
pci->pitar1 = 0x00000000;
extern int mpc85xx_map_irq(struct pci_dev *dev, unsigned char idsel, unsigned char pin);
extern int mpc85xx_exclude_device(u_char bus, u_char devfn);
-#ifdef CONFIG_85xx_PCI2
+#if CONFIG_85xx_PCI2
static void __init
mpc85xx_setup_pci2(struct pci_controller *hose)
{
pci = ioremap(binfo->bi_immr_base + MPC85xx_PCI2_OFFSET,
MPC85xx_PCI2_SIZE);
- early_read_config_word(hose, hose->bus_offset, 0, PCI_COMMAND, &temps);
+ early_read_config_word(hose, 0, 0, PCI_COMMAND, &temps);
temps |= PCI_COMMAND_SERR | PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
- early_write_config_word(hose, hose->bus_offset, 0, PCI_COMMAND, temps);
- early_write_config_byte(hose, hose->bus_offset, 0, PCI_LATENCY_TIMER, 0x80);
+ early_write_config_word(hose, 0, 0, PCI_COMMAND, temps);
+ early_write_config_byte(hose, 0, 0, PCI_LATENCY_TIMER, 0x80);
/* Disable all windows (except powar0 since its ignored) */
pci->powar1 = 0;
pci->piwar2 = 0;
pci->piwar3 = 0;
- /* Setup Phys:PCI 1:1 outbound mem window @ MPC85XX_PCI2_LOWER_MEM */
+ /* Setup 512M Phys:PCI 1:1 outbound mem window @ 0xa0000000 */
pci->potar1 = (MPC85XX_PCI2_LOWER_MEM >> 12) & 0x000fffff;
pci->potear1 = 0x00000000;
pci->powbar1 = (MPC85XX_PCI2_LOWER_MEM >> 12) & 0x000fffff;
- /* Enable, Mem R/W */
- pci->powar1 = 0x80044000 |
- (__ilog2(MPC85XX_PCI1_UPPER_MEM - MPC85XX_PCI1_LOWER_MEM + 1) - 1);
+ pci->powar1 = 0x8004401c; /* Enable, Mem R/W, 512M */
- /* Setup outboud IO windows @ MPC85XX_PCI2_IO_BASE */
+ /* Setup 16M outbound IO windows @ 0xe3000000 */
pci->potar2 = 0x00000000;
pci->potear2 = 0x00000000;
pci->powbar2 = (MPC85XX_PCI2_IO_BASE >> 12) & 0x000fffff;
- /* Enable, IO R/W */
- pci->powar2 = 0x80088000 | (__ilog2(MPC85XX_PCI1_IO_SIZE) - 1);
+ pci->powar2 = 0x80088017; /* Enable, IO R/W, 16M */
/* Setup 2G inbound Memory Window @ 0 */
pci->pitar1 = 0x00000000;
#define __PPC_SYSLIB_PPC85XX_SETUP_H
#include <linux/config.h>
+#include <linux/serial.h>
#include <linux/init.h>
#include <asm/ppcboot.h>
/* All newworld pmac machines and CHRPs now use the interrupt tree */
for (np = allnodes; np != NULL; np = np->allnext) {
- if (get_property(np, "interrupt-parent", NULL)) {
+ if (get_property(np, "interrupt-parent", 0)) {
use_of_interrupt_tree = 1;
break;
}
struct device_node *child;
int *ip;
- np->name = get_property(np, "name", NULL);
- np->type = get_property(np, "device_type", NULL);
+ np->name = get_property(np, "name", 0);
+ np->type = get_property(np, "device_type", 0);
if (!np->name)
np->name = "<NULL>";
mem_start = finish_node_interrupts(np, mem_start);
/* Look for #address-cells and #size-cells properties. */
- ip = (int *) get_property(np, "#address-cells", NULL);
+ ip = (int *) get_property(np, "#address-cells", 0);
if (ip != NULL)
naddrc = *ip;
- ip = (int *) get_property(np, "#size-cells", NULL);
+ ip = (int *) get_property(np, "#size-cells", 0);
if (ip != NULL)
nsizec = *ip;
do {
if (np->parent)
np = np->parent;
- ip = (int *) get_property(np, "#address-cells", NULL);
+ ip = (int *) get_property(np, "#address-cells", 0);
if (ip != NULL)
return *ip;
} while (np->parent);
do {
if (np->parent)
np = np->parent;
- ip = (int *) get_property(np, "#size-cells", NULL);
+ ip = (int *) get_property(np, "#size-cells", 0);
if (ip != NULL)
return *ip;
} while (np->parent);
prevp = &np->next;
}
}
- *prevp = NULL;
+ *prevp = 0;
return head;
}
prevp = &np->next;
}
}
- *prevp = NULL;
+ *prevp = 0;
return head;
}
*prevp = np;
prevp = &np->next;
}
- *prevp = NULL;
+ *prevp = 0;
return head;
}
prevp = &np->next;
}
}
- *prevp = NULL;
+ *prevp = 0;
return head;
}
*lenp = pp->length;
return pp->value;
}
- return NULL;
+ return 0;
}
/*
static void * early_get_property(unsigned long base, unsigned long node,
char *prop);
-prom_entry prom __initdata;
-ihandle prom_chosen __initdata;
-ihandle prom_stdout __initdata;
+prom_entry prom __initdata = 0;
+ihandle prom_chosen __initdata = 0;
+ihandle prom_stdout __initdata = 0;
-char *prom_display_paths[FB_MAX] __initdata;
+char *prom_display_paths[FB_MAX] __initdata = { 0, };
phandle prom_display_nodes[FB_MAX] __initdata;
-unsigned int prom_num_displays __initdata;
-char *of_stdout_device __initdata;
-static ihandle prom_disp_node __initdata;
+unsigned int prom_num_displays __initdata = 0;
+char *of_stdout_device __initdata = 0;
+static ihandle prom_disp_node __initdata = 0;
unsigned int rtas_data; /* physical pointer */
unsigned int rtas_entry; /* physical pointer */
prom_args.args[i] = va_arg(list, void *);
va_end(list);
for (i = 0; i < nret; ++i)
- prom_args.args[i + nargs] = NULL;
+ prom_args.args[i + nargs] = 0;
prom(&prom_args);
return prom_args.args[nargs];
}
prom_args.args[i] = va_arg(list, void *);
va_end(list);
for (i = 0; i < nret; ++i)
- prom_args.args[i + nargs] = NULL;
+ prom_args.args[i + nargs] = 0;
prom(&prom_args);
for (i = 1; i < nret; ++i)
rets[i-1] = prom_args.args[nargs + i];
};
const unsigned char *clut;
- prom_disp_node = NULL;
+ prom_disp_node = 0;
- for (node = NULL; prom_next_node(&node); ) {
+ for (node = 0; prom_next_node(&node); ) {
type[0] = 0;
call_prom("getprop", 4, 1, node, "device_type",
type, sizeof(type));
}
allnextp = &allnodes;
mem_start = ALIGNUL(mem_start);
- new_start = inspect_node(root, NULL, mem_start, mem_end, &allnextp);
- *allnextp = NULL;
+ new_start = inspect_node(root, 0, mem_start, mem_end, &allnextp);
+ *allnextp = 0;
return new_start;
}
/* look for cpus */
*(unsigned long *)(0x0) = 0;
asm volatile("dcbf 0,%0": : "r" (0) : "memory");
- for (node = NULL; prom_next_node(&node); ) {
+ for (node = 0; prom_next_node(&node); ) {
type[0] = 0;
call_prom("getprop", 4, 1, node, "device_type",
type, sizeof(type));
prom_print("returning 0x");
prom_print_hex(phys);
prom_print("from prom_init\n");
- prom_stdout = NULL;
+ prom_stdout = 0;
return phys;
}
return (void *)((unsigned long)pp->value + base);
}
}
- return NULL;
+ return 0;
}
/* Is boot-info compatible ? */
boot_infos = PTRUNRELOC(bi);
if (!BOOT_INFO_IS_V2_COMPATIBLE(bi))
- bi->logicalDisplayBase = NULL;
+ bi->logicalDisplayBase = 0;
#ifdef CONFIG_BOOTX_TEXT
btext_init(bi);
/* The zero index is used to indicate the end of the list of
operands. */
#define UNUSED (0)
- { 0, 0, NULL, NULL, 0 },
+ { 0, 0, 0, 0, 0 },
/* The BA field in an XL form instruction. */
#define BA (1)
#define BA_MASK (0x1f << 16)
- { 5, 16, NULL, NULL, PPC_OPERAND_CR },
+ { 5, 16, 0, 0, PPC_OPERAND_CR },
/* The BA field in an XL form instruction when it must be the same
as the BT field in the same instruction. */
/* The BB field in an XL form instruction. */
#define BB (3)
#define BB_MASK (0x1f << 11)
- { 5, 11, NULL, NULL, PPC_OPERAND_CR },
+ { 5, 11, 0, 0, PPC_OPERAND_CR },
/* The BB field in an XL form instruction when it must be the same
as the BA field in the same instruction. */
/* The BF field in an X or XL form instruction. */
#define BF (11)
- { 3, 23, NULL, NULL, PPC_OPERAND_CR },
+ { 3, 23, 0, 0, PPC_OPERAND_CR },
/* An optional BF field. This is used for comparison instructions,
in which an omitted BF field is taken as zero. */
#define OBF (12)
- { 3, 23, NULL, NULL, PPC_OPERAND_CR | PPC_OPERAND_OPTIONAL },
+ { 3, 23, 0, 0, PPC_OPERAND_CR | PPC_OPERAND_OPTIONAL },
/* The BFA field in an X or XL form instruction. */
#define BFA (13)
- { 3, 18, NULL, NULL, PPC_OPERAND_CR },
+ { 3, 18, 0, 0, PPC_OPERAND_CR },
/* The BI field in a B form or XL form instruction. */
#define BI (14)
#define BI_MASK (0x1f << 16)
- { 5, 16, NULL, NULL, PPC_OPERAND_CR },
+ { 5, 16, 0, 0, PPC_OPERAND_CR },
/* The BO field in a B form instruction. Certain values are
illegal. */
/* The BT field in an X or XL form instruction. */
#define BT (17)
- { 5, 21, NULL, NULL, PPC_OPERAND_CR },
+ { 5, 21, 0, 0, PPC_OPERAND_CR },
/* The condition register number portion of the BI field in a B form
or XL form instruction. This is used for the extended
conditional branch mnemonics, which set the lower two bits of the
BI field. This field is optional. */
#define CR (18)
- { 3, 18, NULL, NULL, PPC_OPERAND_CR | PPC_OPERAND_OPTIONAL },
+ { 3, 18, 0, 0, PPC_OPERAND_CR | PPC_OPERAND_OPTIONAL },
/* The D field in a D form instruction. This is a displacement off
a register, and implies that the next operand is a register in
parentheses. */
#define D (19)
- { 16, 0, NULL, NULL, PPC_OPERAND_PARENS | PPC_OPERAND_SIGNED },
+ { 16, 0, 0, 0, PPC_OPERAND_PARENS | PPC_OPERAND_SIGNED },
/* The DS field in a DS form instruction. This is like D, but the
lower two bits are forced to zero. */
/* The FL1 field in a POWER SC form instruction. */
#define FL1 (21)
- { 4, 12, NULL, NULL, 0 },
+ { 4, 12, 0, 0, 0 },
/* The FL2 field in a POWER SC form instruction. */
#define FL2 (22)
- { 3, 2, NULL, NULL, 0 },
+ { 3, 2, 0, 0, 0 },
/* The FLM field in an XFL form instruction. */
#define FLM (23)
- { 8, 17, NULL, NULL, 0 },
+ { 8, 17, 0, 0, 0 },
/* The FRA field in an X or A form instruction. */
#define FRA (24)
#define FRA_MASK (0x1f << 16)
- { 5, 16, NULL, NULL, PPC_OPERAND_FPR },
+ { 5, 16, 0, 0, PPC_OPERAND_FPR },
/* The FRB field in an X or A form instruction. */
#define FRB (25)
#define FRB_MASK (0x1f << 11)
- { 5, 11, NULL, NULL, PPC_OPERAND_FPR },
+ { 5, 11, 0, 0, PPC_OPERAND_FPR },
/* The FRC field in an A form instruction. */
#define FRC (26)
#define FRC_MASK (0x1f << 6)
- { 5, 6, NULL, NULL, PPC_OPERAND_FPR },
+ { 5, 6, 0, 0, PPC_OPERAND_FPR },
/* The FRS field in an X form instruction or the FRT field in a D, X
or A form instruction. */
#define FRS (27)
#define FRT (FRS)
- { 5, 21, NULL, NULL, PPC_OPERAND_FPR },
+ { 5, 21, 0, 0, PPC_OPERAND_FPR },
/* The FXM field in an XFX instruction. */
#define FXM (28)
#define FXM_MASK (0xff << 12)
- { 8, 12, NULL, NULL, 0 },
+ { 8, 12, 0, 0, 0 },
/* The L field in a D or X form instruction. */
#define L (29)
- { 1, 21, NULL, NULL, PPC_OPERAND_OPTIONAL },
+ { 1, 21, 0, 0, PPC_OPERAND_OPTIONAL },
/* The LEV field in a POWER SC form instruction. */
#define LEV (30)
- { 7, 5, NULL, NULL, 0 },
+ { 7, 5, 0, 0, 0 },
/* The LI field in an I form instruction. The lower two bits are
forced to zero. */
/* The MB field in an M form instruction. */
#define MB (33)
#define MB_MASK (0x1f << 6)
- { 5, 6, NULL, NULL, 0 },
+ { 5, 6, 0, 0, 0 },
/* The ME field in an M form instruction. */
#define ME (34)
#define ME_MASK (0x1f << 1)
- { 5, 1, NULL, NULL, 0 },
+ { 5, 1, 0, 0, 0 },
/* The MB and ME fields in an M form instruction expressed a single
operand which is a bitmask indicating which bits to select. This
is a two operand form using PPC_OPERAND_NEXT. See the
description in opcode/ppc.h for what this means. */
#define MBE (35)
- { 5, 6, NULL, NULL, PPC_OPERAND_OPTIONAL | PPC_OPERAND_NEXT },
+ { 5, 6, 0, 0, PPC_OPERAND_OPTIONAL | PPC_OPERAND_NEXT },
{ 32, 0, insert_mbe, extract_mbe, 0 },
/* The MB or ME field in an MD or MDS form instruction. The high
/* The RA field in an D, DS, X, XO, M, or MDS form instruction. */
#define RA (40)
#define RA_MASK (0x1f << 16)
- { 5, 16, NULL, NULL, PPC_OPERAND_GPR },
+ { 5, 16, 0, 0, PPC_OPERAND_GPR },
/* The RA field in a D or X form instruction which is an updating
load, which means that the RA field may not be zero and may not
equal the RT field. */
#define RAL (41)
- { 5, 16, insert_ral, NULL, PPC_OPERAND_GPR },
+ { 5, 16, insert_ral, 0, PPC_OPERAND_GPR },
/* The RA field in an lmw instruction, which has special value
restrictions. */
#define RAM (42)
- { 5, 16, insert_ram, NULL, PPC_OPERAND_GPR },
+ { 5, 16, insert_ram, 0, PPC_OPERAND_GPR },
/* The RA field in a D or X form instruction which is an updating
store or an updating floating point load, which means that the RA
field may not be zero. */
#define RAS (43)
- { 5, 16, insert_ras, NULL, PPC_OPERAND_GPR },
+ { 5, 16, insert_ras, 0, PPC_OPERAND_GPR },
/* The RB field in an X, XO, M, or MDS form instruction. */
#define RB (44)
#define RB_MASK (0x1f << 11)
- { 5, 11, NULL, NULL, PPC_OPERAND_GPR },
+ { 5, 11, 0, 0, PPC_OPERAND_GPR },
/* The RB field in an X form instruction when it must be the same as
the RS field in the instruction. This is used for extended
#define RS (46)
#define RT (RS)
#define RT_MASK (0x1f << 21)
- { 5, 21, NULL, NULL, PPC_OPERAND_GPR },
+ { 5, 21, 0, 0, PPC_OPERAND_GPR },
/* The SH field in an X or M form instruction. */
#define SH (47)
#define SH_MASK (0x1f << 11)
- { 5, 11, NULL, NULL, 0 },
+ { 5, 11, 0, 0, 0 },
/* The SH field in an MD form instruction. This is split. */
#define SH6 (48)
/* The SI field in a D form instruction. */
#define SI (49)
- { 16, 0, NULL, NULL, PPC_OPERAND_SIGNED },
+ { 16, 0, 0, 0, PPC_OPERAND_SIGNED },
/* The SI field in a D form instruction when we accept a wide range
of positive values. */
#define SISIGNOPT (50)
- { 16, 0, NULL, NULL, PPC_OPERAND_SIGNED | PPC_OPERAND_SIGNOPT },
+ { 16, 0, 0, 0, PPC_OPERAND_SIGNED | PPC_OPERAND_SIGNOPT },
/* The SPR field in an XFX form instruction. This is flipped--the
lower 5 bits are stored in the upper 5 and vice- versa. */
/* The BAT index number in an XFX form m[ft]ibat[lu] instruction. */
#define SPRBAT (52)
#define SPRBAT_MASK (0x3 << 17)
- { 2, 17, NULL, NULL, 0 },
+ { 2, 17, 0, 0, 0 },
/* The SPRG register number in an XFX form m[ft]sprg instruction. */
#define SPRG (53)
#define SPRG_MASK (0x3 << 16)
- { 2, 16, NULL, NULL, 0 },
+ { 2, 16, 0, 0, 0 },
/* The SR field in an X form instruction. */
#define SR (54)
- { 4, 16, NULL, NULL, 0 },
+ { 4, 16, 0, 0, 0 },
/* The SV field in a POWER SC form instruction. */
#define SV (55)
- { 14, 2, NULL, NULL, 0 },
+ { 14, 2, 0, 0, 0 },
/* The TBR field in an XFX form instruction. This is like the SPR
field, but it is optional. */
/* The TO field in a D or X form instruction. */
#define TO (57)
#define TO_MASK (0x1f << 21)
- { 5, 21, NULL, NULL, 0 },
+ { 5, 21, 0, 0, 0 },
/* The U field in an X form instruction. */
#define U (58)
- { 4, 12, NULL, NULL, 0 },
+ { 4, 12, 0, 0, 0 },
/* The UI field in a D form instruction. */
#define UI (59)
- { 16, 0, NULL, NULL, 0 },
+ { 16, 0, 0, 0, 0 },
};
/* The functions used to insert and extract complicated operands. */
scc_initialized = 1;
if (via_modem) {
for (;;) {
- xmon_write(NULL, "ATE1V1\r", 7);
+ xmon_write(0, "ATE1V1\r", 7);
if (xmon_expect("OK", 5)) {
- xmon_write(NULL, "ATA\r", 4);
+ xmon_write(0, "ATA\r", 4);
if (xmon_expect("CONNECT", 40))
break;
}
- xmon_write(NULL, "+++", 3);
+ xmon_write(0, "+++", 3);
xmon_expect("OK", 3);
}
}
c = xmon_getchar();
if (c == -1) {
if (p == str)
- return NULL;
+ return 0;
break;
}
*p++ = c;
set_backlight_level(BACKLIGHT_MAX);
sync();
}
- debugger_fault_handler = NULL;
+ debugger_fault_handler = 0;
#endif /* CONFIG_PMAC_BACKLIGHT */
cmd = cmds(excp);
if (cmd == 's') {
insert_bpts();
}
xmon_leave();
- xmon_regs[smp_processor_id()] = NULL;
+ xmon_regs[smp_processor_id()] = 0;
#ifdef CONFIG_SMP
clear_bit(0, &got_xmon);
clear_bit(smp_processor_id(), &cpus_in_xmon);
for (i = 0; i < NBPTS; ++i, ++bp)
if (bp->enabled && pc == bp->address)
return bp;
- return NULL;
+ return 0;
}
static void
xmon_puts(sysmap);
sync();
}
- debugger_fault_handler = NULL;
+ debugger_fault_handler = 0;
}
else
printf("No System.map\n");
__delay(200);
n = size;
}
- debugger_fault_handler = NULL;
+ debugger_fault_handler = 0;
return n;
}
} else {
printf("*** Error writing address %x\n", adrs + n);
}
- debugger_fault_handler = NULL;
+ debugger_fault_handler = 0;
return n;
}
} else {
printf("*** %x exception occurred\n", fault_except);
}
- debugger_fault_handler = NULL;
+ debugger_fault_handler = 0;
}
/* Input scanning routines */
} while (cur);
sync();
}
- debugger_fault_handler = NULL;
+ debugger_fault_handler = 0;
termch = 0;
break;
}
*(ep++) = 0;
if (saddr)
*saddr = prev;
- debugger_fault_handler = NULL;
+ debugger_fault_handler = 0;
return rbuffer;
}
prev = next;
bail:
sync();
}
- debugger_fault_handler = NULL;
+ debugger_fault_handler = 0;
return NULL;
}
}
sync();
}
- debugger_fault_handler = NULL;
+ debugger_fault_handler = 0;
return result;
}
for handling hard and soft interrupts. This can help avoid
overflowing the process kernel stacks.
+endmenu
+
config SPINLINE
bool "Inline spinlock code at each call site"
depends on SMP && !PPC_SPLPAR && !PPC_ISERIES
If in doubt, say N.
-endmenu
-
source "kernel/vserver/Kconfig"
source "security/Kconfig"
* 2 of the License, or (at your option) any later version.
*/
#include <asm/ppc_asm.h>
+#include <asm/processor.h>
.globl __div64_32
__div64_32:
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
obj-$(CONFIG_PPC_OF) += of_device.o
-pci-obj-$(CONFIG_PPC_ISERIES) += iSeries_pci.o iSeries_pci_reset.o \
- iSeries_IoMmTable.o
-pci-obj-$(CONFIG_PPC_PSERIES) += pci_dn.o pci_dma_direct.o
+obj-$(CONFIG_PCI) += pci.o pci_dn.o pci_iommu.o
-obj-$(CONFIG_PCI) += pci.o pci_iommu.o $(pci-obj-y)
+ifdef CONFIG_PPC_ISERIES
+obj-$(CONFIG_PCI) += iSeries_pci.o iSeries_pci_reset.o \
+ iSeries_IoMmTable.o
+else
+obj-$(CONFIG_PCI) += pci_dma_direct.o
+endif
obj-$(CONFIG_PPC_ISERIES) += iSeries_irq.o \
iSeries_VpdInfo.o XmPciLpEvent.o \
obj-$(CONFIG_LPARCFG) += lparcfg.o
obj-$(CONFIG_HVC_CONSOLE) += hvconsole.o
obj-$(CONFIG_BOOTX_TEXT) += btext.o
-obj-$(CONFIG_HVCS) += hvcserver.o
obj-$(CONFIG_PPC_PMAC) += pmac_setup.o pmac_feature.o pmac_pci.o \
pmac_time.o pmac_nvram.o pmac_low_i2c.o \
DEFINE(PACASAVEDMSR, offsetof(struct paca_struct, saved_msr));
DEFINE(PACASTABREAL, offsetof(struct paca_struct, stab_real));
DEFINE(PACASTABVIRT, offsetof(struct paca_struct, stab_addr));
- DEFINE(PACASTABRR, offsetof(struct paca_struct, stab_rr));
+ DEFINE(PACASTABRR, offsetof(struct paca_struct, stab_next_rr));
DEFINE(PACAR1, offsetof(struct paca_struct, saved_r1));
DEFINE(PACATOC, offsetof(struct paca_struct, kernel_toc));
DEFINE(PACAPROCENABLED, offsetof(struct paca_struct, proc_enabled));
- DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
- DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
- DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
- DEFINE(PACASLBR3, offsetof(struct paca_struct, slb_r3));
-#ifdef CONFIG_HUGETLB_PAGE
- DEFINE(PACAHTLBSEGS, offsetof(struct paca_struct, context.htlb_segs));
-#endif /* CONFIG_HUGETLB_PAGE */
DEFINE(PACADEFAULTDECR, offsetof(struct paca_struct, default_decr));
DEFINE(PACAPROFENABLED, offsetof(struct paca_struct, prof_enabled));
DEFINE(PACAPROFLEN, offsetof(struct paca_struct, prof_len));
cur_cpu_spec->firmware_features);
}
-void chrp_progress(char *s, unsigned short hex)
+void
+chrp_progress(char *s, unsigned short hex)
{
struct device_node *root;
int width, *p;
return;
if (max_width == 0) {
- if ((root = find_path_device("/rtas")) &&
+ if ( (root = find_path_device("/rtas")) &&
(p = (unsigned int *)get_property(root,
"ibm,display-line-length",
- NULL)))
+ NULL)) )
max_width = *p;
else
max_width = 0x10;
display_character = rtas_token("display-character");
set_indicator = rtas_token("set-indicator");
}
-
if (display_character == RTAS_UNKNOWN_SERVICE) {
+ /* use hex display */
+ if (set_indicator == RTAS_UNKNOWN_SERVICE)
+ return;
+ rtas_call(set_indicator, 3, 1, NULL, 6, 0, hex);
+ return;
+ }
+
+ if(display_character == RTAS_UNKNOWN_SERVICE) {
/* use hex display if available */
- if (set_indicator != RTAS_UNKNOWN_SERVICE)
+ if(set_indicator != RTAS_UNKNOWN_SERVICE)
rtas_call(set_indicator, 3, 1, NULL, 6, 0, hex);
return;
}
spin_lock(&progress_lock);
- /*
- * Last write ended with newline, but we didn't print it since
+ /* Last write ended with newline, but we didn't print it since
* it would just clear the bottom line of output. Print it now
* instead.
*
* If no newline is pending, print a CR to start output at the
* beginning of the line.
*/
- if (pending_newline) {
+ if(pending_newline) {
rtas_call(display_character, 1, 1, NULL, '\r');
rtas_call(display_character, 1, 1, NULL, '\n');
pending_newline = 0;
- } else {
+ } else
rtas_call(display_character, 1, 1, NULL, '\r');
- }
width = max_width;
os = s;
while (*os) {
- if (*os == '\n' || *os == '\r') {
+ if(*os == '\n' || *os == '\r') {
/* Blank to end of line. */
- while (width-- > 0)
+ while(width-- > 0)
rtas_call(display_character, 1, 1, NULL, ' ');
/* If newline is the last character, save it
* until next call to avoid bumping up the
* display output.
*/
- if (*os == '\n' && !os[1]) {
+ if(*os == '\n' && !os[1]) {
pending_newline = 1;
spin_unlock(&progress_lock);
return;
/* RTAS wants CR-LF, not just LF */
- if (*os == '\n') {
+ if(*os == '\n') {
rtas_call(display_character, 1, 1, NULL, '\r');
rtas_call(display_character, 1, 1, NULL, '\n');
} else {
os++;
/* if we overwrite the screen length */
- if (width <= 0)
- while ((*os != 0) && (*os != '\n') && (*os != '\r'))
+ if ( width <= 0 )
+ while ( (*os != 0) && (*os != '\n') && (*os != '\r') )
os++;
}
/* Blank to end of line. */
- while (width-- > 0)
- rtas_call(display_character, 1, 1, NULL, ' ');
+ while ( width-- > 0 )
+ rtas_call(display_character, 1, 1, NULL, ' ' );
spin_unlock(&progress_lock);
}
{
struct eeh_early_enable_info *info = data;
int ret;
- char *status = get_property(dn, "status", NULL);
- u32 *class_code = (u32 *)get_property(dn, "class-code", NULL);
- u32 *vendor_id = (u32 *)get_property(dn, "vendor-id", NULL);
- u32 *device_id = (u32 *)get_property(dn, "device-id", NULL);
+ char *status = get_property(dn, "status", 0);
+ u32 *class_code = (u32 *)get_property(dn, "class-code", 0);
+ u32 *vendor_id = (u32 *)get_property(dn, "vendor-id", 0);
+ u32 *device_id = (u32 *)get_property(dn, "device-id", 0);
u32 *regs;
int enable;
/* Ok... see if this device supports EEH. Some do, some don't,
* and the only way to find out is to check each and every one. */
- regs = (u32 *)get_property(dn, "reg", NULL);
+ regs = (u32 *)get_property(dn, "reg", 0);
if (regs) {
/* First register entry is addr (00BBSS00) */
/* Try to enable eeh */
info.buid_lo = BUID_LO(buid);
info.buid_hi = BUID_HI(buid);
- traverse_pci_devices(phb, early_enable_eeh, &info);
+ traverse_pci_devices(phb, early_enable_eeh, NULL, &info);
}
if (eeh_subsystem_enabled) {
/* Build list of strings to match */
nstrs = 0;
- s = (char *)get_property(dn, "ibm,loc-code", NULL);
+ s = (char *)get_property(dn, "ibm,loc-code", 0);
if (s)
strs[nstrs++] = s;
sprintf(devname, "dev%04x:%04x", vendor_id, device_id);
*/
ld r11,.SYS_CALL_TABLE@toc(2)
andi. r10,r10,_TIF_32BIT
- beq 15f
+ beq- 15f
ld r11,.SYS_CALL_TABLE32@toc(2)
clrldi r3,r3,32
clrldi r4,r4,32
15:
slwi r0,r0,3
ldx r10,r11,r0 /* Fetch system call handler [ptr] */
- mtctr r10
- bctrl /* Call handler */
+ mtlr r10
+ blrl /* Call handler */
syscall_exit:
#ifdef SHOW_SYSCALLS
stdcx. r0,0,r1 /* to clear the reservation */
andi. r6,r8,MSR_PR
ld r4,_LINK(r1)
- beq- 1f /* only restore r13 if */
+ beq 1f /* only restore r13 if */
ld r13,GPR13(r1) /* returning to usermode */
1: ld r2,GPR2(r1)
ld r1,GPR1(r1)
mtspr SRR0,r7
mtspr SRR1,r8
rfid
- b . /* prevent speculative execution */
syscall_enosys:
li r3,-ENOSYS
ld r1,GPR1(r1)
rfid
- b . /* prevent speculative execution */
+ b .
/* Note: this must change if we start using the TIF_NOTIFY_RESUME bit */
do_work:
mtspr SRR0,r5
mtspr SRR1,r6
rfid
- b . /* prevent speculative execution */
_STATIC(rtas_return_loc)
/* relocation is off at this point */
mtspr SRR0,r3
mtspr SRR1,r4
rfid
- b . /* prevent speculative execution */
_STATIC(rtas_restore_regs)
/* relocation is on at this point */
#define EX_R13 32
#define EX_SRR0 40
#define EX_DAR 48
-#define EX_LR 48 /* SLB miss saves LR, but not DAR */
#define EX_DSISR 56
#define EX_CCR 60
mtspr SRR0,r12; \
mfspr r12,SRR1; /* and SRR1 */ \
mtspr SRR1,r10; \
- rfid; \
- b . /* prevent speculative execution */
+ rfid
/*
* This is the start of the interrupt handlers for iSeries
. = n; \
.globl label##_Pseries; \
label##_Pseries: \
- HMT_MEDIUM; \
mtspr SPRG1,r13; /* save r13 */ \
EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
#define STD_EXCEPTION_ISERIES(n, label, area) \
.globl label##_Iseries; \
label##_Iseries: \
- HMT_MEDIUM; \
mtspr SPRG1,r13; /* save r13 */ \
EXCEPTION_PROLOG_ISERIES_1(area); \
EXCEPTION_PROLOG_ISERIES_2; \
#define MASKABLE_EXCEPTION_ISERIES(n, label) \
.globl label##_Iseries; \
label##_Iseries: \
- HMT_MEDIUM; \
mtspr SPRG1,r13; /* save r13 */ \
EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN); \
lbz r10,PACAPROFENABLED(r13); \
. = 0x200
_MachineCheckPseries:
- HMT_MEDIUM
mtspr SPRG1,r13 /* save r13 */
EXCEPTION_PROLOG_PSERIES(PACA_EXMC, MachineCheck_common)
. = 0x300
.globl DataAccess_Pseries
DataAccess_Pseries:
- HMT_MEDIUM
mtspr SPRG1,r13
BEGIN_FTR_SECTION
mtspr SPRG2,r12
. = 0x380
.globl DataAccessSLB_Pseries
DataAccessSLB_Pseries:
- HMT_MEDIUM
mtspr SPRG1,r13
- mfspr r13,SPRG3 /* get paca address into r13 */
- std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
- std r10,PACA_EXSLB+EX_R10(r13)
- std r11,PACA_EXSLB+EX_R11(r13)
- std r12,PACA_EXSLB+EX_R12(r13)
- std r3,PACASLBR3(r13)
- mfspr r9,SPRG1
- std r9,PACA_EXSLB+EX_R13(r13)
- mfcr r9
- clrrdi r12,r13,32 /* get high part of &label */
- mfmsr r10
- mfspr r11,SRR0 /* save SRR0 */
- ori r12,r12,(.do_slb_miss)@l
- ori r10,r10,MSR_IR|MSR_DR /* DON'T set RI for SLB miss */
- mtspr SRR0,r12
- mfspr r12,SRR1 /* and SRR1 */
- mtspr SRR1,r10
- mfspr r3,DAR
- rfid
- b . /* prevent speculative execution */
+ mtspr SPRG2,r12
+ mfspr r13,DAR
+ mfcr r12
+ srdi r13,r13,60
+ cmpdi r13,0xc
+ beq .do_slb_bolted_Pseries
+ mtcrf 0x80,r12
+ mfspr r12,SPRG2
+ EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, DataAccessSLB_common)
STD_EXCEPTION_PSERIES(0x400, InstructionAccess)
-
- . = 0x480
- .globl InstructionAccessSLB_Pseries
-InstructionAccessSLB_Pseries:
- HMT_MEDIUM
- mtspr SPRG1,r13
- mfspr r13,SPRG3 /* get paca address into r13 */
- std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
- std r10,PACA_EXSLB+EX_R10(r13)
- std r11,PACA_EXSLB+EX_R11(r13)
- std r12,PACA_EXSLB+EX_R12(r13)
- std r3,PACASLBR3(r13)
- mfspr r9,SPRG1
- std r9,PACA_EXSLB+EX_R13(r13)
- mfcr r9
- clrrdi r12,r13,32 /* get high part of &label */
- mfmsr r10
- mfspr r11,SRR0 /* save SRR0 */
- ori r12,r12,(.do_slb_miss)@l
- ori r10,r10,MSR_IR|MSR_DR /* DON'T set RI for SLB miss */
- mtspr SRR0,r12
- mfspr r12,SRR1 /* and SRR1 */
- mtspr SRR1,r10
- mr r3,r11 /* SRR0 is faulting address */
- rfid
- b . /* prevent speculative execution */
-
+ STD_EXCEPTION_PSERIES(0x480, InstructionAccessSLB)
STD_EXCEPTION_PSERIES(0x500, HardwareInterrupt)
STD_EXCEPTION_PSERIES(0x600, Alignment)
STD_EXCEPTION_PSERIES(0x700, ProgramCheck)
. = 0xc00
.globl SystemCall_Pseries
SystemCall_Pseries:
- HMT_MEDIUM
mr r9,r13
mfmsr r10
mfspr r13,SPRG3
mfspr r12,SRR1
mtspr SRR1,r10
rfid
- b . /* prevent speculative execution */
STD_EXCEPTION_PSERIES(0xd00, SingleStep)
STD_EXCEPTION_PSERIES(0xe00, Trap_0e)
mfspr r12,SPRG2
EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)
+_GLOBAL(do_slb_bolted_Pseries)
+ mtcrf 0x80,r12
+ mfspr r12,SPRG2
+ EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_slb_bolted)
+
/* Space for the naca. Architected to be located at real address
* NACA_PHYS_ADDR. Various tools rely on this location being fixed.
.globl DataAccessSLB_Iseries
DataAccessSLB_Iseries:
mtspr SPRG1,r13 /* save r13 */
- EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
- std r3,PACASLBR3(r13)
- ld r11,PACALPPACA+LPPACASRR0(r13)
- ld r12,PACALPPACA+LPPACASRR1(r13)
- mfspr r3,DAR
- b .do_slb_miss
-
- STD_EXCEPTION_ISERIES(0x400, InstructionAccess, PACA_EXGEN)
+ mtspr SPRG2,r12
+ mfspr r13,DAR
+ mfcr r12
+ srdi r13,r13,60
+ cmpdi r13,0xc
+ beq .do_slb_bolted_Iseries
+ mtcrf 0x80,r12
+ mfspr r12,SPRG2
+ EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN)
+ EXCEPTION_PROLOG_ISERIES_2
+ b DataAccessSLB_common
- .globl InstructionAccessSLB_Iseries
-InstructionAccessSLB_Iseries:
- mtspr SPRG1,r13 /* save r13 */
+.do_slb_bolted_Iseries:
+ mtcrf 0x80,r12
+ mfspr r12,SPRG2
EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
- std r3,PACASLBR3(r13)
- ld r11,PACALPPACA+LPPACASRR0(r13)
- ld r12,PACALPPACA+LPPACASRR1(r13)
- mr r3,r11
- b .do_slb_miss
+ EXCEPTION_PROLOG_ISERIES_2
+ b .do_slb_bolted
+ STD_EXCEPTION_ISERIES(0x400, InstructionAccess, PACA_EXGEN)
+ STD_EXCEPTION_ISERIES(0x480, InstructionAccessSLB, PACA_EXGEN)
MASKABLE_EXCEPTION_ISERIES(0x500, HardwareInterrupt)
STD_EXCEPTION_ISERIES(0x600, Alignment, PACA_EXGEN)
STD_EXCEPTION_ISERIES(0x700, ProgramCheck, PACA_EXGEN)
li r11,1
stb r11,PACALPPACA+LPPACADECRINT(r13)
lwz r12,PACADEFAULTDECR(r13)
- mtspr SPRN_DEC,r12
+ mtspr DEC,r12
/* fall through */
.globl HardwareInterrupt_Iseries_masked
ld r12,PACA_EXGEN+EX_R12(r13)
ld r13,PACA_EXGEN+EX_R13(r13)
rfid
- b . /* prevent speculative execution */
#endif
/*
. = 0x8000
.globl SystemReset_FWNMI
SystemReset_FWNMI:
- HMT_MEDIUM
mtspr SPRG1,r13 /* save r13 */
EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, SystemReset_common)
.globl MachineCheck_FWNMI
MachineCheck_FWNMI:
- HMT_MEDIUM
mtspr SPRG1,r13 /* save r13 */
EXCEPTION_PROLOG_PSERIES(PACA_EXMC, MachineCheck_common)
REST_4GPRS(10, r1)
ld r1,GPR1(r1)
rfid
- b . /* prevent speculative execution */
unrecov_fer:
bl .save_nvgprs
li r5,0x300
b .do_hash_page /* Try to handle as hpte fault */
+ .align 7
+ .globl DataAccessSLB_common
+DataAccessSLB_common:
+ mfspr r10,DAR
+ std r10,PACA_EXGEN+EX_DAR(r13)
+ EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
+ ld r3,PACA_EXGEN+EX_DAR(r13)
+ std r3,_DAR(r1)
+ bl .slb_allocate
+ cmpdi r3,0 /* Check return code */
+ beq fast_exception_return /* Return if we succeeded */
+ li r5,0
+ std r5,_DSISR(r1)
+ b .handle_page_fault
+
.align 7
.globl InstructionAccess_common
InstructionAccess_common:
li r5,0x400
b .do_hash_page /* Try to handle as hpte fault */
+ .align 7
+ .globl InstructionAccessSLB_common
+InstructionAccessSLB_common:
+ EXCEPTION_PROLOG_COMMON(0x480, PACA_EXGEN)
+ ld r3,_NIP(r1) /* SRR0 = NIA */
+ bl .slb_allocate
+ or. r3,r3,r3 /* Check return code */
+ beq+ fast_exception_return /* Return if we succeeded */
+
+ ld r4,_NIP(r1)
+ li r5,0
+ std r4,_DAR(r1)
+ std r5,_DSISR(r1)
+ b .handle_page_fault
+
.align 7
.globl HardwareInterrupt_common
.globl HardwareInterrupt_entry
bl .local_irq_restore
b 11f
#else
- beq fast_exception_return /* Return from exception on success */
+ beq+ fast_exception_return /* Return from exception on success */
/* fall through */
#endif
ld r12,PACA_EXSLB+EX_R12(r13)
ld r13,PACA_EXSLB+EX_R13(r13)
rfid
- b . /* prevent speculative execution */
/*
* r13 points to the PACA, r9 contains the saved CR,
* r11 and r12 contain the saved SRR0 and SRR1.
- * r3 has the faulting address
* r9 - r13 are saved in paca->exslb.
- * r3 is saved in paca->slb_r3
* We assume we aren't going to take any exceptions during this procedure.
*/
-_GLOBAL(do_slb_miss)
- mflr r10
-
+/* XXX note fix masking in get_kernel_vsid to match */
+_GLOBAL(do_slb_bolted)
stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */
- std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
- bl .slb_allocate /* handle it */
+ /*
+ * We take the next entry, round robin. Previously we tried
+ * to find a free slot first but that took too long. Unfortunately
+ * we dont have any LRU information to help us choose a slot.
+ */
- /* All done -- return from exception. */
+ /* r13 = paca */
+1: ld r10,PACASTABRR(r13)
+ addi r9,r10,1
+ cmpdi r9,SLB_NUM_ENTRIES
+ blt+ 2f
+ li r9,2 /* dont touch slot 0 or 1 */
+2: std r9,PACASTABRR(r13)
+
+ /* r13 = paca, r10 = entry */
+
+ /*
+ * Never cast out the segment for our kernel stack. Since we
+ * dont invalidate the ERAT we could have a valid translation
+ * for the kernel stack during the first part of exception exit
+ * which gets invalidated due to a tlbie from another cpu at a
+ * non recoverable point (after setting srr0/1) - Anton
+ */
+ slbmfee r9,r10
+ srdi r9,r9,27
+ /*
+ * Use paca->ksave as the value of the kernel stack pointer,
+ * because this is valid at all times.
+ * The >> 27 (rather than >> 28) is so that the LSB is the
+ * valid bit - this way we check valid and ESID in one compare.
+ * In order to completely close the tiny race in the context
+ * switch (between updating r1 and updating paca->ksave),
+ * we check against both r1 and paca->ksave.
+ */
+ srdi r11,r1,27
+ ori r11,r11,1
+ cmpd r11,r9
+ beq- 1b
+ ld r11,PACAKSAVE(r13)
+ srdi r11,r11,27
+ ori r11,r11,1
+ cmpd r11,r9
+ beq- 1b
+
+ /* r13 = paca, r10 = entry */
+
+ /* (((ea >> 28) & 0x1fff) << 15) | (ea >> 60) */
+ mfspr r9,DAR
+ rldicl r11,r9,36,51
+ sldi r11,r11,15
+ srdi r9,r9,60
+ or r11,r11,r9
- ld r10,PACA_EXSLB+EX_LR(r13)
- ld r3,PACASLBR3(r13)
+ /* VSID_RANDOMIZER */
+ li r9,9
+ sldi r9,r9,32
+ oris r9,r9,58231
+ ori r9,r9,39831
+
+ /* vsid = (ordinal * VSID_RANDOMIZER) & VSID_MASK */
+ mulld r11,r11,r9
+ clrldi r11,r11,28
+
+ /* r13 = paca, r10 = entry, r11 = vsid */
+
+ /* Put together slb word1 */
+ sldi r11,r11,12
+
+BEGIN_FTR_SECTION
+ /* set kp and c bits */
+ ori r11,r11,0x480
+END_FTR_SECTION_IFCLR(CPU_FTR_16M_PAGE)
+BEGIN_FTR_SECTION
+ /* set kp, l and c bits */
+ ori r11,r11,0x580
+END_FTR_SECTION_IFSET(CPU_FTR_16M_PAGE)
+
+ /* r13 = paca, r10 = entry, r11 = slb word1 */
+
+ /* Put together slb word0 */
+ mfspr r9,DAR
+ clrrdi r9,r9,28 /* get the new esid */
+ oris r9,r9,0x800 /* set valid bit */
+ rldimi r9,r10,0,52 /* insert entry */
+
+ /* r13 = paca, r9 = slb word0, r11 = slb word1 */
+
+ /*
+ * No need for an isync before or after this slbmte. The exception
+ * we enter with and the rfid we exit with are context synchronizing .
+ */
+ slbmte r11,r9
+
+ /* All done -- return from exception. */
lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
ld r11,PACA_EXSLB+EX_SRR0(r13) /* get saved SRR0 */
- mtlr r10
-
andi. r10,r12,MSR_RI /* check for unrecoverable exception */
beq- unrecov_slb
-.machine push
-.machine "power4"
+ /*
+ * Until everyone updates binutils hardwire the POWER4 optimised
+ * single field mtcrf
+ */
+#if 0
+ .machine push
+ .machine "power4"
mtcrf 0x80,r9
- mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
-.machine pop
+ .machine pop
+#else
+ .long 0x7d380120
+#endif
+
+ mfmsr r10
+ clrrdi r10,r10,2
+ mtmsrd r10,1
mtspr SRR0,r11
mtspr SRR1,r12
ld r12,PACA_EXSLB+EX_R12(r13)
ld r13,PACA_EXSLB+EX_R13(r13)
rfid
- b . /* prevent speculative execution */
unrecov_slb:
EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
mtspr SRR1,r3
sync
rfid
- b . /* prevent speculative execution */
_GLOBAL(__start_initialization_pSeries)
mr r31,r3 /* save parameters */
mr r30,r4
mtspr SRR0,r3
mtspr SRR1,r4
rfid
- b . /* prevent speculative execution */
/*
* Running with relocation on at this point. All we want to do is
mtspr SRR0,r3
mtspr SRR1,r4
rfid
- b . /* prevent speculative execution */
#endif /* CONFIG_PPC_PSERIES */
/* This is where all platforms converge execution */
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <asm/hvcall.h>
#include <asm/prom.h>
#include <asm/hvconsole.h>
return 0;
}
-EXPORT_SYMBOL(hvc_get_chars);
-
int hvc_put_chars(int index, const char *buf, int count)
{
unsigned long *lbuf = (unsigned long *) buf;
return -1;
}
-EXPORT_SYMBOL(hvc_put_chars);
-
/* return the number of client vterms present */
/* XXX this requires an interface change to handle multiple discontiguous
* vterms */
* we should _always_ be able to find one. */
vty = of_find_node_by_name(NULL, "vty");
if (vty && device_is_compatible(vty, "hvterm1")) {
- u32 *termno = (u32 *)get_property(vty, "reg", NULL);
+ u32 *termno = (u32 *)get_property(vty, "reg", 0);
if (termno && start_termno)
*start_termno = *termno;
+++ /dev/null
-/*
- * hvcserver.c
- * Copyright (C) 2004 Ryan S Arnold, IBM Corporation
- *
- * PPC64 virtual I/O console server support.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <asm/hvcall.h>
-#include <asm/hvcserver.h>
-#include <asm/io.h>
-
-#define HVCS_ARCH_VERSION "1.0.0"
-
-MODULE_AUTHOR("Ryan S. Arnold <rsa@us.ibm.com>");
-MODULE_DESCRIPTION("IBM hvcs ppc64 API");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(HVCS_ARCH_VERSION);
-
-/*
- * Convert arch specific return codes into relevant errnos. The hvcs
- * functions aren't performance sensitive, so this conversion isn't an
- * issue.
- */
-int hvcs_convert(long to_convert)
-{
- switch (to_convert) {
- case H_Success:
- return 0;
- case H_Parameter:
- return -EINVAL;
- case H_Hardware:
- return -EIO;
- case H_Busy:
- case H_LongBusyOrder1msec:
- case H_LongBusyOrder10msec:
- case H_LongBusyOrder100msec:
- case H_LongBusyOrder1sec:
- case H_LongBusyOrder10sec:
- case H_LongBusyOrder100sec:
- return -EBUSY;
- case H_Function: /* fall through */
- default:
- return -EPERM;
- }
-}
-
-int hvcs_free_partner_info(struct list_head *head)
-{
- struct hvcs_partner_info *pi;
- struct list_head *element;
-
- if (!head) {
- return -EINVAL;
- }
-
- while (!list_empty(head)) {
- element = head->next;
- pi = list_entry(element, struct hvcs_partner_info, node);
- list_del(element);
- kfree(pi);
- }
-
- return 0;
-}
-EXPORT_SYMBOL(hvcs_free_partner_info);
-
-/* Helper function for hvcs_get_partner_info */
-int hvcs_next_partner(unsigned int unit_address,
- unsigned long last_p_partition_ID,
- unsigned long last_p_unit_address, unsigned long *pi_buff)
-
-{
- long retval;
- retval = plpar_hcall_norets(H_VTERM_PARTNER_INFO, unit_address,
- last_p_partition_ID,
- last_p_unit_address, virt_to_phys(pi_buff));
- return hvcs_convert(retval);
-}
-
-/*
- * The unit_address parameter is the unit address of the vty-server vdevice
- * in whose partner information the caller is interested. This function
- * uses a pointer to a list_head instance in which to store the partner info.
- * This function returns non-zero on success, or if there is no partner info.
- *
- * Invocation of this function should always be followed by an invocation of
- * hvcs_free_partner_info() using a pointer to the SAME list head instance
- * that was used to store the partner_info list.
- */
-int hvcs_get_partner_info(unsigned int unit_address, struct list_head *head,
- unsigned long *pi_buff)
-{
- /*
- * This is a page sized buffer to be passed to hvcall per invocation.
- * NOTE: the first long returned is unit_address. The second long
- * returned is the partition ID and starting with pi_buff[2] are
- * HVCS_CLC_LENGTH characters, which are diff size than the unsigned
- * long, hence the casting mumbojumbo you see later.
- */
- unsigned long last_p_partition_ID;
- unsigned long last_p_unit_address;
- struct hvcs_partner_info *next_partner_info = NULL;
- int more = 1;
- int retval;
-
- memset(pi_buff, 0x00, PAGE_SIZE);
- /* invalid parameters */
- if (!head)
- return -EINVAL;
-
- last_p_partition_ID = last_p_unit_address = ~0UL;
- INIT_LIST_HEAD(head);
-
- if (!pi_buff)
- return -ENOMEM;
-
- do {
- retval = hvcs_next_partner(unit_address, last_p_partition_ID,
- last_p_unit_address, pi_buff);
- if (retval) {
- /*
- * Don't indicate that we've failed if we have
- * any list elements.
- */
- if (!list_empty(head))
- return 0;
- return retval;
- }
-
- last_p_partition_ID = pi_buff[0];
- last_p_unit_address = pi_buff[1];
-
- /* This indicates that there are no further partners */
- if (last_p_partition_ID == ~0UL
- && last_p_unit_address == ~0UL)
- break;
-
- /* This is a very small struct and will be freed soon in
- * hvcs_free_partner_info(). */
- next_partner_info = kmalloc(sizeof(struct hvcs_partner_info),
- GFP_ATOMIC);
-
- if (!next_partner_info) {
- printk(KERN_WARNING "HVCONSOLE: kmalloc() failed to"
- " allocate partner info struct.\n");
- hvcs_free_partner_info(head);
- return -ENOMEM;
- }
-
- next_partner_info->unit_address
- = (unsigned int)last_p_unit_address;
- next_partner_info->partition_ID
- = (unsigned int)last_p_partition_ID;
-
- /* copy the Null-term char too */
- strncpy(&next_partner_info->location_code[0],
- (char *)&pi_buff[2],
- strlen((char *)&pi_buff[2]) + 1);
-
- list_add_tail(&(next_partner_info->node), head);
- next_partner_info = NULL;
-
- } while (more);
-
- return 0;
-}
-EXPORT_SYMBOL(hvcs_get_partner_info);
-
-/*
- * If this function is called once and -EINVAL is returned it may
- * indicate that the partner info needs to be refreshed for the
- * target unit address at which point the caller must invoke
- * hvcs_get_partner_info() and then call this function again. If,
- * for a second time, -EINVAL is returned then it indicates that
- * there is probably already a partner connection registered to a
- * different vty-server@ vdevice. It is also possible that a second
- * -EINVAL may indicate that one of the parms is not valid, for
- * instance if the link was removed between the vty-server@ vdevice
- * and the vty@ vdevice that you are trying to open. Don't shoot the
- * messenger. Firmware implemented it this way.
- */
-int hvcs_register_connection( unsigned int unit_address,
- unsigned int p_partition_ID, unsigned int p_unit_address)
-{
- long retval;
- retval = plpar_hcall_norets(H_REGISTER_VTERM, unit_address,
- p_partition_ID, p_unit_address);
- return hvcs_convert(retval);
-}
-EXPORT_SYMBOL(hvcs_register_connection);
-
-/*
- * If -EBUSY is returned continue to call this function
- * until 0 is returned.
- */
-int hvcs_free_connection(unsigned int unit_address)
-{
- long retval;
- retval = plpar_hcall_norets(H_FREE_VTERM, unit_address);
- return hvcs_convert(retval);
-}
-EXPORT_SYMBOL(hvcs_free_connection);
#include <asm/mmu_context.h>
#include <asm/iSeries/HvCallHpt.h>
#include <asm/abs_addr.h>
-#include <linux/spinlock.h>
-
-static spinlock_t iSeries_hlocks[64] __cacheline_aligned_in_smp = { [0 ... 63] = SPIN_LOCK_UNLOCKED};
-/*
- * Very primitive algorithm for picking up a lock
- */
-static inline void iSeries_hlock(unsigned long slot)
-{
- if (slot & 0x8)
- slot = ~slot;
- spin_lock(&iSeries_hlocks[(slot >> 4) & 0x3f]);
-}
+#if 0
+#include <linux/spinlock.h>
+#include <linux/bitops.h>
+#include <linux/threads.h>
+#include <linux/smp.h>
-static inline void iSeries_hunlock(unsigned long slot)
-{
- if (slot & 0x8)
- slot = ~slot;
- spin_unlock(&iSeries_hlocks[(slot >> 4) & 0x3f]);
-}
+#include <asm/tlbflush.h>
+#include <asm/tlb.h>
+#include <asm/cputable.h>
+#endif
static long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va,
unsigned long prpn, int secondary,
if (secondary)
return -1;
- iSeries_hlock(hpte_group);
-
slot = HvCallHpt_findValid(&lhpte, va >> PAGE_SHIFT);
- BUG_ON(lhpte.dw0.dw0.v);
+ if (lhpte.dw0.dw0.v)
+ panic("select_hpte_slot found entry already valid\n");
- if (slot == -1) { /* No available entry found in either group */
- iSeries_hunlock(hpte_group);
+ if (slot == -1) /* No available entry found in either group */
return -1;
- }
if (slot < 0) { /* MSB set means secondary group */
secondary = 1;
/* Now fill in the actual HPTE */
HvCallHpt_addValidate(slot, secondary, &lhpte);
- iSeries_hunlock(hpte_group);
-
return (secondary << 3) | (slot & 7);
}
/* Pick a random slot to start at */
slot_offset = mftb() & 0x7;
- iSeries_hlock(hpte_group);
-
for (i = 0; i < HPTES_PER_GROUP; i++) {
lhpte.dw0.dword0 =
iSeries_hpte_getword0(hpte_group + slot_offset);
if (!lhpte.dw0.dw0.bolted) {
HvCallHpt_invalidateSetSwBitsGet(hpte_group +
slot_offset, 0, 0);
- iSeries_hunlock(hpte_group);
return i;
}
slot_offset &= 0x7;
}
- iSeries_hunlock(hpte_group);
-
return -1;
}
HPTE hpte;
unsigned long avpn = va >> 23;
- iSeries_hlock(slot);
-
HvCallHpt_get(&hpte, slot);
if ((hpte.dw0.dw0.avpn == avpn) && (hpte.dw0.dw0.v)) {
HvCallHpt_setPp(slot, (newpp & 0x3) | ((newpp & 0x4) << 1));
- iSeries_hunlock(slot);
return 0;
}
- iSeries_hunlock(slot);
-
return -1;
}
{
HPTE lhpte;
unsigned long avpn = va >> 23;
- unsigned long flags;
-
- local_irq_save(flags);
-
- iSeries_hlock(slot);
lhpte.dw0.dword0 = iSeries_hpte_getword0(slot);
if ((lhpte.dw0.dw0.avpn == avpn) && lhpte.dw0.dw0.v)
HvCallHpt_invalidateSetSwBitsGet(slot, 0, 0);
-
- iSeries_hunlock(slot);
-
- local_irq_restore(flags);
}
void hpte_init_iSeries(void)
#endif /* CONFIG_SMP */
+/* XXX Make this into free_irq() - Anton */
+
+/* This could be promoted to a real free_irq() ... */
+static int
+do_free_irq(int irq, void* dev_id)
+{
+ irq_desc_t *desc = get_irq_desc(irq);
+ struct irqaction **p;
+ unsigned long flags;
+
+ spin_lock_irqsave(&desc->lock,flags);
+ p = &desc->action;
+ for (;;) {
+ struct irqaction * action = *p;
+ if (action) {
+ struct irqaction **pp = p;
+ p = &action->next;
+ if (action->dev_id != dev_id)
+ continue;
+
+ /* Found it - now remove it from the list of entries */
+ *pp = action->next;
+ if (!desc->action) {
+ desc->status |= IRQ_DISABLED;
+ mask_irq(irq);
+ }
+ spin_unlock_irqrestore(&desc->lock,flags);
+
+ /* Wait to make sure it's not being used on another CPU */
+ synchronize_irq(irq);
+ kfree(action);
+ return 0;
+ }
+ printk("Trying to free free IRQ%d\n",irq);
+ spin_unlock_irqrestore(&desc->lock,flags);
+ break;
+ }
+ return -ENOENT;
+}
+
+
int request_irq(unsigned int irq,
irqreturn_t (*handler)(int, void *, struct pt_regs *),
unsigned long irqflags, const char * devname, void *dev_id)
if (irq >= NR_IRQS)
return -EINVAL;
if (!handler)
- return -EINVAL;
+ /* We could implement really free_irq() instead of that... */
+ return do_free_irq(irq, dev_id);
action = (struct irqaction *)
kmalloc(sizeof(struct irqaction), GFP_KERNEL);
void free_irq(unsigned int irq, void *dev_id)
{
- irq_desc_t *desc = get_irq_desc(irq);
- struct irqaction **p;
- unsigned long flags;
-
- spin_lock_irqsave(&desc->lock,flags);
- p = &desc->action;
- for (;;) {
- struct irqaction * action = *p;
- if (action) {
- struct irqaction **pp = p;
- p = &action->next;
- if (action->dev_id != dev_id)
- continue;
-
- /* Found it - now remove it from the list of entries */
- *pp = action->next;
- if (!desc->action) {
- desc->status |= IRQ_DISABLED;
- mask_irq(irq);
- }
- spin_unlock_irqrestore(&desc->lock,flags);
-
- /* Wait to make sure it's not being used on another CPU */
- synchronize_irq(irq);
- kfree(action);
- return;
- }
- printk("Trying to free free IRQ%d\n",irq);
- spin_unlock_irqrestore(&desc->lock,flags);
- break;
- }
- return;
+ request_irq(irq, NULL, 0, NULL, dev_id);
}
EXPORT_SYMBOL(free_irq);
}
#ifdef CONFIG_PPC_ISERIES
-void do_IRQ(struct pt_regs *regs)
+int do_IRQ(struct pt_regs *regs)
{
struct paca_struct *lpaca;
struct ItLpQueue *lpq;
/* Signal a fake decrementer interrupt */
timer_interrupt(regs);
}
+
+ return 1; /* lets ret_from_int know we can do checks */
}
#else /* CONFIG_PPC_ISERIES */
-void do_IRQ(struct pt_regs *regs)
+int do_IRQ(struct pt_regs *regs)
{
- int irq;
+ int irq, first = 1;
irq_enter();
}
#endif
- irq = ppc_md.get_irq(regs);
-
- if (irq >= 0)
+ /*
+ * Every arch is required to implement ppc_md.get_irq.
+ * This function will either return an irq number or -1 to
+ * indicate there are no more pending. But the first time
+ * through the loop this means there wasn't an IRQ pending.
+ * The value -2 is for buggy hardware and means that this IRQ
+ * has already been handled. -- Tom
+ */
+ while ((irq = ppc_md.get_irq(regs)) >= 0) {
ppc_irq_dispatch_handler(regs, irq);
- else
+ first = 0;
+ }
+ if (irq != -2 && first)
/* That's not SMP safe ... but who cares ? */
ppc_spurious_interrupts++;
irq_exit();
+
+ return 1; /* lets ret_from_int know we can do checks */
}
#endif /* CONFIG_PPC_ISERIES */
.llong .compat_sys_sched_setaffinity
.llong .compat_sys_sched_getaffinity
.llong .sys_ni_syscall
-#ifdef CONFIG_TUX
- .llong .__sys_tux
-#else
-# ifdef CONFIG_TUX_MODULE
- .llong .sys_tux
-# else
- .llong .sys_ni_syscall
-# endif
-#endif
+ .llong .sys_ni_syscall /* 225 - reserved for tux */
.llong .sys32_sendfile64
.llong .compat_sys_io_setup
.llong .sys_io_destroy
.llong .sys_sched_setaffinity
.llong .sys_sched_getaffinity
.llong .sys_ni_syscall
-#ifdef CONFIG_TUX
- .llong .__sys_tux
-#else
-# ifdef CONFIG_TUX_MODULE
- .llong .sys_tux
-# else
- .llong .sys_ni_syscall
-# endif
-#endif
+ .llong .sys_ni_syscall /* 225 - reserved for tux */
.llong .sys_ni_syscall /* 32bit only sendfile64 */
.llong .sys_io_setup
.llong .sys_io_destroy
struct device_node *np;
int i;
unsigned int *addrp;
- unsigned char* chrp_int_ack_special = NULL;
+ unsigned char* chrp_int_ack_special = 0;
unsigned char init_senses[NR_IRQS - NUM_ISA_INTERRUPTS];
int nmi_irq = -1;
#if defined(CONFIG_VT) && defined(CONFIG_ADB_KEYBOARD) && defined(XMON)
/* IPIs are marked SA_INTERRUPT as they must run with irqs disabled */
request_irq(openpic_vec_ipi, openpic_ipi_action, SA_INTERRUPT,
- "IPI0 (call function)", NULL);
+ "IPI0 (call function)", 0);
request_irq(openpic_vec_ipi+1, openpic_ipi_action, SA_INTERRUPT,
- "IPI1 (reschedule)", NULL);
+ "IPI1 (reschedule)", 0);
request_irq(openpic_vec_ipi+2, openpic_ipi_action, SA_INTERRUPT,
- "IPI2 (unused)", NULL);
+ "IPI2 (unused)", 0);
request_irq(openpic_vec_ipi+3, openpic_ipi_action, SA_INTERRUPT,
- "IPI3 (debugger break)", NULL);
+ "IPI3 (debugger break)", 0);
for ( i = 0; i < OPENPIC_NUM_IPI ; i++ )
openpic_enable_ipi(openpic_vec_ipi+i);
HPTE *hptep = htab_data.htab + slot;
Hpte_dword0 dw0;
unsigned long avpn = va >> 23;
+ unsigned long flags;
int ret = 0;
if (large)
tlbiel(va);
} else {
if (!(cur_cpu_spec->cpu_features & CPU_FTR_LOCKLESS_TLBIE))
- spin_lock(&pSeries_tlbie_lock);
+ spin_lock_irqsave(&pSeries_tlbie_lock, flags);
tlbie(va, large);
if (!(cur_cpu_spec->cpu_features & CPU_FTR_LOCKLESS_TLBIE))
- spin_unlock(&pSeries_tlbie_lock);
+ spin_unlock_irqrestore(&pSeries_tlbie_lock, flags);
}
return ret;
if (large)
avpn &= ~0x1UL;
- local_irq_save(flags);
pSeries_lock_hpte(hptep);
dw0 = hptep->dw0.dw0;
tlbiel(va);
} else {
if (!(cur_cpu_spec->cpu_features & CPU_FTR_LOCKLESS_TLBIE))
- spin_lock(&pSeries_tlbie_lock);
+ spin_lock_irqsave(&pSeries_tlbie_lock, flags);
tlbie(va, large);
if (!(cur_cpu_spec->cpu_features & CPU_FTR_LOCKLESS_TLBIE))
- spin_unlock(&pSeries_tlbie_lock);
+ spin_unlock_irqrestore(&pSeries_tlbie_lock, flags);
}
- local_irq_restore(flags);
}
static void pSeries_flush_hash_range(unsigned long context,
/* XXX fix for large ptes */
unsigned long large = 0;
- local_irq_save(flags);
-
j = 0;
for (i = 0; i < number; i++) {
if ((batch->addr[i] >= USER_START) &&
} else {
/* XXX double check that it is safe to take this late */
if (!(cur_cpu_spec->cpu_features & CPU_FTR_LOCKLESS_TLBIE))
- spin_lock(&pSeries_tlbie_lock);
+ spin_lock_irqsave(&pSeries_tlbie_lock, flags);
asm volatile("ptesync":::"memory");
asm volatile("eieio; tlbsync; ptesync":::"memory");
if (!(cur_cpu_spec->cpu_features & CPU_FTR_LOCKLESS_TLBIE))
- spin_unlock(&pSeries_tlbie_lock);
+ spin_unlock_irqrestore(&pSeries_tlbie_lock, flags);
}
-
- local_irq_restore(flags);
}
void hpte_init_pSeries(void)
bus = pci_bus_b(ln);
busdn = PCI_GET_DN(bus);
- dma_window = (unsigned int *)get_property(busdn, "ibm,dma-window", NULL);
+ dma_window = (unsigned int *)get_property(busdn, "ibm,dma-window", 0);
if (dma_window) {
/* Bussubno hasn't been copied yet.
* Do it now because iommu_table_setparms_lpar needs it.
{
unsigned int *dma_window;
- dma_window = (unsigned int *)get_property(dn, "ibm,dma-window", NULL);
+ dma_window = (unsigned int *)get_property(dn, "ibm,dma-window", 0);
if (!dma_window)
panic("iommu_table_setparms_lpar: device %s has no"
}
/* now we have the stdout node; figure out what type of device it is. */
- name = (char *)get_property(stdout_node, "name", NULL);
+ name = (char *)get_property(stdout_node, "name", 0);
if (!name) {
printk(KERN_WARNING "stdout node missing 'name' property!\n");
goto out;
if (strncmp(name, "vty", 3) == 0) {
if (device_is_compatible(stdout_node, "hvterm1")) {
- termno = (u32 *)get_property(stdout_node, "reg", NULL);
+ termno = (u32 *)get_property(stdout_node, "reg", 0);
if (termno) {
vtermno = termno[0];
ppc_md.udbg_putc = udbg_putcLP;
isa_dn = of_find_node_by_type(NULL, "isa");
if (isa_dn) {
isa_io_base = pci_io_base;
+ of_node_put(isa_dn);
pci_process_ISA_OF_ranges(isa_dn,
hose->io_base_phys,
hose->io_base_virt);
- of_node_put(isa_dn);
/* Allow all IO */
io_page_mask = -1;
}
BUG(); /* No I/O resource for this PHB? */
if (request_resource(&ioport_resource, res))
- printk(KERN_ERR "Failed to request IO on "
- "PCI domain %d\n", pci_domain_nr(bus));
-
+ printk(KERN_ERR "Failed to request IO"
+ "on hose %d\n", 0 /* FIXME */);
for (i = 0; i < 3; ++i) {
res = &hose->mem_resources[i];
BUG(); /* No memory resource for this PHB? */
bus->resource[i+1] = res;
if (res->flags && request_resource(&iomem_resource, res))
- printk(KERN_ERR "Failed to request MEM on "
- "PCI domain %d\n",
- pci_domain_nr(bus));
+ printk(KERN_ERR "Failed to request MEM"
+ "on hose %d\n", 0 /* FIXME */);
}
} else if (pci_probe_only &&
(dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
/* Stack space used when we detect a bad kernel stack pointer, and
* early in SMP boots before relocation is enabled.
- *
- * ABI requires stack to be 128-byte aligned
*/
-char emergency_stack[PAGE_SIZE * NR_CPUS] __attribute__((aligned(128)));
+char emergency_stack[PAGE_SIZE * NR_CPUS];
/* The Paca is an array with one entry per processor. Each contains an
* ItLpPaca, which contains the information shared between the
.stab_addr = (asrv), /* Virt pointer to segment table */ \
.emergency_sp = &emergency_stack[((number)+1) * PAGE_SIZE], \
.cpu_start = (start), /* Processor start */ \
+ .stab_next_rr = 1, \
.lppaca = { \
.xDesc = 0xd397d781, /* "LpPa" */ \
.xSize = sizeof(struct ItLpPaca), \
#ifdef CONFIG_PPC_ISERIES
PACAINITDATA( 0, 1, &xItLpQueue, 0, STAB0_VIRT_ADDR),
#else
- PACAINITDATA( 0, 1, NULL, STAB0_PHYS_ADDR, STAB0_VIRT_ADDR),
+ PACAINITDATA( 0, 1, 0, STAB0_PHYS_ADDR, STAB0_VIRT_ADDR),
#endif
- PACAINITDATA( 1, 0, NULL, 0, 0),
- PACAINITDATA( 2, 0, NULL, 0, 0),
- PACAINITDATA( 3, 0, NULL, 0, 0),
- PACAINITDATA( 4, 0, NULL, 0, 0),
- PACAINITDATA( 5, 0, NULL, 0, 0),
- PACAINITDATA( 6, 0, NULL, 0, 0),
- PACAINITDATA( 7, 0, NULL, 0, 0),
- PACAINITDATA( 8, 0, NULL, 0, 0),
- PACAINITDATA( 9, 0, NULL, 0, 0),
- PACAINITDATA(10, 0, NULL, 0, 0),
- PACAINITDATA(11, 0, NULL, 0, 0),
- PACAINITDATA(12, 0, NULL, 0, 0),
- PACAINITDATA(13, 0, NULL, 0, 0),
- PACAINITDATA(14, 0, NULL, 0, 0),
- PACAINITDATA(15, 0, NULL, 0, 0),
- PACAINITDATA(16, 0, NULL, 0, 0),
- PACAINITDATA(17, 0, NULL, 0, 0),
- PACAINITDATA(18, 0, NULL, 0, 0),
- PACAINITDATA(19, 0, NULL, 0, 0),
- PACAINITDATA(20, 0, NULL, 0, 0),
- PACAINITDATA(21, 0, NULL, 0, 0),
- PACAINITDATA(22, 0, NULL, 0, 0),
- PACAINITDATA(23, 0, NULL, 0, 0),
- PACAINITDATA(24, 0, NULL, 0, 0),
- PACAINITDATA(25, 0, NULL, 0, 0),
- PACAINITDATA(26, 0, NULL, 0, 0),
- PACAINITDATA(27, 0, NULL, 0, 0),
- PACAINITDATA(28, 0, NULL, 0, 0),
- PACAINITDATA(29, 0, NULL, 0, 0),
- PACAINITDATA(30, 0, NULL, 0, 0),
- PACAINITDATA(31, 0, NULL, 0, 0),
+ PACAINITDATA( 1, 0, 0, 0, 0),
+ PACAINITDATA( 2, 0, 0, 0, 0),
+ PACAINITDATA( 3, 0, 0, 0, 0),
+ PACAINITDATA( 4, 0, 0, 0, 0),
+ PACAINITDATA( 5, 0, 0, 0, 0),
+ PACAINITDATA( 6, 0, 0, 0, 0),
+ PACAINITDATA( 7, 0, 0, 0, 0),
+ PACAINITDATA( 8, 0, 0, 0, 0),
+ PACAINITDATA( 9, 0, 0, 0, 0),
+ PACAINITDATA(10, 0, 0, 0, 0),
+ PACAINITDATA(11, 0, 0, 0, 0),
+ PACAINITDATA(12, 0, 0, 0, 0),
+ PACAINITDATA(13, 0, 0, 0, 0),
+ PACAINITDATA(14, 0, 0, 0, 0),
+ PACAINITDATA(15, 0, 0, 0, 0),
+ PACAINITDATA(16, 0, 0, 0, 0),
+ PACAINITDATA(17, 0, 0, 0, 0),
+ PACAINITDATA(18, 0, 0, 0, 0),
+ PACAINITDATA(19, 0, 0, 0, 0),
+ PACAINITDATA(20, 0, 0, 0, 0),
+ PACAINITDATA(21, 0, 0, 0, 0),
+ PACAINITDATA(22, 0, 0, 0, 0),
+ PACAINITDATA(23, 0, 0, 0, 0),
+ PACAINITDATA(24, 0, 0, 0, 0),
+ PACAINITDATA(25, 0, 0, 0, 0),
+ PACAINITDATA(26, 0, 0, 0, 0),
+ PACAINITDATA(27, 0, 0, 0, 0),
+ PACAINITDATA(28, 0, 0, 0, 0),
+ PACAINITDATA(29, 0, 0, 0, 0),
+ PACAINITDATA(30, 0, 0, 0, 0),
+ PACAINITDATA(31, 0, 0, 0, 0),
#if NR_CPUS > 32
- PACAINITDATA(32, 0, NULL, 0, 0),
- PACAINITDATA(33, 0, NULL, 0, 0),
- PACAINITDATA(34, 0, NULL, 0, 0),
- PACAINITDATA(35, 0, NULL, 0, 0),
- PACAINITDATA(36, 0, NULL, 0, 0),
- PACAINITDATA(37, 0, NULL, 0, 0),
- PACAINITDATA(38, 0, NULL, 0, 0),
- PACAINITDATA(39, 0, NULL, 0, 0),
- PACAINITDATA(40, 0, NULL, 0, 0),
- PACAINITDATA(41, 0, NULL, 0, 0),
- PACAINITDATA(42, 0, NULL, 0, 0),
- PACAINITDATA(43, 0, NULL, 0, 0),
- PACAINITDATA(44, 0, NULL, 0, 0),
- PACAINITDATA(45, 0, NULL, 0, 0),
- PACAINITDATA(46, 0, NULL, 0, 0),
- PACAINITDATA(47, 0, NULL, 0, 0),
- PACAINITDATA(48, 0, NULL, 0, 0),
- PACAINITDATA(49, 0, NULL, 0, 0),
- PACAINITDATA(50, 0, NULL, 0, 0),
- PACAINITDATA(51, 0, NULL, 0, 0),
- PACAINITDATA(52, 0, NULL, 0, 0),
- PACAINITDATA(53, 0, NULL, 0, 0),
- PACAINITDATA(54, 0, NULL, 0, 0),
- PACAINITDATA(55, 0, NULL, 0, 0),
- PACAINITDATA(56, 0, NULL, 0, 0),
- PACAINITDATA(57, 0, NULL, 0, 0),
- PACAINITDATA(58, 0, NULL, 0, 0),
- PACAINITDATA(59, 0, NULL, 0, 0),
- PACAINITDATA(60, 0, NULL, 0, 0),
- PACAINITDATA(61, 0, NULL, 0, 0),
- PACAINITDATA(62, 0, NULL, 0, 0),
- PACAINITDATA(63, 0, NULL, 0, 0),
+ PACAINITDATA(32, 0, 0, 0, 0),
+ PACAINITDATA(33, 0, 0, 0, 0),
+ PACAINITDATA(34, 0, 0, 0, 0),
+ PACAINITDATA(35, 0, 0, 0, 0),
+ PACAINITDATA(36, 0, 0, 0, 0),
+ PACAINITDATA(37, 0, 0, 0, 0),
+ PACAINITDATA(38, 0, 0, 0, 0),
+ PACAINITDATA(39, 0, 0, 0, 0),
+ PACAINITDATA(40, 0, 0, 0, 0),
+ PACAINITDATA(41, 0, 0, 0, 0),
+ PACAINITDATA(42, 0, 0, 0, 0),
+ PACAINITDATA(43, 0, 0, 0, 0),
+ PACAINITDATA(44, 0, 0, 0, 0),
+ PACAINITDATA(45, 0, 0, 0, 0),
+ PACAINITDATA(46, 0, 0, 0, 0),
+ PACAINITDATA(47, 0, 0, 0, 0),
+ PACAINITDATA(48, 0, 0, 0, 0),
+ PACAINITDATA(49, 0, 0, 0, 0),
+ PACAINITDATA(50, 0, 0, 0, 0),
+ PACAINITDATA(51, 0, 0, 0, 0),
+ PACAINITDATA(52, 0, 0, 0, 0),
+ PACAINITDATA(53, 0, 0, 0, 0),
+ PACAINITDATA(54, 0, 0, 0, 0),
+ PACAINITDATA(55, 0, 0, 0, 0),
+ PACAINITDATA(56, 0, 0, 0, 0),
+ PACAINITDATA(57, 0, 0, 0, 0),
+ PACAINITDATA(58, 0, 0, 0, 0),
+ PACAINITDATA(59, 0, 0, 0, 0),
+ PACAINITDATA(60, 0, 0, 0, 0),
+ PACAINITDATA(61, 0, 0, 0, 0),
+ PACAINITDATA(62, 0, 0, 0, 0),
+ PACAINITDATA(63, 0, 0, 0, 0),
#if NR_CPUS > 64
- PACAINITDATA(64, 0, NULL, 0, 0),
- PACAINITDATA(65, 0, NULL, 0, 0),
- PACAINITDATA(66, 0, NULL, 0, 0),
- PACAINITDATA(67, 0, NULL, 0, 0),
- PACAINITDATA(68, 0, NULL, 0, 0),
- PACAINITDATA(69, 0, NULL, 0, 0),
- PACAINITDATA(70, 0, NULL, 0, 0),
- PACAINITDATA(71, 0, NULL, 0, 0),
- PACAINITDATA(72, 0, NULL, 0, 0),
- PACAINITDATA(73, 0, NULL, 0, 0),
- PACAINITDATA(74, 0, NULL, 0, 0),
- PACAINITDATA(75, 0, NULL, 0, 0),
- PACAINITDATA(76, 0, NULL, 0, 0),
- PACAINITDATA(77, 0, NULL, 0, 0),
- PACAINITDATA(78, 0, NULL, 0, 0),
- PACAINITDATA(79, 0, NULL, 0, 0),
- PACAINITDATA(80, 0, NULL, 0, 0),
- PACAINITDATA(81, 0, NULL, 0, 0),
- PACAINITDATA(82, 0, NULL, 0, 0),
- PACAINITDATA(83, 0, NULL, 0, 0),
- PACAINITDATA(84, 0, NULL, 0, 0),
- PACAINITDATA(85, 0, NULL, 0, 0),
- PACAINITDATA(86, 0, NULL, 0, 0),
- PACAINITDATA(87, 0, NULL, 0, 0),
- PACAINITDATA(88, 0, NULL, 0, 0),
- PACAINITDATA(89, 0, NULL, 0, 0),
- PACAINITDATA(90, 0, NULL, 0, 0),
- PACAINITDATA(91, 0, NULL, 0, 0),
- PACAINITDATA(92, 0, NULL, 0, 0),
- PACAINITDATA(93, 0, NULL, 0, 0),
- PACAINITDATA(94, 0, NULL, 0, 0),
- PACAINITDATA(95, 0, NULL, 0, 0),
- PACAINITDATA(96, 0, NULL, 0, 0),
- PACAINITDATA(97, 0, NULL, 0, 0),
- PACAINITDATA(98, 0, NULL, 0, 0),
- PACAINITDATA(99, 0, NULL, 0, 0),
- PACAINITDATA(100, 0, NULL, 0, 0),
- PACAINITDATA(101, 0, NULL, 0, 0),
- PACAINITDATA(102, 0, NULL, 0, 0),
- PACAINITDATA(103, 0, NULL, 0, 0),
- PACAINITDATA(104, 0, NULL, 0, 0),
- PACAINITDATA(105, 0, NULL, 0, 0),
- PACAINITDATA(106, 0, NULL, 0, 0),
- PACAINITDATA(107, 0, NULL, 0, 0),
- PACAINITDATA(108, 0, NULL, 0, 0),
- PACAINITDATA(109, 0, NULL, 0, 0),
- PACAINITDATA(110, 0, NULL, 0, 0),
- PACAINITDATA(111, 0, NULL, 0, 0),
- PACAINITDATA(112, 0, NULL, 0, 0),
- PACAINITDATA(113, 0, NULL, 0, 0),
- PACAINITDATA(114, 0, NULL, 0, 0),
- PACAINITDATA(115, 0, NULL, 0, 0),
- PACAINITDATA(116, 0, NULL, 0, 0),
- PACAINITDATA(117, 0, NULL, 0, 0),
- PACAINITDATA(118, 0, NULL, 0, 0),
- PACAINITDATA(119, 0, NULL, 0, 0),
- PACAINITDATA(120, 0, NULL, 0, 0),
- PACAINITDATA(121, 0, NULL, 0, 0),
- PACAINITDATA(122, 0, NULL, 0, 0),
- PACAINITDATA(123, 0, NULL, 0, 0),
- PACAINITDATA(124, 0, NULL, 0, 0),
- PACAINITDATA(125, 0, NULL, 0, 0),
- PACAINITDATA(126, 0, NULL, 0, 0),
- PACAINITDATA(127, 0, NULL, 0, 0),
+ PACAINITDATA(64, 0, 0, 0, 0),
+ PACAINITDATA(65, 0, 0, 0, 0),
+ PACAINITDATA(66, 0, 0, 0, 0),
+ PACAINITDATA(67, 0, 0, 0, 0),
+ PACAINITDATA(68, 0, 0, 0, 0),
+ PACAINITDATA(69, 0, 0, 0, 0),
+ PACAINITDATA(70, 0, 0, 0, 0),
+ PACAINITDATA(71, 0, 0, 0, 0),
+ PACAINITDATA(72, 0, 0, 0, 0),
+ PACAINITDATA(73, 0, 0, 0, 0),
+ PACAINITDATA(74, 0, 0, 0, 0),
+ PACAINITDATA(75, 0, 0, 0, 0),
+ PACAINITDATA(76, 0, 0, 0, 0),
+ PACAINITDATA(77, 0, 0, 0, 0),
+ PACAINITDATA(78, 0, 0, 0, 0),
+ PACAINITDATA(79, 0, 0, 0, 0),
+ PACAINITDATA(80, 0, 0, 0, 0),
+ PACAINITDATA(81, 0, 0, 0, 0),
+ PACAINITDATA(82, 0, 0, 0, 0),
+ PACAINITDATA(83, 0, 0, 0, 0),
+ PACAINITDATA(84, 0, 0, 0, 0),
+ PACAINITDATA(85, 0, 0, 0, 0),
+ PACAINITDATA(86, 0, 0, 0, 0),
+ PACAINITDATA(87, 0, 0, 0, 0),
+ PACAINITDATA(88, 0, 0, 0, 0),
+ PACAINITDATA(89, 0, 0, 0, 0),
+ PACAINITDATA(90, 0, 0, 0, 0),
+ PACAINITDATA(91, 0, 0, 0, 0),
+ PACAINITDATA(92, 0, 0, 0, 0),
+ PACAINITDATA(93, 0, 0, 0, 0),
+ PACAINITDATA(94, 0, 0, 0, 0),
+ PACAINITDATA(95, 0, 0, 0, 0),
+ PACAINITDATA(96, 0, 0, 0, 0),
+ PACAINITDATA(97, 0, 0, 0, 0),
+ PACAINITDATA(98, 0, 0, 0, 0),
+ PACAINITDATA(99, 0, 0, 0, 0),
+ PACAINITDATA(100, 0, 0, 0, 0),
+ PACAINITDATA(101, 0, 0, 0, 0),
+ PACAINITDATA(102, 0, 0, 0, 0),
+ PACAINITDATA(103, 0, 0, 0, 0),
+ PACAINITDATA(104, 0, 0, 0, 0),
+ PACAINITDATA(105, 0, 0, 0, 0),
+ PACAINITDATA(106, 0, 0, 0, 0),
+ PACAINITDATA(107, 0, 0, 0, 0),
+ PACAINITDATA(108, 0, 0, 0, 0),
+ PACAINITDATA(109, 0, 0, 0, 0),
+ PACAINITDATA(110, 0, 0, 0, 0),
+ PACAINITDATA(111, 0, 0, 0, 0),
+ PACAINITDATA(112, 0, 0, 0, 0),
+ PACAINITDATA(113, 0, 0, 0, 0),
+ PACAINITDATA(114, 0, 0, 0, 0),
+ PACAINITDATA(115, 0, 0, 0, 0),
+ PACAINITDATA(116, 0, 0, 0, 0),
+ PACAINITDATA(117, 0, 0, 0, 0),
+ PACAINITDATA(118, 0, 0, 0, 0),
+ PACAINITDATA(119, 0, 0, 0, 0),
+ PACAINITDATA(120, 0, 0, 0, 0),
+ PACAINITDATA(121, 0, 0, 0, 0),
+ PACAINITDATA(122, 0, 0, 0, 0),
+ PACAINITDATA(123, 0, 0, 0, 0),
+ PACAINITDATA(124, 0, 0, 0, 0),
+ PACAINITDATA(125, 0, 0, 0, 0),
+ PACAINITDATA(126, 0, 0, 0, 0),
+ PACAINITDATA(127, 0, 0, 0, 0),
#endif
#endif
};
*******************************************************************/
struct device_node;
typedef void *(*traverse_func)(struct device_node *me, void *data);
-void *traverse_pci_devices(struct device_node *start, traverse_func pre,
- void *data);
+void *traverse_pci_devices(struct device_node *start, traverse_func pre, traverse_func post, void *data);
+void *traverse_all_pci_devices(traverse_func pre);
void pci_devs_phb_init(void);
void pci_fix_bus_sysdata(void);
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+
+#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include "pci.h"
-/*
- * Traverse_func that inits the PCI fields of the device node.
+/* Traverse_func that inits the PCI fields of the device node.
* NOTE: this *must* be done before read/write config to the device.
*/
-static void * __init update_dn_pci_info(struct device_node *dn, void *data)
+static void * __init
+update_dn_pci_info(struct device_node *dn, void *data)
{
- struct pci_controller *phb = data;
+#ifdef CONFIG_PPC_PSERIES
+ struct pci_controller *phb = (struct pci_controller *)data;
u32 *regs;
- char *device_type = get_property(dn, "device_type", NULL);
+ char *device_type = get_property(dn, "device_type", 0);
char *model;
dn->phb = phb;
- if (device_type && (strcmp(device_type, "pci") == 0) &&
- (get_property(dn, "class-code", NULL) == 0)) {
+ if (device_type && strcmp(device_type, "pci") == 0 && get_property(dn, "class-code", 0) == 0) {
/* special case for PHB's. Sigh. */
- regs = (u32 *)get_property(dn, "bus-range", NULL);
+ regs = (u32 *)get_property(dn, "bus-range", 0);
dn->busno = regs[0];
model = (char *)get_property(dn, "model", NULL);
else
dn->devfn = 0; /* assumption */
} else {
- regs = (u32 *)get_property(dn, "reg", NULL);
+ regs = (u32 *)get_property(dn, "reg", 0);
if (regs) {
/* First register entry is addr (00BBSS00) */
dn->busno = (regs[0] >> 16) & 0xff;
dn->devfn = (regs[0] >> 8) & 0xff;
}
}
+#endif
return NULL;
}
-/*
+/******************************************************************
* Traverse a device tree stopping each PCI device in the tree.
* This is done depth first. As each node is processed, a "pre"
- * function is called and the children are processed recursively.
+ * function is called, the children are processed recursively, and
+ * then a "post" function is called.
*
- * The "pre" func returns a value. If non-zero is returned from
- * the "pre" func, the traversal stops and this value is returned.
- * This return value is useful when using traverse as a method of
- * finding a device.
+ * The "pre" and "post" funcs return a value. If non-zero
+ * is returned from the "pre" func, the traversal stops and this
+ * value is returned. The return value from "post" is not used.
+ * This return value is useful when using traverse as
+ * a method of finding a device.
*
- * NOTE: we do not run the func for devices that do not appear to
+ * NOTE: we do not run the funcs for devices that do not appear to
* be PCI except for the start node which we assume (this is good
* because the start node is often a phb which may be missing PCI
* properties).
* We use the class-code as an indicator. If we run into
* one of these nodes we also assume its siblings are non-pci for
* performance.
- */
-void *traverse_pci_devices(struct device_node *start, traverse_func pre,
- void *data)
+ *
+ ******************************************************************/
+void *traverse_pci_devices(struct device_node *start, traverse_func pre, traverse_func post, void *data)
{
struct device_node *dn, *nextdn;
void *ret;
- if (pre && ((ret = pre(start, data)) != NULL))
+ if (pre && (ret = pre(start, data)) != NULL)
return ret;
for (dn = start->child; dn; dn = nextdn) {
nextdn = NULL;
- if (get_property(dn, "class-code", NULL)) {
- if (pre && ((ret = pre(dn, data)) != NULL))
+#ifdef CONFIG_PPC_PSERIES
+ if (get_property(dn, "class-code", 0)) {
+ if (pre && (ret = pre(dn, data)) != NULL)
return ret;
- if (dn->child)
+ if (dn->child) {
/* Depth first...do children */
nextdn = dn->child;
- else if (dn->sibling)
+ } else if (dn->sibling) {
/* ok, try next sibling instead. */
nextdn = dn->sibling;
+ } else {
+ /* no more children or siblings...call "post" */
+ if (post)
+ post(dn, data);
+ }
}
+#endif
if (!nextdn) {
/* Walk up to next valid sibling. */
do {
return NULL;
}
-/*
- * Same as traverse_pci_devices except this does it for all phbs.
+/* Same as traverse_pci_devices except this does it for all phbs.
*/
-static void *traverse_all_pci_devices(traverse_func pre)
+void *traverse_all_pci_devices(traverse_func pre)
{
- struct pci_controller *phb;
+ struct pci_controller* phb;
void *ret;
-
- for (phb = hose_head; phb; phb = phb->next)
- if ((ret = traverse_pci_devices(phb->arch_data, pre, phb))
- != NULL)
+ for (phb=hose_head;phb;phb=phb->next)
+ if ((ret = traverse_pci_devices((struct device_node *)phb->arch_data, pre, NULL, phb)) != NULL)
return ret;
return NULL;
}
-/*
- * Traversal func that looks for a <busno,devfcn> value.
+/* Traversal func that looks for a <busno,devfcn> value.
* If found, the device_node is returned (thus terminating the traversal).
*/
-static void *is_devfn_node(struct device_node *dn, void *data)
+static void *
+is_devfn_node(struct device_node *dn, void *data)
{
int busno = ((unsigned long)data >> 8) & 0xff;
int devfn = ((unsigned long)data) & 0xff;
- return ((devfn == dn->devfn) && (busno == dn->busno)) ? dn : NULL;
+ return (devfn == dn->devfn && busno == dn->busno) ? dn : NULL;
}
-/*
- * This is the "slow" path for looking up a device_node from a
+/* This is the "slow" path for looking up a device_node from a
* pci_dev. It will hunt for the device under its parent's
* phb and then update sysdata for a future fastpath.
*
*/
struct device_node *fetch_dev_dn(struct pci_dev *dev)
{
- struct device_node *orig_dn = dev->sysdata;
+ struct device_node *orig_dn = (struct device_node *)dev->sysdata;
struct pci_controller *phb = orig_dn->phb; /* assume same phb as orig_dn */
struct device_node *phb_dn;
struct device_node *dn;
unsigned long searchval = (dev->bus->number << 8) | dev->devfn;
- phb_dn = phb->arch_data;
- dn = traverse_pci_devices(phb_dn, is_devfn_node, (void *)searchval);
+ phb_dn = (struct device_node *)(phb->arch_data);
+ dn = (struct device_node *)traverse_pci_devices(phb_dn, is_devfn_node, NULL, (void *)searchval);
if (dn) {
dev->sysdata = dn;
/* ToDo: call some device init hook here */
EXPORT_SYMBOL(fetch_dev_dn);
-/*
+/******************************************************************
* Actually initialize the phbs.
* The buswalk on this phb has not happened yet.
- */
-void __init pci_devs_phb_init(void)
+ ******************************************************************/
+void __init
+pci_devs_phb_init(void)
{
/* This must be done first so the device nodes have valid pci info! */
traverse_all_pci_devices(update_dn_pci_info);
}
-static void __init pci_fixup_bus_sysdata_list(struct list_head *bus_list)
+static void __init
+pci_fixup_bus_sysdata_list(struct list_head *bus_list)
{
struct list_head *ln;
struct pci_bus *bus;
- for (ln = bus_list->next; ln != bus_list; ln = ln->next) {
+ for (ln=bus_list->next; ln != bus_list; ln=ln->next) {
bus = pci_bus_b(ln);
if (bus->self)
bus->sysdata = bus->self->sysdata;
}
}
-/*
+/******************************************************************
* Fixup the bus->sysdata ptrs to point to the bus' device_node.
* This is done late in pcibios_init(). We do this mostly for
* sanity, but pci_dma.c uses these at DMA time so they must be
* To do this we recurse down the bus hierarchy. Note that PHB's
* have bus->self == NULL, but fortunately bus->sysdata is already
* correct in this case.
- */
-void __init pci_fix_bus_sysdata(void)
+ ******************************************************************/
+void __init
+pci_fix_bus_sysdata(void)
{
pci_fixup_bus_sysdata_list(&pci_root_buses);
}
int len;
/* For PCI<->PCI bridges or CardBus bridges, we go down */
- class_code = (unsigned int *) get_property(node, "class-code", NULL);
+ class_code = (unsigned int *) get_property(node, "class-code", 0);
if (!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
(*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS))
continue;
{
struct proc_dir_entry *root;
- root = proc_mkdir("ppc64", NULL);
+ root = proc_mkdir("ppc64", 0);
if (!root)
return 1;
if (!proc_mkdir("rtas", root))
return 1;
- if (!proc_symlink("rtas", NULL, "ppc64/rtas"))
+ if (!proc_symlink("rtas", 0, "ppc64/rtas"))
return 1;
return 0;
return (file->f_pos = new);
}
-static ssize_t page_map_read( struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
+static ssize_t page_map_read( struct file *file, char *buf, size_t nbytes, loff_t *ppos)
{
+ unsigned pos = *ppos;
struct proc_dir_entry *dp = PDE(file->f_dentry->d_inode);
- return simple_read_from_buffer(buf, nbytes, ppos, dp->data, dp->size);
+
+ if ( pos >= dp->size )
+ return 0;
+ if ( nbytes >= dp->size )
+ nbytes = dp->size;
+ if ( pos + nbytes > dp->size )
+ nbytes = dp->size - pos;
+
+ copy_to_user( buf, (char *)dp->data + pos, nbytes );
+ *ppos = pos + nbytes;
+ return nbytes;
}
static int page_map_mmap( struct file *file, struct vm_area_struct *vma )
/* prom structure */
struct prom_t prom;
-char *prom_display_paths[FB_MAX] __initdata = { NULL, };
+char *prom_display_paths[FB_MAX] __initdata = { 0, };
phandle prom_display_nodes[FB_MAX] __initdata;
unsigned int prom_num_displays = 0;
-char *of_stdout_device = NULL;
+char *of_stdout_device = 0;
static int iommu_force_on;
int ppc64_iommu_off;
#define MAX_PHB (32 * 6) /* 32 drawers * 6 PHBs/drawer */
struct of_tce_table of_tce_table[MAX_PHB + 1];
-char *bootpath = NULL;
-char *bootdevice = NULL;
+char *bootpath = 0;
+char *bootdevice = 0;
int boot_cpuid = 0;
#define MAX_CPU_THREADS 2
-struct device_node *allnodes = NULL;
+struct device_node *allnodes = 0;
/* use when traversing tree through the allnext, child, sibling,
* or parent members of struct device_node.
*/
unsigned long offset = reloc_offset();
struct prom_t *_prom = PTRRELOC(&prom);
va_list list;
-
+
_prom->args.service = ADDR(service);
_prom->args.nargs = nargs;
_prom->args.nret = nret;
- _prom->args.rets = (prom_arg_t *)&(_prom->args.args[nargs]);
+ _prom->args.rets = (prom_arg_t *)&(_prom->args.args[nargs]);
- va_start(list, nret);
+ va_start(list, nret);
for (i=0; i < nargs; i++)
_prom->args.args[i] = va_arg(list, prom_arg_t);
- va_end(list);
+ va_end(list);
for (i=0; i < nret ;i++)
_prom->args.rets[i] = 0;
static void __init prom_print_hex(unsigned long val)
{
unsigned long offset = reloc_offset();
- int i, nibbles = sizeof(val)*2;
- char buf[sizeof(val)*2+1];
+ int i, nibbles = sizeof(val)*2;
+ char buf[sizeof(val)*2+1];
struct prom_t *_prom = PTRRELOC(&prom);
- for (i = nibbles-1; i >= 0; i--) {
- buf[i] = (val & 0xf) + '0';
- if (buf[i] > '9')
- buf[i] += ('a'-'0'-10);
- val >>= 4;
- }
- buf[nibbles] = '\0';
+ for (i = nibbles-1; i >= 0; i--) {
+ buf[i] = (val & 0xf) + '0';
+ if (buf[i] > '9')
+ buf[i] += ('a'-'0'-10);
+ val >>= 4;
+ }
+ buf[nibbles] = '\0';
call_prom("write", 3, 1, _prom->stdout, buf, nibbles);
}
{
phandle node;
char type[64];
- unsigned long num_cpus = 0;
- unsigned long offset = reloc_offset();
+ unsigned long num_cpus = 0;
+ unsigned long offset = reloc_offset();
struct prom_t *_prom = PTRRELOC(&prom);
- struct naca_struct *_naca = RELOC(naca);
- struct systemcfg *_systemcfg = RELOC(systemcfg);
+ struct naca_struct *_naca = RELOC(naca);
+ struct systemcfg *_systemcfg = RELOC(systemcfg);
/* NOTE: _naca->debug_switch is already initialized. */
prom_debug("prom_initialize_naca: start...\n");
_naca->pftSize = 0; /* ilog2 of htab size. computed below. */
- for (node = 0; prom_next_node(&node); ) {
- type[0] = 0;
+ for (node = 0; prom_next_node(&node); ) {
+ type[0] = 0;
prom_getprop(node, "device_type", type, sizeof(type));
- if (!strcmp(type, RELOC("cpu"))) {
+ if (!strcmp(type, RELOC("cpu"))) {
num_cpus += 1;
/* We're assuming *all* of the CPUs have the same
_naca->pftSize = pft_size[1];
}
}
- } else if (!strcmp(type, RELOC("serial"))) {
+ } else if (!strcmp(type, RELOC("serial"))) {
phandle isa, pci;
struct isa_reg_property reg;
union pci_range ranges;
((((unsigned long)ranges.pci64.phys_hi) << 32) |
(ranges.pci64.phys_lo)) + reg.address;
}
- }
+ }
}
if (_systemcfg->platform == PLATFORM_POWERMAC)
}
/* We gotta have at least 1 cpu... */
- if ( (_systemcfg->processorCount = num_cpus) < 1 )
- PROM_BUG();
+ if ( (_systemcfg->processorCount = num_cpus) < 1 )
+ PROM_BUG();
_systemcfg->physicalMemorySize = lmb_phys_mem_size();
_systemcfg->version.minor = SYSTEMCFG_MINOR;
_systemcfg->processor = _get_PVR();
- prom_debug("systemcfg->processorCount = 0x%x\n",
+ prom_debug("systemcfg->processorCount = 0x%x\n",
_systemcfg->processorCount);
- prom_debug("systemcfg->physicalMemorySize = 0x%x\n",
+ prom_debug("systemcfg->physicalMemorySize = 0x%x\n",
_systemcfg->physicalMemorySize);
- prom_debug("naca->pftSize = 0x%x\n",
+ prom_debug("naca->pftSize = 0x%x\n",
_naca->pftSize);
- prom_debug("systemcfg->dCacheL1LineSize = 0x%x\n",
+ prom_debug("systemcfg->dCacheL1LineSize = 0x%x\n",
_systemcfg->dCacheL1LineSize);
- prom_debug("systemcfg->iCacheL1LineSize = 0x%x\n",
+ prom_debug("systemcfg->iCacheL1LineSize = 0x%x\n",
_systemcfg->iCacheL1LineSize);
- prom_debug("naca->serialPortAddr = 0x%x\n",
+ prom_debug("naca->serialPortAddr = 0x%x\n",
_naca->serialPortAddr);
- prom_debug("naca->interrupt_controller = 0x%x\n",
+ prom_debug("naca->interrupt_controller = 0x%x\n",
_naca->interrupt_controller);
- prom_debug("systemcfg->platform = 0x%x\n",
+ prom_debug("systemcfg->platform = 0x%x\n",
_systemcfg->platform);
prom_debug("prom_initialize_naca: end...\n");
}
#ifdef DEBUG_PROM
void prom_dump_lmb(void)
{
- unsigned long i;
- unsigned long offset = reloc_offset();
+ unsigned long i;
+ unsigned long offset = reloc_offset();
struct lmb *_lmb = PTRRELOC(&lmb);
- prom_printf("\nprom_dump_lmb:\n");
- prom_printf(" memory.cnt = 0x%x\n",
+ prom_printf("\nprom_dump_lmb:\n");
+ prom_printf(" memory.cnt = 0x%x\n",
_lmb->memory.cnt);
- prom_printf(" memory.size = 0x%x\n",
+ prom_printf(" memory.size = 0x%x\n",
_lmb->memory.size);
- for (i=0; i < _lmb->memory.cnt ;i++) {
- prom_printf(" memory.region[0x%x].base = 0x%x\n",
+ for (i=0; i < _lmb->memory.cnt ;i++) {
+ prom_printf(" memory.region[0x%x].base = 0x%x\n",
i, _lmb->memory.region[i].base);
- prom_printf(" .physbase = 0x%x\n",
+ prom_printf(" .physbase = 0x%x\n",
_lmb->memory.region[i].physbase);
- prom_printf(" .size = 0x%x\n",
+ prom_printf(" .size = 0x%x\n",
_lmb->memory.region[i].size);
- }
+ }
- prom_printf("\n reserved.cnt = 0x%x\n",
+ prom_printf("\n reserved.cnt = 0x%x\n",
_lmb->reserved.cnt);
- prom_printf(" reserved.size = 0x%x\n",
+ prom_printf(" reserved.size = 0x%x\n",
_lmb->reserved.size);
- for (i=0; i < _lmb->reserved.cnt ;i++) {
- prom_printf(" reserved.region[0x%x\n].base = 0x%x\n",
+ for (i=0; i < _lmb->reserved.cnt ;i++) {
+ prom_printf(" reserved.region[0x%x\n].base = 0x%x\n",
i, _lmb->reserved.region[i].base);
- prom_printf(" .physbase = 0x%x\n",
+ prom_printf(" .physbase = 0x%x\n",
_lmb->reserved.region[i].physbase);
- prom_printf(" .size = 0x%x\n",
+ prom_printf(" .size = 0x%x\n",
_lmb->reserved.region[i].size);
- }
+ }
}
#endif /* DEBUG_PROM */
{
phandle node;
char type[64];
- unsigned long i, offset = reloc_offset();
+ unsigned long i, offset = reloc_offset();
struct prom_t *_prom = PTRRELOC(&prom);
- struct systemcfg *_systemcfg = RELOC(systemcfg);
+ struct systemcfg *_systemcfg = RELOC(systemcfg);
union lmb_reg_property reg;
unsigned long lmb_base, lmb_size;
unsigned long num_regs, bytes_per_reg = (_prom->encode_phys_size*2)/8;
if (_systemcfg->platform == PLATFORM_POWERMAC)
bytes_per_reg = 12;
- for (node = 0; prom_next_node(&node); ) {
- type[0] = 0;
- prom_getprop(node, "device_type", type, sizeof(type));
+ for (node = 0; prom_next_node(&node); ) {
+ type[0] = 0;
+ prom_getprop(node, "device_type", type, sizeof(type));
- if (strcmp(type, RELOC("memory")))
+ if (strcmp(type, RELOC("memory")))
continue;
num_regs = prom_getprop(node, "reg", ®, sizeof(reg))
struct rtas_t *_rtas = PTRRELOC(&rtas);
struct systemcfg *_systemcfg = RELOC(systemcfg);
ihandle prom_rtas;
- u32 getprop_rval;
+ u32 getprop_rval;
char hypertas_funcs[4];
prom_debug("prom_instantiate_rtas: start...\n");
prom_getprop(prom_rtas, "rtas-size",
&getprop_rval, sizeof(getprop_rval));
- _rtas->size = getprop_rval;
+ _rtas->size = getprop_rval;
prom_printf("instantiating rtas");
if (_rtas->size != 0) {
unsigned long rtas_region = RTAS_INSTANTIATE_MAX;
prom_printf(" done\n");
}
- prom_debug("rtas->base = 0x%x\n", _rtas->base);
- prom_debug("rtas->entry = 0x%x\n", _rtas->entry);
- prom_debug("rtas->size = 0x%x\n", _rtas->size);
+ prom_debug("rtas->base = 0x%x\n", _rtas->base);
+ prom_debug("rtas->entry = 0x%x\n", _rtas->entry);
+ prom_debug("rtas->size = 0x%x\n", _rtas->size);
}
prom_debug("prom_instantiate_rtas: end...\n");
}
{
phandle node;
ihandle phb_node;
- unsigned long offset = reloc_offset();
+ unsigned long offset = reloc_offset();
char compatible[64], path[64], type[64], model[64];
unsigned long i, table = 0;
unsigned long base, vbase, align;
/* Keep the old logic in tack to avoid regression. */
if (compatible[0] != 0) {
- if ((strstr(compatible, RELOC("python")) == NULL) &&
- (strstr(compatible, RELOC("Speedwagon")) == NULL) &&
- (strstr(compatible, RELOC("Winnipeg")) == NULL))
+ if((strstr(compatible, RELOC("python")) == NULL) &&
+ (strstr(compatible, RELOC("Speedwagon")) == NULL) &&
+ (strstr(compatible, RELOC("Winnipeg")) == NULL))
continue;
} else if (model[0] != 0) {
if ((strstr(model, RELOC("ython")) == NULL) &&
/* Call OF to setup the TCE hardware */
if (call_prom("package-to-path", 3, 1, node,
path, sizeof(path)-1) == PROM_ERROR) {
- prom_printf("package-to-path failed\n");
- } else {
- prom_printf("opening PHB %s", path);
- }
-
- phb_node = call_prom("open", 1, 1, path);
- if ( (long)phb_node <= 0) {
- prom_printf("... failed\n");
- } else {
- prom_printf("... done\n");
- }
- call_prom("call-method", 6, 0, ADDR("set-64-bit-addressing"),
+ prom_printf("package-to-path failed\n");
+ } else {
+ prom_printf("opening PHB %s", path);
+ }
+
+ phb_node = call_prom("open", 1, 1, path);
+ if ( (long)phb_node <= 0) {
+ prom_printf("... failed\n");
+ } else {
+ prom_printf("... done\n");
+ }
+ call_prom("call-method", 6, 0, ADDR("set-64-bit-addressing"),
phb_node, -1, minsize,
(u32) base, (u32) (base >> 32));
- call_prom("close", 1, 0, phb_node);
+ call_prom("close", 1, 0, phb_node);
table++;
}
unsigned int cpu_threads, hw_cpu_num;
int propsize;
extern void __secondary_hold(void);
- extern unsigned long __secondary_hold_spinloop;
- extern unsigned long __secondary_hold_acknowledge;
- unsigned long *spinloop
+ extern unsigned long __secondary_hold_spinloop;
+ extern unsigned long __secondary_hold_acknowledge;
+ unsigned long *spinloop
= (void *)virt_to_abs(&__secondary_hold_spinloop);
- unsigned long *acknowledge
+ unsigned long *acknowledge
= (void *)virt_to_abs(&__secondary_hold_acknowledge);
- unsigned long secondary_hold
+ unsigned long secondary_hold
= virt_to_abs(*PTRRELOC((unsigned long *)__secondary_hold));
- struct systemcfg *_systemcfg = RELOC(systemcfg);
+ struct systemcfg *_systemcfg = RELOC(systemcfg);
struct paca_struct *lpaca = PTRRELOC(&paca[0]);
struct prom_t *_prom = PTRRELOC(&prom);
#ifdef CONFIG_SMP
prom_debug(" 1) *acknowledge = 0x%x\n", *acknowledge);
prom_debug(" 1) secondary_hold = 0x%x\n", secondary_hold);
- /* Set the common spinloop variable, so all of the secondary cpus
+ /* Set the common spinloop variable, so all of the secondary cpus
* will block when they are awakened from their OF spinloop.
* This must occur for both SMP and non SMP kernels, since OF will
* be trashed when we move the kernel.
- */
- *spinloop = 0;
+ */
+ *spinloop = 0;
#ifdef CONFIG_HMT
for (i=0; i < NR_CPUS; i++) {
if (strcmp(type, RELOC("okay")) != 0)
continue;
- reg = -1;
+ reg = -1;
prom_getprop(node, "reg", ®, sizeof(reg));
path = (char *) mem;
ihandle prom_options = 0;
char option[9];
unsigned long offset = reloc_offset();
- struct naca_struct *_naca = RELOC(naca);
+ struct naca_struct *_naca = RELOC(naca);
char found = 0;
if (strstr(RELOC(cmd_line), RELOC("smt-enabled="))) {
struct prom_t *_prom = PTRRELOC(&prom);
u32 val;
- if (prom_getprop(_prom->chosen, "stdout", &val, sizeof(val)) <= 0)
- prom_panic("cannot find stdout");
+ if (prom_getprop(_prom->chosen, "stdout", &val, sizeof(val)) <= 0)
+ prom_panic("cannot find stdout");
- _prom->stdout = val;
+ _prom->stdout = val;
}
static int __init prom_find_machine_type(void)
ihandle ih;
int i, j;
unsigned long offset = reloc_offset();
- struct prom_t *_prom = PTRRELOC(&prom);
+ struct prom_t *_prom = PTRRELOC(&prom);
char type[16], *path;
static unsigned char default_colors[] = {
0x00, 0x00, 0x00,
break;
#endif /* CONFIG_LOGO_LINUX_CLUT224 */
}
-
+
return DOUBLEWORD_ALIGN(mem);
}
unsigned long needed, unsigned long align)
{
void *ret;
+ unsigned long offset = reloc_offset();
*mem_start = ALIGN(*mem_start, align);
if (*mem_start + needed > *mem_end) {
#ifdef CONFIG_BLK_DEV_INITRD
- unsigned long offset = reloc_offset();
/* FIXME: Apple OF doesn't map unclaimed mem. If this
* ever happened on G5, we'd need to fix. */
unsigned long initrd_len;
prom_panic("couldn't get device tree root\n");
}
allnextp = &RELOC(allnodes);
- inspect_node(root, NULL, &mem_start, &mem_end, &allnextp);
- *allnextp = NULL;
+ inspect_node(root, 0, &mem_start, &mem_end, &allnextp);
+ *allnextp = 0;
return mem_start;
}
{
struct bi_record *first, *last;
- prom_debug("birec_verify: r6=0x%x\n", (unsigned long)bi_recs);
+ prom_debug("birec_verify: r6=0x%x\n", (unsigned long)bi_recs);
if (bi_recs != NULL)
prom_debug(" tag=0x%x\n", bi_recs->tag);
last = (struct bi_record *)(long)bi_recs->data[0];
- prom_debug(" last=0x%x\n", (unsigned long)last);
+ prom_debug(" last=0x%x\n", (unsigned long)last);
if (last != NULL)
prom_debug(" last_tag=0x%x\n", last->tag);
return NULL;
first = (struct bi_record *)(long)last->data[0];
- prom_debug(" first=0x%x\n", (unsigned long)first);
+ prom_debug(" first=0x%x\n", (unsigned long)first);
if ( first == NULL || first != bi_recs )
return NULL;
/* Init prom stdout device */
prom_init_stdout();
- prom_debug("klimit=0x%x\n", RELOC(klimit));
- prom_debug("offset=0x%x\n", offset);
- prom_debug("->mem=0x%x\n", RELOC(klimit) - offset);
+ prom_debug("klimit=0x%x\n", RELOC(klimit));
+ prom_debug("offset=0x%x\n", offset);
+ prom_debug("->mem=0x%x\n", RELOC(klimit) - offset);
/* check out if we have bi_recs */
_prom->bi_recs = prom_bi_rec_verify((struct bi_record *)r6);
copy_and_flush(0, KERNELBASE - offset, 0x100, 0);
/* Start storing things at klimit */
- mem = RELOC(klimit) - offset;
+ mem = RELOC(klimit) - offset;
/* Get the full OF pathname of the stdout device */
p = (char *) mem;
_prom->encode_phys_size = (getprop_rval == 1) ? 32 : 64;
/* Determine which cpu is actually running right _now_ */
- if (prom_getprop(_prom->chosen, "cpu",
+ if (prom_getprop(_prom->chosen, "cpu",
&prom_cpu, sizeof(prom_cpu)) <= 0)
- prom_panic("cannot find boot cpu");
+ prom_panic("cannot find boot cpu");
cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);
prom_getprop(cpu_pkg, "reg", &getprop_rval, sizeof(getprop_rval));
RELOC(boot_cpuid) = 0;
- prom_debug("Booting CPU hw index = 0x%x\n", _prom->cpu);
+ prom_debug("Booting CPU hw index = 0x%x\n", _prom->cpu);
/* Get the boot device and translate it to a full OF pathname. */
p = (char *) mem;
if (_systemcfg->platform != PLATFORM_POWERMAC)
prom_instantiate_rtas();
- /* Initialize some system info into the Naca early... */
- prom_initialize_naca();
+ /* Initialize some system info into the Naca early... */
+ prom_initialize_naca();
smt_setup();
- /* If we are on an SMP machine, then we *MUST* do the
- * following, regardless of whether we have an SMP
- * kernel or not.
- */
+ /* If we are on an SMP machine, then we *MUST* do the
+ * following, regardless of whether we have an SMP
+ * kernel or not.
+ */
prom_hold_cpus(mem);
- prom_debug("after basic inits, mem=0x%x\n", mem);
+ prom_debug("after basic inits, mem=0x%x\n", mem);
#ifdef CONFIG_BLK_DEV_INITRD
prom_debug("initrd_start=0x%x\n", RELOC(initrd_start));
prom_debug("initrd_end=0x%x\n", RELOC(initrd_end));
RELOC(klimit) = mem + offset;
prom_debug("new klimit is\n");
- prom_debug("klimit=0x%x\n", RELOC(klimit));
+ prom_debug("klimit=0x%x\n", RELOC(klimit));
prom_debug(" ->mem=0x%x\n", mem);
lmb_reserve(0, __pa(RELOC(klimit)));
* Find out the size of each entry of the interrupts property
* for a node.
*/
-int __devinit prom_n_intr_cells(struct device_node *np)
+static int __devinit
+prom_n_intr_cells(struct device_node *np)
{
struct device_node *p;
unsigned int *icp;
|| get_property(p, "interrupt-map", NULL) != NULL) {
printk("oops, node %s doesn't have #interrupt-cells\n",
p->full_name);
- return 1;
+ return 1;
}
}
#ifdef DEBUG_IRQ
i = 0;
adr = (struct address_range *) mem_start;
while ((l -= sizeof(struct pci_reg_property)) >= 0) {
- if (!measure_only) {
+ if (!measure_only) {
adr[i].space = pci_addrs[i].addr.a_hi;
adr[i].address = pci_addrs[i].addr.a_lo;
adr[i].size = pci_addrs[i].size_lo;
i = 0;
adr = (struct address_range *) mem_start;
while ((l -= sizeof(struct reg_property32)) >= 0) {
- if (!measure_only) {
+ if (!measure_only) {
adr[i].space = 2;
adr[i].address = rp[i].address + base_address;
adr[i].size = rp[i].size;
i = 0;
adr = (struct address_range *) mem_start;
while ((l -= sizeof(struct reg_property32)) >= 0) {
- if (!measure_only) {
+ if (!measure_only) {
adr[i].space = 2;
adr[i].address = rp[i].address + base_address;
adr[i].size = rp[i].size;
i = 0;
adr = (struct address_range *) mem_start;
while ((l -= sizeof(struct reg_property)) >= 0) {
- if (!measure_only) {
+ if (!measure_only) {
adr[i].space = rp[i].space;
adr[i].address = rp[i].address;
adr[i].size = rp[i].size;
i = 0;
adr = (struct address_range *) mem_start;
while ((l -= rpsize) >= 0) {
- if (!measure_only) {
+ if (!measure_only) {
adr[i].space = 0;
adr[i].address = rp[naddrc - 1];
adr[i].size = rp[naddrc + nsizec - 1];
struct device_node *child;
int *ip;
- np->name = get_property(np, "name", NULL);
- np->type = get_property(np, "device_type", NULL);
+ np->name = get_property(np, "name", 0);
+ np->type = get_property(np, "device_type", 0);
if (!np->name)
np->name = "<NULL>";
mem_start = finish_node_interrupts(np, mem_start, measure_only);
/* Look for #address-cells and #size-cells properties. */
- ip = (int *) get_property(np, "#address-cells", NULL);
+ ip = (int *) get_property(np, "#address-cells", 0);
if (ip != NULL)
naddrc = *ip;
- ip = (int *) get_property(np, "#size-cells", NULL);
+ ip = (int *) get_property(np, "#size-cells", 0);
if (ip != NULL)
nsizec = *ip;
* expect for the name -- Cort
*/
if (!strcmp(np->name, "display"))
- np->name = get_property(np, "compatible", NULL);
+ np->name = get_property(np, "compatible", 0);
if (!strcmp(np->name, "device-tree") || np->parent == NULL)
ifunc = interpret_root_props;
return mem_start;
}
-/**
+/*
* finish_device_tree is called once things are running normally
* (i.e. with text and data mapped to the address they were linked at).
* It traverses the device tree and fills in the name, type,
do {
if (np->parent)
np = np->parent;
- ip = (int *) get_property(np, "#address-cells", NULL);
+ ip = (int *) get_property(np, "#address-cells", 0);
if (ip != NULL)
return *ip;
} while (np->parent);
do {
if (np->parent)
np = np->parent;
- ip = (int *) get_property(np, "#size-cells", NULL);
+ ip = (int *) get_property(np, "#size-cells", 0);
if (ip != NULL)
return *ip;
} while (np->parent);
return 1;
}
-/**
+/*
* Work out the sense (active-low level / active-high edge)
* of each interrupt from the device tree.
*/
}
}
-/**
+/*
* Construct and return a list of the device_nodes with a given name.
*/
struct device_node *
prevp = &np->next;
}
}
- *prevp = NULL;
+ *prevp = 0;
return head;
}
-/**
+/*
* Construct and return a list of the device_nodes with a given type.
*/
struct device_node *
prevp = &np->next;
}
}
- *prevp = NULL;
+ *prevp = 0;
return head;
}
-/**
+/*
* Returns all nodes linked together
*/
struct device_node *
*prevp = np;
prevp = &np->next;
}
- *prevp = NULL;
+ *prevp = 0;
return head;
}
-/** Checks if the given "compat" string matches one of the strings in
+/* Checks if the given "compat" string matches one of the strings in
* the device's "compatible" property
*/
int
}
-/**
+/*
* Indicates whether the root node has a given value in its
* compatible property.
*/
{
struct device_node *root;
int rc = 0;
-
+
root = of_find_node_by_path("/");
if (root) {
rc = device_is_compatible(root, compat);
return rc;
}
-/**
+/*
* Construct and return a list of the device_nodes with a given type
* and compatible property.
*/
prevp = &np->next;
}
}
- *prevp = NULL;
+ *prevp = 0;
return head;
}
-/**
+/*
* Find the device_node with a given full_name.
*/
struct device_node *
u32 *regs;
int err = 0;
phandle *ibm_phandle;
-
- node->name = get_property(node, "name", NULL);
- node->type = get_property(node, "device_type", NULL);
+
+ node->name = get_property(node, "name", 0);
+ node->type = get_property(node, "device_type", 0);
if (!parent) {
err = -ENODEV;
}
/* now do the work of finish_node_interrupts */
- if (get_property(node, "interrupts", NULL)) {
+ if (get_property(node, "interrupts", 0)) {
err = of_finish_dynamic_node_interrupts(node);
if (err) goto out;
}
- /* now do the rough equivalent of update_dn_pci_info, this
- * probably is not correct for phb's, but should work for
- * IOAs and slots.
- */
+ /* now do the rough equivalent of update_dn_pci_info, this
+ * probably is not correct for phb's, but should work for
+ * IOAs and slots.
+ */
- node->phb = parent->phb;
+ node->phb = parent->phb;
- regs = (u32 *)get_property(node, "reg", NULL);
- if (regs) {
- node->busno = (regs[0] >> 16) & 0xff;
- node->devfn = (regs[0] >> 8) & 0xff;
- }
+ regs = (u32 *)get_property(node, "reg", 0);
+ if (regs) {
+ node->busno = (regs[0] >> 16) & 0xff;
+ node->devfn = (regs[0] >> 8) & 0xff;
+ }
/* fixing up iommu_table */
- if (strcmp(node->name, "pci") == 0 &&
- get_property(node, "ibm,dma-window", NULL)) {
- node->bussubno = node->busno;
- iommu_devnode_init(node);
- } else
+ if(strcmp(node->name, "pci") == 0 &&
+ get_property(node, "ibm,dma-window", NULL)) {
+ node->bussubno = node->busno;
+ iommu_devnode_init(node);
+ }
+ else
node->iommu_table = parent->iommu_table;
out:
*lenp = pp->length;
return pp->value;
}
- return NULL;
+ return 0;
}
/*
break;
}
- case PTRACE_GETEVENTMSG:
- ret = put_user(child->ptrace_message, (unsigned int __user *) data);
- break;
+
default:
ret = ptrace_request(child, request, addr, data);
#include <asm/rtas.h>
#include <asm/ppcdebug.h>
-static unsigned char ras_log_buf[RTAS_ERROR_LOG_MAX];
-static spinlock_t ras_log_buf_lock = SPIN_LOCK_UNLOCKED;
-
-static int ras_get_sensor_state_token;
-static int ras_check_exception_token;
-
-#define EPOW_SENSOR_TOKEN 9
-#define EPOW_SENSOR_INDEX 0
-#define RAS_VECTOR_OFFSET 0x500
-
static irqreturn_t ras_epow_interrupt(int irq, void *dev_id,
struct pt_regs * regs);
static irqreturn_t ras_error_interrupt(int irq, void *dev_id,
/* #define DEBUG */
-static void request_ras_irqs(struct device_node *np, char *propname,
- irqreturn_t (*handler)(int, void *, struct pt_regs *),
- const char *name)
-{
- unsigned int *ireg, len, i;
- int virq, n_intr;
-
- ireg = (unsigned int *)get_property(np, propname, &len);
- if (ireg == NULL)
- return;
- n_intr = prom_n_intr_cells(np);
- len /= n_intr * sizeof(*ireg);
-
- for (i = 0; i < len; i++) {
- virq = virt_irq_create_mapping(*ireg);
- if (virq == NO_IRQ) {
- printk(KERN_ERR "Unable to allocate interrupt "
- "number for %s\n", np->full_name);
- return;
- }
- if (request_irq(irq_offset_up(virq), handler, 0, name, NULL)) {
- printk(KERN_ERR "Unable to request interrupt %d for "
- "%s\n", irq_offset_up(virq), np->full_name);
- return;
- }
- ireg += n_intr;
- }
-}
-
/*
* Initialize handlers for the set of interrupts caused by hardware errors
* and power system events.
static int __init init_ras_IRQ(void)
{
struct device_node *np;
-
- ras_get_sensor_state_token = rtas_token("get-sensor-state");
- ras_check_exception_token = rtas_token("check-exception");
-
- /* Internal Errors */
- np = of_find_node_by_path("/event-sources/internal-errors");
- if (np != NULL) {
- request_ras_irqs(np, "open-pic-interrupt", ras_error_interrupt,
- "RAS_ERROR");
- request_ras_irqs(np, "interrupts", ras_error_interrupt,
- "RAS_ERROR");
- of_node_put(np);
+ unsigned int *ireg, len, i;
+ int virq;
+
+ if ((np = of_find_node_by_path("/event-sources/internal-errors")) &&
+ (ireg = (unsigned int *)get_property(np, "open-pic-interrupt",
+ &len))) {
+ for (i=0; i<(len / sizeof(*ireg)); i++) {
+ virq = virt_irq_create_mapping(*(ireg));
+ if (virq == NO_IRQ) {
+ printk(KERN_ERR "Unable to allocate interrupt "
+ "number for %s\n", np->full_name);
+ break;
+ }
+ request_irq(irq_offset_up(virq),
+ ras_error_interrupt, 0,
+ "RAS_ERROR", NULL);
+ ireg++;
+ }
}
-
- /* EPOW Events */
- np = of_find_node_by_path("/event-sources/epow-events");
- if (np != NULL) {
- request_ras_irqs(np, "open-pic-interrupt", ras_epow_interrupt,
- "RAS_EPOW");
- request_ras_irqs(np, "interrupts", ras_epow_interrupt,
- "RAS_EPOW");
- of_node_put(np);
+ of_node_put(np);
+
+ if ((np = of_find_node_by_path("/event-sources/epow-events")) &&
+ (ireg = (unsigned int *)get_property(np, "open-pic-interrupt",
+ &len))) {
+ for (i=0; i<(len / sizeof(*ireg)); i++) {
+ virq = virt_irq_create_mapping(*(ireg));
+ if (virq == NO_IRQ) {
+ printk(KERN_ERR "Unable to allocate interrupt "
+ " number for %s\n", np->full_name);
+ break;
+ }
+ request_irq(irq_offset_up(virq),
+ ras_epow_interrupt, 0,
+ "RAS_EPOW", NULL);
+ ireg++;
+ }
}
+ of_node_put(np);
return 1;
}
__initcall(init_ras_IRQ);
+static struct rtas_error_log log_buf;
+static spinlock_t log_lock = SPIN_LOCK_UNLOCKED;
+
/*
* Handle power subsystem events (EPOW).
*
static irqreturn_t
ras_epow_interrupt(int irq, void *dev_id, struct pt_regs * regs)
{
+ struct rtas_error_log log_entry;
+ unsigned int size = sizeof(log_entry);
int status = 0xdeadbeef;
- int state = 0;
- int critical;
- status = rtas_call(ras_get_sensor_state_token, 2, 2, &state,
- EPOW_SENSOR_TOKEN, EPOW_SENSOR_INDEX);
+ spin_lock(&log_lock);
- if (state > 3)
- critical = 1; /* Time Critical */
- else
- critical = 0;
+ status = rtas_call(rtas_token("check-exception"), 6, 1, NULL,
+ 0x500, irq,
+ RTAS_EPOW_WARNING | RTAS_POWERMGM_EVENTS,
+ 1, /* Time Critical */
+ __pa(&log_buf), size);
- spin_lock(&ras_log_buf_lock);
+ log_entry = log_buf;
- status = rtas_call(ras_check_exception_token, 6, 1, NULL,
- RAS_VECTOR_OFFSET,
- virt_irq_to_real(irq_offset_down(irq)),
- RTAS_EPOW_WARNING | RTAS_POWERMGM_EVENTS,
- critical, __pa(&ras_log_buf), RTAS_ERROR_LOG_MAX);
+ spin_unlock(&log_lock);
- udbg_printf("EPOW <0x%lx 0x%x 0x%x>\n",
- *((unsigned long *)&ras_log_buf), status, state);
- printk(KERN_WARNING "EPOW <0x%lx 0x%x 0x%x>\n",
- *((unsigned long *)&ras_log_buf), status, state);
+ udbg_printf("EPOW <0x%lx 0x%x>\n",
+ *((unsigned long *)&log_entry), status);
+ printk(KERN_WARNING
+ "EPOW <0x%lx 0x%x>\n",*((unsigned long *)&log_entry), status);
/* format and print the extended information */
- log_error(ras_log_buf, ERR_TYPE_RTAS_LOG, 0);
-
- spin_unlock(&ras_log_buf_lock);
+ log_error((char *)&log_entry, ERR_TYPE_RTAS_LOG, 0);
+
return IRQ_HANDLED;
}
static irqreturn_t
ras_error_interrupt(int irq, void *dev_id, struct pt_regs * regs)
{
- struct rtas_error_log *rtas_elog;
+ struct rtas_error_log log_entry;
+ unsigned int size = sizeof(log_entry);
int status = 0xdeadbeef;
int fatal;
- spin_lock(&ras_log_buf_lock);
+ spin_lock(&log_lock);
- status = rtas_call(ras_check_exception_token, 6, 1, NULL,
- RAS_VECTOR_OFFSET,
- virt_irq_to_real(irq_offset_down(irq)),
- RTAS_INTERNAL_ERROR, 1 /*Time Critical */,
- __pa(&ras_log_buf), RTAS_ERROR_LOG_MAX);
+ status = rtas_call(rtas_token("check-exception"), 6, 1, NULL,
+ 0x500, irq,
+ RTAS_INTERNAL_ERROR,
+ 1, /* Time Critical */
+ __pa(&log_buf), size);
- rtas_elog = (struct rtas_error_log *)ras_log_buf;
+ log_entry = log_buf;
- if ((status == 0) && (rtas_elog->severity >= SEVERITY_ERROR_SYNC))
+ spin_unlock(&log_lock);
+
+ if ((status == 0) && (log_entry.severity >= SEVERITY_ERROR_SYNC))
fatal = 1;
else
fatal = 0;
/* format and print the extended information */
- log_error(ras_log_buf, ERR_TYPE_RTAS_LOG, fatal);
+ log_error((char *)&log_entry, ERR_TYPE_RTAS_LOG, fatal);
if (fatal) {
- udbg_printf("Fatal HW Error <0x%lx 0x%x>\n",
- *((unsigned long *)&ras_log_buf), status);
- printk(KERN_EMERG "Error: Fatal hardware error <0x%lx 0x%x>\n",
- *((unsigned long *)&ras_log_buf), status);
+ udbg_printf("HW Error <0x%lx 0x%x>\n",
+ *((unsigned long *)&log_entry), status);
+ printk(KERN_EMERG
+ "Error: Fatal hardware error <0x%lx 0x%x>\n",
+ *((unsigned long *)&log_entry), status);
#ifndef DEBUG
/* Don't actually power off when debugging so we can test
#endif
} else {
udbg_printf("Recoverable HW Error <0x%lx 0x%x>\n",
- *((unsigned long *)&ras_log_buf), status);
- printk(KERN_WARNING
+ *((unsigned long *)&log_entry), status);
+ printk(KERN_WARNING
"Warning: Recoverable hardware error <0x%lx 0x%x>\n",
- *((unsigned long *)&ras_log_buf), status);
+ *((unsigned long *)&log_entry), status);
}
-
- spin_unlock(&ras_log_buf_lock);
return IRQ_HANDLED;
}
#include <linux/time.h>
#include <linux/string.h>
#include <linux/init.h>
-#include <linux/seq_file.h>
#include <asm/uaccess.h>
#include <asm/bitops.h>
/* ****************************************************************** */
/* Declarations */
-static int ppc_rtas_sensors_show(struct seq_file *m, void *v);
-static int ppc_rtas_clock_show(struct seq_file *m, void *v);
-static ssize_t ppc_rtas_clock_write(struct file *file,
- const char __user *buf, size_t count, loff_t *ppos);
-static int ppc_rtas_progress_show(struct seq_file *m, void *v);
-static ssize_t ppc_rtas_progress_write(struct file *file,
- const char __user *buf, size_t count, loff_t *ppos);
-static int ppc_rtas_poweron_show(struct seq_file *m, void *v);
-static ssize_t ppc_rtas_poweron_write(struct file *file,
- const char __user *buf, size_t count, loff_t *ppos);
-
-static ssize_t ppc_rtas_tone_freq_write(struct file *file,
- const char __user *buf, size_t count, loff_t *ppos);
-static int ppc_rtas_tone_freq_show(struct seq_file *m, void *v);
-static ssize_t ppc_rtas_tone_volume_write(struct file *file,
- const char __user *buf, size_t count, loff_t *ppos);
-static int ppc_rtas_tone_volume_show(struct seq_file *m, void *v);
-static int ppc_rtas_rmo_buf_show(struct seq_file *m, void *v);
-
-static int sensors_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ppc_rtas_sensors_show, NULL);
-}
-
-struct file_operations ppc_rtas_sensors_operations = {
- .open = sensors_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int poweron_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ppc_rtas_poweron_show, NULL);
-}
+static int ppc_rtas_sensor_read(char * buf, char ** start, off_t off,
+ int count, int *eof, void *data);
+static ssize_t ppc_rtas_clock_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos);
+static ssize_t ppc_rtas_clock_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos);
+static ssize_t ppc_rtas_progress_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos);
+static ssize_t ppc_rtas_progress_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos);
+static ssize_t ppc_rtas_poweron_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos);
+static ssize_t ppc_rtas_poweron_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos);
+
+static ssize_t ppc_rtas_tone_freq_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos);
+static ssize_t ppc_rtas_tone_freq_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos);
+static ssize_t ppc_rtas_tone_volume_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos);
+static ssize_t ppc_rtas_tone_volume_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos);
+static ssize_t ppc_rtas_rmo_buf_read(struct file *file, char *buf,
+ size_t count, loff_t *ppos);
struct file_operations ppc_rtas_poweron_operations = {
- .open = poweron_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .write = ppc_rtas_poweron_write,
- .release = single_release,
+ .read = ppc_rtas_poweron_read,
+ .write = ppc_rtas_poweron_write
};
-
-static int progress_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ppc_rtas_progress_show, NULL);
-}
-
struct file_operations ppc_rtas_progress_operations = {
- .open = progress_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .write = ppc_rtas_progress_write,
- .release = single_release,
+ .read = ppc_rtas_progress_read,
+ .write = ppc_rtas_progress_write
};
-static int clock_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ppc_rtas_clock_show, NULL);
-}
-
struct file_operations ppc_rtas_clock_operations = {
- .open = clock_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .write = ppc_rtas_clock_write,
- .release = single_release,
+ .read = ppc_rtas_clock_read,
+ .write = ppc_rtas_clock_write
};
-static int tone_freq_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ppc_rtas_tone_freq_show, NULL);
-}
-
struct file_operations ppc_rtas_tone_freq_operations = {
- .open = tone_freq_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .write = ppc_rtas_tone_freq_write,
- .release = single_release,
+ .read = ppc_rtas_tone_freq_read,
+ .write = ppc_rtas_tone_freq_write
};
-
-static int tone_volume_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ppc_rtas_tone_volume_show, NULL);
-}
-
struct file_operations ppc_rtas_tone_volume_operations = {
- .open = tone_volume_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .write = ppc_rtas_tone_volume_write,
- .release = single_release,
+ .read = ppc_rtas_tone_volume_read,
+ .write = ppc_rtas_tone_volume_write
};
-static int rmo_buf_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ppc_rtas_rmo_buf_show, NULL);
-}
-
-struct file_operations ppc_rtas_rmo_buf_ops = {
- .open = rmo_buf_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
+static struct file_operations ppc_rtas_rmo_buf_ops = {
+ .read = ppc_rtas_rmo_buf_read,
};
-static int ppc_rtas_find_all_sensors(void);
-static void ppc_rtas_process_sensor(struct seq_file *m,
- struct individual_sensor *s, int state, int error, char *loc);
-static char *ppc_rtas_process_error(int error);
-static void get_location_code(struct seq_file *m,
- struct individual_sensor *s, char *loc);
-static void check_location_string(struct seq_file *m, char *c);
-static void check_location(struct seq_file *m, char *c);
+int ppc_rtas_find_all_sensors (void);
+int ppc_rtas_process_sensor(struct individual_sensor s, int state,
+ int error, char * buf);
+char * ppc_rtas_process_error(int error);
+int get_location_code(struct individual_sensor s, char * buf);
+int check_location_string (char *c, char * buf);
+int check_location (char *c, int idx, char * buf);
static int __init proc_rtas_init(void)
{
if (entry)
entry->proc_fops = &ppc_rtas_poweron_operations;
- entry = create_proc_entry("ppc64/rtas/sensors", S_IRUGO, NULL);
- if (entry)
- entry->proc_fops = &ppc_rtas_sensors_operations;
+ create_proc_read_entry("ppc64/rtas/sensors", S_IRUGO, NULL,
+ ppc_rtas_sensor_read, NULL);
entry = create_proc_entry("ppc64/rtas/frequency", S_IWUSR|S_IRUGO,
NULL);
__initcall(proc_rtas_init);
-static int parse_number(const char __user *p, size_t count, unsigned long *val)
-{
- char buf[40];
- char *end;
-
- if (count > 39)
- return -EINVAL;
-
- if (copy_from_user(buf, p, count))
- return -EFAULT;
-
- buf[count] = 0;
-
- *val = simple_strtoul(buf, &end, 10);
- if (*end && *end != '\n')
- return -EINVAL;
-
- return 0;
-}
-
/* ****************************************************************** */
/* POWER-ON-TIME */
/* ****************************************************************** */
-static ssize_t ppc_rtas_poweron_write(struct file *file,
- const char __user *buf, size_t count, loff_t *ppos)
+static ssize_t ppc_rtas_poweron_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos)
{
+ char stkbuf[40]; /* its small, its on stack */
struct rtc_time tm;
unsigned long nowtime;
- int error = parse_number(buf, count, &nowtime);
- if (error)
- return error;
+ char *dest;
+ int error;
+ if (39 < count) count = 39;
+ if (copy_from_user (stkbuf, buf, count)) {
+ return -EFAULT;
+ }
+ stkbuf[count] = 0;
+ nowtime = simple_strtoul(stkbuf, &dest, 10);
+ if (*dest != '\0' && *dest != '\n') {
+ printk("ppc_rtas_poweron_write: Invalid time\n");
+ return count;
+ }
power_on_time = nowtime; /* save the time */
to_tm(nowtime, &tm);
error = rtas_call(rtas_token("set-time-for-power-on"), 7, 1, NULL,
tm.tm_year, tm.tm_mon, tm.tm_mday,
tm.tm_hour, tm.tm_min, tm.tm_sec, 0 /* nano */);
- if (error)
+ if (error != 0)
printk(KERN_WARNING "error: setting poweron time returned: %s\n",
ppc_rtas_process_error(error));
return count;
}
/* ****************************************************************** */
-static int ppc_rtas_poweron_show(struct seq_file *m, void *v)
+static ssize_t ppc_rtas_poweron_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos)
{
+ char stkbuf[40]; /* its small, its on stack */
+ int n, sn;
if (power_on_time == 0)
- seq_printf(m, "Power on time not set\n");
+ n = scnprintf(stkbuf,sizeof(stkbuf),"Power on time not set\n");
else
- seq_printf(m, "%lu\n",power_on_time);
- return 0;
+ n = scnprintf(stkbuf,sizeof(stkbuf),"%lu\n",power_on_time);
+
+ sn = strlen (stkbuf) +1;
+ if (*ppos >= sn)
+ return 0;
+ if (n > sn - *ppos)
+ n = sn - *ppos;
+ if (n > count)
+ n = count;
+ if (copy_to_user (buf, stkbuf + (*ppos), n)) {
+ return -EFAULT;
+ }
+ *ppos += n;
+ return n;
}
/* ****************************************************************** */
/* PROGRESS */
/* ****************************************************************** */
-static ssize_t ppc_rtas_progress_write(struct file *file,
- const char __user *buf, size_t count, loff_t *ppos)
+static ssize_t ppc_rtas_progress_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos)
{
unsigned long hex;
- if (count >= MAX_LINELENGTH)
- count = MAX_LINELENGTH -1;
- if (copy_from_user(progress_led, buf, count)) { /* save the string */
+ if (count >= MAX_LINELENGTH) count = MAX_LINELENGTH -1;
+ if (copy_from_user (progress_led, buf, count)) { /* save the string */
return -EFAULT;
}
progress_led[count] = 0;
ppc_md.progress ((char *)progress_led, hex);
return count;
- /* clear the line */
- /* ppc_md.progress(" ", 0xffff);*/
+ /* clear the line */ /* ppc_md.progress(" ", 0xffff);*/
}
/* ****************************************************************** */
-static int ppc_rtas_progress_show(struct seq_file *m, void *v)
+static ssize_t ppc_rtas_progress_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos)
{
- if (progress_led)
- seq_printf(m, "%s\n", progress_led);
- return 0;
+ int sn, n = 0;
+ char *tmpbuf;
+
+ if (progress_led == NULL) return 0;
+
+ tmpbuf = kmalloc (MAX_LINELENGTH, GFP_KERNEL);
+ if (!tmpbuf) {
+ printk(KERN_ERR "error: kmalloc failed\n");
+ return -ENOMEM;
+ }
+ n = sprintf (tmpbuf, "%s\n", progress_led);
+
+ sn = strlen (tmpbuf) +1;
+ if (*ppos >= sn) {
+ kfree (tmpbuf);
+ return 0;
+ }
+ if (n > sn - *ppos)
+ n = sn - *ppos;
+ if (n > count)
+ n = count;
+ if (copy_to_user (buf, tmpbuf + (*ppos), n)) {
+ kfree (tmpbuf);
+ return -EFAULT;
+ }
+ kfree (tmpbuf);
+ *ppos += n;
+ return n;
}
/* ****************************************************************** */
/* CLOCK */
/* ****************************************************************** */
-static ssize_t ppc_rtas_clock_write(struct file *file,
- const char __user *buf, size_t count, loff_t *ppos)
+static ssize_t ppc_rtas_clock_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos)
{
+ char stkbuf[40]; /* its small, its on stack */
struct rtc_time tm;
unsigned long nowtime;
- int error = parse_number(buf, count, &nowtime);
- if (error)
- return error;
+ char *dest;
+ int error;
+
+ if (39 < count) count = 39;
+ if (copy_from_user (stkbuf, buf, count)) {
+ return -EFAULT;
+ }
+ stkbuf[count] = 0;
+ nowtime = simple_strtoul(stkbuf, &dest, 10);
+ if (*dest != '\0' && *dest != '\n') {
+ printk("ppc_rtas_clock_write: Invalid time\n");
+ return count;
+ }
to_tm(nowtime, &tm);
error = rtas_call(rtas_token("set-time-of-day"), 7, 1, NULL,
tm.tm_year, tm.tm_mon, tm.tm_mday,
tm.tm_hour, tm.tm_min, tm.tm_sec, 0);
- if (error)
+ if (error != 0)
printk(KERN_WARNING "error: setting the clock returned: %s\n",
ppc_rtas_process_error(error));
return count;
}
/* ****************************************************************** */
-static int ppc_rtas_clock_show(struct seq_file *m, void *v)
+static ssize_t ppc_rtas_clock_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos)
{
+ unsigned int year, mon, day, hour, min, sec;
int ret[8];
- int error = rtas_call(rtas_token("get-time-of-day"), 0, 8, ret);
+ int n, sn, error;
+ char stkbuf[40]; /* its small, its on stack */
- if (error) {
+ error = rtas_call(rtas_token("get-time-of-day"), 0, 8, ret);
+
+ year = ret[0]; mon = ret[1]; day = ret[2];
+ hour = ret[3]; min = ret[4]; sec = ret[5];
+
+ if (error != 0){
printk(KERN_WARNING "error: reading the clock returned: %s\n",
ppc_rtas_process_error(error));
- seq_printf(m, "0");
+ n = scnprintf (stkbuf, sizeof(stkbuf), "0");
} else {
- unsigned int year, mon, day, hour, min, sec;
- year = ret[0]; mon = ret[1]; day = ret[2];
- hour = ret[3]; min = ret[4]; sec = ret[5];
- seq_printf(m, "%lu\n",
+ n = scnprintf (stkbuf, sizeof(stkbuf), "%lu\n",
mktime(year, mon, day, hour, min, sec));
}
- return 0;
+
+ sn = strlen (stkbuf) +1;
+ if (*ppos >= sn)
+ return 0;
+ if (n > sn - *ppos)
+ n = sn - *ppos;
+ if (n > count)
+ n = count;
+ if (copy_to_user (buf, stkbuf + (*ppos), n)) {
+ return -EFAULT;
+ }
+ *ppos += n;
+ return n;
}
/* ****************************************************************** */
/* SENSOR STUFF */
/* ****************************************************************** */
-static int ppc_rtas_sensors_show(struct seq_file *m, void *v)
+static int ppc_rtas_sensor_read(char * buf, char ** start, off_t off,
+ int count, int *eof, void *data)
{
- int i,j;
+ int i,j,n;
int state, error;
+ char *buffer;
int get_sensor_state = rtas_token("get-sensor-state");
- seq_printf(m, "RTAS (RunTime Abstraction Services) Sensor Information\n");
- seq_printf(m, "Sensor\t\tValue\t\tCondition\tLocation\n");
- seq_printf(m, "********************************************************\n");
+ if (count < 0)
+ return -EINVAL;
+
+ /* May not be enough */
+ buffer = kmalloc(MAX_LINELENGTH*MAX_SENSORS, GFP_KERNEL);
+
+ if (!buffer)
+ return -ENOMEM;
+
+ memset(buffer, 0, MAX_LINELENGTH*MAX_SENSORS);
+
+ n = sprintf ( buffer , "RTAS (RunTime Abstraction Services) Sensor Information\n");
+ n += sprintf ( buffer+n, "Sensor\t\tValue\t\tCondition\tLocation\n");
+ n += sprintf ( buffer+n, "********************************************************\n");
if (ppc_rtas_find_all_sensors() != 0) {
- seq_printf(m, "\nNo sensors are available\n");
- return 0;
+ n += sprintf ( buffer+n, "\nNo sensors are available\n");
+ goto return_string;
}
for (i=0; i<sensors.quant; i++) {
- struct individual_sensor *p = &sensors.sensor[i];
- char rstr[64];
- char *loc;
- int llen, offs;
-
- sprintf (rstr, SENSOR_PREFIX"%04d", p->token);
- loc = (char *) get_property(rtas_node, rstr, &llen);
-
+ j = sensors.sensor[i].quant;
/* A sensor may have multiple instances */
- for (j = 0, offs = 0; j <= p->quant; j++) {
+ while (j >= 0) {
+
error = rtas_call(get_sensor_state, 2, 2, &state,
- p->token, j);
-
- ppc_rtas_process_sensor(m, p, state, error, loc);
- seq_putc(m, '\n');
- if (loc) {
- offs += strlen(loc) + 1;
- loc += strlen(loc) + 1;
- if (offs >= llen)
- loc = NULL;
- }
- }
+ sensors.sensor[i].token,
+ sensors.sensor[i].quant - j);
+
+ n += ppc_rtas_process_sensor(sensors.sensor[i], state,
+ error, buffer+n );
+ n += sprintf (buffer+n, "\n");
+ j--;
+ } /* while */
+ } /* for */
+
+return_string:
+ if (off >= strlen(buffer)) {
+ *eof = 1;
+ kfree(buffer);
+ return 0;
}
- return 0;
+ if (n > strlen(buffer) - off)
+ n = strlen(buffer) - off;
+ if (n > count)
+ n = count;
+ else
+ *eof = 1;
+
+ memcpy(buf, buffer + off, n);
+ *start = buf;
+ kfree(buffer);
+ return n;
}
/* ****************************************************************** */
-static int ppc_rtas_find_all_sensors(void)
+int ppc_rtas_find_all_sensors (void)
{
unsigned int *utmp;
int len, i;
/*
* Builds a string of what rtas returned
*/
-static char *ppc_rtas_process_error(int error)
+char * ppc_rtas_process_error(int error)
{
switch (error) {
case SENSOR_CRITICAL_HIGH:
* Builds a string out of what the sensor said
*/
-static void ppc_rtas_process_sensor(struct seq_file *m,
- struct individual_sensor *s, int state, int error, char *loc)
+int ppc_rtas_process_sensor(struct individual_sensor s, int state,
+ int error, char * buf)
{
/* Defined return vales */
const char * key_switch[] = { "Off\t", "Normal\t", "Secure\t",
int num_states = 0;
int temperature = 0;
int unknown = 0;
+ int n = 0;
/* What kind of sensor do we have here? */
- switch (s->token) {
+ switch (s.token) {
case KEY_SWITCH:
- seq_printf(m, "Key switch:\t");
+ n += sprintf(buf+n, "Key switch:\t");
num_states = sizeof(key_switch) / sizeof(char *);
if (state < num_states) {
- seq_printf(m, "%s\t", key_switch[state]);
+ n += sprintf(buf+n, "%s\t", key_switch[state]);
have_strings = 1;
}
break;
case ENCLOSURE_SWITCH:
- seq_printf(m, "Enclosure switch:\t");
+ n += sprintf(buf+n, "Enclosure switch:\t");
num_states = sizeof(enclosure_switch) / sizeof(char *);
if (state < num_states) {
- seq_printf(m, "%s\t",
+ n += sprintf(buf+n, "%s\t",
enclosure_switch[state]);
have_strings = 1;
}
break;
case THERMAL_SENSOR:
- seq_printf(m, "Temp. (°C/°F):\t");
+ n += sprintf(buf+n, "Temp. (°C/°F):\t");
temperature = 1;
break;
case LID_STATUS:
- seq_printf(m, "Lid status:\t");
+ n += sprintf(buf+n, "Lid status:\t");
num_states = sizeof(lid_status) / sizeof(char *);
if (state < num_states) {
- seq_printf(m, "%s\t", lid_status[state]);
+ n += sprintf(buf+n, "%s\t", lid_status[state]);
have_strings = 1;
}
break;
case POWER_SOURCE:
- seq_printf(m, "Power source:\t");
+ n += sprintf(buf+n, "Power source:\t");
num_states = sizeof(power_source) / sizeof(char *);
if (state < num_states) {
- seq_printf(m, "%s\t",
+ n += sprintf(buf+n, "%s\t",
power_source[state]);
have_strings = 1;
}
break;
case BATTERY_VOLTAGE:
- seq_printf(m, "Battery voltage:\t");
+ n += sprintf(buf+n, "Battery voltage:\t");
break;
case BATTERY_REMAINING:
- seq_printf(m, "Battery remaining:\t");
+ n += sprintf(buf+n, "Battery remaining:\t");
num_states = sizeof(battery_remaining) / sizeof(char *);
if (state < num_states)
{
- seq_printf(m, "%s\t",
+ n += sprintf(buf+n, "%s\t",
battery_remaining[state]);
have_strings = 1;
}
break;
case BATTERY_PERCENTAGE:
- seq_printf(m, "Battery percentage:\t");
+ n += sprintf(buf+n, "Battery percentage:\t");
break;
case EPOW_SENSOR:
- seq_printf(m, "EPOW Sensor:\t");
+ n += sprintf(buf+n, "EPOW Sensor:\t");
num_states = sizeof(epow_sensor) / sizeof(char *);
if (state < num_states) {
- seq_printf(m, "%s\t", epow_sensor[state]);
+ n += sprintf(buf+n, "%s\t", epow_sensor[state]);
have_strings = 1;
}
break;
case BATTERY_CYCLESTATE:
- seq_printf(m, "Battery cyclestate:\t");
+ n += sprintf(buf+n, "Battery cyclestate:\t");
num_states = sizeof(battery_cyclestate) /
sizeof(char *);
if (state < num_states) {
- seq_printf(m, "%s\t",
+ n += sprintf(buf+n, "%s\t",
battery_cyclestate[state]);
have_strings = 1;
}
break;
case BATTERY_CHARGING:
- seq_printf(m, "Battery Charging:\t");
+ n += sprintf(buf+n, "Battery Charging:\t");
num_states = sizeof(battery_charging) / sizeof(char *);
if (state < num_states) {
- seq_printf(m, "%s\t",
+ n += sprintf(buf+n, "%s\t",
battery_charging[state]);
have_strings = 1;
}
break;
case IBM_SURVEILLANCE:
- seq_printf(m, "Surveillance:\t");
+ n += sprintf(buf+n, "Surveillance:\t");
break;
case IBM_FANRPM:
- seq_printf(m, "Fan (rpm):\t");
+ n += sprintf(buf+n, "Fan (rpm):\t");
break;
case IBM_VOLTAGE:
- seq_printf(m, "Voltage (mv):\t");
+ n += sprintf(buf+n, "Voltage (mv):\t");
break;
case IBM_DRCONNECTOR:
- seq_printf(m, "DR connector:\t");
+ n += sprintf(buf+n, "DR connector:\t");
num_states = sizeof(ibm_drconnector) / sizeof(char *);
if (state < num_states) {
- seq_printf(m, "%s\t",
+ n += sprintf(buf+n, "%s\t",
ibm_drconnector[state]);
have_strings = 1;
}
break;
case IBM_POWERSUPPLY:
- seq_printf(m, "Powersupply:\t");
+ n += sprintf(buf+n, "Powersupply:\t");
break;
case IBM_INTQUEUE:
- seq_printf(m, "Interrupt queue:\t");
+ n += sprintf(buf+n, "Interrupt queue:\t");
num_states = sizeof(ibm_intqueue) / sizeof(char *);
if (state < num_states) {
- seq_printf(m, "%s\t",
+ n += sprintf(buf+n, "%s\t",
ibm_intqueue[state]);
have_strings = 1;
}
break;
default:
- seq_printf(m, "Unknown sensor (type %d), ignoring it\n",
- s->token);
+ n += sprintf(buf+n, "Unknown sensor (type %d), ignoring it\n",
+ s.token);
unknown = 1;
have_strings = 1;
break;
}
if (have_strings == 0) {
if (temperature) {
- seq_printf(m, "%4d /%4d\t", state, cel_to_fahr(state));
+ n += sprintf(buf+n, "%4d /%4d\t", state, cel_to_fahr(state));
} else
- seq_printf(m, "%10d\t", state);
+ n += sprintf(buf+n, "%10d\t", state);
}
if (unknown == 0) {
- seq_printf(m, "%s\t", ppc_rtas_process_error(error));
- get_location_code(m, s, loc);
+ n += sprintf ( buf+n, "%s\t", ppc_rtas_process_error(error));
+ n += get_location_code(s, buf+n);
}
+ return n;
}
/* ****************************************************************** */
-static void check_location(struct seq_file *m, char *c)
+int check_location (char *c, int idx, char * buf)
{
- switch (c[0]) {
+ int n = 0;
+
+ switch (*(c+idx)) {
case LOC_PLANAR:
- seq_printf(m, "Planar #%c", c[1]);
+ n += sprintf ( buf, "Planar #%c", *(c+idx+1));
break;
case LOC_CPU:
- seq_printf(m, "CPU #%c", c[1]);
+ n += sprintf ( buf, "CPU #%c", *(c+idx+1));
break;
case LOC_FAN:
- seq_printf(m, "Fan #%c", c[1]);
+ n += sprintf ( buf, "Fan #%c", *(c+idx+1));
break;
case LOC_RACKMOUNTED:
- seq_printf(m, "Rack #%c", c[1]);
+ n += sprintf ( buf, "Rack #%c", *(c+idx+1));
break;
case LOC_VOLTAGE:
- seq_printf(m, "Voltage #%c", c[1]);
+ n += sprintf ( buf, "Voltage #%c", *(c+idx+1));
break;
case LOC_LCD:
- seq_printf(m, "LCD #%c", c[1]);
+ n += sprintf ( buf, "LCD #%c", *(c+idx+1));
break;
case '.':
- seq_printf(m, "- %c", c[1]);
- break;
+ n += sprintf ( buf, "- %c", *(c+idx+1));
default:
- seq_printf(m, "Unknown location");
+ n += sprintf ( buf, "Unknown location");
break;
}
+ return n;
}
* ${LETTER}${NUMBER}[[-/]${LETTER}${NUMBER} [ ... ] ]
* the '.' may be an abbrevation
*/
-static void check_location_string(struct seq_file *m, char *c)
+int check_location_string (char *c, char *buf)
{
- while (*c) {
- if (isalpha(*c) || *c == '.')
- check_location(m, c);
- else if (*c == '/' || *c == '-')
- seq_printf(m, " at ");
- c++;
+ int n=0,i=0;
+
+ while (c[i]) {
+ if (isalpha(c[i]) || c[i] == '.') {
+ n += check_location(c, i, buf+n);
+ }
+ else if (c[i] == '/' || c[i] == '-')
+ n += sprintf(buf+n, " at ");
+ i++;
}
+ return n;
}
/* ****************************************************************** */
-static void get_location_code(struct seq_file *m, struct individual_sensor *s, char *loc)
+int get_location_code(struct individual_sensor s, char * buffer)
{
- if (!loc || !*loc) {
- seq_printf(m, "---");/* does not have a location */
+ char rstr[512], tmp[10], tmp2[10];
+ int n=0, i=0, llen, len;
+ /* char *buf = kmalloc(MAX_LINELENGTH, GFP_KERNEL); */
+ char *ret;
+
+ static int pos = 0; /* remember position where buffer was */
+
+ /* construct the sensor number like 0003 */
+ /* fill with zeros */
+ n = sprintf(tmp, "%d", s.token);
+ len = strlen(tmp);
+ while (strlen(tmp) < 4)
+ n += sprintf (tmp+n, "0");
+
+ /* invert the string */
+ while (tmp[i]) {
+ if (i<len)
+ tmp2[4-len+i] = tmp[i];
+ else
+ tmp2[3-i] = tmp[i];
+ i++;
+ }
+ tmp2[4] = '\0';
+
+ sprintf (rstr, SENSOR_PREFIX"%s", tmp2);
+
+ ret = (char *) get_property(rtas_node, rstr, &llen);
+
+ n=0;
+ if (ret == NULL || ret[0] == '\0') {
+ n += sprintf ( buffer+n, "--- ");/* does not have a location */
} else {
- check_location_string(m, loc);
+ char t[50];
+ ret += pos;
+
+ n += check_location_string(ret, buffer + n);
+ n += sprintf ( buffer+n, " ");
+ /* see how many characters we have printed */
+ scnprintf(t, sizeof(t), "%s ", ret);
+
+ pos += strlen(t);
+ if (pos >= llen) pos=0;
}
- seq_putc(m, ' ');
+ return n;
}
/* ****************************************************************** */
/* INDICATORS - Tone Frequency */
/* ****************************************************************** */
-static ssize_t ppc_rtas_tone_freq_write(struct file *file,
- const char __user *buf, size_t count, loff_t *ppos)
+static ssize_t ppc_rtas_tone_freq_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos)
{
+ char stkbuf[40]; /* its small, its on stack */
unsigned long freq;
- int error = parse_number(buf, count, &freq);
- if (error)
- return error;
+ char *dest;
+ int error;
+ if (39 < count) count = 39;
+ if (copy_from_user (stkbuf, buf, count)) {
+ return -EFAULT;
+ }
+ stkbuf[count] = 0;
+ freq = simple_strtoul(stkbuf, &dest, 10);
+ if (*dest != '\0' && *dest != '\n') {
+ printk("ppc_rtas_tone_freq_write: Invalid tone freqency\n");
+ return count;
+ }
+ if (freq < 0) freq = 0;
rtas_tone_frequency = freq; /* save it for later */
error = rtas_call(rtas_token("set-indicator"), 3, 1, NULL,
TONE_FREQUENCY, 0, freq);
- if (error)
+ if (error != 0)
printk(KERN_WARNING "error: setting tone frequency returned: %s\n",
ppc_rtas_process_error(error));
return count;
}
/* ****************************************************************** */
-static int ppc_rtas_tone_freq_show(struct seq_file *m, void *v)
+static ssize_t ppc_rtas_tone_freq_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos)
{
- seq_printf(m, "%lu\n", rtas_tone_frequency);
- return 0;
+ int n, sn;
+ char stkbuf[40]; /* its small, its on stack */
+
+ n = scnprintf(stkbuf, 40, "%lu\n", rtas_tone_frequency);
+
+ sn = strlen (stkbuf) +1;
+ if (*ppos >= sn)
+ return 0;
+ if (n > sn - *ppos)
+ n = sn - *ppos;
+ if (n > count)
+ n = count;
+ if (copy_to_user (buf, stkbuf + (*ppos), n)) {
+ return -EFAULT;
+ }
+ *ppos += n;
+ return n;
}
/* ****************************************************************** */
/* INDICATORS - Tone Volume */
/* ****************************************************************** */
-static ssize_t ppc_rtas_tone_volume_write(struct file *file,
- const char __user *buf, size_t count, loff_t *ppos)
+static ssize_t ppc_rtas_tone_volume_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos)
{
+ char stkbuf[40]; /* its small, its on stack */
unsigned long volume;
- int error = parse_number(buf, count, &volume);
- if (error)
- return error;
+ char *dest;
+ int error;
- if (volume > 100)
- volume = 100;
+ if (39 < count) count = 39;
+ if (copy_from_user (stkbuf, buf, count)) {
+ return -EFAULT;
+ }
+ stkbuf[count] = 0;
+ volume = simple_strtoul(stkbuf, &dest, 10);
+ if (*dest != '\0' && *dest != '\n') {
+ printk("ppc_rtas_tone_volume_write: Invalid tone volume\n");
+ return count;
+ }
+ if (volume < 0) volume = 0;
+ if (volume > 100) volume = 100;
rtas_tone_volume = volume; /* save it for later */
error = rtas_call(rtas_token("set-indicator"), 3, 1, NULL,
TONE_VOLUME, 0, volume);
- if (error)
+ if (error != 0)
printk(KERN_WARNING "error: setting tone volume returned: %s\n",
ppc_rtas_process_error(error));
return count;
}
/* ****************************************************************** */
-static int ppc_rtas_tone_volume_show(struct seq_file *m, void *v)
+static ssize_t ppc_rtas_tone_volume_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos)
{
- seq_printf(m, "%lu\n", rtas_tone_volume);
- return 0;
+ int n, sn;
+ char stkbuf[40]; /* its small, its on stack */
+
+ n = scnprintf(stkbuf, 40, "%lu\n", rtas_tone_volume);
+
+ sn = strlen (stkbuf) +1;
+ if (*ppos >= sn)
+ return 0;
+ if (n > sn - *ppos)
+ n = sn - *ppos;
+ if (n > count)
+ n = count;
+ if (copy_to_user (buf, stkbuf + (*ppos), n)) {
+ return -EFAULT;
+ }
+ *ppos += n;
+ return n;
}
#define RMO_READ_BUF_MAX 30
/* RTAS Userspace access */
-static int ppc_rtas_rmo_buf_show(struct seq_file *m, void *v)
+static ssize_t ppc_rtas_rmo_buf_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
{
- seq_printf(m, "%016lx %x\n", rtas_rmo_buf, RTAS_RMOBUF_MAX);
- return 0;
+ char kbuf[RMO_READ_BUF_MAX];
+ int n;
+
+ n = sprintf(kbuf, "%016lx %x\n", rtas_rmo_buf, RTAS_RMOBUF_MAX);
+ if (n > count)
+ n = count;
+
+ if (ppos && *ppos != 0)
+ return 0;
+
+ if (copy_to_user(buf, kbuf, n))
+ return -EFAULT;
+
+ if (ppos)
+ *ppos = n;
+
+ return n;
}
#include <asm/delay.h>
#include <asm/uaccess.h>
-struct flash_block_list_header rtas_firmware_flash_list = {0, NULL};
+struct flash_block_list_header rtas_firmware_flash_list = {0, 0};
struct rtas_t rtas = {
.lock = SPIN_LOCK_UNLOCKED
if (f->next)
f->next = (struct flash_block_list *)virt_to_abs(f->next);
else
- f->next = NULL;
+ f->next = 0LL;
/* make num_blocks into the version/length field */
f->num_blocks = (FLASH_BLOCK_LIST_VERSION << 56) | ((f->num_blocks+1)*16);
}
BUG_ON(rtas_args->token == RTAS_UNKNOWN_SERVICE);
- printk("cpu %u (hwid %u) Ready to die...\n",
+ printk("%u %u Ready to die...\n",
smp_processor_id(), hard_smp_processor_id());
enter_rtas(__pa(rtas_args));
#define DEBUG(A...)
#endif
-static spinlock_t rtasd_log_lock = SPIN_LOCK_UNLOCKED;
+static spinlock_t log_lock = SPIN_LOCK_UNLOCKED;
DECLARE_WAIT_QUEUE_HEAD(rtas_log_wait);
if (buf == NULL)
return;
- spin_lock_irqsave(&rtasd_log_lock, s);
+ spin_lock_irqsave(&log_lock, s);
/* get length and increase count */
switch (err_type & ERR_TYPE_MASK) {
break;
case ERR_TYPE_KERNEL_PANIC:
default:
- spin_unlock_irqrestore(&rtasd_log_lock, s);
+ spin_unlock_irqrestore(&log_lock, s);
return;
}
/* Check to see if we need to or have stopped logging */
if (fatal || no_more_logging) {
no_more_logging = 1;
- spin_unlock_irqrestore(&rtasd_log_lock, s);
+ spin_unlock_irqrestore(&log_lock, s);
return;
}
else
rtas_log_start += 1;
- spin_unlock_irqrestore(&rtasd_log_lock, s);
+ spin_unlock_irqrestore(&log_lock, s);
wake_up_interruptible(&rtas_log_wait);
break;
case ERR_TYPE_KERNEL_PANIC:
default:
- spin_unlock_irqrestore(&rtasd_log_lock, s);
+ spin_unlock_irqrestore(&log_lock, s);
return;
}
return -ENOMEM;
- spin_lock_irqsave(&rtasd_log_lock, s);
+ spin_lock_irqsave(&log_lock, s);
/* if it's 0, then we know we got the last one (the one in NVRAM) */
if (rtas_log_size == 0 && !no_more_logging)
nvram_clear_error_log();
- spin_unlock_irqrestore(&rtasd_log_lock, s);
+ spin_unlock_irqrestore(&log_lock, s);
error = wait_event_interruptible(rtas_log_wait, rtas_log_size);
if (error)
goto out;
- spin_lock_irqsave(&rtasd_log_lock, s);
+ spin_lock_irqsave(&log_lock, s);
offset = rtas_error_log_buffer_max * (rtas_log_start & LOG_NUMBER_MASK);
memcpy(tmp, &rtas_log_buf[offset], count);
rtas_log_start += 1;
rtas_log_size -= 1;
- spin_unlock_irqrestore(&rtasd_log_lock, s);
+ spin_unlock_irqrestore(&log_lock, s);
error = copy_to_user(buf, tmp, count) ? -EFAULT : count;
out:
else
printk(KERN_ERR "Failed to create error_log proc entry\n");
- if (kernel_thread(rtasd, NULL, CLONE_FS) < 0)
+ if (kernel_thread(rtasd, 0, CLONE_FS) < 0)
printk(KERN_ERR "Failed to start RTAS daemon\n");
return 0;
* ioctls.
*/
-static ssize_t rtc_read(struct file *file, char __user *buf,
+static loff_t rtc_llseek(struct file *file, loff_t offset, int origin);
+
+static ssize_t rtc_read(struct file *file, char *buf,
size_t count, loff_t *ppos);
static int rtc_ioctl(struct inode *inode, struct file *file,
* Now all the various file operations that we export.
*/
-static ssize_t rtc_read(struct file *file, char __user *buf,
+static loff_t rtc_llseek(struct file *file, loff_t offset, int origin)
+{
+ return -ESPIPE;
+}
+
+static ssize_t rtc_read(struct file *file, char *buf,
size_t count, loff_t *ppos)
{
return -EIO;
if (!capable(CAP_SYS_TIME))
return -EACCES;
- if (copy_from_user(&rtc_tm, (struct rtc_time __user *)arg,
+ if (copy_from_user(&rtc_tm, (struct rtc_time*)arg,
sizeof(struct rtc_time)))
return -EFAULT;
}
case RTC_EPOCH_READ: /* Read the epoch. */
{
- return put_user (epoch, (unsigned long __user *)arg);
+ return put_user (epoch, (unsigned long *)arg);
}
case RTC_EPOCH_SET: /* Set the epoch. */
{
default:
return -EINVAL;
}
- return copy_to_user((void __user *)arg, &wtime, sizeof wtime) ? -EFAULT : 0;
+ return copy_to_user((void *)arg, &wtime, sizeof wtime) ? -EFAULT : 0;
}
static int rtc_open(struct inode *inode, struct file *file)
{
- nonseekable_open(inode, file);
return 0;
}
*/
static struct file_operations rtc_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
+ .llseek = rtc_llseek,
.read = rtc_read,
.ioctl = rtc_ioctl,
.open = rtc_open,
return retval;
#ifdef CONFIG_PROC_FS
- if (create_proc_read_entry ("driver/rtc", 0, NULL, rtc_read_proc, NULL) == NULL)
+ if(create_proc_read_entry ("driver/rtc", 0, 0, rtc_read_proc, NULL) == NULL)
misc_deregister(&rtc_dev);
return -ENOMEM;
#endif
void cpu_die(void)
{
local_irq_disable();
- /* Some hardware requires clearing the CPPR, while other hardware does not
- * it is safe either way
- */
- pSeriesLP_cppr_info(0, 0);
rtas_stop_self();
/* Should never get here... */
BUG();
/* Fixup atomic count: it exited inside IRQ handler. */
paca[lcpu].__current->thread_info->preempt_count = 0;
+ /* Fixup SLB round-robin so next segment (kernel) goes in segment 0 */
+ paca[lcpu].stab_next_rr = 0;
/* At boot this is done in prom.c. */
paca[lcpu].hw_cpu_id = pcpu;
}
maxcpus = ireg[num_addr_cell + num_size_cell];
-
- /* Double maxcpus for processors which have SMT capability */
- if (cur_cpu_spec->cpu_features & CPU_FTR_SMT)
- maxcpus *= 2;
-
+ /* DRENG need to account for threads here too */
if (maxcpus > NR_CPUS) {
printk(KERN_WARNING
printk("smp_call_function on cpu %d: other cpus not "
"responding (%d)\n", smp_processor_id(),
atomic_read(&data.started));
- debugger(NULL);
+ debugger(0);
goto out;
}
}
smp_processor_id(),
atomic_read(&data.finished),
atomic_read(&data.started));
- debugger(NULL);
+ debugger(0);
goto out;
}
}
if (smp_ops->give_timebase)
smp_ops->give_timebase();
-
- /* Wait until cpu puts itself in the online map */
- while (!cpu_online(cpu))
- cpu_relax();
-
+ cpu_set(cpu, cpu_online_map);
return 0;
}
#endif
#endif
- spin_lock(&call_lock);
- cpu_set(cpu, cpu_online_map);
- spin_unlock(&call_lock);
-
local_irq_enable();
return cpu_idle(NULL);
#include <asm/naca.h>
#include <asm/cputable.h>
-static int make_ste(unsigned long stab, unsigned long esid,
- unsigned long vsid);
+static int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid);
+static void make_slbe(unsigned long esid, unsigned long vsid, int large,
+ int kernel_segment);
-void slb_initialize(void);
+static inline void slb_add_bolted(void)
+{
+#ifndef CONFIG_PPC_ISERIES
+ unsigned long esid = GET_ESID(VMALLOCBASE);
+ unsigned long vsid = get_kernel_vsid(VMALLOCBASE);
+
+ WARN_ON(!irqs_disabled());
+
+ /*
+ * Bolt in the first vmalloc segment. Since modules end
+ * up there it gets hit very heavily.
+ */
+ get_paca()->stab_next_rr = 1;
+ make_slbe(esid, vsid, 0, 1);
+#endif
+}
/*
* Build an entry for the base kernel segment and put it into
*/
void stab_initialize(unsigned long stab)
{
- unsigned long vsid = get_kernel_vsid(KERNELBASE);
+ unsigned long esid, vsid;
+ int seg0_largepages = 0;
+
+ esid = GET_ESID(KERNELBASE);
+ vsid = get_kernel_vsid(esid << SID_SHIFT);
+
+ if (cur_cpu_spec->cpu_features & CPU_FTR_16M_PAGE)
+ seg0_largepages = 1;
if (cur_cpu_spec->cpu_features & CPU_FTR_SLB) {
- slb_initialize();
+ /* Invalidate the entire SLB & all the ERATS */
+#ifdef CONFIG_PPC_ISERIES
+ asm volatile("isync; slbia; isync":::"memory");
+#else
+ asm volatile("isync":::"memory");
+ asm volatile("slbmte %0,%0"::"r" (0) : "memory");
+ asm volatile("isync; slbia; isync":::"memory");
+ get_paca()->stab_next_rr = 0;
+ make_slbe(esid, vsid, seg0_largepages, 1);
+ asm volatile("isync":::"memory");
+#endif
+
+ slb_add_bolted();
} else {
asm volatile("isync; slbia; isync":::"memory");
- make_ste(stab, GET_ESID(KERNELBASE), vsid);
+ make_ste(stab, esid, vsid);
/* Order update */
asm volatile("sync":::"memory");
* Could not find empty entry, pick one with a round robin selection.
* Search all entries in the two groups.
*/
- castout_entry = get_paca()->stab_rr;
+ castout_entry = get_paca()->stab_next_rr;
for (i = 0; i < 16; i++) {
if (castout_entry < 8) {
global_entry = (esid & 0x1f) << 3;
castout_entry = (castout_entry + 1) & 0xf;
}
- get_paca()->stab_rr = (castout_entry + 1) & 0xf;
+ get_paca()->stab_next_rr = (castout_entry + 1) & 0xf;
/* Modify the old entry to the new value. */
preload_stab(tsk, mm);
}
+
+/*
+ * SLB stuff
+ */
+
+/*
+ * Create a segment buffer entry for the given esid/vsid pair.
+ *
+ * NOTE: A context syncronising instruction is required before and after
+ * this, in the common case we use exception entry and rfid.
+ */
+static void make_slbe(unsigned long esid, unsigned long vsid, int large,
+ int kernel_segment)
+{
+ unsigned long entry, castout_entry;
+ union {
+ unsigned long word0;
+ slb_dword0 data;
+ } esid_data;
+ union {
+ unsigned long word0;
+ slb_dword1 data;
+ } vsid_data;
+ struct paca_struct *lpaca = get_paca();
+
+ /*
+ * We take the next entry, round robin. Previously we tried
+ * to find a free slot first but that took too long. Unfortunately
+ * we dont have any LRU information to help us choose a slot.
+ */
+
+ /*
+ * Never cast out the segment for our kernel stack. Since we
+ * dont invalidate the ERAT we could have a valid translation
+ * for the kernel stack during the first part of exception exit
+ * which gets invalidated due to a tlbie from another cpu at a
+ * non recoverable point (after setting srr0/1) - Anton
+ *
+ * paca Ksave is always valid (even when on the interrupt stack)
+ * so we use that.
+ */
+ castout_entry = lpaca->stab_next_rr;
+ do {
+ entry = castout_entry;
+ castout_entry++;
+ /*
+ * We bolt in the first kernel segment and the first
+ * vmalloc segment.
+ */
+ if (castout_entry >= SLB_NUM_ENTRIES)
+ castout_entry = 2;
+ asm volatile("slbmfee %0,%1" : "=r" (esid_data) : "r" (entry));
+ } while (esid_data.data.v &&
+ esid_data.data.esid == GET_ESID(lpaca->kstack));
+
+ lpaca->stab_next_rr = castout_entry;
+
+ /* slbie not needed as the previous mapping is still valid. */
+
+ /*
+ * Write the new SLB entry.
+ */
+ vsid_data.word0 = 0;
+ vsid_data.data.vsid = vsid;
+ vsid_data.data.kp = 1;
+ if (large)
+ vsid_data.data.l = 1;
+ if (kernel_segment)
+ vsid_data.data.c = 1;
+ else
+ vsid_data.data.ks = 1;
+
+ esid_data.word0 = 0;
+ esid_data.data.esid = esid;
+ esid_data.data.v = 1;
+ esid_data.data.index = entry;
+
+ /*
+ * No need for an isync before or after this slbmte. The exception
+ * we enter with and the rfid we exit with are context synchronizing.
+ */
+ asm volatile("slbmte %0,%1" : : "r" (vsid_data), "r" (esid_data));
+}
+
+static inline void __slb_allocate(unsigned long esid, unsigned long vsid,
+ mm_context_t context)
+{
+ int large = 0;
+ int region_id = REGION_ID(esid << SID_SHIFT);
+ unsigned long offset;
+
+ if (cur_cpu_spec->cpu_features & CPU_FTR_16M_PAGE) {
+ if (region_id == KERNEL_REGION_ID)
+ large = 1;
+ else if (region_id == USER_REGION_ID)
+ large = in_hugepage_area(context, esid << SID_SHIFT);
+ }
+
+ make_slbe(esid, vsid, large, region_id != USER_REGION_ID);
+
+ if (region_id != USER_REGION_ID)
+ return;
+
+ offset = __get_cpu_var(stab_cache_ptr);
+ if (offset < NR_STAB_CACHE_ENTRIES)
+ __get_cpu_var(stab_cache[offset++]) = esid;
+ else
+ offset = NR_STAB_CACHE_ENTRIES+1;
+ __get_cpu_var(stab_cache_ptr) = offset;
+}
+
+/*
+ * Allocate a segment table entry for the given ea.
+ */
+int slb_allocate(unsigned long ea)
+{
+ unsigned long vsid, esid;
+ mm_context_t context;
+
+ /* Check for invalid effective addresses. */
+ if (unlikely(!IS_VALID_EA(ea)))
+ return 1;
+
+ /* Kernel or user address? */
+ if (REGION_ID(ea) >= KERNEL_REGION_ID) {
+ context = KERNEL_CONTEXT(ea);
+ vsid = get_kernel_vsid(ea);
+ } else {
+ if (unlikely(!current->mm))
+ return 1;
+
+ context = current->mm->context;
+ vsid = get_vsid(context.id, ea);
+ }
+
+ esid = GET_ESID(ea);
+#ifndef CONFIG_PPC_ISERIES
+ BUG_ON((esid << SID_SHIFT) == VMALLOCBASE);
+#endif
+ __slb_allocate(esid, vsid, context);
+
+ return 0;
+}
+
+/*
+ * preload some userspace segments into the SLB.
+ */
+static void preload_slb(struct task_struct *tsk, struct mm_struct *mm)
+{
+ unsigned long pc = KSTK_EIP(tsk);
+ unsigned long stack = KSTK_ESP(tsk);
+ unsigned long unmapped_base;
+ unsigned long pc_esid = GET_ESID(pc);
+ unsigned long stack_esid = GET_ESID(stack);
+ unsigned long unmapped_base_esid;
+ unsigned long vsid;
+
+ if (test_tsk_thread_flag(tsk, TIF_32BIT))
+ unmapped_base = TASK_UNMAPPED_BASE_USER32;
+ else
+ unmapped_base = TASK_UNMAPPED_BASE_USER64;
+
+ unmapped_base_esid = GET_ESID(unmapped_base);
+
+ if (!IS_VALID_EA(pc) || (REGION_ID(pc) >= KERNEL_REGION_ID))
+ return;
+ vsid = get_vsid(mm->context.id, pc);
+ __slb_allocate(pc_esid, vsid, mm->context);
+
+ if (pc_esid == stack_esid)
+ return;
+
+ if (!IS_VALID_EA(stack) || (REGION_ID(stack) >= KERNEL_REGION_ID))
+ return;
+ vsid = get_vsid(mm->context.id, stack);
+ __slb_allocate(stack_esid, vsid, mm->context);
+
+ if (pc_esid == unmapped_base_esid || stack_esid == unmapped_base_esid)
+ return;
+
+ if (!IS_VALID_EA(unmapped_base) ||
+ (REGION_ID(unmapped_base) >= KERNEL_REGION_ID))
+ return;
+ vsid = get_vsid(mm->context.id, unmapped_base);
+ __slb_allocate(unmapped_base_esid, vsid, mm->context);
+}
+
+/* Flush all user entries from the segment table of the current processor. */
+void flush_slb(struct task_struct *tsk, struct mm_struct *mm)
+{
+ unsigned long offset = __get_cpu_var(stab_cache_ptr);
+ union {
+ unsigned long word0;
+ slb_dword0 data;
+ } esid_data;
+
+ if (offset <= NR_STAB_CACHE_ENTRIES) {
+ int i;
+ asm volatile("isync" : : : "memory");
+ for (i = 0; i < offset; i++) {
+ esid_data.word0 = 0;
+ esid_data.data.esid = __get_cpu_var(stab_cache[i]);
+ BUG_ON(esid_data.data.esid == GET_ESID(VMALLOCBASE));
+ asm volatile("slbie %0" : : "r" (esid_data));
+ }
+ asm volatile("isync" : : : "memory");
+ } else {
+ asm volatile("isync; slbia; isync" : : : "memory");
+ slb_add_bolted();
+ }
+
+ /* Workaround POWER5 < DD2.1 issue */
+ if (offset == 1 || offset > NR_STAB_CACHE_ENTRIES) {
+ /*
+ * flush segment in EEH region, we dont normally access
+ * addresses in this region.
+ */
+ esid_data.word0 = 0;
+ esid_data.data.esid = EEH_REGION_ID;
+ asm volatile("slbie %0" : : "r" (esid_data));
+ }
+
+ __get_cpu_var(stab_cache_ptr) = 0;
+
+ preload_slb(tsk, mm);
+}
#include <linux/ptrace.h>
#include <linux/aio_abi.h>
#include <linux/elf.h>
-#include <linux/vs_cvirt.h>
#include <net/scm.h>
#include <net/sock.h>
unsigned long val = run_on_cpu(cpu->sysdev.id, read_##NAME, 0); \
return sprintf(buf, "%lx\n", val); \
} \
-static ssize_t __attribute_used__ \
- store_##NAME(struct sys_device *dev, const char *buf, size_t count) \
+static ssize_t store_##NAME(struct sys_device *dev, const char *buf, \
+ size_t count) \
{ \
struct cpu *cpu = container_of(dev, struct cpu, sysdev); \
unsigned long val; \
viodev->dev.platform_data = of_node_get(of_node);
viodev->irq = NO_IRQ;
- irq_p = (unsigned int *)get_property(of_node, "interrupts", NULL);
+ irq_p = (unsigned int *)get_property(of_node, "interrupts", 0);
if (irq_p) {
int virq = virt_irq_create_mapping(*irq_p);
if (virq == NO_IRQ) {
#include <asm/naca.h>
#include <asm/rtas.h>
#include <asm/xics.h>
+#include <asm/ppcdebug.h>
#include <asm/hvcall.h>
#include <asm/machdep.h>
val64);
}
-void pSeriesLP_cppr_info(int n_cpu, u8 value)
+static void pSeriesLP_cppr_info(int n_cpu, u8 value)
{
unsigned long lpar_rc;
#ifdef CONFIG_SMP
static int get_irq_server(unsigned int irq)
{
+ cpumask_t cpumask = irq_affinity[irq];
+ cpumask_t tmp = CPU_MASK_NONE;
unsigned int server;
#ifdef CONFIG_IRQ_ALL_CPUS
/* For the moment only implement delivery to all cpus or one cpu */
if (smp_threads_ready) {
- cpumask_t cpumask = irq_affinity[irq];
- cpumask_t tmp = CPU_MASK_NONE;
if (cpus_equal(cpumask, CPU_MASK_ALL)) {
server = default_distrib_server;
} else {
call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server,
DEFAULT_PRIORITY);
if (call_status != 0) {
- printk(KERN_ERR "xics_enable_irq: irq=%d: ibm_set_xive "
+ printk(KERN_ERR "xics_enable_irq: irq=%x: ibm_set_xive "
"returned %x\n", irq, call_status);
return;
}
/* Now unmask the interrupt (often a no-op) */
call_status = rtas_call(ibm_int_on, 1, 1, NULL, irq);
if (call_status != 0) {
- printk(KERN_ERR "xics_enable_irq: irq=%d: ibm_int_on "
+ printk(KERN_ERR "xics_enable_irq: irq=%x: ibm_int_on "
"returned %x\n", irq, call_status);
return;
}
call_status = rtas_call(ibm_int_off, 1, 1, NULL, irq);
if (call_status != 0) {
- printk(KERN_ERR "xics_disable_real_irq: irq=%d: "
+ printk(KERN_ERR "xics_disable_real_irq: irq=%x: "
"ibm_int_off returned %x\n", irq, call_status);
return;
}
/* Have to set XIVE to 0xff to be able to remove a slot */
call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server, 0xff);
if (call_status != 0) {
- printk(KERN_ERR "xics_disable_irq: irq=%d: ibm_set_xive(0xff)"
+ printk(KERN_ERR "xics_disable_irq: irq=%x: ibm_set_xive(0xff)"
" returned %x\n", irq, call_status);
return;
}
}
}
+extern unsigned int real_irq_to_virt_slowpath(unsigned int real_irq);
+
int xics_get_irq(struct pt_regs *regs)
{
unsigned int cpu = smp_processor_id();
if (irq == NO_IRQ)
irq = real_irq_to_virt_slowpath(vec);
if (irq == NO_IRQ) {
- printk(KERN_ERR "Interrupt %d (real) is invalid,"
+ printk(KERN_ERR "Interrupt 0x%x (real) is invalid,"
" disabling it.\n", vec);
xics_disable_real_irq(vec);
} else
#ifdef CONFIG_SMP
+extern struct xics_ipi_struct xics_ipi_message[NR_CPUS] __cacheline_aligned;
+
irqreturn_t xics_ipi_action(int irq, void *dev_id, struct pt_regs *regs)
{
int cpu = smp_processor_id();
ibm_int_off = rtas_token("ibm,int-off");
np = of_find_node_by_type(NULL, "PowerPC-External-Interrupt-Presentation");
- if (!np)
- panic("xics_init_IRQ: can't find interrupt presentation");
-
+ if (!np) {
+ printk(KERN_WARNING "Can't find Interrupt Presentation\n");
+ udbg_printf("Can't find Interrupt Presentation\n");
+ while (1);
+ }
nextnode:
- ireg = (uint *)get_property(np, "ibm,interrupt-server-ranges", NULL);
+ ireg = (uint *)get_property(np, "ibm,interrupt-server-ranges", 0);
if (ireg) {
/*
* set node starting index for this node
}
ireg = (uint *)get_property(np, "reg", &ilen);
- if (!ireg)
- panic("xics_init_IRQ: can't find interrupt reg property");
+ if (!ireg) {
+ printk(KERN_WARNING "Can't find Interrupt Reg Property\n");
+ udbg_printf("Can't find Interrupt Reg Property\n");
+ while (1);
+ }
while (ilen) {
inodes[indx].addr = (unsigned long long)*ireg++ << 32;
np = of_find_node_by_type(NULL, "interrupt-controller");
if (!np) {
- printk(KERN_WARNING "xics: no ISA interrupt controller\n");
+ printk(KERN_WARNING "xics: no ISA Interrupt Controller\n");
xics_irq_8259_cascade_real = -1;
xics_irq_8259_cascade = -1;
} else {
- ireg = (uint *) get_property(np, "interrupts", NULL);
- if (!ireg)
- panic("xics_init_IRQ: can't find ISA interrupts property");
-
+ ireg = (uint *) get_property(np, "interrupts", 0);
+ if (!ireg) {
+ printk(KERN_WARNING "Can't find ISA Interrupts Property\n");
+ udbg_printf("Can't find ISA Interrupts Property\n");
+ while (1);
+ }
xics_irq_8259_cascade_real = *ireg;
xics_irq_8259_cascade
= virt_irq_create_mapping(xics_irq_8259_cascade_real);
xics_per_cpu[0] = __ioremap((ulong)intr_base, intr_size,
_PAGE_NO_CACHE);
#endif /* CONFIG_SMP */
+#ifdef CONFIG_PPC_PSERIES
+ /* actually iSeries does not use any of xics...but it has link dependencies
+ * for now, except this new one...
+ */
} else if (systemcfg->platform == PLATFORM_PSERIES_LPAR) {
ops = &pSeriesLP_ops;
+#endif
}
xics_8259_pic.enable = i8259_pic.enable;
if (naca->interrupt_controller == IC_PPC_XIC &&
xics_irq_8259_cascade != -1) {
if (request_irq(irq_offset_up(xics_irq_8259_cascade),
- no_action, 0, "8259 cascade", NULL))
- printk(KERN_ERR "xics_setup_i8259: couldn't get 8259 "
- "cascade\n");
+ no_action, 0, "8259 cascade", 0))
+ printk(KERN_ERR "xics_init_IRQ: couldn't get 8259 cascade\n");
i8259_init();
}
return 0;
/* IPIs are marked SA_INTERRUPT as they must run with irqs disabled */
request_irq(irq_offset_up(XICS_IPI), xics_ipi_action, SA_INTERRUPT,
- "IPI", NULL);
+ "IPI", 0);
get_irq_desc(irq_offset_up(XICS_IPI))->status |= IRQ_PER_CPU;
}
#endif
irq, newmask, xics_status[1]);
if (status) {
- printk(KERN_ERR "xics_set_affinity: irq=%d ibm,set-xive "
+ printk(KERN_ERR "xics_set_affinity irq=%d ibm,set-xive "
"returns %d\n", irq, status);
return;
}
int set_indicator = rtas_token("set-indicator");
const unsigned int giqs = 9005UL; /* Global Interrupt Queue Server */
int status = 0;
- unsigned int irq, virq, cpu = smp_processor_id();
+ unsigned int irq, cpu = smp_processor_id();
+ int xics_status[2];
+ unsigned long flags;
BUG_ON(set_indicator == RTAS_UNKNOWN_SERVICE);
ops->cppr_info(cpu, DEFAULT_PRIORITY);
iosync();
- for_each_irq(virq) {
- irq_desc_t *desc;
- int xics_status[2];
- unsigned long flags;
-
- /* We cant set affinity on ISA interrupts */
- if (virq < irq_offset_value())
- continue;
-
- desc = get_irq_desc(virq);
- irq = virt_irq_to_real(irq_offset_down(virq));
+ printk(KERN_WARNING "HOTPLUG: Migrating IRQs away\n");
+ for_each_irq(irq) {
+ irq_desc_t *desc = get_irq_desc(irq);
/* We need to get IPIs still. */
- if (irq == XICS_IPI || irq == NO_IRQ)
+ if (irq_offset_down(irq) == XICS_IPI)
continue;
/* We only need to migrate enabled IRQS */
if (status) {
printk(KERN_ERR "migrate_irqs_away: irq=%d "
"ibm,get-xive returns %d\n",
- virq, status);
+ irq, status);
goto unlock;
}
goto unlock;
printk(KERN_WARNING "IRQ %d affinity broken off cpu %u\n",
- virq, cpu);
+ irq, cpu);
/* Reset affinity to all cpus */
xics_status[0] = default_distrib_server;
- status = rtas_call(ibm_set_xive, 3, 1, NULL, irq,
- xics_status[0], xics_status[1]);
+ status = rtas_call(ibm_set_xive, 3, 1, NULL,
+ irq, xics_status[0], xics_status[1]);
if (status)
- printk(KERN_ERR "migrate_irqs_away: irq=%d "
+ printk(KERN_ERR "migrate_irqs_away irq=%d "
"ibm,set-xive returns %d\n",
- virq, status);
+ irq, status);
unlock:
spin_unlock_irqrestore(&desc->lock, flags);
}
+
}
#endif
EXTRA_CFLAGS += -mno-minimal-toc
-obj-y := fault.o init.o imalloc.o hash_utils.o hash_low.o tlb.o slb_low.o slb.o mmap.o
+obj-y := fault.o init.o imalloc.o hash_utils.o hash_low.o tlb.o
obj-$(CONFIG_DISCONTIGMEM) += numa.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
unsigned long is_write = error_code & 0x02000000;
unsigned long trap = TRAP(regs);
- BUG_ON((trap == 0x380) || (trap == 0x480));
-
- if (trap == 0x300) {
+ if (trap == 0x300 || trap == 0x380) {
if (debugger_fault_handler(regs))
return 0;
}
/* On a kernel SLB miss we can only check for a valid exception entry */
- if (!user_mode(regs) && (address >= TASK_SIZE))
+ if (!user_mode(regs) && (trap == 0x380 || address >= TASK_SIZE))
return SIGSEGV;
if (error_code & 0x00400000) {
struct mm_struct *mm;
pte_t *ptep;
int ret;
+ int cpu;
int user_region = 0;
int local = 0;
cpumask_t tmp;
if (pgdir == NULL)
return 1;
- tmp = cpumask_of_cpu(smp_processor_id());
+ cpu = get_cpu();
+ tmp = cpumask_of_cpu(cpu);
if (user_region && cpus_equal(mm->cpu_vm_mask, tmp))
local = 1;
ret = hash_huge_page(mm, access, ea, vsid, local);
else {
ptep = find_linux_pte(pgdir, ea);
- if (ptep == NULL)
+ if (ptep == NULL) {
+ put_cpu();
return 1;
+ }
ret = __hash_page(ea, access, vsid, ptep, trap, local);
}
+ put_cpu();
return ret;
}
boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);
- max_pfn = max_low_pfn;
-
/* add all physical memory to the bootmem map. Also find the first */
for (i=0; i < lmb.memory.cnt; i++) {
unsigned long physbase, size;
num_physpages = max_low_pfn; /* RAM is assumed contiguous */
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
+ max_pfn = max_low_pfn;
#ifdef CONFIG_DISCONTIGMEM
{
totalram_pages += free_all_bootmem();
- for (addr = KERNELBASE; addr < (unsigned long)__va(lmb_end_of_DRAM());
+ for (addr = KERNELBASE; addr <= (unsigned long)__va(lmb_end_of_DRAM());
addr += PAGE_SIZE) {
if (!PageReserved(virt_to_page(addr)))
continue;
void *pgdir;
pte_t *ptep;
int local = 0;
+ int cpu;
cpumask_t tmp;
- unsigned long flags;
/* handle i-cache coherency */
if (!(cur_cpu_spec->cpu_features & CPU_FTR_COHERENT_ICACHE) &&
vsid = get_vsid(vma->vm_mm->context.id, ea);
- local_irq_save(flags);
- tmp = cpumask_of_cpu(smp_processor_id());
+ cpu = get_cpu();
+ tmp = cpumask_of_cpu(cpu);
if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp))
local = 1;
__hash_page(ea, pte_val(pte) & (_PAGE_USER|_PAGE_RW), vsid, ptep,
0x300, local);
- local_irq_restore(flags);
+ put_cpu();
}
void * reserve_phb_iospace(unsigned long size)
+++ /dev/null
-/*
- * linux/arch/ppc64/mm/mmap.c
- *
- * flexible mmap layout support
- *
- * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- *
- * Started by Ingo Molnar <mingo@elte.hu>
- */
-
-#include <linux/personality.h>
-#include <linux/mm.h>
-
-/*
- * Top of mmap area (just below the process stack).
- *
- * Leave an at least ~128 MB hole.
- */
-#define MIN_GAP (128*1024*1024)
-#define MAX_GAP (TASK_SIZE/6*5)
-
-static inline unsigned long mmap_base(void)
-{
- unsigned long gap = current->rlim[RLIMIT_STACK].rlim_cur;
-
- if (gap < MIN_GAP)
- gap = MIN_GAP;
- else if (gap > MAX_GAP)
- gap = MAX_GAP;
-
- return TASK_SIZE - (gap & PAGE_MASK);
-}
-
-static inline int mmap_is_legacy(void)
-{
- /*
- * Force standard allocation for 64 bit programs.
- */
- if (!test_thread_flag(TIF_32BIT))
- return 1;
-
- if (current->personality & ADDR_COMPAT_LAYOUT)
- return 1;
-
- if (current->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY)
- return 1;
-
- return sysctl_legacy_va_layout;
-}
-
-/*
- * This function, called very early during the creation of a new
- * process VM image, sets up which VM layout function to use:
- */
-void arch_pick_mmap_layout(struct mm_struct *mm)
-{
- /*
- * Fall back to the standard layout if the personality
- * bit is set, or if the expected stack growth is unlimited:
- */
- if (mmap_is_legacy()) {
- mm->mmap_base = TASK_UNMAPPED_BASE;
- mm->get_unmapped_area = arch_get_unmapped_area;
- mm->unmap_area = arch_unmap_area;
- } else {
- mm->mmap_base = mmap_base();
- mm->get_unmapped_area = arch_get_unmapped_area_topdown;
- mm->get_unmapped_exec_area = arch_get_unmapped_exec_area;
- mm->unmap_area = arch_unmap_area_topdown;
- }
-}
min_low_pfn = 0;
max_low_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
- max_pfn = max_low_pfn;
if (parse_numa_properties())
setup_nonnuma();
+++ /dev/null
-/*
- * PowerPC64 SLB support.
- *
- * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
- * Based on earlier code writteh by:
- * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
- * Copyright (c) 2001 Dave Engebretsen
- * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
- *
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/config.h>
-#include <asm/pgtable.h>
-#include <asm/mmu.h>
-#include <asm/mmu_context.h>
-#include <asm/paca.h>
-#include <asm/naca.h>
-#include <asm/cputable.h>
-
-extern void slb_allocate(unsigned long ea);
-
-static inline void create_slbe(unsigned long ea, unsigned long vsid,
- unsigned long flags, unsigned long entry)
-{
- ea = (ea & ESID_MASK) | SLB_ESID_V | entry;
- vsid = (vsid << SLB_VSID_SHIFT) | flags;
- asm volatile("slbmte %0,%1" :
- : "r" (vsid), "r" (ea)
- : "memory" );
-}
-
-static void slb_add_bolted(void)
-{
-#ifndef CONFIG_PPC_ISERIES
- WARN_ON(!irqs_disabled());
-
- /* If you change this make sure you change SLB_NUM_BOLTED
- * appropriately too */
-
- /* Slot 1 - first VMALLOC segment
- * Since modules end up there it gets hit very heavily.
- */
- create_slbe(VMALLOCBASE, get_kernel_vsid(VMALLOCBASE),
- SLB_VSID_KERNEL, 1);
-
- asm volatile("isync":::"memory");
-#endif
-}
-
-/* Flush all user entries from the segment table of the current processor. */
-void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
-{
- unsigned long offset = get_paca()->slb_cache_ptr;
- unsigned long esid_data;
- unsigned long pc = KSTK_EIP(tsk);
- unsigned long stack = KSTK_ESP(tsk);
- unsigned long unmapped_base;
-
- if (offset <= SLB_CACHE_ENTRIES) {
- int i;
- asm volatile("isync" : : : "memory");
- for (i = 0; i < offset; i++) {
- esid_data = (unsigned long)get_paca()->slb_cache[i]
- << SID_SHIFT;
- asm volatile("slbie %0" : : "r" (esid_data));
- }
- asm volatile("isync" : : : "memory");
- } else {
- asm volatile("isync; slbia; isync" : : : "memory");
- slb_add_bolted();
- }
-
- /* Workaround POWER5 < DD2.1 issue */
- if (offset == 1 || offset > SLB_CACHE_ENTRIES) {
- /* flush segment in EEH region, we shouldn't ever
- * access addresses in this region. */
- asm volatile("slbie %0" : : "r"(EEHREGIONBASE));
- }
-
- get_paca()->slb_cache_ptr = 0;
- get_paca()->context = mm->context;
-
- /*
- * preload some userspace segments into the SLB.
- */
- if (test_tsk_thread_flag(tsk, TIF_32BIT))
- unmapped_base = TASK_UNMAPPED_BASE_USER32;
- else
- unmapped_base = TASK_UNMAPPED_BASE_USER64;
-
- if (pc >= KERNELBASE)
- return;
- slb_allocate(pc);
-
- if (GET_ESID(pc) == GET_ESID(stack))
- return;
-
- if (stack >= KERNELBASE)
- return;
- slb_allocate(stack);
-
- if ((GET_ESID(pc) == GET_ESID(unmapped_base))
- || (GET_ESID(stack) == GET_ESID(unmapped_base)))
- return;
-
- if (unmapped_base >= KERNELBASE)
- return;
- slb_allocate(unmapped_base);
-}
-
-void slb_initialize(void)
-{
-#ifdef CONFIG_PPC_ISERIES
- asm volatile("isync; slbia; isync":::"memory");
-#else
- unsigned long flags = SLB_VSID_KERNEL;
-
- /* Invalidate the entire SLB (even slot 0) & all the ERATS */
- if (cur_cpu_spec->cpu_features & CPU_FTR_16M_PAGE)
- flags |= SLB_VSID_L;
-
- asm volatile("isync":::"memory");
- asm volatile("slbmte %0,%0"::"r" (0) : "memory");
- asm volatile("isync; slbia; isync":::"memory");
- create_slbe(KERNELBASE, get_kernel_vsid(KERNELBASE),
- flags, 0);
-
-#endif
- slb_add_bolted();
- get_paca()->stab_rr = SLB_NUM_BOLTED;
-}
+++ /dev/null
-/*
- * arch/ppc64/mm/slb_low.S
- *
- * Low-level SLB routines
- *
- * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
- *
- * Based on earlier C version:
- * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
- * Copyright (c) 2001 Dave Engebretsen
- * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/config.h>
-#include <asm/processor.h>
-#include <asm/page.h>
-#include <asm/mmu.h>
-#include <asm/ppc_asm.h>
-#include <asm/offsets.h>
-#include <asm/cputable.h>
-
-/* void slb_allocate(unsigned long ea);
- *
- * Create an SLB entry for the given EA (user or kernel).
- * r3 = faulting address, r13 = PACA
- * r9, r10, r11 are clobbered by this function
- * No other registers are examined or changed.
- */
-_GLOBAL(slb_allocate)
- /*
- * First find a slot, round robin. Previously we tried to find
- * a free slot first but that took too long. Unfortunately we
- * dont have any LRU information to help us choose a slot.
- */
- ld r10,PACASTABRR(r13)
-3:
- addi r10,r10,1
- /* use a cpu feature mask if we ever change our slb size */
- cmpldi r10,SLB_NUM_ENTRIES
-
- blt+ 4f
- li r10,SLB_NUM_BOLTED
-
- /*
- * Never cast out the segment for our kernel stack. Since we
- * dont invalidate the ERAT we could have a valid translation
- * for the kernel stack during the first part of exception exit
- * which gets invalidated due to a tlbie from another cpu at a
- * non recoverable point (after setting srr0/1) - Anton
- */
-4: slbmfee r11,r10
- srdi r11,r11,27
- /*
- * Use paca->ksave as the value of the kernel stack pointer,
- * because this is valid at all times.
- * The >> 27 (rather than >> 28) is so that the LSB is the
- * valid bit - this way we check valid and ESID in one compare.
- * In order to completely close the tiny race in the context
- * switch (between updating r1 and updating paca->ksave),
- * we check against both r1 and paca->ksave.
- */
- srdi r9,r1,27
- ori r9,r9,1 /* mangle SP for later compare */
- cmpd r11,r9
- beq- 3b
- ld r9,PACAKSAVE(r13)
- srdi r9,r9,27
- ori r9,r9,1
- cmpd r11,r9
- beq- 3b
-
- std r10,PACASTABRR(r13)
-
- /* r3 = faulting address, r10 = entry */
-
- srdi r9,r3,60 /* get region */
- srdi r3,r3,28 /* get esid */
- cmpldi cr7,r9,0xc /* cmp KERNELBASE for later use */
-
- /* r9 = region, r3 = esid, cr7 = <>KERNELBASE */
-
- rldicr. r11,r3,32,16
- bne- 8f /* invalid ea bits set */
- addi r11,r9,-1
- cmpldi r11,0xb
- blt- 8f /* invalid region */
-
- /* r9 = region, r3 = esid, r10 = entry, cr7 = <>KERNELBASE */
-
- blt cr7,0f /* user or kernel? */
-
- /* kernel address */
- li r11,SLB_VSID_KERNEL
-BEGIN_FTR_SECTION
- bne cr7,9f
- li r11,(SLB_VSID_KERNEL|SLB_VSID_L)
-END_FTR_SECTION_IFSET(CPU_FTR_16M_PAGE)
- b 9f
-
-0: /* user address */
- li r11,SLB_VSID_USER
-#ifdef CONFIG_HUGETLB_PAGE
-BEGIN_FTR_SECTION
- /* check against the hugepage ranges */
- cmpldi r3,(TASK_HPAGE_END>>SID_SHIFT)
- bge 6f /* >= TASK_HPAGE_END */
- cmpldi r3,(TASK_HPAGE_BASE>>SID_SHIFT)
- bge 5f /* TASK_HPAGE_BASE..TASK_HPAGE_END */
- cmpldi r3,16
- bge 6f /* 4GB..TASK_HPAGE_BASE */
-
- lhz r9,PACAHTLBSEGS(r13)
- srd r9,r9,r3
- andi. r9,r9,1
- beq 6f
-
-5: /* this is a hugepage user address */
- li r11,(SLB_VSID_USER|SLB_VSID_L)
-END_FTR_SECTION_IFSET(CPU_FTR_16M_PAGE)
-#endif /* CONFIG_HUGETLB_PAGE */
-
-6: ld r9,PACACONTEXTID(r13)
-
-9: /* r9 = "context", r3 = esid, r11 = flags, r10 = entry */
-
- rldimi r9,r3,15,0 /* r9= VSID ordinal */
-
-7: rldimi r10,r3,28,0 /* r10= ESID<<28 | entry */
- oris r10,r10,SLB_ESID_V@h /* r10 |= SLB_ESID_V */
-
- /* r9 = ordinal, r3 = esid, r11 = flags, r10 = esid_data */
-
- li r3,VSID_RANDOMIZER@higher
- sldi r3,r3,32
- oris r3,r3,VSID_RANDOMIZER@h
- ori r3,r3,VSID_RANDOMIZER@l
-
- mulld r9,r3,r9 /* r9 = ordinal * VSID_RANDOMIZER */
- clrldi r9,r9,28 /* r9 &= VSID_MASK */
- sldi r9,r9,SLB_VSID_SHIFT /* r9 <<= SLB_VSID_SHIFT */
- or r9,r9,r11 /* r9 |= flags */
-
- /* r9 = vsid_data, r10 = esid_data, cr7 = <>KERNELBASE */
-
- /*
- * No need for an isync before or after this slbmte. The exception
- * we enter with and the rfid we exit with are context synchronizing.
- */
- slbmte r9,r10
-
- bgelr cr7 /* we're done for kernel addresses */
-
- /* Update the slb cache */
- lhz r3,PACASLBCACHEPTR(r13) /* offset = paca->slb_cache_ptr */
- cmpldi r3,SLB_CACHE_ENTRIES
- bge 1f
-
- /* still room in the slb cache */
- sldi r11,r3,1 /* r11 = offset * sizeof(u16) */
- rldicl r10,r10,36,28 /* get low 16 bits of the ESID */
- add r11,r11,r13 /* r11 = (u16 *)paca + offset */
- sth r10,PACASLBCACHE(r11) /* paca->slb_cache[offset] = esid */
- addi r3,r3,1 /* offset++ */
- b 2f
-1: /* offset >= SLB_CACHE_ENTRIES */
- li r3,SLB_CACHE_ENTRIES+1
-2:
- sth r3,PACASLBCACHEPTR(r13) /* paca->slb_cache_ptr = offset */
- blr
-
-8: /* invalid EA */
- li r9,0 /* 0 VSID ordinal -> BAD_VSID */
- li r11,SLB_VSID_USER /* flags don't much matter */
- b 7b
If unsure, say N.
-config QDIO_DEBUG
- bool "Extended debugging information"
- depends on QDIO
- help
- Say Y here to get extended debugging output in /proc/s390dbf/qdio...
- Warning: this option reduces the performance of the QDIO module.
-
- If unsure, say N.
-
comment "Misc"
config PREEMPT
#include <linux/errno.h>
#include <asm/uaccess.h>
#include <asm/io.h>
-#include <asm/smp.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/page-flags.h>
*/
static const char appldata_proc_name[APPLDATA_PROC_NAME_LENGTH] = "appldata";
static int appldata_timer_handler(ctl_table *ctl, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos);
+ void __user *buffer, size_t *lenp);
static int appldata_interval_handler(ctl_table *ctl, int write,
struct file *filp,
void __user *buffer,
- size_t *lenp, loff_t *ppos);
+ size_t *lenp);
static struct ctl_table_header *appldata_sysctl_header;
static struct ctl_table appldata_table[] = {
*/
static int
appldata_timer_handler(ctl_table *ctl, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
int len;
char buf[2];
- if (!*lenp || *ppos) {
+ if (!*lenp || filp->f_pos) {
*lenp = 0;
return 0;
}
spin_unlock(&appldata_timer_lock);
out:
*lenp = len;
- *ppos += len;
+ filp->f_pos += len;
return 0;
}
*/
static int
appldata_interval_handler(ctl_table *ctl, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
int len, interval;
char buf[16];
- if (!*lenp || *ppos) {
+ if (!*lenp || filp->f_pos) {
*lenp = 0;
return 0;
}
interval);
out:
*lenp = len;
- *ppos += len;
+ filp->f_pos += len;
return 0;
}
*/
static int
appldata_generic_handler(ctl_table *ctl, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
struct appldata_ops *ops = NULL, *tmp_ops;
int rc, len, found;
}
spin_unlock_bh(&appldata_ops_lock);
- if (!*lenp || *ppos) {
+ if (!*lenp || filp->f_pos) {
*lenp = 0;
module_put(ops->owner);
return 0;
spin_unlock_bh(&appldata_ops_lock);
out:
*lenp = len;
- *ppos += len;
+ filp->f_pos += len;
module_put(ops->owner);
return 0;
}
install: $(CONFIGURE) $(obj)/image
sh -x $(obj)/install.sh $(KERNELRELEASE) $(obj)/image \
- System.map init/kerntypes.o "$(INSTALL_PATH)"
+ System.map Kerntypes "$(INSTALL_PATH)"
# $1 - kernel version
# $2 - kernel image file
# $3 - kernel map file
-# $4 - kernel type file
-# $5 - default install path (blank if root directory)
+# $4 - default install path (blank if root directory)
#
# User may have a custom install script
# Default install - same as make zlilo
-if [ -f $5/vmlinuz ]; then
- mv $5/vmlinuz $5/vmlinuz.old
+if [ -f $4/vmlinuz ]; then
+ mv $4/vmlinuz $4/vmlinuz.old
fi
-if [ -f $5/System.map ]; then
- mv $5/System.map $5/System.old
+if [ -f $4/System.map ]; then
+ mv $4/System.map $4/System.old
fi
-if [ -f $5/Kerntypes ]; then
- mv $5/Kerntypes $5/Kerntypes.old
-fi
-
-cat $2 > $5/vmlinuz
-cp $3 $5/System.map
-
-# copy the kernel type file if it exists
-if [ -f $4 ]; then
- cp $4 $5/Kerntypes
-fi
+cat $2 > $4/vmlinuz
+cp $3 $4/System.map
#
CONFIG_EXPERIMENTAL=y
CONFIG_CLEAN_COMPILE=y
+CONFIG_STANDALONE=y
#
# General setup
CONFIG_MACHCHK_WARNING=y
CONFIG_QDIO=y
# CONFIG_QDIO_PERF_STATS is not set
-# CONFIG_QDIO_DEBUG is not set
#
# Misc
#
# Generic Driver Options
#
-CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
# CONFIG_FW_LOADER is not set
# CONFIG_DEBUG_DRIVER is not set
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# QoS and/or fair queueing
#
CONFIG_NET_SCHED=y
-CONFIG_NET_SCH_CLK_JIFFIES=y
-# CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set
-# CONFIG_NET_SCH_CLK_CPU is not set
CONFIG_NET_SCH_CBQ=m
# CONFIG_NET_SCH_HTB is not set
# CONFIG_NET_SCH_HFSC is not set
+CONFIG_NET_SCH_CSZ=m
CONFIG_NET_SCH_PRIO=m
CONFIG_NET_SCH_RED=m
CONFIG_NET_SCH_SFQ=m
CONFIG_NET_SCH_TBF=m
CONFIG_NET_SCH_GRED=m
CONFIG_NET_SCH_DSMARK=m
-# CONFIG_NET_SCH_NETEM is not set
+# CONFIG_NET_SCH_DELAY is not set
# CONFIG_NET_SCH_INGRESS is not set
CONFIG_NET_QOS=y
CONFIG_NET_ESTIMATOR=y
#
# DOS/FAT/NT Filesystems
#
-# CONFIG_MSDOS_FS is not set
-# CONFIG_VFAT_FS is not set
+# CONFIG_FAT_FS is not set
# CONFIG_NTFS_FS is not set
#
# CONFIG_BEFS_FS is not set
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
-# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
# CONFIG_CRAMFS is not set
# CONFIG_VXFS_FS is not set
# CONFIG_HPFS_FS is not set
# CONFIG_CRYPTO_BLOWFISH is not set
# CONFIG_CRYPTO_TWOFISH is not set
# CONFIG_CRYPTO_SERPENT is not set
-# CONFIG_CRYPTO_AES_GENERIC is not set
+# CONFIG_CRYPTO_AES is not set
# CONFIG_CRYPTO_CAST5 is not set
# CONFIG_CRYPTO_CAST6 is not set
# CONFIG_CRYPTO_TEA is not set
#
# Library routines
#
-# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
# CONFIG_CRC32 is not set
# CONFIG_LIBCRC32C is not set
} _sigev_un;
};
-extern int copy_siginfo_to_user32(siginfo_t32 __user *to, siginfo_t *from);
-extern int copy_siginfo_from_user32(siginfo_t *to, siginfo_t32 __user *from);
-
#endif /* _ASM_S390X_S390_H */
return err;
}
-int copy_siginfo_from_user32(siginfo_t *to, siginfo_t32 __user *from)
-{
- int err;
- u32 tmp;
-
- if (!access_ok (VERIFY_READ, from, sizeof(siginfo_t32)))
- return -EFAULT;
-
- err = __get_user(to->si_signo, &from->si_signo);
- err |= __get_user(to->si_errno, &from->si_errno);
- err |= __get_user(to->si_code, &from->si_code);
-
- if (from->si_code < 0)
- err |= __copy_from_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE);
- else {
- switch (from->si_code >> 16) {
- case __SI_RT >> 16: /* This is not generated by the kernel as of now. */
- case __SI_MESGQ >> 16:
- err |= __get_user(to->si_int, &from->si_int);
- /* fallthrough */
- case __SI_KILL >> 16:
- err |= __get_user(to->si_pid, &from->si_pid);
- err |= __get_user(to->si_uid, &from->si_uid);
- break;
- case __SI_CHLD >> 16:
- err |= __get_user(to->si_pid, &from->si_pid);
- err |= __get_user(to->si_uid, &from->si_uid);
- err |= __get_user(to->si_utime, &from->si_utime);
- err |= __get_user(to->si_stime, &from->si_stime);
- err |= __get_user(to->si_status, &from->si_status);
- break;
- case __SI_FAULT >> 16:
- err |= __get_user(tmp, &from->si_addr);
- to->si_addr = (void *)(u64) (tmp & PSW32_ADDR_INSN);
- break;
- case __SI_POLL >> 16:
- case __SI_TIMER >> 16:
- err |= __get_user(to->si_band, &from->si_band);
- err |= __get_user(to->si_fd, &from->si_fd);
- break;
- default:
- break;
- }
- }
- return err;
-}
-
/*
* Atomically swap in the new signal mask, and wait for a signal.
*/
* R15 - kernel stack pointer
*/
- .macro SAVE_ALL_BASE savearea
- stm %r12,%r15,\savearea
- l %r13,__LC_SVC_NEW_PSW+4 # load &system_call to %r13
+ .macro SAVE_ALL_BASE psworg,savearea,sync
+ stm %r12,%r15,\savearea
+ l %r13,__LC_SVC_NEW_PSW+4 # load &system_call to %r13
.endm
- .macro SAVE_ALL psworg,savearea,sync
- la %r12,\psworg
+ .macro CLEANUP_SAVE_ALL_BASE psworg,savearea,sync
+ l %r1,SP_PSW+4(%r15)
+ cli 1(%r1),0xcf
+ bne BASED(0f)
+ mvc \savearea(16),SP_R12(%r15)
+0: st %r13,SP_R13(%r15)
+ .endm
+
+ .macro SAVE_ALL psworg,savearea,sync
.if \sync
- tm \psworg+1,0x01 # test problem state bit
- bz BASED(2f) # skip stack setup save
- l %r15,__LC_KERNEL_STACK # problem state -> load ksp
+ tm \psworg+1,0x01 # test problem state bit
+ bz BASED(1f) # skip stack setup save
+ l %r15,__LC_KERNEL_STACK # problem state -> load ksp
.else
- tm \psworg+1,0x01 # test problem state bit
- bnz BASED(1f) # from user -> load async stack
- clc \psworg+4(4),BASED(.Lcritical_end)
- bhe BASED(0f)
- clc \psworg+4(4),BASED(.Lcritical_start)
- bl BASED(0f)
- l %r14,BASED(.Lcleanup_critical)
- basr %r14,%r14
- tm 0(%r12),0x01 # retest problem state after cleanup
- bnz BASED(1f)
-0: l %r14,__LC_ASYNC_STACK # are we already on the async stack ?
- slr %r14,%r15
+ tm \psworg+1,0x01 # test problem state bit
+ bnz BASED(0f) # from user -> load async stack
+ l %r14,__LC_ASYNC_STACK # are we already on the async stack ?
+ slr %r14,%r15
sra %r14,13
- be BASED(2f)
-1: l %r15,__LC_ASYNC_STACK
+ be BASED(1f)
+0: l %r15,__LC_ASYNC_STACK
.endif
-2: s %r15,BASED(.Lc_spsize) # make room for registers & psw
- mvc SP_PSW(8,%r15),0(%r12) # move user PSW to stack
- la %r12,\psworg
- st %r2,SP_ORIG_R2(%r15) # store original content of gpr 2
- icm %r12,12,__LC_SVC_ILC
- stm %r0,%r11,SP_R0(%r15) # store gprs %r0-%r11 to kernel stack
- st %r12,SP_ILC(%r15)
- mvc SP_R12(16,%r15),\savearea # move %r12-%r15 to stack
- la %r12,0
- st %r12,0(%r15) # clear back chain
+1: s %r15,BASED(.Lc_spsize) # make room for registers & psw
+ l %r14,BASED(.L\psworg)
+ slr %r12,%r12
+ icm %r14,12,__LC_SVC_ILC
+ stm %r0,%r11,SP_R0(%r15) # store gprs 0-12 to kernel stack
+ st %r2,SP_ORIG_R2(%r15) # store original content of gpr 2
+ mvc SP_R12(16,%r15),\savearea # move R13-R15 to stack
+ mvc SP_PSW(8,%r15),\psworg # move user PSW to stack
+ st %r14,SP_ILC(%r15)
+ st %r12,0(%r15) # clear back chain
+ .endm
+
+ .macro CLEANUP_SAVE_ALL psworg,savearea,sync
+ l %r1,\savearea+12
+ .if \sync
+ tm \psworg+1,0x01
+ bz BASED(1f)
+ l %r1,__LC_KERNEL_STACK
+ .else
+ tm \psworg+1,0x01
+ bnz BASED(0f)
+ l %r0,__LC_ASYNC_STACK
+ slr %r0,%r1
+ sra %r0,13
+ bz BASED(1f)
+0: l %r1,__LC_ASYNC_STACK
+ .endif
+1: s %r1,BASED(.Lc_spsize)
+ st %r1,SP_R15(%r15)
+ l %r0,BASED(.L\psworg)
+ xc SP_R12(4,%r15),SP_R12(%r15)
+ icm %r0,12,__LC_SVC_ILC
+ st %r0,SP_R14(%r15)
+ mvc SP_R0(48,%r1),SP_R0(%r15)
+ mvc SP_ORIG_R2(4,%r1),SP_R2(%r15)
+ mvc SP_R12(16,%r1),\savearea
+ mvc SP_PSW(8,%r1),\psworg
+ st %r0,SP_ILC(%r1)
+ xc 0(4,%r1),0(%r1)
.endm
- .macro RESTORE_ALL sync
- mvc __LC_RETURN_PSW(8),SP_PSW(%r15) # move user PSW to lowcore
- .if !\sync
- ni __LC_RETURN_PSW+1,0xfd # clear wait state bit
- .endif
- lm %r0,%r15,SP_R0(%r15) # load gprs 0-15 of user
- lpsw __LC_RETURN_PSW # back to caller
+ .macro RESTORE_ALL # system exit macro
+ mvc __LC_RETURN_PSW(8),SP_PSW(%r15) # move user PSW to lowcore
+ ni __LC_RETURN_PSW+1,0xfd # clear wait state bit
+ lm %r0,%r15,SP_R0(%r15) # load gprs 0-15 of user
+ lpsw __LC_RETURN_PSW # back to caller
+ .endm
+
+ .macro CLEANUP_RESTORE_ALL
+ l %r1,SP_PSW+4(%r15)
+ cli 0(%r1),0x82
+ bne BASED(0f)
+ mvc SP_PSW(8,%r15),__LC_RETURN_PSW
+ b BASED(1f)
+0: l %r1,SP_R15(%r15)
+ mvc SP_PSW(8,%r15),SP_PSW(%r1)
+ mvc SP_R0(64,%r15),SP_R0(%r1)
+1:
+ .endm
+
+ .macro GET_THREAD_INFO
+ l %r9,__LC_THREAD_INFO
+ .endm
+
+ .macro CHECK_CRITICAL
+ tm SP_PSW+1(%r15),0x01 # test problem state bit
+ bnz BASED(0f) # from user -> not critical
+ clc SP_PSW+4(4,%r15),BASED(.Lcritical_end)
+ bnl BASED(0f)
+ clc SP_PSW+4(4,%r15),BASED(.Lcritical_start)
+ bl BASED(0f)
+ l %r1,BASED(.Lcleanup_critical)
+ basr %r14,%r1
+0:
.endm
/*
.globl system_call
system_call:
- SAVE_ALL_BASE __LC_SAVE_AREA
+ SAVE_ALL_BASE __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
lh %r7,0x8a # get svc number from lowcore
+sysc_enter:
+ GET_THREAD_INFO # load pointer to task_struct to R9
sysc_do_svc:
- l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
sla %r7,2 # *4 and test for svc 0
bnz BASED(sysc_nr_ok) # svc number > 0
# svc 0: system call number in %r1
tm __TI_flags+3(%r9),_TIF_WORK_SVC
bnz BASED(sysc_work) # there is work to do (signals etc.)
sysc_leave:
- RESTORE_ALL 1
+ RESTORE_ALL
#
# recheck if there is more work to do
#
sysc_work_loop:
+ GET_THREAD_INFO # load pointer to task_struct to R9
tm __TI_flags+3(%r9),_TIF_WORK_SVC
bz BASED(sysc_leave) # there is no work to do
#
.globl ret_from_fork
ret_from_fork:
l %r13,__LC_SVC_NEW_PSW+4
- l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
+ GET_THREAD_INFO # load pointer to task_struct to R9
l %r1,BASED(.Lschedtail)
basr %r14,%r1
stosm 24(%r15),0x03 # reenable interrupts
* we just ignore the PER event (FIXME: is there anything we have to do
* for LPSW?).
*/
- SAVE_ALL_BASE __LC_SAVE_AREA
+ SAVE_ALL_BASE __LC_PGM_OLD_PSW,__LC_SAVE_AREA,1
tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
bnz BASED(pgm_per) # got per exception -> special case
SAVE_ALL __LC_PGM_OLD_PSW,__LC_SAVE_AREA,1
- l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
l %r3,__LC_PGM_ILC # load program interruption code
la %r8,0x7f
nr %r8,%r3
pgm_do_call:
l %r7,BASED(.Ljump_table)
sll %r8,2
+ GET_THREAD_INFO
l %r7,0(%r8,%r7) # load address of handler routine
la %r2,SP_PTREGS(%r15) # address of register-save area
la %r14,BASED(sysc_return)
clc __LC_PGM_OLD_PSW(8),__LC_SVC_NEW_PSW
be BASED(pgm_svcper)
# no interesting special case, ignore PER event
- lm %r12,%r15,__LC_SAVE_AREA
+ lm %r13,%r15,__LC_SAVE_AREA
lpsw 0x28
#
#
pgm_per_std:
SAVE_ALL __LC_PGM_OLD_PSW,__LC_SAVE_AREA,1
- l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
+ GET_THREAD_INFO
l %r1,__TI_task(%r9)
mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
mvc __THREAD_per+__PER_address(4,%r1),__LC_PER_ADDRESS
pgm_svcper:
SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
lh %r7,0x8a # get svc number from lowcore
- l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
+ GET_THREAD_INFO # load pointer to task_struct to R9
l %r1,__TI_task(%r9)
mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
mvc __THREAD_per+__PER_address(4,%r1),__LC_PER_ADDRESS
.globl io_int_handler
io_int_handler:
- stck __LC_INT_CLOCK
- SAVE_ALL_BASE __LC_SAVE_AREA+16
+ SAVE_ALL_BASE __LC_IO_OLD_PSW,__LC_SAVE_AREA+16,0
SAVE_ALL __LC_IO_OLD_PSW,__LC_SAVE_AREA+16,0
- l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
+ stck __LC_INT_CLOCK
+ CHECK_CRITICAL
+ GET_THREAD_INFO # load pointer to task_struct to R9
l %r1,BASED(.Ldo_IRQ) # load address of do_IRQ
la %r2,SP_PTREGS(%r15) # address of register-save area
basr %r14,%r1 # branch to standard irq handler
tm __TI_flags+3(%r9),_TIF_WORK_INT
bnz BASED(io_work) # there is work to do (signals etc.)
io_leave:
- RESTORE_ALL 0
+ RESTORE_ALL
#ifdef CONFIG_PREEMPT
io_preempt:
l %r1,BASED(.Lschedule)
basr %r14,%r1 # call schedule
stnsm 24(%r15),0xfc # disable I/O and ext. interrupts
+ GET_THREAD_INFO # load pointer to task_struct to R9
xc __TI_precount(4,%r9),__TI_precount(%r9)
b BASED(io_resume_loop)
#endif
stosm 24(%r15),0x03 # reenable interrupts
basr %r14,%r1 # call scheduler
stnsm 24(%r15),0xfc # disable I/O and ext. interrupts
+ GET_THREAD_INFO # load pointer to task_struct to R9
tm __TI_flags+3(%r9),_TIF_WORK_INT
bz BASED(io_leave) # there is no work to do
b BASED(io_work_loop)
.globl ext_int_handler
ext_int_handler:
- stck __LC_INT_CLOCK
- SAVE_ALL_BASE __LC_SAVE_AREA+16
+ SAVE_ALL_BASE __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16,0
SAVE_ALL __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16,0
- l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
+ stck __LC_INT_CLOCK
+ CHECK_CRITICAL
+ GET_THREAD_INFO # load pointer to task_struct to R9
la %r2,SP_PTREGS(%r15) # address of register-save area
lh %r3,__LC_EXT_INT_CODE # get interruption code
l %r1,BASED(.Ldo_extint)
.globl mcck_int_handler
mcck_int_handler:
- SAVE_ALL_BASE __LC_SAVE_AREA+32
+ SAVE_ALL_BASE __LC_MCK_OLD_PSW,__LC_SAVE_AREA+32,0
SAVE_ALL __LC_MCK_OLD_PSW,__LC_SAVE_AREA+32,0
l %r1,BASED(.Ls390_mcck)
basr %r14,%r1 # call machine check handler
mcck_return:
- RESTORE_ALL 0
+ RESTORE_ALL
#ifdef CONFIG_SMP
/*
restart_go:
#endif
-cleanup_table_system_call:
- .long system_call + 0x80000000, sysc_do_svc + 0x80000000
-cleanup_table_sysc_return:
- .long sysc_return + 0x80000000, sysc_leave + 0x80000000
-cleanup_table_sysc_leave:
- .long sysc_leave + 0x80000000, sysc_work_loop + 0x80000000
-cleanup_table_sysc_work_loop:
- .long sysc_work_loop + 0x80000000, sysc_reschedule + 0x80000000
+cleanup_table:
+ .long system_call, sysc_enter, cleanup_sysc_enter
+ .long sysc_return, sysc_leave, cleanup_sysc_return
+ .long sysc_leave, sysc_work_loop, cleanup_sysc_leave
+ .long sysc_work_loop, sysc_reschedule, cleanup_sysc_return
+cleanup_table_entries=(.-cleanup_table) / 12
cleanup_critical:
- clc 4(4,%r12),BASED(cleanup_table_system_call)
- bl BASED(0f)
- clc 4(4,%r12),BASED(cleanup_table_system_call+4)
- bl BASED(cleanup_system_call)
-0:
- clc 4(4,%r12),BASED(cleanup_table_sysc_return)
- bl BASED(0f)
- clc 4(4,%r12),BASED(cleanup_table_sysc_return+4)
- bl BASED(cleanup_sysc_return)
-0:
- clc 4(4,%r12),BASED(cleanup_table_sysc_leave)
- bl BASED(0f)
- clc 4(4,%r12),BASED(cleanup_table_sysc_leave+4)
- bl BASED(cleanup_sysc_leave)
-0:
- clc 4(4,%r12),BASED(cleanup_table_sysc_work_loop)
- bl BASED(0f)
- clc 4(4,%r12),BASED(cleanup_table_sysc_work_loop+4)
- bl BASED(cleanup_sysc_leave)
-0:
+ lhi %r0,cleanup_table_entries
+ la %r1,BASED(cleanup_table)
+ l %r2,SP_PSW+4(%r15)
+ la %r2,0(%r2)
+cleanup_loop:
+ cl %r2,0(%r1)
+ bl BASED(cleanup_cont)
+ cl %r2,4(%r1)
+ bl BASED(cleanup_found)
+cleanup_cont:
+ la %r1,12(%r1)
+ bct %r0,BASED(cleanup_loop)
br %r14
+cleanup_found:
+ l %r1,8(%r1)
+ br %r1
-cleanup_system_call:
- mvc __LC_RETURN_PSW(4),0(%r12)
- clc 4(4,%r12),BASED(cleanup_table_system_call)
- bne BASED(0f)
- mvc __LC_SAVE_AREA(16),__LC_SAVE_AREA+16
-0: st %r13,__LC_SAVE_AREA+20
- SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
- st %r15,__LC_SAVE_AREA+28
- lh %r7,0x8a
- mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_system_call+4)
- la %r12,__LC_RETURN_PSW
+cleanup_sysc_enter:
+ CLEANUP_SAVE_ALL_BASE __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
+ CLEANUP_SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
+ lh %r0,0x8a
+ st %r0,SP_R7(%r15)
+ la %r1,BASED(sysc_enter)
+ o %r1,BASED(.Lamode)
+ st %r1,SP_PSW+4(%r15)
br %r14
cleanup_sysc_return:
- mvc __LC_RETURN_PSW(4),0(%r12)
- mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_sysc_return)
- la %r12,__LC_RETURN_PSW
+ la %r1,BASED(sysc_return)
+ o %r1,BASED(.Lamode)
+ st %r1,SP_PSW+4(%r15)
br %r14
cleanup_sysc_leave:
- clc 4(4,%r12),BASED(cleanup_sysc_leave_lpsw)
- be BASED(0f)
- mvc __LC_RETURN_PSW(8),SP_PSW(%r15)
- mvc __LC_SAVE_AREA+16(16),SP_R12(%r15)
- lm %r0,%r11,SP_R0(%r15)
- l %r15,SP_R15(%r15)
-0: la %r12,__LC_RETURN_PSW
+ CLEANUP_RESTORE_ALL
br %r14
-cleanup_sysc_leave_lpsw:
- .long sysc_leave + 10 + 0x80000000
/*
* Integer constants
.Lc_overhead: .long STACK_FRAME_OVERHEAD
.Lc_pactive: .long PREEMPT_ACTIVE
.Lnr_syscalls: .long NR_syscalls
-.L0x018: .short 0x018
-.L0x020: .short 0x020
-.L0x028: .short 0x028
-.L0x030: .short 0x030
-.L0x038: .short 0x038
+.L0x018: .long 0x018
+.L0x020: .long 0x020
+.L0x028: .long 0x028
+.L0x030: .long 0x030
+.L0x038: .long 0x038
+.Lamode: .long 0x80000000
/*
* Symbol constants
_TIF_RESTART_SVC | _TIF_SINGLE_STEP )
_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NEED_RESCHED)
-#define BASED(name) name-system_call(%r13)
-
/*
* Register usage in interrupt handlers:
* R9 - pointer to current task structure
* R15 - kernel stack pointer
*/
- .macro SAVE_ALL_BASE savearea
- stmg %r12,%r15,\savearea
- larl %r13,system_call
- .endm
-
.macro SAVE_ALL psworg,savearea,sync
- la %r12,\psworg
+ stmg %r13,%r15,\savearea
.if \sync
- tm \psworg+1,0x01 # test problem state bit
- jz 2f # skip stack setup save
- lg %r15,__LC_KERNEL_STACK # problem state -> load ksp
+ tm \psworg+1,0x01 # test problem state bit
+ jz 1f # skip stack setup save
+ lg %r15,__LC_KERNEL_STACK # problem state -> load ksp
.else
- tm \psworg+1,0x01 # test problem state bit
- jnz 1f # from user -> load kernel stack
- clc \psworg+8(8),BASED(.Lcritical_end)
- jhe 0f
- clc \psworg+8(8),BASED(.Lcritical_start)
- jl 0f
- brasl %r14,cleanup_critical
- tm 0(%r12),0x01 # retest problem state after cleanup
- jnz 1f
-0: lg %r14,__LC_ASYNC_STACK # are we already on the async. stack ?
+ tm \psworg+1,0x01 # test problem state bit
+ jnz 0f # from user -> load kernel stack
+ lg %r14,__LC_ASYNC_STACK # are we already on the async. stack ?
slgr %r14,%r15
srag %r14,%r14,14
- jz 2f
-1: lg %r15,__LC_ASYNC_STACK # load async stack
+ jz 1f
+0: lg %r15,__LC_ASYNC_STACK # load async stack
.endif
-2: aghi %r15,-SP_SIZE # make room for registers & psw
- mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack
- la %r12,\psworg
- stg %r2,SP_ORIG_R2(%r15) # store original content of gpr 2
- icm %r12,12,__LC_SVC_ILC
- stmg %r0,%r11,SP_R0(%r15) # store gprs %r0-%r11 to kernel stack
- st %r12,SP_ILC(%r15)
- mvc SP_R12(32,%r15),\savearea # move %r12-%r15 to stack
- la %r12,0
- stg %r12,0(%r15)
+1: aghi %r15,-SP_SIZE # make room for registers & psw
+ lghi %r14,\psworg
+ slgr %r13,%r13
+ icm %r14,12,__LC_SVC_ILC
+ stmg %r0,%r12,SP_R0(%r15) # store gprs 0-13 to kernel stack
+ stg %r2,SP_ORIG_R2(%r15) # store original content of gpr 2
+ mvc SP_R13(24,%r15),\savearea # move r13, r14 and r15 to stack
+ mvc SP_PSW(16,%r15),\psworg # move user PSW to stack
+ st %r14,SP_ILC(%r15)
+ stg %r13,0(%r15)
.endm
- .macro RESTORE_ALL sync
- mvc __LC_RETURN_PSW(16),SP_PSW(%r15) # move user PSW to lowcore
- .if !\sync
- ni __LC_RETURN_PSW+1,0xfd # clear wait state bit
+ .macro CLEANUP_SAVE_ALL psworg,savearea,sync
+ lg %r1,SP_PSW+8(%r15)
+ cli 1(%r1),0xdf
+ jne 2f
+ mvc \savearea(24),SP_R13(%r15)
+2: lg %r1,\savearea+16
+ .if \sync
+ tm \psworg+1,0x01
+ jz 1f
+ lg %r1,__LC_KERNEL_STACK
+ .else
+ tm \psworg+1,0x01
+ jnz 0f
+ lg %r0,__LC_ASYNC_STACK
+ slgr %r0,%r1
+ srag %r0,%r0,14
+ jz 1f
+0: lg %r1,__LC_ASYNC_STACK
.endif
- lmg %r0,%r15,SP_R0(%r15) # load gprs 0-15 of user
- lpswe __LC_RETURN_PSW # back to caller
+1: aghi %r1,-SP_SIZE
+ stg %r1,SP_R15(%r15)
+ lghi %r0,\psworg
+ xc SP_R13(8,%r15),SP_R13(%r15)
+ icm %r0,12,__LC_SVC_ILC
+ stg %r0,SP_R14(%r15)
+ mvc SP_R0(104,%r1),SP_R0(%r15)
+ mvc SP_ORIG_R2(8,%r1),SP_R2(%r15)
+ mvc SP_R13(24,%r1),\savearea
+ mvc SP_PSW(16,%r1),\psworg
+ st %r0,SP_ILC(%r1)
+ xc 0(8,%r1),0(%r1)
+ .endm
+
+ .macro RESTORE_ALL # system exit macro
+ mvc __LC_RETURN_PSW(16),SP_PSW(%r15) # move user PSW to lowcore
+ ni __LC_RETURN_PSW+1,0xfd # clear wait state bit
+ lmg %r0,%r15,SP_R0(%r15) # load gprs 0-15 of user
+ lpswe __LC_RETURN_PSW # back to caller
+ .endm
+
+ .macro CLEANUP_RESTORE_ALL
+ lg %r1,SP_PSW+8(%r15)
+ cli 0(%r1),0xb2
+ jne 0f
+ mvc SP_PSW(16,%r15),__LC_RETURN_PSW
+ j 1f
+0: lg %r1,SP_R15(%r15)
+ mvc SP_PSW(16,%r15),SP_PSW(%r1)
+ mvc SP_R0(128,%r15),SP_R0(%r1)
+1:
+ .endm
+
+ .macro GET_THREAD_INFO
+ lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
+ .endm
+
+ .macro CHECK_CRITICAL
+ tm SP_PSW+1(%r15),0x01 # test problem state bit
+ jnz 0f # from user -> not critical
+ larl %r1,.Lcritical_start
+ clc SP_PSW+8(8,%r15),8(%r1) # compare ip with __critical_end
+ jnl 0f
+ clc SP_PSW+8(8,%r15),0(%r1) # compare ip with __critical_start
+ jl 0f
+ brasl %r14,cleanup_critical
+0:
.endm
/*
.globl system_call
system_call:
- SAVE_ALL_BASE __LC_SAVE_AREA
SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
llgh %r7,__LC_SVC_INT_CODE # get svc number from lowcore
+sysc_enter:
+ GET_THREAD_INFO # load pointer to task_struct to R9
sysc_do_svc:
- lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
slag %r7,%r7,2 # *4 and test for svc 0
jnz sysc_nr_ok
# svc 0: system call number in %r1
- cl %r1,BASED(.Lnr_syscalls)
+ lghi %r0,NR_syscalls
+ clr %r1,%r0
jnl sysc_nr_ok
lgfr %r7,%r1 # clear high word in r1
slag %r7,%r7,2 # svc 0: system call number in %r1
tm __TI_flags+7(%r9),_TIF_WORK_SVC
jnz sysc_work # there is work to do (signals etc.)
sysc_leave:
- RESTORE_ALL 1
+ RESTORE_ALL
#
# recheck if there is more work to do
#
sysc_work_loop:
+ GET_THREAD_INFO # load pointer to task_struct to R9
tm __TI_flags+7(%r9),_TIF_WORK_SVC
jz sysc_leave # there is no work to do
#
# a new process exits the kernel with ret_from_fork
#
.globl ret_from_fork
-ret_from_fork:
- lg %r13,__LC_SVC_NEW_PSW+8
- lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
+ret_from_fork:
+ GET_THREAD_INFO # load pointer to task_struct to R9
brasl %r14,schedule_tail
stosm 24(%r15),0x03 # reenable interrupts
j sysc_return
* we just ignore the PER event (FIXME: is there anything we have to do
* for LPSW?).
*/
- SAVE_ALL_BASE __LC_SAVE_AREA
tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
jnz pgm_per # got per exception -> special case
SAVE_ALL __LC_PGM_OLD_PSW,__LC_SAVE_AREA,1
- lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
lgf %r3,__LC_PGM_ILC # load program interruption code
lghi %r8,0x7f
ngr %r8,%r3
pgm_do_call:
sll %r8,3
+ GET_THREAD_INFO
larl %r1,pgm_check_table
lg %r1,0(%r8,%r1) # load address of handler routine
la %r2,SP_PTREGS(%r15) # address of register-save area
clc __LC_PGM_OLD_PSW(16),__LC_SVC_NEW_PSW
je pgm_svcper
# no interesting special case, ignore PER event
- lmg %r12,%r15,__LC_SAVE_AREA
lpswe __LC_PGM_OLD_PSW
#
#
pgm_per_std:
SAVE_ALL __LC_PGM_OLD_PSW,__LC_SAVE_AREA,1
- lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
+ GET_THREAD_INFO
lg %r1,__TI_task(%r9)
mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
mvc __THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS
pgm_svcper:
SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
llgh %r7,__LC_SVC_INT_CODE # get svc number from lowcore
- lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
+ GET_THREAD_INFO # load pointer to task_struct to R9
lg %r1,__TI_task(%r9)
mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
mvc __THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS
*/
.globl io_int_handler
io_int_handler:
- stck __LC_INT_CLOCK
- SAVE_ALL_BASE __LC_SAVE_AREA+32
SAVE_ALL __LC_IO_OLD_PSW,__LC_SAVE_AREA+32,0
- lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
+ stck __LC_INT_CLOCK
+ CHECK_CRITICAL
+ GET_THREAD_INFO # load pointer to task_struct to R9
la %r2,SP_PTREGS(%r15) # address of register-save area
brasl %r14,do_IRQ # call standard irq handler
tm __TI_flags+7(%r9),_TIF_WORK_INT
jnz io_work # there is work to do (signals etc.)
io_leave:
- RESTORE_ALL 0
+ RESTORE_ALL
#ifdef CONFIG_PREEMPT
io_preempt:
stosm 48(%r15),0x03 # reenable interrupts
brasl %r14,schedule # call schedule
stnsm 48(%r15),0xfc # disable I/O and ext. interrupts
+ GET_THREAD_INFO # load pointer to task_struct to R9
xc __TI_precount(4,%r9),__TI_precount(%r9)
j io_resume_loop
#endif
stosm 48(%r15),0x03 # reenable interrupts
brasl %r14,schedule # call scheduler
stnsm 48(%r15),0xfc # disable I/O and ext. interrupts
+ GET_THREAD_INFO # load pointer to task_struct to R9
tm __TI_flags+7(%r9),_TIF_WORK_INT
jz io_leave # there is no work to do
j io_work_loop
*/
.globl ext_int_handler
ext_int_handler:
- stck __LC_INT_CLOCK
- SAVE_ALL_BASE __LC_SAVE_AREA+32
SAVE_ALL __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32,0
- lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
+ CHECK_CRITICAL
+ GET_THREAD_INFO # load pointer to task_struct to R9
+ stck __LC_INT_CLOCK
la %r2,SP_PTREGS(%r15) # address of register-save area
llgh %r3,__LC_EXT_INT_CODE # get interruption code
brasl %r14,do_extint
*/
.globl mcck_int_handler
mcck_int_handler:
- SAVE_ALL_BASE __LC_SAVE_AREA+64
SAVE_ALL __LC_MCK_OLD_PSW,__LC_SAVE_AREA+64,0
brasl %r14,s390_do_machine_check
mcck_return:
- RESTORE_ALL 0
+ RESTORE_ALL
#ifdef CONFIG_SMP
/*
restart_go:
#endif
-cleanup_table_system_call:
- .quad system_call, sysc_do_svc
-cleanup_table_sysc_return:
- .quad sysc_return, sysc_leave
-cleanup_table_sysc_leave:
- .quad sysc_leave, sysc_work_loop
-cleanup_table_sysc_work_loop:
- .quad sysc_work_loop, sysc_reschedule
+cleanup_table:
+ .quad system_call, sysc_enter, cleanup_sysc_enter
+ .quad sysc_return, sysc_leave, cleanup_sysc_return
+ .quad sysc_leave, sysc_work_loop, cleanup_sysc_leave
+ .quad sysc_work_loop, sysc_reschedule, cleanup_sysc_return
+cleanup_table_entries=(.-cleanup_table) / 24
cleanup_critical:
- clc 8(8,%r12),BASED(cleanup_table_system_call)
- jl 0f
- clc 8(8,%r12),BASED(cleanup_table_system_call+8)
- jl cleanup_system_call
-0:
- clc 8(8,%r12),BASED(cleanup_table_sysc_return)
- jl 0f
- clc 8(8,%r12),BASED(cleanup_table_sysc_return+8)
- jl cleanup_sysc_return
-0:
- clc 8(8,%r12),BASED(cleanup_table_sysc_leave)
- jl 0f
- clc 8(8,%r12),BASED(cleanup_table_sysc_leave+8)
- jl cleanup_sysc_leave
-0:
- clc 8(8,%r12),BASED(cleanup_table_sysc_work_loop)
- jl 0f
- clc 8(8,%r12),BASED(cleanup_table_sysc_work_loop+8)
- jl cleanup_sysc_leave
-0:
+ lghi %r0,cleanup_table_entries
+ larl %r1,cleanup_table
+ lg %r2,SP_PSW+8(%r15)
+cleanup_loop:
+ clg %r2,0(%r1)
+ jl cleanup_cont
+ clg %r2,8(%r1)
+ jl cleanup_found
+cleanup_cont:
+ la %r1,24(%r1)
+ brct %r0,cleanup_loop
br %r14
-
-cleanup_system_call:
- mvc __LC_RETURN_PSW(8),0(%r12)
- clc 8(8,%r12),BASED(cleanup_table_system_call)
- jne 0f
- mvc __LC_SAVE_AREA(32),__LC_SAVE_AREA+32
-0: stg %r13,__LC_SAVE_AREA+40
- SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
- stg %r15,__LC_SAVE_AREA+56
- llgh %r7,__LC_SVC_INT_CODE
- mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_system_call+8)
- la %r12,__LC_RETURN_PSW
+cleanup_found:
+ lg %r1,16(%r1)
+ br %r1
+
+cleanup_sysc_enter:
+ CLEANUP_SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
+ llgh %r0,0x8a
+ stg %r0,SP_R7(%r15)
+ larl %r1,sysc_enter
+ stg %r1,SP_PSW+8(%r15)
br %r14
cleanup_sysc_return:
- mvc __LC_RETURN_PSW(8),0(%r12)
- mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_sysc_return)
- la %r12,__LC_RETURN_PSW
+ larl %r1,sysc_return
+ stg %r1,SP_PSW+8(%r15)
br %r14
cleanup_sysc_leave:
- clc 8(8,%r12),BASED(cleanup_sysc_leave_lpsw)
- je 0f
- mvc __LC_RETURN_PSW(16),SP_PSW(%r15)
- mvc __LC_SAVE_AREA+32(32),SP_R12(%r15)
- lmg %r0,%r11,SP_R0(%r15)
- lg %r15,SP_R15(%r15)
-0: la %r12,__LC_RETURN_PSW
+ CLEANUP_RESTORE_ALL
br %r14
-cleanup_sysc_leave_lpsw:
- .quad sysc_leave + 12
/*
* Integer constants
.align 4
.Lconst:
.Lc_pactive: .long PREEMPT_ACTIVE
-.Lnr_syscalls: .long NR_syscalls
-.L0x0130: .short 0x130
-.L0x0140: .short 0x140
-.L0x0150: .short 0x150
-.L0x0160: .short 0x160
-.L0x0170: .short 0x170
.Lcritical_start:
.quad __critical_start
.Lcritical_end:
copied += sizeof(unsigned int);
}
return 0;
- case PTRACE_GETEVENTMSG:
- return put_user((__u32) child->ptrace_message,
- (unsigned int __user *) data);
- case PTRACE_GETSIGINFO:
- if (child->last_siginfo == NULL)
- return -EINVAL;
- return copy_siginfo_to_user32((siginfo_t32 __user *) data,
- child->last_siginfo);
- case PTRACE_SETSIGINFO:
- if (child->last_siginfo == NULL)
- return -EINVAL;
- return copy_siginfo_from_user32(child->last_siginfo,
- (siginfo_t32 __user *) data);
}
return ptrace_request(child, request, addr, data);
}
return s;
}
EXPORT_SYMBOL_NOVERS(memset);
+
+/*
+ * missing exports for string functions defined in lib/string.c
+ */
+EXPORT_SYMBOL_NOVERS(memmove);
+EXPORT_SYMBOL_NOVERS(strchr);
+EXPORT_SYMBOL_NOVERS(strnchr);
+EXPORT_SYMBOL_NOVERS(strncmp);
+EXPORT_SYMBOL_NOVERS(strpbrk);
# Makefile for the linux s390-specific parts of the memory manager.
#
-obj-y := init.o fault.o ioremap.o extmem.o mmap.o
+obj-y := init.o fault.o ioremap.o extmem.o
obj-$(CONFIG_CMM) += cmm.o
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
-#include <asm/smp.h>
#include "../../../drivers/s390/net/smsgiucv.h"
static int
cmm_pages_handler(ctl_table *ctl, int write, struct file *filp,
- void *buffer, size_t *lenp, loff_t *ppos)
+ void *buffer, size_t *lenp)
{
char buf[16], *p;
long pages;
int len;
- if (!*lenp || (*ppos && !write)) {
+ if (!*lenp || (filp->f_pos && !write)) {
*lenp = 0;
return 0;
}
return -EFAULT;
}
*lenp = len;
- *ppos += len;
+ filp->f_pos += len;
return 0;
}
static int
cmm_timeout_handler(ctl_table *ctl, int write, struct file *filp,
- void *buffer, size_t *lenp, loff_t *ppos)
+ void *buffer, size_t *lenp)
{
char buf[64], *p;
long pages, seconds;
int len;
- if (!*lenp || (*ppos && !write)) {
+ if (!*lenp || (filp->f_pos && !write)) {
*lenp = 0;
return 0;
}
return -EFAULT;
}
*lenp = len;
- *ppos += len;
+ filp->f_pos += len;
return 0;
}
+++ /dev/null
-/*
- * linux/arch/s390/mm/mmap.c
- *
- * flexible mmap layout support
- *
- * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- *
- * Started by Ingo Molnar <mingo@elte.hu>
- */
-
-#include <linux/personality.h>
-#include <linux/mm.h>
-
-/*
- * Top of mmap area (just below the process stack).
- *
- * Leave an at least ~128 MB hole.
- */
-#define MIN_GAP (128*1024*1024)
-#define MAX_GAP (TASK_SIZE/6*5)
-
-static inline unsigned long mmap_base(void)
-{
- unsigned long gap = current->rlim[RLIMIT_STACK].rlim_cur;
-
- if (gap < MIN_GAP)
- gap = MIN_GAP;
- else if (gap > MAX_GAP)
- gap = MAX_GAP;
-
- return TASK_SIZE - (gap & PAGE_MASK);
-}
-
-static inline int mmap_is_legacy(void)
-{
-#ifdef CONFIG_ARCH_S390X
- /*
- * Force standard allocation for 64 bit programs.
- */
- if (!test_thread_flag(TIF_31BIT))
- return 1;
-#endif
- return sysctl_legacy_va_layout ||
- (current->personality & ADDR_COMPAT_LAYOUT) ||
- current->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY;
-}
-
-/*
- * This function, called very early during the creation of a new
- * process VM image, sets up which VM layout function to use:
- */
-void arch_pick_mmap_layout(struct mm_struct *mm)
-{
- /*
- * Fall back to the standard layout if the personality
- * bit is set, or if the expected stack growth is unlimited:
- */
- if (mmap_is_legacy()) {
- mm->mmap_base = TASK_UNMAPPED_BASE;
- mm->get_unmapped_area = arch_get_unmapped_area;
- mm->unmap_area = arch_unmap_area;
- } else {
- mm->mmap_base = mmap_base();
- mm->get_unmapped_area = arch_get_unmapped_area_topdown;
- mm->unmap_area = arch_unmap_area_topdown;
- }
-}
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
config SMP
bool "Symmetric multi-processing support (does not work on sun4/sun4c)"
- depends on BROKEN
---help---
This enables support for systems with more than one CPU. If you have
a system with only one CPU, like most personal computers, say N. If
config SUN4
bool "Support for SUN4 machines (disables SUN4[CDM] support)"
- depends on !SMP
help
Say Y here if, and only if, your machine is a sun4. Note that
a kernel compiled with this option will run only on sun4.
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
volatile int __cpu_number_map[NR_CPUS];
volatile int __cpu_logical_map[NR_CPUS];
cycles_t cacheflush_time = 0; /* XXX */
-unsigned long cache_decay_ticks = 100;
cpumask_t cpu_online_map = CPU_MASK_NONE;
cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
}
}
-void smp_reschedule_irq(void)
-{
- set_need_resched();
-}
-
void smp_flush_page_to_ram(unsigned long page)
{
/* Current theory is that those who call this are the one's
extern void calibrate_delay(void);
extern volatile int smp_processors_ready;
+extern unsigned long cpu_present_map;
extern int smp_num_cpus;
static int smp_highest_cpu;
extern int smp_threads_ready;
* the SMP initialization the master will be just allowed
* to call the scheduler code.
*/
+ init_idle();
+
/* Get our local ticker going. */
smp_setup_percpu_timer();
extern int cpu_idle(void *unused);
extern void init_IRQ(void);
extern void cpu_panic(void);
+extern int start_secondary(void *unused);
/*
* Cycle through the processors asking the PROM to start each one.
current_set[0] = NULL;
local_irq_enable();
- cpus_clear(cpu_present_map);
+ cpu_present_map = 0;
/* XXX This whole thing has to go. See sparc64. */
for (i = 0; !cpu_find_by_instance(i, NULL, &mid); i++)
- cpu_set(mid, cpu_present_map);
- SMP_PRINTK(("cpu_present_map %08lx\n", cpus_addr(cpu_present_map)[0]));
+ cpu_present_map |= (1<<mid);
+ SMP_PRINTK(("cpu_present_map %08lx\n", cpu_present_map));
for(i=0; i < NR_CPUS; i++)
__cpu_number_map[i] = -1;
for(i=0; i < NR_CPUS; i++)
if(i == boot_cpu_id)
continue;
- if (cpu_isset(i, cpu_present_map)) {
+ if(cpu_present_map & (1 << i)) {
extern unsigned long sun4d_cpu_startup;
unsigned long *entry = &sun4d_cpu_startup;
struct task_struct *p;
}
}
if(!(cpu_callin_map[i])) {
- cpu_clear(i, cpu_present_map);
+ cpu_present_map &= ~(1 << i);
__cpu_number_map[i] = -1;
}
}
local_flush_cache_all();
if(cpucount == 0) {
printk("Error: only one Processor found.\n");
- cpu_present_map = cpumask_of_cpu(hard_smp4d_processor_id());
+ cpu_present_map = (1 << hard_smp4d_processor_id());
} else {
unsigned long bogosum = 0;
for(i = 0; i < NR_CPUS; i++) {
- if (cpu_isset(i, cpu_present_map)) {
+ if(cpu_present_map & (1 << i)) {
bogosum += cpu_data(i).udelay_val;
smp_highest_cpu = i;
}
/* Init receive/complete mapping, plus fire the IPI's off. */
{
- cpumask_t mask;
+ register unsigned long mask;
register int i;
- mask = cpumask_of_cpu(hard_smp4d_processor_id());
- cpus_andnot(mask, cpu_present_map, mask);
+ mask = (cpu_present_map & ~(1 << hard_smp4d_processor_id()));
for(i = 0; i <= high; i++) {
- if (cpu_isset(i, mask)) {
+ if(mask & (1 << i)) {
ccall_info.processors_in[i] = 0;
ccall_info.processors_out[i] = 0;
sun4d_send_ipi(i, IRQ_CROSS_CALL);
t_nmi[1] = t_nmi[1] + (linux_trap_ipi15_sun4d - linux_trap_ipi15_sun4m);
/* And set btfixup... */
- BTFIXUPSET_BLACKBOX(hard_smp_processor_id, smp4d_blackbox_id);
+ BTFIXUPSET_BLACKBOX(smp_processor_id, smp4d_blackbox_id);
BTFIXUPSET_BLACKBOX(load_current, smp4d_blackbox_current);
BTFIXUPSET_CALL(smp_cross_call, smp4d_cross_call, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(smp_message_pass, smp4d_message_pass, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(__hard_smp_processor_id, __smp4d_processor_id, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(__smp_processor_id, __smp4d_processor_id, BTFIXUPCALL_NORM);
for (i = 0; i < NR_CPUS; i++) {
ccall_info.processors_in[i] = 1;
* the SMP initialization the master will be just allowed
* to call the scheduler code.
*/
+ init_idle();
+
/* Allow master to continue. */
swap((unsigned long *)&cpu_callin_map[cpuid], 1);
extern int cpu_idle(void *unused);
extern void init_IRQ(void);
extern void cpu_panic(void);
+extern int start_secondary(void *unused);
/*
* Cycle through the processors asking the PROM to start each one.
void __init sun4m_init_smp(void)
{
- BTFIXUPSET_BLACKBOX(hard_smp_processor_id, smp4m_blackbox_id);
+ BTFIXUPSET_BLACKBOX(smp_processor_id, smp4m_blackbox_id);
BTFIXUPSET_BLACKBOX(load_current, smp4m_blackbox_current);
BTFIXUPSET_CALL(smp_cross_call, smp4m_cross_call, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(smp_message_pass, smp4m_message_pass, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(__hard_smp_processor_id, __smp4m_processor_id, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(__smp_processor_id, __smp4m_processor_id, BTFIXUPCALL_NORM);
}
sun4_esp_physaddr=SUN4_400_ESP_PHYSADDR;
break;
default:
- ;
}
}
#include <linux/utsname.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
-#include <linux/vs_cvirt.h>
#include <asm/uaccess.h>
#include <asm/ipc.h>
new_addr = get_unmapped_area(file, addr, new_len,
vma ? vma->vm_pgoff : 0,
- map_flags);
+ map_flags, vma->vm_flags & VM_EXEC);
ret = new_addr;
if (new_addr & ~PAGE_MASK)
goto out_sem;
.align 4
smp_do_cpu_idle:
+ call init_idle
+ nop
call cpu_idle
mov 0, %o0
/* Both these macros have to start with exactly the same insn */
#define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
- ldd [%src + (offset) + 0x00], %t0; \
- ldd [%src + (offset) + 0x08], %t2; \
- ldd [%src + (offset) + 0x10], %t4; \
- ldd [%src + (offset) + 0x18], %t6; \
- st %t0, [%dst + (offset) + 0x00]; \
- st %t1, [%dst + (offset) + 0x04]; \
- st %t2, [%dst + (offset) + 0x08]; \
- st %t3, [%dst + (offset) + 0x0c]; \
- st %t4, [%dst + (offset) + 0x10]; \
- st %t5, [%dst + (offset) + 0x14]; \
- st %t6, [%dst + (offset) + 0x18]; \
- st %t7, [%dst + (offset) + 0x1c];
+ ldd [%src + offset + 0x00], %t0; \
+ ldd [%src + offset + 0x08], %t2; \
+ ldd [%src + offset + 0x10], %t4; \
+ ldd [%src + offset + 0x18], %t6; \
+ st %t0, [%dst + offset + 0x00]; \
+ st %t1, [%dst + offset + 0x04]; \
+ st %t2, [%dst + offset + 0x08]; \
+ st %t3, [%dst + offset + 0x0c]; \
+ st %t4, [%dst + offset + 0x10]; \
+ st %t5, [%dst + offset + 0x14]; \
+ st %t6, [%dst + offset + 0x18]; \
+ st %t7, [%dst + offset + 0x1c];
#define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
- ldd [%src + (offset) + 0x00], %t0; \
- ldd [%src + (offset) + 0x08], %t2; \
- ldd [%src + (offset) + 0x10], %t4; \
- ldd [%src + (offset) + 0x18], %t6; \
- std %t0, [%dst + (offset) + 0x00]; \
- std %t2, [%dst + (offset) + 0x08]; \
- std %t4, [%dst + (offset) + 0x10]; \
- std %t6, [%dst + (offset) + 0x18];
+ ldd [%src + offset + 0x00], %t0; \
+ ldd [%src + offset + 0x08], %t2; \
+ ldd [%src + offset + 0x10], %t4; \
+ ldd [%src + offset + 0x18], %t6; \
+ std %t0, [%dst + offset + 0x00]; \
+ std %t2, [%dst + offset + 0x08]; \
+ std %t4, [%dst + offset + 0x10]; \
+ std %t6, [%dst + offset + 0x18];
#define MOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
- ldd [%src - (offset) - 0x10], %t0; \
- ldd [%src - (offset) - 0x08], %t2; \
- st %t0, [%dst - (offset) - 0x10]; \
- st %t1, [%dst - (offset) - 0x0c]; \
- st %t2, [%dst - (offset) - 0x08]; \
- st %t3, [%dst - (offset) - 0x04];
+ ldd [%src - offset - 0x10], %t0; \
+ ldd [%src - offset - 0x08], %t2; \
+ st %t0, [%dst - offset - 0x10]; \
+ st %t1, [%dst - offset - 0x0c]; \
+ st %t2, [%dst - offset - 0x08]; \
+ st %t3, [%dst - offset - 0x04];
#define MOVE_HALFCHUNK(src, dst, offset, t0, t1, t2, t3) \
- lduh [%src + (offset) + 0x00], %t0; \
- lduh [%src + (offset) + 0x02], %t1; \
- lduh [%src + (offset) + 0x04], %t2; \
- lduh [%src + (offset) + 0x06], %t3; \
- sth %t0, [%dst + (offset) + 0x00]; \
- sth %t1, [%dst + (offset) + 0x02]; \
- sth %t2, [%dst + (offset) + 0x04]; \
- sth %t3, [%dst + (offset) + 0x06];
+ lduh [%src + offset + 0x00], %t0; \
+ lduh [%src + offset + 0x02], %t1; \
+ lduh [%src + offset + 0x04], %t2; \
+ lduh [%src + offset + 0x06], %t3; \
+ sth %t0, [%dst + offset + 0x00]; \
+ sth %t1, [%dst + offset + 0x02]; \
+ sth %t2, [%dst + offset + 0x04]; \
+ sth %t3, [%dst + offset + 0x06];
#define MOVE_SHORTCHUNK(src, dst, offset, t0, t1) \
- ldub [%src - (offset) - 0x02], %t0; \
- ldub [%src - (offset) - 0x01], %t1; \
- stb %t0, [%dst - (offset) - 0x02]; \
- stb %t1, [%dst - (offset) - 0x01];
+ ldub [%src - offset - 0x02], %t0; \
+ ldub [%src - offset - 0x01], %t1; \
+ stb %t0, [%dst - offset - 0x02]; \
+ stb %t1, [%dst - offset - 0x01];
.text
.align 4
#endif
/* Both these macros have to start with exactly the same insn */
-#define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
- ldd [%src + (offset) + 0x00], %t0; \
- ldd [%src + (offset) + 0x08], %t2; \
- ldd [%src + (offset) + 0x10], %t4; \
- ldd [%src + (offset) + 0x18], %t6; \
- st %t0, [%dst + (offset) + 0x00]; \
- st %t1, [%dst + (offset) + 0x04]; \
- st %t2, [%dst + (offset) + 0x08]; \
- st %t3, [%dst + (offset) + 0x0c]; \
- st %t4, [%dst + (offset) + 0x10]; \
- st %t5, [%dst + (offset) + 0x14]; \
- st %t6, [%dst + (offset) + 0x18]; \
- st %t7, [%dst + (offset) + 0x1c];
-
-#define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
- ldd [%src + (offset) + 0x00], %t0; \
- ldd [%src + (offset) + 0x08], %t2; \
- ldd [%src + (offset) + 0x10], %t4; \
- ldd [%src + (offset) + 0x18], %t6; \
- std %t0, [%dst + (offset) + 0x00]; \
- std %t2, [%dst + (offset) + 0x08]; \
- std %t4, [%dst + (offset) + 0x10]; \
- std %t6, [%dst + (offset) + 0x18];
-
-#define MOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
- ldd [%src - (offset) - 0x10], %t0; \
- ldd [%src - (offset) - 0x08], %t2; \
- st %t0, [%dst - (offset) - 0x10]; \
- st %t1, [%dst - (offset) - 0x0c]; \
- st %t2, [%dst - (offset) - 0x08]; \
- st %t3, [%dst - (offset) - 0x04];
-
-#define MOVE_LASTALIGNCHUNK(src, dst, offset, t0, t1, t2, t3) \
- ldd [%src - (offset) - 0x10], %t0; \
- ldd [%src - (offset) - 0x08], %t2; \
- std %t0, [%dst - (offset) - 0x10]; \
- std %t2, [%dst - (offset) - 0x08];
-
-#define MOVE_SHORTCHUNK(src, dst, offset, t0, t1) \
- ldub [%src - (offset) - 0x02], %t0; \
- ldub [%src - (offset) - 0x01], %t1; \
- stb %t0, [%dst - (offset) - 0x02]; \
- stb %t1, [%dst - (offset) - 0x01];
+#define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
+ ldd [%src + offset + 0x00], %t0; \
+ ldd [%src + offset + 0x08], %t2; \
+ ldd [%src + offset + 0x10], %t4; \
+ ldd [%src + offset + 0x18], %t6; \
+ st %t0, [%dst + offset + 0x00]; \
+ st %t1, [%dst + offset + 0x04]; \
+ st %t2, [%dst + offset + 0x08]; \
+ st %t3, [%dst + offset + 0x0c]; \
+ st %t4, [%dst + offset + 0x10]; \
+ st %t5, [%dst + offset + 0x14]; \
+ st %t6, [%dst + offset + 0x18]; \
+ st %t7, [%dst + offset + 0x1c];
+
+#define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
+ ldd [%src + offset + 0x00], %t0; \
+ ldd [%src + offset + 0x08], %t2; \
+ ldd [%src + offset + 0x10], %t4; \
+ ldd [%src + offset + 0x18], %t6; \
+ std %t0, [%dst + offset + 0x00]; \
+ std %t2, [%dst + offset + 0x08]; \
+ std %t4, [%dst + offset + 0x10]; \
+ std %t6, [%dst + offset + 0x18];
+
+#define MOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
+ ldd [%src - offset - 0x10], %t0; \
+ ldd [%src - offset - 0x08], %t2; \
+ st %t0, [%dst - offset - 0x10]; \
+ st %t1, [%dst - offset - 0x0c]; \
+ st %t2, [%dst - offset - 0x08]; \
+ st %t3, [%dst - offset - 0x04];
+
+#define MOVE_LASTALIGNCHUNK(src, dst, offset, t0, t1, t2, t3) \
+ ldd [%src - offset - 0x10], %t0; \
+ ldd [%src - offset - 0x08], %t2; \
+ std %t0, [%dst - offset - 0x10]; \
+ std %t2, [%dst - offset - 0x08];
+
+#define MOVE_SHORTCHUNK(src, dst, offset, t0, t1) \
+ ldub [%src - offset - 0x02], %t0; \
+ ldub [%src - offset - 0x01], %t1; \
+ stb %t0, [%dst - offset - 0x02]; \
+ stb %t1, [%dst - offset - 0x01];
/* Both these macros have to start with exactly the same insn */
-#define RMOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
- ldd [%src - (offset) - 0x20], %t0; \
- ldd [%src - (offset) - 0x18], %t2; \
- ldd [%src - (offset) - 0x10], %t4; \
- ldd [%src - (offset) - 0x08], %t6; \
- st %t0, [%dst - (offset) - 0x20]; \
- st %t1, [%dst - (offset) - 0x1c]; \
- st %t2, [%dst - (offset) - 0x18]; \
- st %t3, [%dst - (offset) - 0x14]; \
- st %t4, [%dst - (offset) - 0x10]; \
- st %t5, [%dst - (offset) - 0x0c]; \
- st %t6, [%dst - (offset) - 0x08]; \
- st %t7, [%dst - (offset) - 0x04];
-
-#define RMOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
- ldd [%src - (offset) - 0x20], %t0; \
- ldd [%src - (offset) - 0x18], %t2; \
- ldd [%src - (offset) - 0x10], %t4; \
- ldd [%src - (offset) - 0x08], %t6; \
- std %t0, [%dst - (offset) - 0x20]; \
- std %t2, [%dst - (offset) - 0x18]; \
- std %t4, [%dst - (offset) - 0x10]; \
- std %t6, [%dst - (offset) - 0x08];
-
-#define RMOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
- ldd [%src + (offset) + 0x00], %t0; \
- ldd [%src + (offset) + 0x08], %t2; \
- st %t0, [%dst + (offset) + 0x00]; \
- st %t1, [%dst + (offset) + 0x04]; \
- st %t2, [%dst + (offset) + 0x08]; \
- st %t3, [%dst + (offset) + 0x0c];
-
-#define RMOVE_SHORTCHUNK(src, dst, offset, t0, t1) \
- ldub [%src + (offset) + 0x00], %t0; \
- ldub [%src + (offset) + 0x01], %t1; \
- stb %t0, [%dst + (offset) + 0x00]; \
- stb %t1, [%dst + (offset) + 0x01];
-
-#define SMOVE_CHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, prev, shil, shir, offset2) \
- ldd [%src + (offset) + 0x00], %t0; \
- ldd [%src + (offset) + 0x08], %t2; \
- srl %t0, shir, %t5; \
- srl %t1, shir, %t6; \
- sll %t0, shil, %t0; \
- or %t5, %prev, %t5; \
- sll %t1, shil, %prev; \
- or %t6, %t0, %t0; \
- srl %t2, shir, %t1; \
- srl %t3, shir, %t6; \
- sll %t2, shil, %t2; \
- or %t1, %prev, %t1; \
- std %t4, [%dst + (offset) + (offset2) - 0x04]; \
- std %t0, [%dst + (offset) + (offset2) + 0x04]; \
- sll %t3, shil, %prev; \
+#define RMOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
+ ldd [%src - offset - 0x20], %t0; \
+ ldd [%src - offset - 0x18], %t2; \
+ ldd [%src - offset - 0x10], %t4; \
+ ldd [%src - offset - 0x08], %t6; \
+ st %t0, [%dst - offset - 0x20]; \
+ st %t1, [%dst - offset - 0x1c]; \
+ st %t2, [%dst - offset - 0x18]; \
+ st %t3, [%dst - offset - 0x14]; \
+ st %t4, [%dst - offset - 0x10]; \
+ st %t5, [%dst - offset - 0x0c]; \
+ st %t6, [%dst - offset - 0x08]; \
+ st %t7, [%dst - offset - 0x04];
+
+#define RMOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
+ ldd [%src - offset - 0x20], %t0; \
+ ldd [%src - offset - 0x18], %t2; \
+ ldd [%src - offset - 0x10], %t4; \
+ ldd [%src - offset - 0x08], %t6; \
+ std %t0, [%dst - offset - 0x20]; \
+ std %t2, [%dst - offset - 0x18]; \
+ std %t4, [%dst - offset - 0x10]; \
+ std %t6, [%dst - offset - 0x08];
+
+#define RMOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
+ ldd [%src + offset + 0x00], %t0; \
+ ldd [%src + offset + 0x08], %t2; \
+ st %t0, [%dst + offset + 0x00]; \
+ st %t1, [%dst + offset + 0x04]; \
+ st %t2, [%dst + offset + 0x08]; \
+ st %t3, [%dst + offset + 0x0c];
+
+#define RMOVE_SHORTCHUNK(src, dst, offset, t0, t1) \
+ ldub [%src + offset + 0x00], %t0; \
+ ldub [%src + offset + 0x01], %t1; \
+ stb %t0, [%dst + offset + 0x00]; \
+ stb %t1, [%dst + offset + 0x01];
+
+#define SMOVE_CHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, prev, shil, shir, offset2) \
+ ldd [%src + offset + 0x00], %t0; \
+ ldd [%src + offset + 0x08], %t2; \
+ srl %t0, shir, %t5; \
+ srl %t1, shir, %t6; \
+ sll %t0, shil, %t0; \
+ or %t5, %prev, %t5; \
+ sll %t1, shil, %prev; \
+ or %t6, %t0, %t0; \
+ srl %t2, shir, %t1; \
+ srl %t3, shir, %t6; \
+ sll %t2, shil, %t2; \
+ or %t1, %prev, %t1; \
+ std %t4, [%dst + offset + offset2 - 0x04]; \
+ std %t0, [%dst + offset + offset2 + 0x04]; \
+ sll %t3, shil, %prev; \
or %t6, %t2, %t4;
-#define SMOVE_ALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, prev, shil, shir, offset2) \
- ldd [%src + (offset) + 0x00], %t0; \
- ldd [%src + (offset) + 0x08], %t2; \
- srl %t0, shir, %t4; \
- srl %t1, shir, %t5; \
- sll %t0, shil, %t6; \
- or %t4, %prev, %t0; \
- sll %t1, shil, %prev; \
- or %t5, %t6, %t1; \
- srl %t2, shir, %t4; \
- srl %t3, shir, %t5; \
- sll %t2, shil, %t6; \
- or %t4, %prev, %t2; \
- sll %t3, shil, %prev; \
- or %t5, %t6, %t3; \
- std %t0, [%dst + (offset) + (offset2) + 0x00]; \
- std %t2, [%dst + (offset) + (offset2) + 0x08];
+#define SMOVE_ALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, prev, shil, shir, offset2) \
+ ldd [%src + offset + 0x00], %t0; \
+ ldd [%src + offset + 0x08], %t2; \
+ srl %t0, shir, %t4; \
+ srl %t1, shir, %t5; \
+ sll %t0, shil, %t6; \
+ or %t4, %prev, %t0; \
+ sll %t1, shil, %prev; \
+ or %t5, %t6, %t1; \
+ srl %t2, shir, %t4; \
+ srl %t3, shir, %t5; \
+ sll %t2, shil, %t6; \
+ or %t4, %prev, %t2; \
+ sll %t3, shil, %prev; \
+ or %t5, %t6, %t3; \
+ std %t0, [%dst + offset + offset2 + 0x00]; \
+ std %t2, [%dst + offset + offset2 + 0x08];
.text
.align 4
static void turbosparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
- FLUSH_BEGIN(vma->vm_mm)
+ struct mm_struct *mm = vma->vm_mm;
+
+ FLUSH_BEGIN(mm)
flush_user_windows();
turbosparc_idflash_clear();
FLUSH_END
static void turbosparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
- FLUSH_BEGIN(vma->vm_mm)
+ struct mm_struct *mm = vma->vm_mm;
+
+ FLUSH_BEGIN(mm)
srmmu_flush_whole_tlb();
FLUSH_END
}
fly. Currently there are only sparc64 drivers for UltraSPARC-III
and UltraSPARC-IIe processors.
- For details, take a look at <file:Documentation/cpu-freq>.
+ For details, take a look at linux/Documentation/cpu-freq.
If in doubt, say N.
help
This adds the CPUFreq driver for UltraSPARC-III processors.
- For details, take a look at <file:Documentation/cpu-freq>.
+ For details, take a look at linux/Documentation/cpu-freq.
If in doubt, say N.
help
This adds the CPUFreq driver for UltraSPARC-IIe processors.
- For details, take a look at <file:Documentation/cpu-freq>.
+ For details, take a look at linux/Documentation/cpu-freq.
If in doubt, say N.
config SUNOS_EMUL
bool "SunOS binary emulation"
- depends on BINFMT_AOUT32
help
This allows you to run most SunOS binaries. If you want to do this,
say Y here and place appropriate files in /usr/gnemul/sunos. See
config SOLARIS_EMUL
tristate "Solaris binary emulation (EXPERIMENTAL)"
- depends on SPARC32_COMPAT && EXPERIMENTAL
+ depends on EXPERIMENTAL
help
This is experimental code which will enable you to run (many)
Solaris binaries on your SPARC Linux machine.
#
CONFIG_EXPERIMENTAL=y
CONFIG_CLEAN_COMPILE=y
+CONFIG_STANDALONE=y
#
# General setup
# CONFIG_BINFMT_AOUT32 is not set
CONFIG_BINFMT_ELF=y
CONFIG_BINFMT_MISC=m
+# CONFIG_SUNOS_EMUL is not set
CONFIG_SOLARIS_EMUL=m
#
#
# Generic Driver Options
#
-CONFIG_STANDALONE=y
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
CONFIG_FW_LOADER=m
# CONFIG_DEBUG_DRIVER is not set
# CONFIG_FB_CIRRUS is not set
CONFIG_FB_PM2=y
# CONFIG_FB_PM2_FIFO_DISCONNECT is not set
+# CONFIG_FB_CYBER2000 is not set
# CONFIG_FB_ASILIANT is not set
# CONFIG_FB_IMSTT is not set
# CONFIG_FB_BW2 is not set
#
# Serial drivers
#
+# CONFIG_SERIAL_8250 is not set
#
# Non-8250 serial port support
CONFIG_SUN_MOSTEK_RTC=y
CONFIG_OBP_FLASH=m
# CONFIG_SUN_BPP is not set
+# CONFIG_SUN_VIDEOPIX is not set
+# CONFIG_SUN_AURORA is not set
#
# Memory Technology Devices (MTD)
CONFIG_SCSI_SATA_SIS=m
CONFIG_SCSI_SATA_VIA=m
CONFIG_SCSI_SATA_VITESSE=m
+# CONFIG_SCSI_BUSLOGIC is not set
CONFIG_SCSI_DMX3191D=m
+# CONFIG_SCSI_EATA is not set
CONFIG_SCSI_EATA_PIO=m
# CONFIG_SCSI_FUTURE_DOMAIN is not set
+# CONFIG_SCSI_GDTH is not set
CONFIG_SCSI_IPS=m
CONFIG_SCSI_INIA100=m
CONFIG_SCSI_PPA=m
# CONFIG_SCSI_QLA6312 is not set
# CONFIG_SCSI_QLA6322 is not set
CONFIG_SCSI_DC395x=m
-# CONFIG_SCSI_DC390T is not set
+CONFIG_SCSI_DC390T=m
CONFIG_SCSI_DEBUG=m
CONFIG_SCSI_SUNESP=y
CONFIG_NET_DIVERT=y
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# QoS and/or fair queueing
#
CONFIG_NET_SCHED=y
-# CONFIG_NET_SCH_CLK_JIFFIES is not set
-# CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set
-CONFIG_NET_SCH_CLK_CPU=y
CONFIG_NET_SCH_CBQ=m
CONFIG_NET_SCH_HTB=m
CONFIG_NET_SCH_HFSC=m
#
# Wireless 802.11b ISA/PCI cards support
#
+CONFIG_AIRO=m
CONFIG_HERMES=m
CONFIG_PLX_HERMES=m
CONFIG_TMD_HERMES=m
#
CONFIG_I2C_SENSOR=m
CONFIG_SENSORS_ADM1021=m
-CONFIG_SENSORS_ADM1025=m
-CONFIG_SENSORS_ADM1031=m
CONFIG_SENSORS_ASB100=m
CONFIG_SENSORS_DS1621=m
CONFIG_SENSORS_FSCHER=m
CONFIG_SENSORS_GL518SM=m
CONFIG_SENSORS_IT87=m
CONFIG_SENSORS_LM75=m
-CONFIG_SENSORS_LM77=m
CONFIG_SENSORS_LM78=m
CONFIG_SENSORS_LM80=m
CONFIG_SENSORS_LM83=m
# CONFIG_SMB_NLS_DEFAULT is not set
CONFIG_CIFS=m
# CONFIG_CIFS_STATS is not set
-# CONFIG_CIFS_XATTR is not set
CONFIG_CIFS_POSIX=y
CONFIG_NCP_FS=m
# CONFIG_NCPFS_PACKET_SIGNING is not set
CONFIG_SND_SUN_AMD7930=m
CONFIG_SND_SUN_CS4231=m
+#
+# Open Sound System
+#
+# CONFIG_SOUND_PRIME is not set
+
#
# USB support
#
# CONFIG_USB_OV511 is not set
CONFIG_USB_PWC=m
# CONFIG_USB_SE401 is not set
-CONFIG_USB_SN9C102=m
# CONFIG_USB_STV680 is not set
CONFIG_USB_W9968CF=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_ARC4=m
-CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_DEFLATE=y
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_CRC32C=m
#include <linux/binfmts.h>
#include <linux/personality.h>
#include <linux/init.h>
-#include <linux/vs_memory.h>
#include <asm/system.h>
#include <asm/uaccess.h>
loff_t pos = fd_offset;
/* Fuck me plenty... */
error = do_brk(N_TXTADDR(ex), ex.a_text);
- bprm->file->f_op->read(bprm->file, (char __user *)N_TXTADDR(ex),
+ bprm->file->f_op->read(bprm->file, (char *) N_TXTADDR(ex),
ex.a_text, &pos);
error = do_brk(N_DATADDR(ex), ex.a_data);
- bprm->file->f_op->read(bprm->file, (char __user *)N_DATADDR(ex),
+ bprm->file->f_op->read(bprm->file, (char *) N_DATADDR(ex),
ex.a_data, &pos);
goto beyond_if;
}
loff_t pos = fd_offset;
do_brk(N_TXTADDR(ex) & PAGE_MASK,
ex.a_text+ex.a_data + PAGE_SIZE - 1);
- bprm->file->f_op->read(bprm->file, (char __user *)N_TXTADDR(ex),
+ bprm->file->f_op->read(bprm->file, (char *) N_TXTADDR(ex),
ex.a_text+ex.a_data, &pos);
} else {
static unsigned long error_time;
if (!bprm->file->f_op->mmap) {
loff_t pos = fd_offset;
do_brk(0, ex.a_text+ex.a_data);
- bprm->file->f_op->read(bprm->file,
- (char __user *)N_TXTADDR(ex),
+ bprm->file->f_op->read(bprm->file,(char *)N_TXTADDR(ex),
ex.a_text+ex.a_data, &pos);
goto beyond_if;
}
/* SunOS's execv() call only specifies the argv argument, the
* environment settings are the same as the calling processes.
*/
- .globl sunos_execv
+ .globl sunos_execv, sys_execve, sys32_execve
sys_execve:
sethi %hi(sparc_execve), %g1
ba,pt %xcc, execve_merge
or %g1, %lo(sparc_execve), %g1
-#ifdef CONFIG_COMPAT
- .globl sys_execve
sunos_execv:
stx %g0, [%sp + PTREGS_OFF + PT_V9_I2]
- .globl sys32_execve
sys32_execve:
sethi %hi(sparc32_execve), %g1
or %g1, %lo(sparc32_execve), %g1
-#endif
execve_merge:
flushw
jmpl %g1, %g0
add %sp, PTREGS_OFF, %o0
.globl sys_pipe, sys_sigpause, sys_nis_syscall
- .globl sys_sigsuspend, sys_rt_sigsuspend
+ .globl sys_sigsuspend, sys_rt_sigsuspend, sys32_rt_sigsuspend
.globl sys_rt_sigreturn
- .globl sys_ptrace
- .globl sys_sigaltstack
+ .globl sys32_sigreturn, sys32_rt_sigreturn
+ .globl sys32_execve, sys_ptrace
+ .globl sys_sigaltstack, sys32_sigaltstack
+ .globl sys32_sigstack
.align 32
sys_pipe: ba,pt %xcc, sparc_pipe
add %sp, PTREGS_OFF, %o0
add %sp, PTREGS_OFF, %o1
sys_sigaltstack:ba,pt %xcc, do_sigaltstack
add %i6, STACK_BIAS, %o2
-#ifdef CONFIG_COMPAT
- .globl sys32_sigstack
sys32_sigstack: ba,pt %xcc, do_sys32_sigstack
mov %i6, %o2
- .globl sys32_sigaltstack
sys32_sigaltstack:
ba,pt %xcc, do_sys32_sigaltstack
mov %i6, %o2
-#endif
+
.align 32
sys_sigsuspend: add %sp, PTREGS_OFF, %o0
call do_sigsuspend
call do_rt_sigsuspend
add %o7, 1f-.-4, %o7
nop
-#ifdef CONFIG_COMPAT
- .globl sys32_rt_sigsuspend
sys32_rt_sigsuspend: /* NOTE: %o0,%o1 have a correct value already */
srl %o0, 0, %o0
add %sp, PTREGS_OFF, %o2
call do_rt_sigsuspend32
add %o7, 1f-.-4, %o7
-#endif
/* NOTE: %o0 has a correct value already */
sys_sigpause: add %sp, PTREGS_OFF, %o1
call do_sigpause
add %o7, 1f-.-4, %o7
nop
-#ifdef CONFIG_COMPAT
- .globl sys32_sigreturn
sys32_sigreturn:
add %sp, PTREGS_OFF, %o0
call do_sigreturn32
add %o7, 1f-.-4, %o7
nop
-#endif
sys_rt_sigreturn:
add %sp, PTREGS_OFF, %o0
call do_rt_sigreturn
add %o7, 1f-.-4, %o7
nop
-#ifdef CONFIG_COMPAT
- .globl sys32_rt_sigreturn
sys32_rt_sigreturn:
add %sp, PTREGS_OFF, %o0
call do_rt_sigreturn32
add %o7, 1f-.-4, %o7
nop
-#endif
sys_ptrace: add %sp, PTREGS_OFF, %o0
call do_ptrace
add %o7, 1f-.-4, %o7
/* Patch copy/page operations to cheetah optimized versions. */
call cheetah_patch_copyops
nop
+ call cheetah_patch_pgcopyops
+ nop
call cheetah_patch_cachetlbops
nop
/* Use this to get at 32-bit user passed pointers.
* See sys_sparc32.c for description about it.
*/
-#define A(__x) compat_ptr(__x)
+#define A(__x) ((void __user *)(unsigned long)(__x))
static __inline__ void *alloc_user_space(long len)
{
static int fbiogetputcmap(unsigned int fd, unsigned int cmd, unsigned long arg)
{
- struct fbcmap32 __user *argp = (void __user *)arg;
- struct fbcmap __user *p = compat_alloc_user_space(sizeof(*p));
- u32 addr;
+ struct fbcmap f;
int ret;
+ char red[256], green[256], blue[256];
+ u32 r, g, b;
+ mm_segment_t old_fs = get_fs();
- ret = copy_in_user(p, argp, 2 * sizeof(int));
- ret |= get_user(addr, &argp->red);
- ret |= put_user(compat_ptr(addr), &p->red);
- ret |= get_user(addr, &argp->green);
- ret |= put_user(compat_ptr(addr), &p->green);
- ret |= get_user(addr, &argp->blue);
- ret |= put_user(compat_ptr(addr), &p->blue);
+ ret = get_user(f.index, &(((struct fbcmap32 __user *)arg)->index));
+ ret |= __get_user(f.count, &(((struct fbcmap32 __user *)arg)->count));
+ ret |= __get_user(r, &(((struct fbcmap32 __user *)arg)->red));
+ ret |= __get_user(g, &(((struct fbcmap32 __user *)arg)->green));
+ ret |= __get_user(b, &(((struct fbcmap32 __user *)arg)->blue));
if (ret)
return -EFAULT;
- return sys_ioctl(fd, (cmd == FBIOPUTCMAP32) ? FBIOPUTCMAP_SPARC : FBIOGETCMAP_SPARC, (unsigned long)p);
+ if ((f.index < 0) || (f.index > 255)) return -EINVAL;
+ if (f.index + f.count > 256)
+ f.count = 256 - f.index;
+ if (cmd == FBIOPUTCMAP32) {
+ ret = copy_from_user (red, A(r), f.count);
+ ret |= copy_from_user (green, A(g), f.count);
+ ret |= copy_from_user (blue, A(b), f.count);
+ if (ret)
+ return -EFAULT;
+ }
+ f.red = red; f.green = green; f.blue = blue;
+ set_fs (KERNEL_DS);
+ ret = sys_ioctl (fd, (cmd == FBIOPUTCMAP32) ? FBIOPUTCMAP_SPARC : FBIOGETCMAP_SPARC, (long)&f);
+ set_fs (old_fs);
+ if (!ret && cmd == FBIOGETCMAP32) {
+ ret = copy_to_user (A(r), red, f.count);
+ ret |= copy_to_user (A(g), green, f.count);
+ ret |= copy_to_user (A(b), blue, f.count);
+ }
+ return ret ? -EFAULT : 0;
}
struct fbcursor32 {
static int fbiogscursor(unsigned int fd, unsigned int cmd, unsigned long arg)
{
- struct fbcursor __user *p = compat_alloc_user_space(sizeof(*p));
- struct fbcursor32 __user *argp = (void __user *)arg;
- compat_uptr_t addr;
+ struct fbcursor f;
int ret;
+ char red[2], green[2], blue[2];
+ char image[128], mask[128];
+ u32 r, g, b;
+ u32 m, i;
+ mm_segment_t old_fs = get_fs();
- ret = copy_in_user(p, argp,
+ ret = copy_from_user (&f, (struct fbcursor32 __user *) arg,
2 * sizeof (short) + 2 * sizeof(struct fbcurpos));
- ret |= copy_in_user(&p->size, &argp->size, sizeof(struct fbcurpos));
- ret |= copy_in_user(&p->cmap, &argp->cmap, 2 * sizeof(int));
- ret |= get_user(addr, &argp->cmap.red);
- ret |= put_user(compat_ptr(addr), &p->cmap.red);
- ret |= get_user(addr, &argp->cmap.green);
- ret |= put_user(compat_ptr(addr), &p->cmap.green);
- ret |= get_user(addr, &argp->cmap.blue);
- ret |= put_user(compat_ptr(addr), &p->cmap.blue);
- ret |= get_user(addr, &argp->mask);
- ret |= put_user(compat_ptr(addr), &p->mask);
- ret |= get_user(addr, &argp->image);
- ret |= put_user(compat_ptr(addr), &p->image);
+ ret |= __get_user(f.size.x,
+ &(((struct fbcursor32 __user *)arg)->size.x));
+ ret |= __get_user(f.size.y,
+ &(((struct fbcursor32 __user *)arg)->size.y));
+ ret |= __get_user(f.cmap.index,
+ &(((struct fbcursor32 __user *)arg)->cmap.index));
+ ret |= __get_user(f.cmap.count,
+ &(((struct fbcursor32 __user *)arg)->cmap.count));
+ ret |= __get_user(r, &(((struct fbcursor32 __user *)arg)->cmap.red));
+ ret |= __get_user(g, &(((struct fbcursor32 __user *)arg)->cmap.green));
+ ret |= __get_user(b, &(((struct fbcursor32 __user *)arg)->cmap.blue));
+ ret |= __get_user(m, &(((struct fbcursor32 __user *)arg)->mask));
+ ret |= __get_user(i, &(((struct fbcursor32 __user *)arg)->image));
if (ret)
return -EFAULT;
- return sys_ioctl (fd, FBIOSCURSOR, (unsigned long)p);
+ if (f.set & FB_CUR_SETCMAP) {
+ if ((uint) f.size.y > 32)
+ return -EINVAL;
+ ret = copy_from_user (mask, A(m), f.size.y * 4);
+ ret |= copy_from_user (image, A(i), f.size.y * 4);
+ if (ret)
+ return -EFAULT;
+ f.image = image; f.mask = mask;
+ }
+ if (f.set & FB_CUR_SETCMAP) {
+ ret = copy_from_user (red, A(r), 2);
+ ret |= copy_from_user (green, A(g), 2);
+ ret |= copy_from_user (blue, A(b), 2);
+ if (ret)
+ return -EFAULT;
+ f.cmap.red = red; f.cmap.green = green; f.cmap.blue = blue;
+ }
+ set_fs (KERNEL_DS);
+ ret = sys_ioctl (fd, FBIOSCURSOR, (long)&f);
+ set_fs (old_fs);
+ return ret;
}
#if defined(CONFIG_DRM) || defined(CONFIG_DRM_MODULE)
static int drm32_version(unsigned int fd, unsigned int cmd, unsigned long arg)
{
drm32_version_t __user *uversion = (drm32_version_t __user *)arg;
- drm_version_t __user *p = compat_alloc_user_space(sizeof(*p));
- compat_uptr_t addr;
- int n;
+ char __user *name_ptr, *date_ptr, *desc_ptr;
+ u32 tmp1, tmp2, tmp3;
+ drm_version_t kversion;
+ mm_segment_t old_fs;
int ret;
- if (clear_user(p, 3 * sizeof(int)) ||
- get_user(n, &uversion->name_len) ||
- put_user(n, &p->name_len) ||
- get_user(addr, &uversion->name) ||
- put_user(compat_ptr(addr), &p->name) ||
- get_user(n, &uversion->date_len) ||
- put_user(n, &p->date_len) ||
- get_user(addr, &uversion->date) ||
- put_user(compat_ptr(addr), &p->date) ||
- get_user(n, &uversion->desc_len) ||
- put_user(n, &p->desc_len) ||
- get_user(addr, &uversion->desc) ||
- put_user(compat_ptr(addr), &p->desc))
+ memset(&kversion, 0, sizeof(kversion));
+ if (get_user(kversion.name_len, &uversion->name_len) ||
+ get_user(kversion.date_len, &uversion->date_len) ||
+ get_user(kversion.desc_len, &uversion->desc_len) ||
+ get_user(tmp1, &uversion->name) ||
+ get_user(tmp2, &uversion->date) ||
+ get_user(tmp3, &uversion->desc))
return -EFAULT;
- ret = sys_ioctl(fd, DRM_IOCTL_VERSION, (unsigned long)p);
- if (ret)
- return ret;
-
- if (copy_in_user(uversion, p, 3 * sizeof(int)) ||
- get_user(n, &p->name_len) ||
- put_user(n, &uversion->name_len) ||
- get_user(n, &p->date_len) ||
- put_user(n, &uversion->date_len) ||
- get_user(n, &p->desc_len) ||
- put_user(n, &uversion->desc_len))
- return -EFAULT;
+ name_ptr = A(tmp1);
+ date_ptr = A(tmp2);
+ desc_ptr = A(tmp3);
- return 0;
+ ret = -ENOMEM;
+ if (kversion.name_len && name_ptr) {
+ kversion.name = kmalloc(kversion.name_len, GFP_KERNEL);
+ if (!kversion.name)
+ goto out;
+ }
+ if (kversion.date_len && date_ptr) {
+ kversion.date = kmalloc(kversion.date_len, GFP_KERNEL);
+ if (!kversion.date)
+ goto out;
+ }
+ if (kversion.desc_len && desc_ptr) {
+ kversion.desc = kmalloc(kversion.desc_len, GFP_KERNEL);
+ if (!kversion.desc)
+ goto out;
+ }
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ ret = sys_ioctl (fd, DRM_IOCTL_VERSION, (unsigned long)&kversion);
+ set_fs(old_fs);
+
+ if (!ret) {
+ if ((kversion.name &&
+ copy_to_user(name_ptr, kversion.name, kversion.name_len)) ||
+ (kversion.date &&
+ copy_to_user(date_ptr, kversion.date, kversion.date_len)) ||
+ (kversion.desc &&
+ copy_to_user(desc_ptr, kversion.desc, kversion.desc_len)))
+ ret = -EFAULT;
+ if (put_user(kversion.version_major, &uversion->version_major) ||
+ put_user(kversion.version_minor, &uversion->version_minor) ||
+ put_user(kversion.version_patchlevel, &uversion->version_patchlevel) ||
+ put_user(kversion.name_len, &uversion->name_len) ||
+ put_user(kversion.date_len, &uversion->date_len) ||
+ put_user(kversion.desc_len, &uversion->desc_len))
+ ret = -EFAULT;
+ }
+
+out:
+ if (kversion.name)
+ kfree(kversion.name);
+ if (kversion.date)
+ kfree(kversion.date);
+ if (kversion.desc)
+ kfree(kversion.desc);
+ return ret;
}
typedef struct drm32_unique {
static int drm32_getsetunique(unsigned int fd, unsigned int cmd, unsigned long arg)
{
drm32_unique_t __user *uarg = (drm32_unique_t __user *)arg;
- drm_unique_t __user *p = compat_alloc_user_space(sizeof(*p));
- compat_uptr_t addr;
- int n;
+ drm_unique_t karg;
+ mm_segment_t old_fs;
+ char __user *uptr;
+ u32 tmp;
int ret;
- if (get_user(n, &uarg->unique_len) ||
- put_user(n, &p->unique_len) ||
- get_user(addr, &uarg->unique) ||
- put_user(compat_ptr(addr), &p->unique))
+ if (get_user(karg.unique_len, &uarg->unique_len))
return -EFAULT;
+ karg.unique = NULL;
+
+ if (get_user(tmp, &uarg->unique))
+ return -EFAULT;
+
+ uptr = A(tmp);
+ if (uptr) {
+ karg.unique = kmalloc(karg.unique_len, GFP_KERNEL);
+ if (!karg.unique)
+ return -ENOMEM;
+ if (cmd == DRM32_IOCTL_SET_UNIQUE &&
+ copy_from_user(karg.unique, uptr, karg.unique_len)) {
+ kfree(karg.unique);
+ return -EFAULT;
+ }
+ }
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
if (cmd == DRM32_IOCTL_GET_UNIQUE)
- ret = sys_ioctl (fd, DRM_IOCTL_GET_UNIQUE, (unsigned long)p);
+ ret = sys_ioctl (fd, DRM_IOCTL_GET_UNIQUE, (unsigned long)&karg);
else
- ret = sys_ioctl (fd, DRM_IOCTL_SET_UNIQUE, (unsigned long)p);
+ ret = sys_ioctl (fd, DRM_IOCTL_SET_UNIQUE, (unsigned long)&karg);
+ set_fs(old_fs);
- if (ret)
- return ret;
+ if (!ret) {
+ if (cmd == DRM32_IOCTL_GET_UNIQUE &&
+ uptr != NULL &&
+ copy_to_user(uptr, karg.unique, karg.unique_len))
+ ret = -EFAULT;
+ if (put_user(karg.unique_len, &uarg->unique_len))
+ ret = -EFAULT;
+ }
- if (get_user(n, &p->unique_len) || put_user(n, &uarg->unique_len))
- return -EFAULT;
+ if (karg.unique != NULL)
+ kfree(karg.unique);
- return 0;
+ return ret;
}
typedef struct drm32_map {
static int drm32_info_bufs(unsigned int fd, unsigned int cmd, unsigned long arg)
{
drm32_buf_info_t __user *uarg = (drm32_buf_info_t __user *)arg;
- drm_buf_info_t __user *p = compat_alloc_user_space(sizeof(*p));
- compat_uptr_t addr;
- int n;
- int ret;
+ drm_buf_desc_t __user *ulist;
+ drm_buf_info_t karg;
+ mm_segment_t old_fs;
+ int orig_count, ret;
+ u32 tmp;
- if (get_user(n, &uarg->count) || put_user(n, &p->count) ||
- get_user(addr, &uarg->list) || put_user(compat_ptr(addr), &p->list))
+ if (get_user(karg.count, &uarg->count) ||
+ get_user(tmp, &uarg->list))
return -EFAULT;
- ret = sys_ioctl(fd, DRM_IOCTL_INFO_BUFS, (unsigned long)p);
- if (ret)
- return ret;
+ ulist = A(tmp);
- if (get_user(n, &p->count) || put_user(n, &uarg->count))
+ orig_count = karg.count;
+
+ karg.list = kmalloc(karg.count * sizeof(drm_buf_desc_t), GFP_KERNEL);
+ if (!karg.list)
return -EFAULT;
- return 0;
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ ret = sys_ioctl(fd, DRM_IOCTL_INFO_BUFS, (unsigned long) &karg);
+ set_fs(old_fs);
+
+ if (!ret) {
+ if (karg.count <= orig_count &&
+ (copy_to_user(ulist, karg.list,
+ karg.count * sizeof(drm_buf_desc_t))))
+ ret = -EFAULT;
+ if (put_user(karg.count, &uarg->count))
+ ret = -EFAULT;
+ }
+
+ kfree(karg.list);
+
+ return ret;
}
typedef struct drm32_buf_free {
static int drm32_free_bufs(unsigned int fd, unsigned int cmd, unsigned long arg)
{
drm32_buf_free_t __user *uarg = (drm32_buf_free_t __user *)arg;
- drm_buf_free_t __user *p = compat_alloc_user_space(sizeof(*p));
- compat_uptr_t addr;
- int n;
+ drm_buf_free_t karg;
+ mm_segment_t old_fs;
+ int __user *ulist;
+ int ret;
+ u32 tmp;
- if (get_user(n, &uarg->count) || put_user(n, &p->count) ||
- get_user(addr, &uarg->list) || put_user(compat_ptr(addr), &p->list))
+ if (get_user(karg.count, &uarg->count) ||
+ get_user(tmp, &uarg->list))
return -EFAULT;
- return sys_ioctl(fd, DRM_IOCTL_FREE_BUFS, (unsigned long)p);
+ ulist = A(tmp);
+
+ karg.list = kmalloc(karg.count * sizeof(int), GFP_KERNEL);
+ if (!karg.list)
+ return -ENOMEM;
+
+ ret = -EFAULT;
+ if (copy_from_user(karg.list, ulist, (karg.count * sizeof(int))))
+ goto out;
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ ret = sys_ioctl(fd, DRM_IOCTL_FREE_BUFS, (unsigned long) &karg);
+ set_fs(old_fs);
+
+out:
+ kfree(karg.list);
+
+ return ret;
}
typedef struct drm32_buf_pub {
{
drm32_buf_map_t __user *uarg = (drm32_buf_map_t __user *)arg;
drm32_buf_pub_t __user *ulist;
- drm_buf_map_t __user *arg64;
- drm_buf_pub_t __user *list;
+ drm_buf_map_t karg;
+ mm_segment_t old_fs;
int orig_count, ret, i;
- int n;
- compat_uptr_t addr;
+ u32 tmp1, tmp2;
- if (get_user(orig_count, &uarg->count))
+ if (get_user(karg.count, &uarg->count) ||
+ get_user(tmp1, &uarg->virtual) ||
+ get_user(tmp2, &uarg->list))
return -EFAULT;
- arg64 = compat_alloc_user_space(sizeof(drm_buf_map_t) +
- (size_t)orig_count * sizeof(drm_buf_pub_t));
- list = (void __user *)(arg64 + 1);
+ karg.virtual = (void *) (unsigned long) tmp1;
+ ulist = A(tmp2);
- if (put_user(orig_count, &arg64->count) ||
- put_user(list, &arg64->list) ||
- get_user(addr, &uarg->virtual) ||
- put_user(compat_ptr(addr), &arg64->virtual) ||
- get_user(addr, &uarg->list))
- return -EFAULT;
+ orig_count = karg.count;
- ulist = compat_ptr(addr);
-
- for (i = 0; i < orig_count; i++) {
- if (get_user(n, &ulist[i].idx) ||
- put_user(n, &list[i].idx) ||
- get_user(n, &ulist[i].total) ||
- put_user(n, &list[i].total) ||
- get_user(n, &ulist[i].used) ||
- put_user(n, &list[i].used) ||
- get_user(addr, &ulist[i].address) ||
- put_user(compat_ptr(addr), &list[i].address))
- return -EFAULT;
- }
+ karg.list = kmalloc(karg.count * sizeof(drm_buf_pub_t), GFP_KERNEL);
+ if (!karg.list)
+ return -ENOMEM;
- ret = sys_ioctl(fd, DRM_IOCTL_MAP_BUFS, (unsigned long) arg64);
- if (ret)
- return ret;
-
- for (i = 0; i < orig_count; i++) {
- void __user *p;
- if (get_user(n, &list[i].idx) ||
- put_user(n, &ulist[i].idx) ||
- get_user(n, &list[i].total) ||
- put_user(n, &ulist[i].total) ||
- get_user(n, &list[i].used) ||
- put_user(n, &ulist[i].used) ||
- get_user(p, &list[i].address) ||
- put_user((unsigned long)p, &ulist[i].address))
- return -EFAULT;
+ ret = -EFAULT;
+ for (i = 0; i < karg.count; i++) {
+ if (get_user(karg.list[i].idx, &ulist[i].idx) ||
+ get_user(karg.list[i].total, &ulist[i].total) ||
+ get_user(karg.list[i].used, &ulist[i].used) ||
+ get_user(tmp1, &ulist[i].address))
+ goto out;
+
+ karg.list[i].address = (void *) (unsigned long) tmp1;
}
- if (get_user(n, &arg64->count) || put_user(n, &uarg->count))
- return -EFAULT;
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ ret = sys_ioctl(fd, DRM_IOCTL_MAP_BUFS, (unsigned long) &karg);
+ set_fs(old_fs);
+
+ if (!ret) {
+ for (i = 0; i < orig_count; i++) {
+ tmp1 = (u32) (long) karg.list[i].address;
+ if (put_user(karg.list[i].idx, &ulist[i].idx) ||
+ put_user(karg.list[i].total, &ulist[i].total) ||
+ put_user(karg.list[i].used, &ulist[i].used) ||
+ put_user(tmp1, &ulist[i].address)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ }
+ if (put_user(karg.count, &uarg->count))
+ ret = -EFAULT;
+ }
- return 0;
+out:
+ kfree(karg.list);
+ return ret;
}
typedef struct drm32_dma {
static int drm32_dma(unsigned int fd, unsigned int cmd, unsigned long arg)
{
drm32_dma_t __user *uarg = (drm32_dma_t __user *) arg;
- drm_dma_t __user *p = compat_alloc_user_space(sizeof(*p));
- compat_uptr_t addr;
+ int __user *u_si, *u_ss, *u_ri, *u_rs;
+ drm_dma_t karg;
+ mm_segment_t old_fs;
int ret;
-
- if (copy_in_user(p, uarg, 2 * sizeof(int)) ||
- get_user(addr, &uarg->send_indices) ||
- put_user(compat_ptr(addr), &p->send_indices) ||
- get_user(addr, &uarg->send_sizes) ||
- put_user(compat_ptr(addr), &p->send_sizes) ||
- copy_in_user(&p->flags, &uarg->flags, sizeof(drm_dma_flags_t)) ||
- copy_in_user(&p->request_count, &uarg->request_count, sizeof(int))||
- copy_in_user(&p->request_size, &uarg->request_size, sizeof(int)) ||
- get_user(addr, &uarg->request_indices) ||
- put_user(compat_ptr(addr), &p->request_indices) ||
- get_user(addr, &uarg->request_sizes) ||
- put_user(compat_ptr(addr), &p->request_sizes) ||
- copy_in_user(&p->granted_count, &uarg->granted_count, sizeof(int)))
+ u32 tmp1, tmp2, tmp3, tmp4;
+
+ karg.send_indices = karg.send_sizes = NULL;
+ karg.request_indices = karg.request_sizes = NULL;
+
+ if (get_user(karg.context, &uarg->context) ||
+ get_user(karg.send_count, &uarg->send_count) ||
+ get_user(tmp1, &uarg->send_indices) ||
+ get_user(tmp2, &uarg->send_sizes) ||
+ get_user(karg.flags, &uarg->flags) ||
+ get_user(karg.request_count, &uarg->request_count) ||
+ get_user(karg.request_size, &uarg->request_size) ||
+ get_user(tmp3, &uarg->request_indices) ||
+ get_user(tmp4, &uarg->request_sizes) ||
+ get_user(karg.granted_count, &uarg->granted_count))
return -EFAULT;
- ret = sys_ioctl(fd, DRM_IOCTL_DMA, (unsigned long)p);
- if (ret)
- return ret;
+ u_si = A(tmp1);
+ u_ss = A(tmp2);
+ u_ri = A(tmp3);
+ u_rs = A(tmp4);
+
+ if (karg.send_count) {
+ karg.send_indices = kmalloc(karg.send_count * sizeof(int), GFP_KERNEL);
+ karg.send_sizes = kmalloc(karg.send_count * sizeof(int), GFP_KERNEL);
+
+ ret = -ENOMEM;
+ if (!karg.send_indices || !karg.send_sizes)
+ goto out;
+
+ ret = -EFAULT;
+ if (copy_from_user(karg.send_indices, u_si,
+ (karg.send_count * sizeof(int))) ||
+ copy_from_user(karg.send_sizes, u_ss,
+ (karg.send_count * sizeof(int))))
+ goto out;
+ }
- if (copy_in_user(uarg, p, 2 * sizeof(int)) ||
- copy_in_user(&uarg->flags, &p->flags, sizeof(drm_dma_flags_t)) ||
- copy_in_user(&uarg->request_count, &p->request_count, sizeof(int))||
- copy_in_user(&uarg->request_size, &p->request_size, sizeof(int)) ||
- copy_in_user(&uarg->granted_count, &p->granted_count, sizeof(int)))
- return -EFAULT;
+ if (karg.request_count) {
+ karg.request_indices = kmalloc(karg.request_count * sizeof(int), GFP_KERNEL);
+ karg.request_sizes = kmalloc(karg.request_count * sizeof(int), GFP_KERNEL);
+
+ ret = -ENOMEM;
+ if (!karg.request_indices || !karg.request_sizes)
+ goto out;
- return 0;
+ ret = -EFAULT;
+ if (copy_from_user(karg.request_indices, u_ri,
+ (karg.request_count * sizeof(int))) ||
+ copy_from_user(karg.request_sizes, u_rs,
+ (karg.request_count * sizeof(int))))
+ goto out;
+ }
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ ret = sys_ioctl(fd, DRM_IOCTL_DMA, (unsigned long) &karg);
+ set_fs(old_fs);
+
+ if (!ret) {
+ if (put_user(karg.context, &uarg->context) ||
+ put_user(karg.send_count, &uarg->send_count) ||
+ put_user(karg.flags, &uarg->flags) ||
+ put_user(karg.request_count, &uarg->request_count) ||
+ put_user(karg.request_size, &uarg->request_size) ||
+ put_user(karg.granted_count, &uarg->granted_count))
+ ret = -EFAULT;
+
+ if (karg.send_count) {
+ if (copy_to_user(u_si, karg.send_indices,
+ (karg.send_count * sizeof(int))) ||
+ copy_to_user(u_ss, karg.send_sizes,
+ (karg.send_count * sizeof(int))))
+ ret = -EFAULT;
+ }
+ if (karg.request_count) {
+ if (copy_to_user(u_ri, karg.request_indices,
+ (karg.request_count * sizeof(int))) ||
+ copy_to_user(u_rs, karg.request_sizes,
+ (karg.request_count * sizeof(int))))
+ ret = -EFAULT;
+ }
+ }
+
+out:
+ if (karg.send_indices)
+ kfree(karg.send_indices);
+ if (karg.send_sizes)
+ kfree(karg.send_sizes);
+ if (karg.request_indices)
+ kfree(karg.request_indices);
+ if (karg.request_sizes)
+ kfree(karg.request_sizes);
+
+ return ret;
}
typedef struct drm32_ctx_res {
static int drm32_res_ctx(unsigned int fd, unsigned int cmd, unsigned long arg)
{
drm32_ctx_res_t __user *uarg = (drm32_ctx_res_t __user *) arg;
- drm_ctx_res_t __user *p = compat_alloc_user_space(sizeof(*p));
- compat_uptr_t addr;
- int ret;
+ drm_ctx_t __user *ulist;
+ drm_ctx_res_t karg;
+ mm_segment_t old_fs;
+ int orig_count, ret;
+ u32 tmp;
- if (copy_in_user(p, uarg, sizeof(int)) ||
- get_user(addr, &uarg->contexts) ||
- put_user(compat_ptr(addr), &p->contexts))
+ karg.contexts = NULL;
+ if (get_user(karg.count, &uarg->count) ||
+ get_user(tmp, &uarg->contexts))
return -EFAULT;
- ret = sys_ioctl(fd, DRM_IOCTL_RES_CTX, (unsigned long)p);
- if (ret)
- return ret;
+ ulist = A(tmp);
- if (copy_in_user(uarg, p, sizeof(int)))
- return -EFAULT;
+ orig_count = karg.count;
+ if (karg.count && ulist) {
+ karg.contexts = kmalloc((karg.count * sizeof(drm_ctx_t)), GFP_KERNEL);
+ if (!karg.contexts)
+ return -ENOMEM;
+ if (copy_from_user(karg.contexts, ulist,
+ (karg.count * sizeof(drm_ctx_t)))) {
+ kfree(karg.contexts);
+ return -EFAULT;
+ }
+ }
- return 0;
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ ret = sys_ioctl(fd, DRM_IOCTL_RES_CTX, (unsigned long) &karg);
+ set_fs(old_fs);
+
+ if (!ret) {
+ if (orig_count) {
+ if (copy_to_user(ulist, karg.contexts,
+ (orig_count * sizeof(drm_ctx_t))))
+ ret = -EFAULT;
+ }
+ if (put_user(karg.count, &uarg->count))
+ ret = -EFAULT;
+ }
+
+ if (karg.contexts)
+ kfree(karg.contexts);
+
+ return ret;
}
#endif
#include <stdarg.h>
-#include <linux/config.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/config.h>
#include <linux/reboot.h>
#include <linux/delay.h>
-#include <linux/compat.h>
#include <linux/init.h>
#include <asm/oplib.h>
clone_flags &= ~CLONE_IDLETASK;
-#ifdef CONFIG_COMPAT
if (test_thread_flag(TIF_32BIT)) {
parent_tid_ptr = compat_ptr(regs->u_regs[UREG_I2]);
child_tid_ptr = compat_ptr(regs->u_regs[UREG_I4]);
- } else
-#endif
- {
+ } else {
parent_tid_ptr = (int __user *) regs->u_regs[UREG_I2];
child_tid_ptr = (int __user *) regs->u_regs[UREG_I4];
}
.globl rtrap_irq, rtrap_clr_l6, rtrap, irqsz_patchme, rtrap_xcall
rtrap_irq:
rtrap_clr_l6: clr %l6
-rtrap:
- ldub [%g6 + TI_CPU], %l0
+rtrap: ldub [%g6 + TI_CPU], %l0
sethi %hi(irq_stat), %l2 ! &softirq_active
or %l2, %lo(irq_stat), %l2 ! &softirq_active
irqsz_patchme: sllx %l0, 0, %l0
*
* On SYSIO, using an 8K page size we have 1GB of SBUS
* DMA space mapped. We divide this space into equally
- * sized clusters. We allocate a DMA mapping from the
- * cluster that matches the order of the allocation, or
- * if the order is greater than the number of clusters,
- * we try to allocate from the last cluster.
+ * sized clusters. Currently we allow clusters up to a
+ * size of 1MB. If anything begins to generate DMA
+ * mapping requests larger than this we will need to
+ * increase things a bit.
*/
#define NCLUSTERS 8UL
static iopte_t *alloc_streaming_cluster(struct sbus_iommu *iommu, unsigned long npages)
{
- iopte_t *iopte, *limit, *first, *cluster;
- unsigned long cnum, ent, nent, flush_point, found;
+ iopte_t *iopte, *limit, *first;
+ unsigned long cnum, ent, flush_point;
cnum = 0;
- nent = 1;
while ((1UL << cnum) < npages)
cnum++;
- if(cnum >= NCLUSTERS) {
- nent = 1UL << (cnum - NCLUSTERS);
- cnum = NCLUSTERS - 1;
- }
iopte = iommu->page_table + (cnum * CLUSTER_NPAGES);
if (cnum == 0)
flush_point = iommu->alloc_info[cnum].flush;
first = iopte;
- cluster = NULL;
- found = 0;
for (;;) {
if (iopte_val(*iopte) == 0UL) {
- found++;
- if (!cluster)
- cluster = iopte;
- } else {
- /* Used cluster in the way */
- cluster = NULL;
- found = 0;
- }
-
- if (found == nent)
+ if ((iopte + (1 << cnum)) >= limit)
+ ent = 0;
+ else
+ ent = ent + 1;
+ iommu->alloc_info[cnum].next = ent;
+ if (ent == flush_point)
+ __iommu_flushall(iommu);
break;
-
+ }
iopte += (1 << cnum);
ent++;
if (iopte >= limit) {
iopte = (iommu->page_table + (cnum * CLUSTER_NPAGES));
ent = 0;
-
- /* Multiple cluster allocations must not wrap */
- cluster = NULL;
- found = 0;
}
if (ent == flush_point)
__iommu_flushall(iommu);
goto bad;
}
- /* ent/iopte points to the last cluster entry we're going to use,
- * so save our place for the next allocation.
- */
- if ((iopte + (1 << cnum)) >= limit)
- ent = 0;
- else
- ent = ent + 1;
- iommu->alloc_info[cnum].next = ent;
- if (ent == flush_point)
- __iommu_flushall(iommu);
-
/* I've got your streaming cluster right here buddy boy... */
- return cluster;
+ return iopte;
bad:
printk(KERN_EMERG "sbus: alloc_streaming_cluster of npages(%ld) failed!\n",
static void free_streaming_cluster(struct sbus_iommu *iommu, u32 base, unsigned long npages)
{
- unsigned long cnum, ent, nent;
+ unsigned long cnum, ent;
iopte_t *iopte;
cnum = 0;
- nent = 1;
while ((1UL << cnum) < npages)
cnum++;
- if(cnum >= NCLUSTERS) {
- nent = 1UL << (cnum - NCLUSTERS);
- cnum = NCLUSTERS - 1;
- }
ent = (base & CLUSTER_MASK) >> (IO_PAGE_SHIFT + cnum);
iopte = iommu->page_table + ((base - MAP_BASE) >> IO_PAGE_SHIFT);
- do {
- iopte_val(*iopte) = 0UL;
- iopte += 1 << cnum;
- } while(--nent);
+ iopte_val(*iopte) = 0UL;
/* If the global flush might not have caught this entry,
* adjust the flush point such that we will flush before
#include <asm/uaccess.h>
#include <asm/timer.h>
#include <asm/starfire.h>
-#include <asm/tlb.h>
extern int linux_num_cpus;
extern void calibrate_delay(void);
}
}
+extern unsigned long xcall_flush_tlb_page;
extern unsigned long xcall_flush_tlb_mm;
-extern unsigned long xcall_flush_tlb_pending;
+extern unsigned long xcall_flush_tlb_range;
extern unsigned long xcall_flush_tlb_kernel_range;
extern unsigned long xcall_flush_tlb_all_spitfire;
extern unsigned long xcall_flush_tlb_all_cheetah;
int cpu = get_cpu();
if (atomic_read(&mm->mm_users) == 1) {
+ /* See smp_flush_tlb_page for info about this. */
mm->cpu_vm_mask = cpumask_of_cpu(cpu);
goto local_flush_and_out;
}
}
}
-void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
+void smp_flush_tlb_range(struct mm_struct *mm, unsigned long start,
+ unsigned long end)
{
u32 ctx = CTX_HWBITS(mm->context);
int cpu = get_cpu();
+ start &= PAGE_MASK;
+ end = PAGE_ALIGN(end);
+
if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1) {
mm->cpu_vm_mask = cpumask_of_cpu(cpu);
goto local_flush_and_out;
- } else {
- /* This optimization is not valid. Normally
- * we will be holding the page_table_lock, but
- * there is an exception which is copy_page_range()
- * when forking. The lock is held during the individual
- * page table updates in the parent, but not at the
- * top level, which is where we are invoked.
- */
- if (0) {
- cpumask_t this_cpu_mask = cpumask_of_cpu(cpu);
-
- /* By virtue of running under the mm->page_table_lock,
- * and mmu_context.h:switch_mm doing the same, the
- * following operation is safe.
- */
- if (cpus_equal(mm->cpu_vm_mask, this_cpu_mask))
- goto local_flush_and_out;
- }
}
- smp_cross_call_masked(&xcall_flush_tlb_pending,
- ctx, nr, (unsigned long) vaddrs,
+ smp_cross_call_masked(&xcall_flush_tlb_range,
+ ctx, start, end,
mm->cpu_vm_mask);
-local_flush_and_out:
- __flush_tlb_pending(ctx, nr, vaddrs);
+ local_flush_and_out:
+ __flush_tlb_range(ctx, start, SECONDARY_CONTEXT,
+ end, PAGE_SIZE, (end-start));
put_cpu();
}
}
}
+void smp_flush_tlb_page(struct mm_struct *mm, unsigned long page)
+{
+ {
+ u32 ctx = CTX_HWBITS(mm->context);
+ int cpu = get_cpu();
+
+ page &= PAGE_MASK;
+ if (mm == current->active_mm &&
+ atomic_read(&mm->mm_users) == 1) {
+ /* By virtue of being the current address space, and
+ * having the only reference to it, the following
+ * operation is safe.
+ *
+ * It would not be a win to perform the xcall tlb
+ * flush in this case, because even if we switch back
+ * to one of the other processors in cpu_vm_mask it
+ * is almost certain that all TLB entries for this
+ * context will be replaced by the time that happens.
+ */
+ mm->cpu_vm_mask = cpumask_of_cpu(cpu);
+ goto local_flush_and_out;
+ } else {
+ cpumask_t this_cpu_mask = cpumask_of_cpu(cpu);
+
+ /* By virtue of running under the mm->page_table_lock,
+ * and mmu_context.h:switch_mm doing the same, the
+ * following operation is safe.
+ */
+ if (cpus_equal(mm->cpu_vm_mask, this_cpu_mask))
+ goto local_flush_and_out;
+ }
+
+ /* OK, we have to actually perform the cross call. Most
+ * likely this is a cloned mm or kswapd is kicking out pages
+ * for a task which has run recently on another cpu.
+ */
+ smp_cross_call_masked(&xcall_flush_tlb_page,
+ ctx, page, 0,
+ mm->cpu_vm_mask);
+ if (!cpu_isset(cpu, mm->cpu_vm_mask))
+ return;
+
+ local_flush_and_out:
+ __flush_tlb_page(ctx, page, SECONDARY_CONTEXT);
+
+ put_cpu();
+ }
+}
+
/* CPU capture. */
/* #define CAPTURE_DEBUG */
extern unsigned long xcall_capture;
EXPORT_SYMBOL(__write_unlock);
EXPORT_SYMBOL(__write_trylock);
/* Out of line spin-locking implementation. */
-EXPORT_SYMBOL(_raw_spin_lock);
EXPORT_SYMBOL(_raw_spin_lock_flags);
#endif
EXPORT_SYMBOL(synchronize_irq);
#if defined(CONFIG_MCOUNT)
-extern void _mcount(void);
-EXPORT_SYMBOL_NOVERS(_mcount);
+extern void mcount(void);
+EXPORT_SYMBOL_NOVERS(mcount);
#endif
/* CPU online map and active count. */
EXPORT_SYMBOL(dump_thread);
EXPORT_SYMBOL(dump_fpu);
-EXPORT_SYMBOL(__pte_alloc_one_kernel);
+EXPORT_SYMBOL(pte_alloc_one_kernel);
#ifndef CONFIG_SMP
EXPORT_SYMBOL(pgt_quicklists);
#endif
#endif
/* Special internal versions of library functions. */
+EXPORT_SYMBOL(__memcpy);
EXPORT_SYMBOL(__memset);
EXPORT_SYMBOL(_clear_page);
EXPORT_SYMBOL(clear_user_page);
EXPORT_SYMBOL(csum_partial_copy_sparc64);
EXPORT_SYMBOL(ip_fast_csum);
-/* Moving data to/from/in userspace. */
+/* Moving data to/from userspace. */
EXPORT_SYMBOL(__copy_to_user);
EXPORT_SYMBOL(__copy_from_user);
-EXPORT_SYMBOL(__copy_in_user);
EXPORT_SYMBOL(__strncpy_from_user);
EXPORT_SYMBOL(__bzero_noasi);
#include <linux/syscalls.h>
#include <linux/ipc.h>
#include <linux/personality.h>
-#include <linux/vs_cvirt.h>
#include <asm/uaccess.h>
#include <asm/ipc.h>
if (flags & MAP_FIXED) {
/* Ok, don't mess with it. */
- return get_unmapped_area(NULL, addr, len, pgoff, flags);
+ return get_unmapped_area(NULL, addr, len, pgoff, flags, 0);
}
flags &= ~MAP_SHARED;
align_goal = (64UL * 1024);
do {
- addr = get_unmapped_area(NULL, orig_addr, len + (align_goal - PAGE_SIZE), pgoff, flags);
+ addr = get_unmapped_area(NULL, orig_addr, len + (align_goal - PAGE_SIZE), pgoff, flags, 0);
if (!(addr & ~PAGE_MASK)) {
addr = (addr + (align_goal - 1UL)) & ~(align_goal - 1UL);
break;
* be obtained.
*/
if (addr & ~PAGE_MASK)
- addr = get_unmapped_area(NULL, orig_addr, len, pgoff, flags);
+ addr = get_unmapped_area(NULL, orig_addr, len, pgoff, flags, 0);
return addr;
}
/* MREMAP_FIXED checked above. */
new_addr = get_unmapped_area(file, addr, new_len,
vma ? vma->vm_pgoff : 0,
- map_flags);
+ map_flags, vma->vm_flags & VM_EXEC);
ret = new_addr;
if (new_addr & ~PAGE_MASK)
goto out_sem;
return sys_ftruncate(fd, (high << 32) | low);
}
+/* readdir & getdents */
+
+#define NAME_OFFSET(de) ((int) ((de)->d_name - (char __user *) (de)))
+#define ROUND_UP(x) (((x)+sizeof(u32)-1) & ~(sizeof(u32)-1))
+
+struct old_linux_dirent32 {
+ u32 d_ino;
+ u32 d_offset;
+ unsigned short d_namlen;
+ char d_name[1];
+};
+
+struct readdir_callback32 {
+ struct old_linux_dirent32 __user * dirent;
+ int count;
+};
+
+static int fillonedir(void * __buf, const char * name, int namlen,
+ loff_t offset, ino_t ino, unsigned int d_type)
+{
+ struct readdir_callback32 * buf = (struct readdir_callback32 *) __buf;
+ struct old_linux_dirent32 __user * dirent;
+
+ if (buf->count)
+ return -EINVAL;
+ buf->count++;
+ dirent = buf->dirent;
+ put_user(ino, &dirent->d_ino);
+ put_user(offset, &dirent->d_offset);
+ put_user(namlen, &dirent->d_namlen);
+ copy_to_user(dirent->d_name, name, namlen);
+ put_user(0, dirent->d_name + namlen);
+ return 0;
+}
+
+asmlinkage long old32_readdir(unsigned int fd, struct old_linux_dirent32 __user *dirent, unsigned int count)
+{
+ int error = -EBADF;
+ struct file * file;
+ struct readdir_callback32 buf;
+
+ file = fget(fd);
+ if (!file)
+ goto out;
+
+ buf.count = 0;
+ buf.dirent = dirent;
+
+ error = vfs_readdir(file, fillonedir, &buf);
+ if (error < 0)
+ goto out_putf;
+ error = buf.count;
+
+out_putf:
+ fput(file);
+out:
+ return error;
+}
+
+struct linux_dirent32 {
+ u32 d_ino;
+ u32 d_off;
+ unsigned short d_reclen;
+ char d_name[1];
+};
+
+struct getdents_callback32 {
+ struct linux_dirent32 __user *current_dir;
+ struct linux_dirent32 __user *previous;
+ int count;
+ int error;
+};
+
+static int filldir(void * __buf, const char * name, int namlen, loff_t offset, ino_t ino,
+ unsigned int d_type)
+{
+ struct linux_dirent32 __user * dirent;
+ struct getdents_callback32 * buf = (struct getdents_callback32 *) __buf;
+ int reclen = ROUND_UP(NAME_OFFSET(dirent) + namlen + 2);
+
+ buf->error = -EINVAL; /* only used if we fail.. */
+ if (reclen > buf->count)
+ return -EINVAL;
+ dirent = buf->previous;
+ if (dirent)
+ put_user(offset, &dirent->d_off);
+ dirent = buf->current_dir;
+ buf->previous = dirent;
+ put_user(ino, &dirent->d_ino);
+ put_user(reclen, &dirent->d_reclen);
+ copy_to_user(dirent->d_name, name, namlen);
+ put_user(0, dirent->d_name + namlen);
+ put_user(d_type, (char __user *) dirent + reclen - 1);
+ dirent = (void __user *) dirent + reclen;
+ buf->current_dir = dirent;
+ buf->count -= reclen;
+ return 0;
+}
+
+asmlinkage long sys32_getdents(unsigned int fd, struct linux_dirent32 __user *dirent, unsigned int count)
+{
+ struct file * file;
+ struct linux_dirent32 __user *lastdirent;
+ struct getdents_callback32 buf;
+ int error = -EBADF;
+
+ file = fget(fd);
+ if (!file)
+ goto out;
+
+ buf.current_dir = dirent;
+ buf.previous = NULL;
+ buf.count = count;
+ buf.error = 0;
+
+ error = vfs_readdir(file, filldir, &buf);
+ if (error < 0)
+ goto out_putf;
+ lastdirent = buf.previous;
+ error = buf.error;
+ if (lastdirent) {
+ put_user(file->f_pos, &lastdirent->d_off);
+ error = count - buf.count;
+ }
+out_putf:
+ fput(file);
+out:
+ return error;
+}
+
+/* end of readdir & getdents */
+
int cp_compat_stat(struct kstat *stat, struct compat_stat __user *statbuf)
{
int err;
/* MREMAP_FIXED checked above. */
new_addr = get_unmapped_area(file, addr, new_len,
vma ? vma->vm_pgoff : 0,
- map_flags);
+ map_flags, vma->vm_flags & VM_EXEC);
ret = new_addr;
if (new_addr & ~PAGE_MASK)
goto out_sem;
.text
.align 4
-#ifdef CONFIG_COMPAT
/* First, the 32-bit Linux native syscall table. */
.globl sys_call_table32
.word compat_sys_fcntl64, sys_ni_syscall, compat_sys_statfs, compat_sys_fstatfs, sys_oldumount
/*160*/ .word compat_sys_sched_setaffinity, compat_sys_sched_getaffinity, sys32_getdomainname, sys32_setdomainname, sys_nis_syscall
.word sys_quotactl, sys_set_tid_address, compat_sys_mount, sys_ustat, sys32_setxattr
-/*170*/ .word sys32_lsetxattr, sys32_fsetxattr, sys_getxattr, sys_lgetxattr, compat_sys_getdents
+/*170*/ .word sys32_lsetxattr, sys32_fsetxattr, sys_getxattr, sys_lgetxattr, sys32_getdents
.word sys_setsid, sys_fchdir, sys32_fgetxattr, sys_listxattr, sys_llistxattr
/*180*/ .word sys32_flistxattr, sys_removexattr, sys_lremovexattr, compat_sys_sigpending, sys_ni_syscall
.word sys32_setpgid, sys32_fremovexattr, sys32_tkill, sys32_exit_group, sparc64_newuname
/*190*/ .word sys32_init_module, sparc64_personality, sys_remap_file_pages, sys32_epoll_create, sys32_epoll_ctl
.word sys32_epoll_wait, sys_nis_syscall, sys_getppid, sys32_sigaction, sys_sgetmask
-/*200*/ .word sys32_ssetmask, sys_sigsuspend, compat_sys_newlstat, sys_uselib, compat_old_readdir
+/*200*/ .word sys32_ssetmask, sys_sigsuspend, compat_sys_newlstat, sys_uselib, old32_readdir
.word sys32_readahead, sys32_socketcall, sys32_syslog, sys32_lookup_dcookie, sys32_fadvise64
/*210*/ .word sys32_fadvise64_64, sys32_tgkill, sys32_waitpid, sys_swapoff, sys32_sysinfo
.word sys32_ipc, sys32_sigreturn, sys_clone, sys_nis_syscall, sys32_adjtimex
.word sys_mq_timedsend, sys_mq_timedreceive, compat_sys_mq_notify, compat_sys_mq_getsetattr, sys_ni_syscall
/*280*/ .word sys_ni_syscall, sys_ni_syscall, sys_ni_syscall
-#endif /* CONFIG_COMPAT */
-
/* Now the 64-bit native Linux syscall table. */
.align 4
sys_call_table:
/*0*/ .word sys_restart_syscall, sparc_exit, sys_fork, sys_read, sys_write
/*5*/ .word sys_open, sys_close, sys_wait4, sys_creat, sys_link
-/*10*/ .word sys_unlink, sys_nis_syscall, sys_chdir, sys_chown, sys_mknod
+/*10*/ .word sys_unlink, sunos_execv, sys_chdir, sys_chown, sys_mknod
/*15*/ .word sys_chmod, sys_lchown, sparc_brk, sys_perfctr, sys_lseek
/*20*/ .word sys_getpid, sys_capget, sys_capset, sys_setuid, sys_getuid
/*25*/ .word sys_nis_syscall, sys_ptrace, sys_alarm, sys_sigaltstack, sys_nis_syscall
#define TICK_SIZE (tick_nsec / 1000)
-static inline void timer_check_rtc(void)
+static __inline__ void timer_check_rtc(void)
{
/* last time the cmos clock got updated */
static long last_rtc_update;
void sparc64_do_profile(struct pt_regs *regs)
{
- unsigned long pc;
+ unsigned long pc = regs->tpc;
+ unsigned long o7 = regs->u_regs[UREG_RETPC];
profile_hook(regs);
if (!prof_buffer)
return;
- pc = regs->tpc;
-
- pc -= (unsigned long) _stext;
- pc >>= prof_shift;
-
- if(pc >= prof_len)
- pc = prof_len - 1;
- atomic_inc((atomic_t *)&prof_buffer[pc]);
+ {
+ extern int rwlock_impl_begin, rwlock_impl_end;
+ extern int atomic_impl_begin, atomic_impl_end;
+ extern int __memcpy_begin, __memcpy_end;
+ extern int __bzero_begin, __bzero_end;
+ extern int __bitops_begin, __bitops_end;
+
+ if ((pc >= (unsigned long) &atomic_impl_begin &&
+ pc < (unsigned long) &atomic_impl_end) ||
+ (pc >= (unsigned long) &rwlock_impl_begin &&
+ pc < (unsigned long) &rwlock_impl_end) ||
+ (pc >= (unsigned long) &__memcpy_begin &&
+ pc < (unsigned long) &__memcpy_end) ||
+ (pc >= (unsigned long) &__bzero_begin &&
+ pc < (unsigned long) &__bzero_end) ||
+ (pc >= (unsigned long) &__bitops_begin &&
+ pc < (unsigned long) &__bitops_end))
+ pc = o7;
+
+ pc -= (unsigned long) _stext;
+ pc >>= prof_shift;
+
+ if(pc >= prof_len)
+ pc = prof_len - 1;
+ atomic_inc((atomic_t *)&prof_buffer[pc]);
+ }
}
static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs * regs)
*/
void do_gettimeofday(struct timeval *tv)
{
+ unsigned long flags;
unsigned long seq;
unsigned long usec, sec;
unsigned long max_ntp_tick = tick_usec - tickadj;
do {
unsigned long lost;
- seq = read_seqbegin(&xtime_lock);
+ seq = read_seqbegin_irqsave(&xtime_lock, flags);
usec = do_gettimeoffset();
lost = jiffies - wall_jiffies;
usec += lost * tick_usec;
sec = xtime.tv_sec;
-
- /* Believe it or not, this divide shows up on
- * kernel profiles. The problem is that it is
- * both 64-bit and signed. Happily, 32-bits
- * of precision is all we really need and in
- * doing so gcc ends up emitting a cheap multiply.
- *
- * XXX Why is tv_nsec 'long' and 'signed' in
- * XXX the first place, can it even be negative?
- */
- usec += ((unsigned int) xtime.tv_nsec / 1000U);
- } while (read_seqretry(&xtime_lock, seq));
+ usec += (xtime.tv_nsec / 1000);
+ } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
while (usec >= 1000000) {
usec -= 1000000;
EXTRA_AFLAGS := -ansi
EXTRA_CFLAGS := -Werror
-lib-y := PeeCeeI.o copy_page.o clear_page.o strlen.o strncmp.o \
+lib-y := PeeCeeI.o blockops.o strlen.o strncmp.o \
memscan.o strncpy_from_user.o strlen_user.o memcmp.o checksum.o \
VIScopy.o VISbzero.o VISmemset.o VIScsum.o VIScsumcopy.o \
VIScsumcopyusr.o VISsave.o atomic.o rwlock.o bitops.o \
-/* U3copy_from_user.S: UltraSparc-III optimized copy from userspace.
+/* $Id: U3copy_from_user.S,v 1.4 2002/01/15 07:16:26 davem Exp $
+ * U3memcpy.S: UltraSparc-III optimized copy from userspace.
*
- * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1999, 2000 David S. Miller (davem@redhat.com)
*/
+#ifdef __KERNEL__
#include <asm/visasm.h>
#include <asm/asi.h>
#include <asm/dcu.h>
#include <asm/spitfire.h>
-
-#define XCC xcc
-
-#define EXNV_RAW(x,y,a,b) \
-98: x,y; \
- .section .fixup; \
- .align 4; \
-99: ba U3cfu_fixup; \
- a, b, %o1; \
- .section __ex_table; \
- .align 4; \
- .word 98b, 99b; \
- .text; \
- .align 4;
+#undef SMALL_COPY_USES_FPU
#define EXNV(x,y,a,b) \
98: x,y; \
.section .fixup; \
.align 4; \
-99: add %o1, %o3, %o0; \
+99: VISExitHalf; \
ba U3cfu_fixup; \
a, b, %o1; \
.section __ex_table; \
.word 98b, 99b; \
.text; \
.align 4;
-#define EXNV4(x,y,a,b) \
-98: x,y; \
- .section .fixup; \
- .align 4; \
-99: add %o1, %o3, %o0; \
- a, b, %o1; \
- ba U3cfu_fixup; \
- add %o1, 4, %o1; \
- .section __ex_table; \
- .align 4; \
- .word 98b, 99b; \
- .text; \
- .align 4;
-#define EXNV8(x,y,a,b) \
-98: x,y; \
- .section .fixup; \
- .align 4; \
-99: add %o1, %o3, %o0; \
- a, b, %o1; \
- ba U3cfu_fixup; \
- add %o1, 8, %o1; \
- .section __ex_table; \
- .align 4; \
- .word 98b, 99b; \
- .text; \
- .align 4;
#define EX(x,y,a,b) \
98: x,y; \
.section .fixup; \
.word 98b, 99b; \
.text; \
.align 4;
-
- .register %g2,#scratch
- .register %g3,#scratch
+#else
+#define ASI_BLK_P 0xf0
+#define FPRS_FEF 0x04
+#define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs
+#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
+#define SMALL_COPY_USES_FPU
+#define EXNV(x,y,a,b) x,y;
+#define EX(x,y,a,b) x,y;
+#define EX2(x,y) x,y;
+#define EX3(x,y) x,y;
+#define EX4(x,y) x,y;
+#endif
/* Special/non-trivial issues of this code:
*
* of up to 2.4GB per second.
*/
- .globl U3copy_from_user
-U3copy_from_user: /* %o0=dst, %o1=src, %o2=len */
- cmp %o2, 0
- be,pn %XCC, 85f
- or %o0, %o1, %o3
- cmp %o2, 16
- bleu,a,pn %XCC, 80f
- or %o3, %o2, %o3
-
- cmp %o2, 256
- blu,pt %XCC, 70f
- andcc %o3, 0x7, %g0
-
- ba,pt %xcc, 1f
- andcc %o0, 0x3f, %g2
-
- /* Here len >= 256 and condition codes reflect execution
+ .globl U3copy_from_user
+U3copy_from_user: /* %o0=dst, %o1=src, %o2=len */
+#ifndef __KERNEL__
+ /* Save away original 'dst' for memcpy return value. */
+ mov %o0, %g3 ! A0 Group
+#endif
+ /* Anything to copy at all? */
+ cmp %o2, 0 ! A1
+ ble,pn %icc, U3copy_from_user_short_ret! BR
+
+ /* Extremely small copy? */
+ cmp %o2, 31 ! A0 Group
+ ble,pn %icc, U3copy_from_user_short ! BR
+
+ /* Large enough to use unrolled prefetch loops? */
+ cmp %o2, 0x100 ! A1
+ bge,a,pt %icc, U3copy_from_user_enter ! BR Group
+ andcc %o0, 0x3f, %g2 ! A0
+
+ ba,pt %xcc, U3copy_from_user_toosmall ! BR Group
+ andcc %o0, 0x7, %g2 ! A0
+
+ .align 32
+U3copy_from_user_short:
+ /* Copy %o2 bytes from src to dst, one byte at a time. */
+ EXNV(lduba [%o1 + 0x00] %asi, %o3, add %o2, %g0)! MS Group
+ add %o1, 0x1, %o1 ! A0
+ add %o0, 0x1, %o0 ! A1
+ subcc %o2, 1, %o2 ! A0 Group
+
+ bg,pt %icc, U3copy_from_user_short ! BR
+ stb %o3, [%o0 + -1] ! MS Group (1-cycle stall)
+
+U3copy_from_user_short_ret:
+#ifdef __KERNEL__
+ retl ! BR Group (0-4 cycle stall)
+ clr %o0 ! A0
+#else
+ retl ! BR Group (0-4 cycle stall)
+ mov %g3, %o0 ! A0
+#endif
+
+ /* Here len >= (6 * 64) and condition codes reflect execution
* of "andcc %o0, 0x7, %g2", done by caller.
*/
.align 64
-1:
+U3copy_from_user_enter:
/* Is 'dst' already aligned on an 64-byte boundary? */
- be,pt %XCC, 2f
+ be,pt %xcc, 2f ! BR
/* Compute abs((dst & 0x3f) - 0x40) into %g2. This is the number
* of bytes to copy to make 'dst' 64-byte aligned. We pre-
* subtract this from 'len'.
*/
- sub %g2, 0x40, %g2
- sub %g0, %g2, %g2
- sub %o2, %g2, %o2
+ sub %g2, 0x40, %g2 ! A0 Group
+ sub %g0, %g2, %g2 ! A0 Group
+ sub %o2, %g2, %o2 ! A0 Group
/* Copy %g2 bytes from src to dst, one byte at a time. */
-1: EXNV_RAW(lduba [%o1 + 0x00] %asi, %o3, add %o2, %g2)
- add %o1, 0x1, %o1
- add %o0, 0x1, %o0
- subcc %g2, 0x1, %g2
+1: EXNV(lduba [%o1 + 0x00] %asi, %o3, add %o2, %g2)! MS (Group)
+ add %o1, 0x1, %o1 ! A1
+ add %o0, 0x1, %o0 ! A0 Group
+ subcc %g2, 0x1, %g2 ! A1
- bg,pt %XCC, 1b
- stb %o3, [%o0 + -1]
+ bg,pt %icc, 1b ! BR Group
+ stb %o3, [%o0 + -1] ! MS Group
-2: VISEntryHalf
- and %o1, 0x7, %g1
- ba,pt %xcc, 1f
- alignaddr %o1, %g0, %o1
-
- .align 64
-1:
- membar #StoreLoad | #StoreStore | #LoadStore
- prefetcha [%o1 + 0x000] %asi, #one_read
- prefetcha [%o1 + 0x040] %asi, #one_read
- andn %o2, (0x40 - 1), %o4
- prefetcha [%o1 + 0x080] %asi, #one_read
- prefetcha [%o1 + 0x0c0] %asi, #one_read
- EX(ldda [%o1 + 0x000] %asi, %f0, add %o2, %g0)
- prefetcha [%o1 + 0x100] %asi, #one_read
- EX(ldda [%o1 + 0x008] %asi, %f2, add %o2, %g0)
- prefetcha [%o1 + 0x140] %asi, #one_read
- EX(ldda [%o1 + 0x010] %asi, %f4, add %o2, %g0)
- prefetcha [%o1 + 0x180] %asi, #one_read
- faligndata %f0, %f2, %f16
- EX(ldda [%o1 + 0x018] %asi, %f6, add %o2, %g0)
- faligndata %f2, %f4, %f18
- EX(ldda [%o1 + 0x020] %asi, %f8, add %o2, %g0)
- faligndata %f4, %f6, %f20
- EX(ldda [%o1 + 0x028] %asi, %f10, add %o2, %g0)
- faligndata %f6, %f8, %f22
-
- EX(ldda [%o1 + 0x030] %asi, %f12, add %o2, %g0)
- faligndata %f8, %f10, %f24
- EX(ldda [%o1 + 0x038] %asi, %f14, add %o2, %g0)
- faligndata %f10, %f12, %f26
- EX(ldda [%o1 + 0x040] %asi, %f0, add %o2, %g0)
-
- sub %o4, 0x80, %o4
- add %o1, 0x40, %o1
- ba,pt %xcc, 1f
- srl %o4, 6, %o3
+2: VISEntryHalf ! MS+MS
+ and %o1, 0x7, %g1 ! A1
+ ba,pt %xcc, U3copy_from_user_begin ! BR
+ alignaddr %o1, %g0, %o1 ! MS (Break-after)
.align 64
-1:
- EX3(ldda [%o1 + 0x008] %asi, %f2)
- faligndata %f12, %f14, %f28
- EX3(ldda [%o1 + 0x010] %asi, %f4)
- faligndata %f14, %f0, %f30
- stda %f16, [%o0] ASI_BLK_P
- EX3(ldda [%o1 + 0x018] %asi, %f6)
- faligndata %f0, %f2, %f16
-
- EX3(ldda [%o1 + 0x020] %asi, %f8)
- faligndata %f2, %f4, %f18
- EX3(ldda [%o1 + 0x028] %asi, %f10)
- faligndata %f4, %f6, %f20
- EX3(ldda [%o1 + 0x030] %asi, %f12)
- faligndata %f6, %f8, %f22
- EX3(ldda [%o1 + 0x038] %asi, %f14)
- faligndata %f8, %f10, %f24
-
- EX3(ldda [%o1 + 0x040] %asi, %f0)
- prefetcha [%o1 + 0x180] %asi, #one_read
- faligndata %f10, %f12, %f26
- subcc %o3, 0x01, %o3
- add %o1, 0x40, %o1
- bg,pt %XCC, 1b
- add %o0, 0x40, %o0
+U3copy_from_user_begin:
+#ifdef __KERNEL__
+ .globl U3copy_from_user_nop_1_6
+U3copy_from_user_nop_1_6:
+ ldxa [%g0] ASI_DCU_CONTROL_REG, %g3
+ sethi %uhi(DCU_PE), %o3
+ sllx %o3, 32, %o3
+ or %g3, %o3, %o3
+ stxa %o3, [%g0] ASI_DCU_CONTROL_REG ! Enable P-cache
+ membar #Sync
+#endif
+ prefetcha [%o1 + 0x000] %asi, #one_read ! MS Group1
+ prefetcha [%o1 + 0x040] %asi, #one_read ! MS Group2
+ andn %o2, (0x40 - 1), %o4 ! A0
+ prefetcha [%o1 + 0x080] %asi, #one_read ! MS Group3
+ cmp %o4, 0x140 ! A0
+ prefetcha [%o1 + 0x0c0] %asi, #one_read ! MS Group4
+ EX(ldda [%o1 + 0x000] %asi, %f0, add %o2, %g0) ! MS Group5 (%f0 results at G8)
+ bge,a,pt %icc, 1f ! BR
+
+ prefetcha [%o1 + 0x100] %asi, #one_read ! MS Group6
+1: EX(ldda [%o1 + 0x008] %asi, %f2, add %o2, %g0) ! AX (%f2 results at G9)
+ cmp %o4, 0x180 ! A1
+ bge,a,pt %icc, 1f ! BR
+ prefetcha [%o1 + 0x140] %asi, #one_read ! MS Group7
+1: EX(ldda [%o1 + 0x010] %asi, %f4, add %o2, %g0) ! AX (%f4 results at G10)
+ cmp %o4, 0x1c0 ! A1
+ bge,a,pt %icc, 1f ! BR
+
+ prefetcha [%o1 + 0x180] %asi, #one_read ! MS Group8
+1: faligndata %f0, %f2, %f16 ! FGA Group9 (%f16 at G12)
+ EX(ldda [%o1 + 0x018] %asi, %f6, add %o2, %g0) ! AX (%f6 results at G12)
+ faligndata %f2, %f4, %f18 ! FGA Group10 (%f18 results at G13)
+ EX(ldda [%o1 + 0x020] %asi, %f8, add %o2, %g0) ! MS (%f8 results at G13)
+ faligndata %f4, %f6, %f20 ! FGA Group12 (1-cycle stall,%f20 at G15)
+ EX(ldda [%o1 + 0x028] %asi, %f10, add %o2, %g0) ! MS (%f10 results at G15)
+ faligndata %f6, %f8, %f22 ! FGA Group13 (%f22 results at G16)
+
+ EX(ldda [%o1 + 0x030] %asi, %f12, add %o2, %g0) ! MS (%f12 results at G16)
+ faligndata %f8, %f10, %f24 ! FGA Group15 (1-cycle stall,%f24 at G18)
+ EX(ldda [%o1 + 0x038] %asi, %f14, add %o2, %g0) ! MS (%f14 results at G18)
+ faligndata %f10, %f12, %f26 ! FGA Group16 (%f26 results at G19)
+ EX(ldda [%o1 + 0x040] %asi, %f0, add %o2, %g0) ! MS (%f0 results at G19)
+
+ /* We only use the first loop if len > (7 * 64). */
+ subcc %o4, 0x1c0, %o4 ! A0 Group17
+ bg,pt %icc, U3copy_from_user_loop1 ! BR
+ add %o1, 0x40, %o1 ! A1
+
+ add %o4, 0x140, %o4 ! A0 Group18
+ ba,pt %xcc, U3copy_from_user_loop2 ! BR
+ srl %o4, 6, %o3 ! A0 Group19
+ nop
+ nop
+ nop
+ nop
+ nop
+
+ nop
+ nop
+
+ /* This loop performs the copy and queues new prefetches.
+ * We drop into the second loop when len <= (5 * 64). Note
+ * that this (5 * 64) factor has been subtracted from len
+ * already.
+ */
+U3copy_from_user_loop1:
+ EX2(ldda [%o1 + 0x008] %asi, %f2) ! MS Group2 (%f2 results at G5)
+ faligndata %f12, %f14, %f28 ! FGA (%f28 results at G5)
+ EX2(ldda [%o1 + 0x010] %asi, %f4) ! MS Group3 (%f4 results at G6)
+ faligndata %f14, %f0, %f30 ! FGA Group4 (1-cycle stall, %f30 at G7)
+ stda %f16, [%o0] ASI_BLK_P ! MS
+ EX2(ldda [%o1 + 0x018] %asi, %f6) ! AX (%f6 results at G7)
+
+ faligndata %f0, %f2, %f16 ! FGA Group12 (7-cycle stall)
+ EX2(ldda [%o1 + 0x020] %asi, %f8) ! MS (%f8 results at G15)
+ faligndata %f2, %f4, %f18 ! FGA Group13 (%f18 results at G16)
+ EX2(ldda [%o1 + 0x028] %asi, %f10) ! MS (%f10 results at G16)
+ faligndata %f4, %f6, %f20 ! FGA Group14 (%f20 results at G17)
+ EX2(ldda [%o1 + 0x030] %asi, %f12) ! MS (%f12 results at G17)
+ faligndata %f6, %f8, %f22 ! FGA Group15 (%f22 results at G18)
+ EX2(ldda [%o1 + 0x038] %asi, %f14) ! MS (%f14 results at G18)
+
+ faligndata %f8, %f10, %f24 ! FGA Group16 (%f24 results at G19)
+ EX2(ldda [%o1 + 0x040] %asi, %f0) ! AX (%f0 results at G19)
+ prefetcha [%o1 + 0x180] %asi, #one_read ! MS
+ faligndata %f10, %f12, %f26 ! FGA Group17 (%f26 results at G20)
+ subcc %o4, 0x40, %o4 ! A0
+ add %o1, 0x40, %o1 ! A1
+ bg,pt %xcc, U3copy_from_user_loop1 ! BR
+ add %o0, 0x40, %o0 ! A0 Group18
+
+U3copy_from_user_loop2_enter:
+ mov 5, %o3 ! A1
+
+ /* This loop only performs the copy, no new prefetches are
+ * queued. We do things this way so that we do not perform
+ * any spurious prefetches past the end of the src buffer.
+ */
+U3copy_from_user_loop2:
+ EX3(ldda [%o1 + 0x008] %asi, %f2) ! MS
+ faligndata %f12, %f14, %f28 ! FGA Group2
+ EX3(ldda [%o1 + 0x010] %asi, %f4) ! MS
+ faligndata %f14, %f0, %f30 ! FGA Group4 (1-cycle stall)
+ stda %f16, [%o0] ASI_BLK_P ! MS
+ EX3(ldda [%o1 + 0x018] %asi, %f6) ! AX
+ faligndata %f0, %f2, %f16 ! FGA Group12 (7-cycle stall)
+
+ EX3(ldda [%o1 + 0x020] %asi, %f8) ! MS
+ faligndata %f2, %f4, %f18 ! FGA Group13
+ EX3(ldda [%o1 + 0x028] %asi, %f10) ! MS
+ faligndata %f4, %f6, %f20 ! FGA Group14
+ EX3(ldda [%o1 + 0x030] %asi, %f12) ! MS
+ faligndata %f6, %f8, %f22 ! FGA Group15
+ EX3(ldda [%o1 + 0x038] %asi, %f14) ! MS
+ faligndata %f8, %f10, %f24 ! FGA Group16
+
+ EX3(ldda [%o1 + 0x040] %asi, %f0) ! AX
+ faligndata %f10, %f12, %f26 ! FGA Group17
+ subcc %o3, 0x01, %o3 ! A0
+ add %o1, 0x40, %o1 ! A1
+ bg,pt %xcc, U3copy_from_user_loop2 ! BR
+ add %o0, 0x40, %o0 ! A0 Group18
/* Finally we copy the last full 64-byte block. */
- EX3(ldda [%o1 + 0x008] %asi, %f2)
- faligndata %f12, %f14, %f28
- EX3(ldda [%o1 + 0x010] %asi, %f4)
- faligndata %f14, %f0, %f30
- stda %f16, [%o0] ASI_BLK_P
- EX3(ldda [%o1 + 0x018] %asi, %f6)
- faligndata %f0, %f2, %f16
- EX3(ldda [%o1 + 0x020] %asi, %f8)
- faligndata %f2, %f4, %f18
- EX3(ldda [%o1 + 0x028] %asi, %f10)
- faligndata %f4, %f6, %f20
- EX3(ldda [%o1 + 0x030] %asi, %f12)
- faligndata %f6, %f8, %f22
- EX3(ldda [%o1 + 0x038] %asi, %f14)
- faligndata %f8, %f10, %f24
- cmp %g1, 0
- be,pt %XCC, 1f
- add %o0, 0x40, %o0
- EX4(ldda [%o1 + 0x040] %asi, %f0)
-1: faligndata %f10, %f12, %f26
- faligndata %f12, %f14, %f28
- faligndata %f14, %f0, %f30
- stda %f16, [%o0] ASI_BLK_P
- add %o0, 0x40, %o0
- add %o1, 0x40, %o1
-
- membar #Sync
+U3copy_from_user_loopfini:
+ EX3(ldda [%o1 + 0x008] %asi, %f2) ! MS
+ faligndata %f12, %f14, %f28 ! FGA
+ EX3(ldda [%o1 + 0x010] %asi, %f4) ! MS Group19
+ faligndata %f14, %f0, %f30 ! FGA
+ stda %f16, [%o0] ASI_BLK_P ! MS Group20
+ EX3(ldda [%o1 + 0x018] %asi, %f6) ! AX
+ faligndata %f0, %f2, %f16 ! FGA Group11 (7-cycle stall)
+ EX3(ldda [%o1 + 0x020] %asi, %f8) ! MS
+ faligndata %f2, %f4, %f18 ! FGA Group12
+ EX3(ldda [%o1 + 0x028] %asi, %f10) ! MS
+ faligndata %f4, %f6, %f20 ! FGA Group13
+ EX3(ldda [%o1 + 0x030] %asi, %f12) ! MS
+ faligndata %f6, %f8, %f22 ! FGA Group14
+ EX3(ldda [%o1 + 0x038] %asi, %f14) ! MS
+ faligndata %f8, %f10, %f24 ! FGA Group15
+ cmp %g1, 0 ! A0
+ be,pt %icc, 1f ! BR
+ add %o0, 0x40, %o0 ! A1
+ EX4(ldda [%o1 + 0x040] %asi, %f0) ! MS
+1: faligndata %f10, %f12, %f26 ! FGA Group16
+ faligndata %f12, %f14, %f28 ! FGA Group17
+ faligndata %f14, %f0, %f30 ! FGA Group18
+ stda %f16, [%o0] ASI_BLK_P ! MS
+ add %o0, 0x40, %o0 ! A0
+ add %o1, 0x40, %o1 ! A1
+#ifdef __KERNEL__
+ .globl U3copy_from_user_nop_2_3
+U3copy_from_user_nop_2_3:
+ mov PRIMARY_CONTEXT, %o3
+ stxa %g0, [%o3] ASI_DMMU ! Flush P-cache
+ stxa %g3, [%g0] ASI_DCU_CONTROL_REG ! Disable P-cache
+#endif
+ membar #Sync ! MS Group26 (7-cycle stall)
/* Now we copy the (len modulo 64) bytes at the end.
* Note how we borrow the %f0 loaded above.
*
* Also notice how this code is careful not to perform a
- * load past the end of the src buffer.
+ * load past the end of the src buffer just like similar
+ * code found in U3copy_from_user_toosmall processing.
*/
- and %o2, 0x3f, %o2
- andcc %o2, 0x38, %g2
- be,pn %XCC, 10f
- subcc %g2, 0x8, %g2
- be,pn %XCC, 10f
- cmp %g1, 0
-
- be,a,pt %XCC, 1f
- EX(ldda [%o1 + 0x00] %asi, %f0, add %o2, %g0)
-
-1: EX(ldda [%o1 + 0x08] %asi, %f2, add %o2, %g0)
- add %o1, 0x8, %o1
- sub %o2, 0x8, %o2
- subcc %g2, 0x8, %g2
- faligndata %f0, %f2, %f8
- std %f8, [%o0 + 0x00]
- be,pn %XCC, 10f
- add %o0, 0x8, %o0
- EX(ldda [%o1 + 0x08] %asi, %f0, add %o2, %g0)
- add %o1, 0x8, %o1
- sub %o2, 0x8, %o2
- subcc %g2, 0x8, %g2
- faligndata %f2, %f0, %f8
- std %f8, [%o0 + 0x00]
- bne,pn %XCC, 1b
- add %o0, 0x8, %o0
+U3copy_from_user_loopend:
+ and %o2, 0x3f, %o2 ! A0 Group
+ andcc %o2, 0x38, %g2 ! A0 Group
+ be,pn %icc, U3copy_from_user_endcruft ! BR
+ subcc %g2, 0x8, %g2 ! A1
+ be,pn %icc, U3copy_from_user_endcruft ! BR Group
+ cmp %g1, 0 ! A0
+
+ be,a,pt %icc, 1f ! BR Group
+ EX(ldda [%o1 + 0x00] %asi, %f0, add %o2, %g0) ! MS
+
+1: EX(ldda [%o1 + 0x08] %asi, %f2, add %o2, %g0) ! MS Group
+ add %o1, 0x8, %o1 ! A0
+ sub %o2, 0x8, %o2 ! A1
+ subcc %g2, 0x8, %g2 ! A0 Group
+ faligndata %f0, %f2, %f8 ! FGA Group
+ std %f8, [%o0 + 0x00] ! MS (XXX does it stall here? XXX)
+ be,pn %icc, U3copy_from_user_endcruft ! BR
+ add %o0, 0x8, %o0 ! A0
+ EX(ldda [%o1 + 0x08] %asi, %f0, add %o2, %g0) ! MS Group
+ add %o1, 0x8, %o1 ! A0
+ sub %o2, 0x8, %o2 ! A1
+ subcc %g2, 0x8, %g2 ! A0 Group
+ faligndata %f2, %f0, %f8 ! FGA
+ std %f8, [%o0 + 0x00] ! MS (XXX does it stall here? XXX)
+ bne,pn %icc, 1b ! BR
+ add %o0, 0x8, %o0 ! A0 Group
/* If anything is left, we copy it one byte at a time.
* Note that %g1 is (src & 0x3) saved above before the
* alignaddr was performed.
*/
-10:
+U3copy_from_user_endcruft:
cmp %o2, 0
add %o1, %g1, %o1
VISExitHalf
- be,pn %XCC, 85f
- sub %o0, %o1, %o3
-
- andcc %g1, 0x7, %g0
- bne,pn %icc, 90f
- andcc %o2, 0x8, %g0
- be,pt %icc, 1f
+ be,pn %icc, U3copy_from_user_short_ret
nop
- EXNV(ldxa [%o1] %asi, %o5, add %o2, %g0)
- stx %o5, [%o1 + %o3]
- add %o1, 0x8, %o1
+ ba,a,pt %xcc, U3copy_from_user_short
-1: andcc %o2, 0x4, %g0
- be,pt %icc, 1f
- nop
- EXNV(lduwa [%o1] %asi, %o5, and %o2, 0x7)
- stw %o5, [%o1 + %o3]
- add %o1, 0x4, %o1
+ /* If we get here, then 32 <= len < (6 * 64) */
+U3copy_from_user_toosmall:
-1: andcc %o2, 0x2, %g0
- be,pt %icc, 1f
- nop
- EXNV(lduha [%o1] %asi, %o5, and %o2, 0x3)
- sth %o5, [%o1 + %o3]
- add %o1, 0x2, %o1
+#ifdef SMALL_COPY_USES_FPU
-1: andcc %o2, 0x1, %g0
- be,pt %icc, 85f
- nop
- EXNV(lduba [%o1] %asi, %o5, and %o2, 0x1)
- ba,pt %xcc, 85f
- stb %o5, [%o1 + %o3]
-
-70: /* 16 < len <= 64 */
- bne,pn %XCC, 90f
- sub %o0, %o1, %o3
-
- andn %o2, 0x7, %o4
- and %o2, 0x7, %o2
-1: subcc %o4, 0x8, %o4
- EXNV8(ldxa [%o1] %asi, %o5, add %o2, %o4)
- stx %o5, [%o1 + %o3]
- bgu,pt %XCC, 1b
- add %o1, 0x8, %o1
- andcc %o2, 0x4, %g0
- be,pt %XCC, 1f
- nop
- sub %o2, 0x4, %o2
- EXNV4(lduwa [%o1] %asi, %o5, add %o2, %g0)
- stw %o5, [%o1 + %o3]
- add %o1, 0x4, %o1
-1: cmp %o2, 0
- be,pt %XCC, 85f
- nop
- ba,pt %xcc, 90f
- nop
+ /* Is 'dst' already aligned on an 8-byte boundary? */
+ be,pt %xcc, 2f ! BR Group
+
+ /* Compute abs((dst & 7) - 8) into %g2. This is the number
+ * of bytes to copy to make 'dst' 8-byte aligned. We pre-
+ * subtract this from 'len'.
+ */
+ sub %g2, 0x8, %g2 ! A0
+ sub %g0, %g2, %g2 ! A0 Group (reg-dep)
+ sub %o2, %g2, %o2 ! A0 Group (reg-dep)
-80: /* 0 < len <= 16 */
- andcc %o3, 0x3, %g0
- bne,pn %XCC, 90f
- sub %o0, %o1, %o3
+ /* Copy %g2 bytes from src to dst, one byte at a time. */
+1: EXNV(lduba [%o1 + 0x00] %asi, %o3, add %o2, %g2)! MS (Group) (%o3 in 3 cycles)
+ add %o1, 0x1, %o1 ! A1
+ add %o0, 0x1, %o0 ! A0 Group
+ subcc %g2, 0x1, %g2 ! A1
-1:
- subcc %o2, 4, %o2
- EXNV(lduwa [%o1] %asi, %g1, add %o2, %g0)
- stw %g1, [%o1 + %o3]
- bgu,pt %XCC, 1b
- add %o1, 4, %o1
+ bg,pt %icc, 1b ! BR Group
+ stb %o3, [%o0 + -1] ! MS Group
-85: retl
- clr %o0
+2: VISEntryHalf ! MS+MS
- .align 32
-90:
- subcc %o2, 1, %o2
- EXNV(lduba [%o1] %asi, %g1, add %o2, %g0)
- stb %g1, [%o1 + %o3]
- bgu,pt %XCC, 90b
- add %o1, 1, %o1
- retl
- clr %o0
+ /* Compute (len - (len % 8)) into %g2. This is guaranteed
+ * to be nonzero.
+ */
+ andn %o2, 0x7, %g2 ! A0 Group
+
+ /* You may read this and believe that it allows reading
+ * one 8-byte longword past the end of src. It actually
+ * does not, as %g2 is subtracted as loads are done from
+ * src, so we always stop before running off the end.
+ * Also, we are guaranteed to have at least 0x10 bytes
+ * to move here.
+ */
+ sub %g2, 0x8, %g2 ! A0 Group (reg-dep)
+ alignaddr %o1, %g0, %g1 ! MS (Break-after)
+ EX(ldda [%g1 + 0x00] %asi, %f0, add %o2, %g0) ! MS Group (1-cycle stall)
+ add %g1, 0x8, %g1 ! A0
+
+1: EX(ldda [%g1 + 0x00] %asi, %f2, add %o2, %g0) ! MS Group
+ add %g1, 0x8, %g1 ! A0
+ sub %o2, 0x8, %o2 ! A1
+ subcc %g2, 0x8, %g2 ! A0 Group
+
+ faligndata %f0, %f2, %f8 ! FGA Group (1-cycle stall)
+ std %f8, [%o0 + 0x00] ! MS Group (2-cycle stall)
+ add %o1, 0x8, %o1 ! A0
+ be,pn %icc, 2f ! BR
+
+ add %o0, 0x8, %o0 ! A1
+ EX(ldda [%g1 + 0x00] %asi, %f0, add %o2, %g0) ! MS Group
+ add %g1, 0x8, %g1 ! A0
+ sub %o2, 0x8, %o2 ! A1
+
+ subcc %g2, 0x8, %g2 ! A0 Group
+ faligndata %f2, %f0, %f8 ! FGA Group (1-cycle stall)
+ std %f8, [%o0 + 0x00] ! MS Group (2-cycle stall)
+ add %o1, 0x8, %o1 ! A0
+
+ bne,pn %icc, 1b ! BR
+ add %o0, 0x8, %o0 ! A1
+
+ /* Nothing left to copy? */
+2: cmp %o2, 0 ! A0 Group
+ VISExitHalf ! A0+MS
+ be,pn %icc, U3copy_from_user_short_ret! BR Group
+ nop ! A0
+ ba,a,pt %xcc, U3copy_from_user_short ! BR Group
+
+#else /* !(SMALL_COPY_USES_FPU) */
+
+ xor %o1, %o0, %g2
+ andcc %g2, 0x7, %g0
+ bne,pn %icc, U3copy_from_user_short
+ andcc %o1, 0x7, %g2
+
+ be,pt %xcc, 2f
+ sub %g2, 0x8, %g2
+ sub %g0, %g2, %g2
+ sub %o2, %g2, %o2
+
+1: EXNV(lduba [%o1 + 0x00] %asi, %o3, add %o2, %g2)
+ add %o1, 0x1, %o1
+ add %o0, 0x1, %o0
+ subcc %g2, 0x1, %g2
+ bg,pt %icc, 1b
+ stb %o3, [%o0 + -1]
+
+2: andn %o2, 0x7, %g2
+ sub %o2, %g2, %o2
+
+3: EXNV(ldxa [%o1 + 0x00] %asi, %o3, add %o2, %g2)
+ add %o1, 0x8, %o1
+ add %o0, 0x8, %o0
+ subcc %g2, 0x8, %g2
+ bg,pt %icc, 3b
+ stx %o3, [%o0 + -8]
+
+ cmp %o2, 0
+ bne,pn %icc, U3copy_from_user_short
+ nop
+ ba,a,pt %xcc, U3copy_from_user_short_ret
+
+#endif /* !(SMALL_COPY_USES_FPU) */
+#ifdef __KERNEL__
+ .globl U3cfu_fixup
U3cfu_fixup:
/* Since this is copy_from_user(), zero out the rest of the
* kernel buffer.
2: retl
mov %o1, %o0
+#endif
-/* U3copy_in_user.S: UltraSparc-III optimized memcpy.
+/* $Id: U3copy_in_user.S,v 1.4 2001/03/21 05:58:47 davem Exp $
+ * U3copy_in_user.S: UltraSparc-III optimized copy within userspace.
*
- * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1999, 2000 David S. Miller (davem@redhat.com)
*/
+#ifdef __KERNEL__
#include <asm/visasm.h>
#include <asm/asi.h>
-#include <asm/dcu.h>
-#include <asm/spitfire.h>
-
-#define XCC xcc
-
+#undef SMALL_COPY_USES_FPU
#define EXNV(x,y,a,b) \
98: x,y; \
.section .fixup; \
.word 98b, 99b; \
.text; \
.align 4;
-#define EXNV1(x,y,a,b) \
+#define EXNV2(x,y,a,b) \
98: x,y; \
.section .fixup; \
.align 4; \
.word 98b, 99b; \
.text; \
.align 4;
-#define EXNV4(x,y,a,b) \
+#define EXNV3(x,y,a,b) \
98: x,y; \
.section .fixup; \
.align 4; \
99: a, b, %o0; \
retl; \
- add %o0, 4, %o0; \
+ add %o0, 8, %o0; \
.section __ex_table; \
.align 4; \
.word 98b, 99b; \
.text; \
.align 4;
-#define EXNV8(x,y,a,b) \
+#define EX(x,y,a,b) \
98: x,y; \
.section .fixup; \
.align 4; \
-99: a, b, %o0; \
+99: VISExitHalf; \
retl; \
- add %o0, 8, %o0; \
+ a, b, %o0; \
.section __ex_table; \
.align 4; \
.word 98b, 99b; \
.text; \
.align 4;
+#define EXBLK1(x,y) \
+98: x,y; \
+ .section .fixup; \
+ .align 4; \
+99: VISExitHalf; \
+ add %o4, 0x1c0, %o1; \
+ and %o2, (0x40 - 1), %o2; \
+ retl; \
+ add %o1, %o2, %o0; \
+ .section __ex_table; \
+ .align 4; \
+ .word 98b, 99b; \
+ .text; \
+ .align 4;
+#define EXBLK2(x,y) \
+98: x,y; \
+ .section .fixup; \
+ .align 4; \
+99: VISExitHalf; \
+ sll %o3, 6, %o3; \
+ and %o2, (0x40 - 1), %o2; \
+ add %o3, 0x80, %o1; \
+ retl; \
+ add %o1, %o2, %o0; \
+ .section __ex_table; \
+ .align 4; \
+ .word 98b, 99b; \
+ .text; \
+ .align 4;
+#define EXBLK3(x,y) \
+98: x,y; \
+ .section .fixup; \
+ .align 4; \
+99: VISExitHalf; \
+ and %o2, (0x40 - 1), %o2; \
+ retl; \
+ add %o2, 0x80, %o0; \
+ .section __ex_table; \
+ .align 4; \
+ .word 98b, 99b; \
+ .text; \
+ .align 4;
+#define EXBLK4(x,y) \
+98: x,y; \
+ .section .fixup; \
+ .align 4; \
+99: VISExitHalf; \
+ and %o2, (0x40 - 1), %o2; \
+ retl; \
+ add %o2, 0x40, %o0; \
+ .section __ex_table; \
+ .align 4; \
+ .word 98b, 99b; \
+ .text; \
+ .align 4;
+#else
+#define ASI_AIUS 0x80
+#define ASI_BLK_AIUS 0xf0
+#define FPRS_FEF 0x04
+#define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs
+#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
+#define SMALL_COPY_USES_FPU
+#define EXNV(x,y,a,b) x,y;
+#define EXNV2(x,y,a,b) x,y;
+#define EXNV3(x,y,a,b) x,y;
+#define EX(x,y,a,b) x,y;
+#define EXBLK1(x,y) x,y;
+#define EXBLK2(x,y) x,y;
+#define EXBLK3(x,y) x,y;
+#define EXBLK4(x,y) x,y;
+#endif
- .register %g2,#scratch
- .register %g3,#scratch
+ /* Special/non-trivial issues of this code:
+ *
+ * 1) %o5 is preserved from VISEntryHalf to VISExitHalf
+ * 2) Only low 32 FPU registers are used so that only the
+ * lower half of the FPU register set is dirtied by this
+ * code. This is especially important in the kernel.
+ * 3) This code never prefetches cachelines past the end
+ * of the source buffer.
+ *
+ * XXX Actually, Cheetah can buffer up to 8 concurrent
+ * XXX prefetches, revisit this...
+ */
.text
.align 32
- /* Don't try to get too fancy here, just nice and
- * simple. This is predominantly used for well aligned
- * small copies in the compat layer. It is also used
- * to copy register windows around during thread cloning.
+ /* The cheetah's flexible spine, oversized liver, enlarged heart,
+ * slender muscular body, and claws make it the swiftest hunter
+ * in Africa and the fastest animal on land. Can reach speeds
+ * of up to 2.4GB per second.
*/
- .globl U3copy_in_user
-U3copy_in_user: /* %o0=dst, %o1=src, %o2=len */
+ .globl U3copy_in_user
+U3copy_in_user: /* %o0=dst, %o1=src, %o2=len */
/* Writing to %asi is _expensive_ so we hardcode it.
* Reading %asi to check for KERNEL_DS is comparatively
* cheap.
*/
- rd %asi, %g1
- cmp %g1, ASI_AIUS
- bne,pn %icc, U3memcpy_user_stub
- nop
+ rd %asi, %g1 ! MS Group (4 cycles)
+ cmp %g1, ASI_AIUS ! A0 Group
+ bne U3memcpy ! BR
+ nop ! A1
+#ifndef __KERNEL__
+ /* Save away original 'dst' for memcpy return value. */
+ mov %o0, %g3 ! A0 Group
+#endif
+ /* Anything to copy at all? */
+ cmp %o2, 0 ! A1
+ ble,pn %icc, U3copy_in_user_short_ret ! BR
+
+ /* Extremely small copy? */
+ cmp %o2, 31 ! A0 Group
+ ble,pn %icc, U3copy_in_user_short ! BR
+
+ /* Large enough to use unrolled prefetch loops? */
+ cmp %o2, 0x100 ! A1
+ bge,a,pt %icc, U3copy_in_user_enter ! BR Group
+ andcc %o0, 0x3f, %g2 ! A0
+
+ ba,pt %xcc, U3copy_in_user_toosmall ! BR Group
+ andcc %o0, 0x7, %g2 ! A0
+ .align 32
+U3copy_in_user_short:
+ /* Copy %o2 bytes from src to dst, one byte at a time. */
+ EXNV(lduba [%o1 + 0x00] %asi, %o3, add %o2, %g0)! MS Group
+ add %o1, 0x1, %o1 ! A0
+ add %o0, 0x1, %o0 ! A1
+ subcc %o2, 1, %o2 ! A0 Group
+
+ bg,pt %icc, U3copy_in_user_short ! BR
+ EXNV(stba %o3, [%o0 + -1] %asi, add %o2, 1) ! MS Group (1-cycle stall)
+
+U3copy_in_user_short_ret:
+#ifdef __KERNEL__
+ retl ! BR Group (0-4 cycle stall)
+ clr %o0 ! A0
+#else
+ retl ! BR Group (0-4 cycle stall)
+ mov %g3, %o0 ! A0
+#endif
+
+ /* Here len >= (6 * 64) and condition codes reflect execution
+ * of "andcc %o0, 0x7, %g2", done by caller.
+ */
+ .align 64
+U3copy_in_user_enter:
+ /* Is 'dst' already aligned on an 64-byte boundary? */
+ be,pt %xcc, 2f ! BR
+
+ /* Compute abs((dst & 0x3f) - 0x40) into %g2. This is the number
+ * of bytes to copy to make 'dst' 64-byte aligned. We pre-
+ * subtract this from 'len'.
+ */
+ sub %g2, 0x40, %g2 ! A0 Group
+ sub %g0, %g2, %g2 ! A0 Group
+ sub %o2, %g2, %o2 ! A0 Group
+
+ /* Copy %g2 bytes from src to dst, one byte at a time. */
+1: EXNV(lduba [%o1 + 0x00] %asi, %o3, add %o2, %g2)! MS (Group)
+ add %o1, 0x1, %o1 ! A1
+ add %o0, 0x1, %o0 ! A0 Group
+ subcc %g2, 0x1, %g2 ! A1
+
+ bg,pt %icc, 1b ! BR Group
+ EXNV2(stba %o3, [%o0 + -1] %asi, add %o2, %g2) ! MS Group
+
+2: VISEntryHalf ! MS+MS
+ and %o1, 0x7, %g1 ! A1
+ ba,pt %xcc, U3copy_in_user_begin ! BR
+ alignaddr %o1, %g0, %o1 ! MS (Break-after)
+
+ .align 64
+U3copy_in_user_begin:
+ prefetcha [%o1 + 0x000] %asi, #one_read ! MS Group1
+ prefetcha [%o1 + 0x040] %asi, #one_read ! MS Group2
+ andn %o2, (0x40 - 1), %o4 ! A0
+ prefetcha [%o1 + 0x080] %asi, #one_read ! MS Group3
+ cmp %o4, 0x140 ! A0
+ prefetcha [%o1 + 0x0c0] %asi, #one_read ! MS Group4
+ EX(ldda [%o1 + 0x000] %asi, %f0, add %o2, %g0) ! MS Group5 (%f0 results at G8)
+ bge,a,pt %icc, 1f ! BR
+
+ prefetcha [%o1 + 0x100] %asi, #one_read ! MS Group6
+1: EX(ldda [%o1 + 0x008] %asi, %f2, add %o2, %g0) ! AX (%f2 results at G9)
+ cmp %o4, 0x180 ! A1
+ bge,a,pt %icc, 1f ! BR
+ prefetcha [%o1 + 0x140] %asi, #one_read ! MS Group7
+1: EX(ldda [%o1 + 0x010] %asi, %f4, add %o2, %g0) ! AX (%f4 results at G10)
+ cmp %o4, 0x1c0 ! A1
+ bge,a,pt %icc, 1f ! BR
+
+ prefetcha [%o1 + 0x180] %asi, #one_read ! MS Group8
+1: faligndata %f0, %f2, %f16 ! FGA Group9 (%f16 at G12)
+ EX(ldda [%o1 + 0x018] %asi, %f6, add %o2, %g0) ! AX (%f6 results at G12)
+ faligndata %f2, %f4, %f18 ! FGA Group10 (%f18 results at G13)
+ EX(ldda [%o1 + 0x020] %asi, %f8, add %o2, %g0) ! MS (%f8 results at G13)
+ faligndata %f4, %f6, %f20 ! FGA Group12 (1-cycle stall,%f20 at G15)
+ EX(ldda [%o1 + 0x028] %asi, %f10, add %o2, %g0) ! MS (%f10 results at G15)
+ faligndata %f6, %f8, %f22 ! FGA Group13 (%f22 results at G16)
+
+ EX(ldda [%o1 + 0x030] %asi, %f12, add %o2, %g0) ! MS (%f12 results at G16)
+ faligndata %f8, %f10, %f24 ! FGA Group15 (1-cycle stall,%f24 at G18)
+ EX(ldda [%o1 + 0x038] %asi, %f14, add %o2, %g0) ! MS (%f14 results at G18)
+ faligndata %f10, %f12, %f26 ! FGA Group16 (%f26 results at G19)
+ EX(ldda [%o1 + 0x040] %asi, %f0, add %o2, %g0) ! MS (%f0 results at G19)
+
+ /* We only use the first loop if len > (7 * 64). */
+ subcc %o4, 0x1c0, %o4 ! A0 Group17
+ bg,pt %icc, U3copy_in_user_loop1 ! BR
+ add %o1, 0x40, %o1 ! A1
+
+ add %o4, 0x140, %o4 ! A0 Group18
+ ba,pt %xcc, U3copy_in_user_loop2 ! BR
+ srl %o4, 6, %o3 ! A0 Group19
+ nop
+ nop
+ nop
+ nop
+ nop
+
+ nop
+ nop
+
+ /* This loop performs the copy and queues new prefetches.
+ * We drop into the second loop when len <= (5 * 64). Note
+ * that this (5 * 64) factor has been subtracted from len
+ * already.
+ */
+U3copy_in_user_loop1:
+ EXBLK1(ldda [%o1 + 0x008] %asi, %f2) ! MS Group2 (%f2 results at G5)
+ faligndata %f12, %f14, %f28 ! FGA (%f28 results at G5)
+ EXBLK1(ldda [%o1 + 0x010] %asi, %f4) ! MS Group3 (%f4 results at G6)
+ faligndata %f14, %f0, %f30 ! FGA Group4 (1-cycle stall, %f30 at G7)
+ EXBLK1(stda %f16, [%o0] ASI_BLK_AIUS) ! MS
+ EXBLK1(ldda [%o1 + 0x018] %asi, %f6) ! AX (%f6 results at G7)
+
+ faligndata %f0, %f2, %f16 ! FGA Group12 (7-cycle stall)
+ EXBLK1(ldda [%o1 + 0x020] %asi, %f8) ! MS (%f8 results at G15)
+ faligndata %f2, %f4, %f18 ! FGA Group13 (%f18 results at G16)
+ EXBLK1(ldda [%o1 + 0x028] %asi, %f10) ! MS (%f10 results at G16)
+ faligndata %f4, %f6, %f20 ! FGA Group14 (%f20 results at G17)
+ EXBLK1(ldda [%o1 + 0x030] %asi, %f12) ! MS (%f12 results at G17)
+ faligndata %f6, %f8, %f22 ! FGA Group15 (%f22 results at G18)
+ EXBLK1(ldda [%o1 + 0x038] %asi, %f14) ! MS (%f14 results at G18)
+
+ faligndata %f8, %f10, %f24 ! FGA Group16 (%f24 results at G19)
+ EXBLK1(ldda [%o1 + 0x040] %asi, %f0) ! AX (%f0 results at G19)
+ prefetcha [%o1 + 0x180] %asi, #one_read ! MS
+ faligndata %f10, %f12, %f26 ! FGA Group17 (%f26 results at G20)
+ subcc %o4, 0x40, %o4 ! A0
+ add %o1, 0x40, %o1 ! A1
+ bg,pt %xcc, U3copy_in_user_loop1 ! BR
+ add %o0, 0x40, %o0 ! A0 Group18
+
+U3copy_in_user_loop2_enter:
+ mov 5, %o3 ! A1
+
+ /* This loop only performs the copy, no new prefetches are
+ * queued. We do things this way so that we do not perform
+ * any spurious prefetches past the end of the src buffer.
+ */
+U3copy_in_user_loop2:
+ EXBLK2(ldda [%o1 + 0x008] %asi, %f2) ! MS
+ faligndata %f12, %f14, %f28 ! FGA Group2
+ EXBLK2(ldda [%o1 + 0x010] %asi, %f4) ! MS
+ faligndata %f14, %f0, %f30 ! FGA Group4 (1-cycle stall)
+ EXBLK2(stda %f16, [%o0] ASI_BLK_AIUS) ! MS
+ EXBLK2(ldda [%o1 + 0x018] %asi, %f6) ! AX
+ faligndata %f0, %f2, %f16 ! FGA Group12 (7-cycle stall)
+
+ EXBLK2(ldda [%o1 + 0x020] %asi, %f8) ! MS
+ faligndata %f2, %f4, %f18 ! FGA Group13
+ EXBLK2(ldda [%o1 + 0x028] %asi, %f10) ! MS
+ faligndata %f4, %f6, %f20 ! FGA Group14
+ EXBLK2(ldda [%o1 + 0x030] %asi, %f12) ! MS
+ faligndata %f6, %f8, %f22 ! FGA Group15
+ EXBLK2(ldda [%o1 + 0x038] %asi, %f14) ! MS
+ faligndata %f8, %f10, %f24 ! FGA Group16
+
+ EXBLK2(ldda [%o1 + 0x040] %asi, %f0) ! AX
+ faligndata %f10, %f12, %f26 ! FGA Group17
+ subcc %o3, 0x01, %o3 ! A0
+ add %o1, 0x40, %o1 ! A1
+ bg,pt %xcc, U3copy_in_user_loop2 ! BR
+ add %o0, 0x40, %o0 ! A0 Group18
+
+ /* Finally we copy the last full 64-byte block. */
+U3copy_in_user_loopfini:
+ EXBLK3(ldda [%o1 + 0x008] %asi, %f2) ! MS
+ faligndata %f12, %f14, %f28 ! FGA
+ EXBLK3(ldda [%o1 + 0x010] %asi, %f4) ! MS Group19
+ faligndata %f14, %f0, %f30 ! FGA
+ EXBLK3(stda %f16, [%o0] ASI_BLK_AIUS) ! MS Group20
+ EXBLK4(ldda [%o1 + 0x018] %asi, %f6) ! AX
+ faligndata %f0, %f2, %f16 ! FGA Group11 (7-cycle stall)
+ EXBLK4(ldda [%o1 + 0x020] %asi, %f8) ! MS
+ faligndata %f2, %f4, %f18 ! FGA Group12
+ EXBLK4(ldda [%o1 + 0x028] %asi, %f10) ! MS
+ faligndata %f4, %f6, %f20 ! FGA Group13
+ EXBLK4(ldda [%o1 + 0x030] %asi, %f12) ! MS
+ faligndata %f6, %f8, %f22 ! FGA Group14
+ EXBLK4(ldda [%o1 + 0x038] %asi, %f14) ! MS
+ faligndata %f8, %f10, %f24 ! FGA Group15
+ cmp %g1, 0 ! A0
+ be,pt %icc, 1f ! BR
+ add %o0, 0x40, %o0 ! A1
+ EXBLK4(ldda [%o1 + 0x040] %asi, %f0) ! MS
+1: faligndata %f10, %f12, %f26 ! FGA Group16
+ faligndata %f12, %f14, %f28 ! FGA Group17
+ faligndata %f14, %f0, %f30 ! FGA Group18
+ EXBLK4(stda %f16, [%o0] ASI_BLK_AIUS) ! MS
+ add %o0, 0x40, %o0 ! A0
+ add %o1, 0x40, %o1 ! A1
+ membar #Sync ! MS Group26 (7-cycle stall)
+
+ /* Now we copy the (len modulo 64) bytes at the end.
+ * Note how we borrow the %f0 loaded above.
+ *
+ * Also notice how this code is careful not to perform a
+ * load past the end of the src buffer just like similar
+ * code found in U3copy_in_user_toosmall processing.
+ */
+U3copy_in_user_loopend:
+ and %o2, 0x3f, %o2 ! A0 Group
+ andcc %o2, 0x38, %g2 ! A0 Group
+ be,pn %icc, U3copy_in_user_endcruft ! BR
+ subcc %g2, 0x8, %g2 ! A1
+ be,pn %icc, U3copy_in_user_endcruft ! BR Group
+ cmp %g1, 0 ! A0
+
+ be,a,pt %icc, 1f ! BR Group
+ EX(ldda [%o1 + 0x00] %asi, %f0, add %o2, %g0) ! MS
+
+1: EX(ldda [%o1 + 0x08] %asi, %f2, add %o2, %g0) ! MS Group
+ add %o1, 0x8, %o1 ! A0
+ sub %o2, 0x8, %o2 ! A1
+ subcc %g2, 0x8, %g2 ! A0 Group
+ faligndata %f0, %f2, %f8 ! FGA Group
+ EX(stda %f8, [%o0 + 0x00] %asi, add %o2, 0x8) ! MS (XXX does it stall here? XXX)
+ be,pn %icc, U3copy_in_user_endcruft ! BR
+ add %o0, 0x8, %o0 ! A0
+ EX(ldda [%o1 + 0x08] %asi, %f0, add %o2, %g0) ! MS Group
+ add %o1, 0x8, %o1 ! A0
+ sub %o2, 0x8, %o2 ! A1
+ subcc %g2, 0x8, %g2 ! A0 Group
+ faligndata %f2, %f0, %f8 ! FGA
+ EX(stda %f8, [%o0 + 0x00] %asi, add %o2, 0x8) ! MS (XXX does it stall here? XXX)
+ bne,pn %icc, 1b ! BR
+ add %o0, 0x8, %o0 ! A0 Group
+
+ /* If anything is left, we copy it one byte at a time.
+ * Note that %g1 is (src & 0x3) saved above before the
+ * alignaddr was performed.
+ */
+U3copy_in_user_endcruft:
cmp %o2, 0
- be,pn %XCC, out
- or %o0, %o1, %o3
- cmp %o2, 16
- bleu,a,pn %XCC, small_copy
- or %o3, %o2, %o3
-
-medium_copy: /* 16 < len <= 64 */
- andcc %o3, 0x7, %g0
- bne,pn %XCC, small_copy_unaligned
- sub %o0, %o1, %o3
-
-medium_copy_aligned:
- andn %o2, 0x7, %o4
- and %o2, 0x7, %o2
-1: subcc %o4, 0x8, %o4
- EXNV8(ldxa [%o1] %asi, %o5, add %o4, %o2)
- EXNV8(stxa %o5, [%o1 + %o3] ASI_AIUS, add %o4, %o2)
- bgu,pt %XCC, 1b
- add %o1, 0x8, %o1
- andcc %o2, 0x4, %g0
- be,pt %XCC, 1f
- nop
- sub %o2, 0x4, %o2
- EXNV4(lduwa [%o1] %asi, %o5, add %o4, %o2)
- EXNV4(stwa %o5, [%o1 + %o3] ASI_AIUS, add %o4, %o2)
- add %o1, 0x4, %o1
-1: cmp %o2, 0
- be,pt %XCC, out
- nop
- ba,pt %xcc, small_copy_unaligned
+ add %o1, %g1, %o1
+ VISExitHalf
+ be,pn %icc, U3copy_in_user_short_ret
nop
+ ba,a,pt %xcc, U3copy_in_user_short
-small_copy: /* 0 < len <= 16 */
- andcc %o3, 0x3, %g0
- bne,pn %XCC, small_copy_unaligned
- sub %o0, %o1, %o3
+ /* If we get here, then 32 <= len < (6 * 64) */
+U3copy_in_user_toosmall:
-small_copy_aligned:
- subcc %o2, 4, %o2
- EXNV4(lduwa [%o1] %asi, %g1, add %o2, %g0)
- EXNV4(stwa %g1, [%o1 + %o3] ASI_AIUS, add %o2, %g0)
- bgu,pt %XCC, small_copy_aligned
- add %o1, 4, %o1
+#ifdef SMALL_COPY_USES_FPU
-out: retl
- clr %o0
+ /* Is 'dst' already aligned on an 8-byte boundary? */
+ be,pt %xcc, 2f ! BR Group
- .align 32
-small_copy_unaligned:
- subcc %o2, 1, %o2
- EXNV1(lduba [%o1] %asi, %g1, add %o2, %g0)
- EXNV1(stba %g1, [%o1 + %o3] ASI_AIUS, add %o2, %g0)
- bgu,pt %XCC, small_copy_unaligned
- add %o1, 1, %o1
- retl
- clr %o0
+ /* Compute abs((dst & 7) - 8) into %g2. This is the number
+ * of bytes to copy to make 'dst' 8-byte aligned. We pre-
+ * subtract this from 'len'.
+ */
+ sub %g2, 0x8, %g2 ! A0
+ sub %g0, %g2, %g2 ! A0 Group (reg-dep)
+ sub %o2, %g2, %o2 ! A0 Group (reg-dep)
+
+ /* Copy %g2 bytes from src to dst, one byte at a time. */
+1: EXNV2(lduba [%o1 + 0x00] %asi, %o3, add %o2, %g2)! MS (Group) (%o3 in 3 cycles)
+ add %o1, 0x1, %o1 ! A1
+ add %o0, 0x1, %o0 ! A0 Group
+ subcc %g2, 0x1, %g2 ! A1
+
+ bg,pt %icc, 1b ! BR Group
+ EXNV2(stba %o3, [%o0 + -1] %asi, add %o2, %g2) ! MS Group
+
+2: VISEntryHalf ! MS+MS
+
+ /* Compute (len - (len % 8)) into %g2. This is guaranteed
+ * to be nonzero.
+ */
+ andn %o2, 0x7, %g2 ! A0 Group
+
+ /* You may read this and believe that it allows reading
+ * one 8-byte longword past the end of src. It actually
+ * does not, as %g2 is subtracted as loads are done from
+ * src, so we always stop before running off the end.
+ * Also, we are guaranteed to have at least 0x10 bytes
+ * to move here.
+ */
+ sub %g2, 0x8, %g2 ! A0 Group (reg-dep)
+ alignaddr %o1, %g0, %g1 ! MS (Break-after)
+ EX(ldda [%g1 + 0x00] %asi, %f0, add %o2, %g0) ! MS Group (1-cycle stall)
+ add %g1, 0x8, %g1 ! A0
+
+1: EX(ldda [%g1 + 0x00] %asi, %f2, add %o2, %g0) ! MS Group
+ add %g1, 0x8, %g1 ! A0
+ sub %o2, 0x8, %o2 ! A1
+ subcc %g2, 0x8, %g2 ! A0 Group
+
+ faligndata %f0, %f2, %f8 ! FGA Group (1-cycle stall)
+ EX(stda %f8, [%o0 + 0x00] %asi, add %o2, 0x8) ! MS Group (2-cycle stall)
+ add %o1, 0x8, %o1 ! A0
+ be,pn %icc, 2f ! BR
+
+ add %o0, 0x8, %o0 ! A1
+ EX(ldda [%g1 + 0x00] %asi, %f0, add %o2, %g0) ! MS Group
+ add %g1, 0x8, %g1 ! A0
+ sub %o2, 0x8, %o2 ! A1
+
+ subcc %g2, 0x8, %g2 ! A0 Group
+ faligndata %f2, %f0, %f8 ! FGA Group (1-cycle stall)
+ EX(stda %f8, [%o0 + 0x00] %asi, add %o2, 0x8) ! MS Group (2-cycle stall)
+ add %o1, 0x8, %o1 ! A0
+
+ bne,pn %icc, 1b ! BR
+ add %o0, 0x8, %o0 ! A1
+
+ /* Nothing left to copy? */
+2: cmp %o2, 0 ! A0 Group
+ VISExitHalf ! A0+MS
+ be,pn %icc, U3copy_in_user_short_ret ! BR Group
+ nop ! A0
+ ba,a,pt %xcc, U3copy_in_user_short ! BR Group
+
+#else /* !(SMALL_COPY_USES_FPU) */
+
+ xor %o1, %o0, %g2
+ andcc %g2, 0x7, %g0
+ bne,pn %icc, U3copy_in_user_short
+ andcc %o1, 0x7, %g2
+
+ be,pt %xcc, 2f
+ sub %g2, 0x8, %g2
+ sub %g0, %g2, %g2
+ sub %o2, %g2, %o2
+
+1: EXNV2(lduba [%o1 + 0x00] %asi, %o3, add %o2, %g2)
+ add %o1, 0x1, %o1
+ add %o0, 0x1, %o0
+ subcc %g2, 0x1, %g2
+ bg,pt %icc, 1b
+ EXNV2(stba %o3, [%o0 + -1] %asi, add %o2, %g2)
+
+2: andn %o2, 0x7, %g2
+ sub %o2, %g2, %o2
+
+3: EXNV3(ldxa [%o1 + 0x00] %asi, %o3, add %o2, %g2)
+ add %o1, 0x8, %o1
+ add %o0, 0x8, %o0
+ subcc %g2, 0x8, %g2
+ bg,pt %icc, 3b
+ EXNV3(stxa %o3, [%o0 + -8] %asi, add %o2, %g2)
+
+ cmp %o2, 0
+ bne,pn %icc, U3copy_in_user_short
+ nop
+ ba,a,pt %xcc, U3copy_in_user_short_ret
+
+#endif /* !(SMALL_COPY_USES_FPU) */
-/* U3copy_to_user.S: UltraSparc-III optimized memcpy.
+/* $Id: U3copy_to_user.S,v 1.3 2000/11/01 09:29:19 davem Exp $
+ * U3copy_to_user.S: UltraSparc-III optimized copy to userspace.
*
- * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1999, 2000 David S. Miller (davem@redhat.com)
*/
+#ifdef __KERNEL__
#include <asm/visasm.h>
#include <asm/asi.h>
#include <asm/dcu.h>
#include <asm/spitfire.h>
-
-#define XCC xcc
-
+#undef SMALL_COPY_USES_FPU
#define EXNV(x,y,a,b) \
98: x,y; \
.section .fixup; \
.text; \
.align 4;
#define EXNV3(x,y,a,b) \
-98: x,y; \
- .section .fixup; \
- .align 4; \
-99: a, b, %o0; \
- retl; \
- add %o0, 4, %o0; \
- .section __ex_table; \
- .align 4; \
- .word 98b, 99b; \
- .text; \
- .align 4;
-#define EXNV4(x,y,a,b) \
98: x,y; \
.section .fixup; \
.align 4; \
.word 98b, 99b; \
.text; \
.align 4;
-
- .register %g2,#scratch
- .register %g3,#scratch
+#else
+#define ASI_AIUS 0x80
+#define ASI_BLK_AIUS 0xf0
+#define FPRS_FEF 0x04
+#define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs
+#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
+#define SMALL_COPY_USES_FPU
+#define EXNV(x,y,a,b) x,y;
+#define EXNV2(x,y,a,b) x,y;
+#define EXNV3(x,y,a,b) x,y;
+#define EX(x,y,a,b) x,y;
+#define EXBLK1(x,y) x,y;
+#define EXBLK2(x,y) x,y;
+#define EXBLK3(x,y) x,y;
+#define EXBLK4(x,y) x,y;
+#endif
/* Special/non-trivial issues of this code:
*
* of up to 2.4GB per second.
*/
- .globl U3copy_to_user
-U3copy_to_user: /* %o0=dst, %o1=src, %o2=len */
+ .globl U3copy_to_user
+U3copy_to_user: /* %o0=dst, %o1=src, %o2=len */
/* Writing to %asi is _expensive_ so we hardcode it.
* Reading %asi to check for KERNEL_DS is comparatively
* cheap.
*/
- rd %asi, %g1
- cmp %g1, ASI_AIUS
- bne,pn %icc, U3memcpy_user_stub
- nop
-
- cmp %o2, 0
- be,pn %XCC, 85f
- or %o0, %o1, %o3
- cmp %o2, 16
- bleu,a,pn %XCC, 80f
- or %o3, %o2, %o3
-
- cmp %o2, 256
- blu,pt %XCC, 70f
- andcc %o3, 0x7, %g0
-
- ba,pt %xcc, 1f
- andcc %o0, 0x3f, %g2
-
- /* Here len >= 256 and condition codes reflect execution
+ rd %asi, %g1 ! MS Group (4 cycles)
+ cmp %g1, ASI_AIUS ! A0 Group
+ bne U3memcpy ! BR
+ nop ! A1
+#ifndef __KERNEL__
+ /* Save away original 'dst' for memcpy return value. */
+ mov %o0, %g3 ! A0 Group
+#endif
+ /* Anything to copy at all? */
+ cmp %o2, 0 ! A1
+ ble,pn %icc, U3copy_to_user_short_ret ! BR
+
+ /* Extremely small copy? */
+ cmp %o2, 31 ! A0 Group
+ ble,pn %icc, U3copy_to_user_short ! BR
+
+ /* Large enough to use unrolled prefetch loops? */
+ cmp %o2, 0x100 ! A1
+ bge,a,pt %icc, U3copy_to_user_enter ! BR Group
+ andcc %o0, 0x3f, %g2 ! A0
+
+ ba,pt %xcc, U3copy_to_user_toosmall ! BR Group
+ andcc %o0, 0x7, %g2 ! A0
+
+ .align 32
+U3copy_to_user_short:
+ /* Copy %o2 bytes from src to dst, one byte at a time. */
+ ldub [%o1 + 0x00], %o3 ! MS Group
+ add %o1, 0x1, %o1 ! A0
+ add %o0, 0x1, %o0 ! A1
+ subcc %o2, 1, %o2 ! A0 Group
+
+ bg,pt %icc, U3copy_to_user_short ! BR
+ EXNV(stba %o3, [%o0 + -1] %asi, add %o2, 1) ! MS Group (1-cycle stall)
+
+U3copy_to_user_short_ret:
+#ifdef __KERNEL__
+ retl ! BR Group (0-4 cycle stall)
+ clr %o0 ! A0
+#else
+ retl ! BR Group (0-4 cycle stall)
+ mov %g3, %o0 ! A0
+#endif
+
+ /* Here len >= (6 * 64) and condition codes reflect execution
* of "andcc %o0, 0x7, %g2", done by caller.
*/
.align 64
-1:
+U3copy_to_user_enter:
/* Is 'dst' already aligned on an 64-byte boundary? */
- be,pt %XCC, 2f
+ be,pt %xcc, 2f ! BR
/* Compute abs((dst & 0x3f) - 0x40) into %g2. This is the number
* of bytes to copy to make 'dst' 64-byte aligned. We pre-
* subtract this from 'len'.
*/
- sub %g2, 0x40, %g2
- sub %g0, %g2, %g2
- sub %o2, %g2, %o2
+ sub %g2, 0x40, %g2 ! A0 Group
+ sub %g0, %g2, %g2 ! A0 Group
+ sub %o2, %g2, %o2 ! A0 Group
/* Copy %g2 bytes from src to dst, one byte at a time. */
-1: ldub [%o1 + 0x00], %o3
- add %o1, 0x1, %o1
- add %o0, 0x1, %o0
- subcc %g2, 0x1, %g2
-
- bg,pt %XCC, 1b
- EXNV2(stba %o3, [%o0 + -1] %asi, add %o2, %g2)
+1: ldub [%o1 + 0x00], %o3 ! MS (Group)
+ add %o1, 0x1, %o1 ! A1
+ add %o0, 0x1, %o0 ! A0 Group
+ subcc %g2, 0x1, %g2 ! A1
-2: VISEntryHalf
- and %o1, 0x7, %g1
- ba,pt %xcc, 1f
- alignaddr %o1, %g0, %o1
+ bg,pt %icc, 1b ! BR Group
+ EXNV2(stba %o3, [%o0 + -1] %asi, add %o2, %g2) ! MS Group
- .align 64
-1:
- membar #StoreLoad | #StoreStore | #LoadStore
- prefetch [%o1 + 0x000], #one_read
- prefetch [%o1 + 0x040], #one_read
- andn %o2, (0x40 - 1), %o4
- prefetch [%o1 + 0x080], #one_read
- prefetch [%o1 + 0x0c0], #one_read
- ldd [%o1 + 0x000], %f0
- prefetch [%o1 + 0x100], #one_read
- ldd [%o1 + 0x008], %f2
- prefetch [%o1 + 0x140], #one_read
- ldd [%o1 + 0x010], %f4
- prefetch [%o1 + 0x180], #one_read
- faligndata %f0, %f2, %f16
- ldd [%o1 + 0x018], %f6
- faligndata %f2, %f4, %f18
- ldd [%o1 + 0x020], %f8
- faligndata %f4, %f6, %f20
- ldd [%o1 + 0x028], %f10
- faligndata %f6, %f8, %f22
-
- ldd [%o1 + 0x030], %f12
- faligndata %f8, %f10, %f24
- ldd [%o1 + 0x038], %f14
- faligndata %f10, %f12, %f26
- ldd [%o1 + 0x040], %f0
-
- sub %o4, 0x80, %o4
- add %o1, 0x40, %o1
- ba,pt %xcc, 1f
- srl %o4, 6, %o3
+2: VISEntryHalf ! MS+MS
+ and %o1, 0x7, %g1 ! A1
+ ba,pt %xcc, U3copy_to_user_begin ! BR
+ alignaddr %o1, %g0, %o1 ! MS (Break-after)
.align 64
-1:
- ldd [%o1 + 0x008], %f2
- faligndata %f12, %f14, %f28
- ldd [%o1 + 0x010], %f4
- faligndata %f14, %f0, %f30
- EXBLK2(stda %f16, [%o0] ASI_BLK_AIUS)
- ldd [%o1 + 0x018], %f6
- faligndata %f0, %f2, %f16
-
- ldd [%o1 + 0x020], %f8
- faligndata %f2, %f4, %f18
- ldd [%o1 + 0x028], %f10
- faligndata %f4, %f6, %f20
- ldd [%o1 + 0x030], %f12
- faligndata %f6, %f8, %f22
- ldd [%o1 + 0x038], %f14
- faligndata %f8, %f10, %f24
-
- ldd [%o1 + 0x040], %f0
- prefetch [%o1 + 0x180], #one_read
- faligndata %f10, %f12, %f26
- subcc %o3, 0x01, %o3
- add %o1, 0x40, %o1
- bg,pt %XCC, 1b
- add %o0, 0x40, %o0
+U3copy_to_user_begin:
+#ifdef __KERNEL__
+ .globl U3copy_to_user_nop_1_6
+U3copy_to_user_nop_1_6:
+ ldxa [%g0] ASI_DCU_CONTROL_REG, %g3
+ sethi %uhi(DCU_PE), %o3
+ sllx %o3, 32, %o3
+ or %g3, %o3, %o3
+ stxa %o3, [%g0] ASI_DCU_CONTROL_REG ! Enable P-cache
+ membar #Sync
+#endif
+ prefetch [%o1 + 0x000], #one_read ! MS Group1
+ prefetch [%o1 + 0x040], #one_read ! MS Group2
+ andn %o2, (0x40 - 1), %o4 ! A0
+ prefetch [%o1 + 0x080], #one_read ! MS Group3
+ cmp %o4, 0x140 ! A0
+ prefetch [%o1 + 0x0c0], #one_read ! MS Group4
+ ldd [%o1 + 0x000], %f0 ! MS Group5 (%f0 results at G8)
+ bge,a,pt %icc, 1f ! BR
+
+ prefetch [%o1 + 0x100], #one_read ! MS Group6
+1: ldd [%o1 + 0x008], %f2 ! AX (%f2 results at G9)
+ cmp %o4, 0x180 ! A1
+ bge,a,pt %icc, 1f ! BR
+ prefetch [%o1 + 0x140], #one_read ! MS Group7
+1: ldd [%o1 + 0x010], %f4 ! AX (%f4 results at G10)
+ cmp %o4, 0x1c0 ! A1
+ bge,a,pt %icc, 1f ! BR
+
+ prefetch [%o1 + 0x180], #one_read ! MS Group8
+1: faligndata %f0, %f2, %f16 ! FGA Group9 (%f16 at G12)
+ ldd [%o1 + 0x018], %f6 ! AX (%f6 results at G12)
+ faligndata %f2, %f4, %f18 ! FGA Group10 (%f18 results at G13)
+ ldd [%o1 + 0x020], %f8 ! MS (%f8 results at G13)
+ faligndata %f4, %f6, %f20 ! FGA Group12 (1-cycle stall,%f20 at G15)
+ ldd [%o1 + 0x028], %f10 ! MS (%f10 results at G15)
+ faligndata %f6, %f8, %f22 ! FGA Group13 (%f22 results at G16)
+
+ ldd [%o1 + 0x030], %f12 ! MS (%f12 results at G16)
+ faligndata %f8, %f10, %f24 ! FGA Group15 (1-cycle stall,%f24 at G18)
+ ldd [%o1 + 0x038], %f14 ! MS (%f14 results at G18)
+ faligndata %f10, %f12, %f26 ! FGA Group16 (%f26 results at G19)
+ ldd [%o1 + 0x040], %f0 ! MS (%f0 results at G19)
+
+ /* We only use the first loop if len > (7 * 64). */
+ subcc %o4, 0x1c0, %o4 ! A0 Group17
+ bg,pt %icc, U3copy_to_user_loop1 ! BR
+ add %o1, 0x40, %o1 ! A1
+
+ add %o4, 0x140, %o4 ! A0 Group18
+ ba,pt %xcc, U3copy_to_user_loop2 ! BR
+ srl %o4, 6, %o3 ! A0 Group19
+ nop
+ nop
+ nop
+ nop
+ nop
+
+ nop
+ nop
+
+ /* This loop performs the copy and queues new prefetches.
+ * We drop into the second loop when len <= (5 * 64). Note
+ * that this (5 * 64) factor has been subtracted from len
+ * already.
+ */
+U3copy_to_user_loop1:
+ ldd [%o1 + 0x008], %f2 ! MS Group2 (%f2 results at G5)
+ faligndata %f12, %f14, %f28 ! FGA (%f28 results at G5)
+ ldd [%o1 + 0x010], %f4 ! MS Group3 (%f4 results at G6)
+ faligndata %f14, %f0, %f30 ! FGA Group4 (1-cycle stall, %f30 at G7)
+ EXBLK1(stda %f16, [%o0] ASI_BLK_AIUS) ! MS
+ ldd [%o1 + 0x018], %f6 ! AX (%f6 results at G7)
+
+ faligndata %f0, %f2, %f16 ! FGA Group12 (7-cycle stall)
+ ldd [%o1 + 0x020], %f8 ! MS (%f8 results at G15)
+ faligndata %f2, %f4, %f18 ! FGA Group13 (%f18 results at G16)
+ ldd [%o1 + 0x028], %f10 ! MS (%f10 results at G16)
+ faligndata %f4, %f6, %f20 ! FGA Group14 (%f20 results at G17)
+ ldd [%o1 + 0x030], %f12 ! MS (%f12 results at G17)
+ faligndata %f6, %f8, %f22 ! FGA Group15 (%f22 results at G18)
+ ldd [%o1 + 0x038], %f14 ! MS (%f14 results at G18)
+
+ faligndata %f8, %f10, %f24 ! FGA Group16 (%f24 results at G19)
+ ldd [%o1 + 0x040], %f0 ! AX (%f0 results at G19)
+ prefetch [%o1 + 0x180], #one_read ! MS
+ faligndata %f10, %f12, %f26 ! FGA Group17 (%f26 results at G20)
+ subcc %o4, 0x40, %o4 ! A0
+ add %o1, 0x40, %o1 ! A1
+ bg,pt %xcc, U3copy_to_user_loop1 ! BR
+ add %o0, 0x40, %o0 ! A0 Group18
+
+U3copy_to_user_loop2_enter:
+ mov 5, %o3 ! A1
+
+ /* This loop performs on the copy, no new prefetches are
+ * queued. We do things this way so that we do not perform
+ * any spurious prefetches past the end of the src buffer.
+ */
+U3copy_to_user_loop2:
+ ldd [%o1 + 0x008], %f2 ! MS
+ faligndata %f12, %f14, %f28 ! FGA Group2
+ ldd [%o1 + 0x010], %f4 ! MS
+ faligndata %f14, %f0, %f30 ! FGA Group4 (1-cycle stall)
+ EXBLK2(stda %f16, [%o0] ASI_BLK_AIUS) ! MS
+ ldd [%o1 + 0x018], %f6 ! AX
+ faligndata %f0, %f2, %f16 ! FGA Group12 (7-cycle stall)
+
+ ldd [%o1 + 0x020], %f8 ! MS
+ faligndata %f2, %f4, %f18 ! FGA Group13
+ ldd [%o1 + 0x028], %f10 ! MS
+ faligndata %f4, %f6, %f20 ! FGA Group14
+ ldd [%o1 + 0x030], %f12 ! MS
+ faligndata %f6, %f8, %f22 ! FGA Group15
+ ldd [%o1 + 0x038], %f14 ! MS
+ faligndata %f8, %f10, %f24 ! FGA Group16
+
+ ldd [%o1 + 0x040], %f0 ! AX
+ faligndata %f10, %f12, %f26 ! FGA Group17
+ subcc %o3, 0x01, %o3 ! A0
+ add %o1, 0x40, %o1 ! A1
+ bg,pt %xcc, U3copy_to_user_loop2 ! BR
+ add %o0, 0x40, %o0 ! A0 Group18
/* Finally we copy the last full 64-byte block. */
- ldd [%o1 + 0x008], %f2
- faligndata %f12, %f14, %f28
- ldd [%o1 + 0x010], %f4
- faligndata %f14, %f0, %f30
- EXBLK3(stda %f16, [%o0] ASI_BLK_AIUS)
- ldd [%o1 + 0x018], %f6
- faligndata %f0, %f2, %f16
- ldd [%o1 + 0x020], %f8
- faligndata %f2, %f4, %f18
- ldd [%o1 + 0x028], %f10
- faligndata %f4, %f6, %f20
- ldd [%o1 + 0x030], %f12
- faligndata %f6, %f8, %f22
- ldd [%o1 + 0x038], %f14
- faligndata %f8, %f10, %f24
- cmp %g1, 0
- be,pt %XCC, 1f
- add %o0, 0x40, %o0
- ldd [%o1 + 0x040], %f0
-1: faligndata %f10, %f12, %f26
- faligndata %f12, %f14, %f28
- faligndata %f14, %f0, %f30
- EXBLK4(stda %f16, [%o0] ASI_BLK_AIUS)
- add %o0, 0x40, %o0
- add %o1, 0x40, %o1
-
- membar #Sync
+U3copy_to_user_loopfini:
+ ldd [%o1 + 0x008], %f2 ! MS
+ faligndata %f12, %f14, %f28 ! FGA
+ ldd [%o1 + 0x010], %f4 ! MS Group19
+ faligndata %f14, %f0, %f30 ! FGA
+ EXBLK3(stda %f16, [%o0] ASI_BLK_AIUS) ! MS Group20
+ ldd [%o1 + 0x018], %f6 ! AX
+ faligndata %f0, %f2, %f16 ! FGA Group11 (7-cycle stall)
+ ldd [%o1 + 0x020], %f8 ! MS
+ faligndata %f2, %f4, %f18 ! FGA Group12
+ ldd [%o1 + 0x028], %f10 ! MS
+ faligndata %f4, %f6, %f20 ! FGA Group13
+ ldd [%o1 + 0x030], %f12 ! MS
+ faligndata %f6, %f8, %f22 ! FGA Group14
+ ldd [%o1 + 0x038], %f14 ! MS
+ faligndata %f8, %f10, %f24 ! FGA Group15
+ cmp %g1, 0 ! A0
+ be,pt %icc, 1f ! BR
+ add %o0, 0x40, %o0 ! A1
+ ldd [%o1 + 0x040], %f0 ! MS
+1: faligndata %f10, %f12, %f26 ! FGA Group16
+ faligndata %f12, %f14, %f28 ! FGA Group17
+ faligndata %f14, %f0, %f30 ! FGA Group18
+ EXBLK4(stda %f16, [%o0] ASI_BLK_AIUS) ! MS
+ add %o0, 0x40, %o0 ! A0
+ add %o1, 0x40, %o1 ! A1
+#ifdef __KERNEL__
+ .globl U3copy_to_user_nop_2_3
+U3copy_to_user_nop_2_3:
+ mov PRIMARY_CONTEXT, %o3
+ stxa %g0, [%o3] ASI_DMMU ! Flush P-cache
+ stxa %g3, [%g0] ASI_DCU_CONTROL_REG ! Disable P-cache
+#endif
+ membar #Sync ! MS Group26 (7-cycle stall)
/* Now we copy the (len modulo 64) bytes at the end.
* Note how we borrow the %f0 loaded above.
*
* Also notice how this code is careful not to perform a
- * load past the end of the src buffer.
+ * load past the end of the src buffer just like similar
+ * code found in U3copy_to_user_toosmall processing.
*/
- and %o2, 0x3f, %o2
- andcc %o2, 0x38, %g2
- be,pn %XCC, 2f
- subcc %g2, 0x8, %g2
- be,pn %XCC, 2f
- cmp %g1, 0
-
- be,a,pt %XCC, 1f
- ldd [%o1 + 0x00], %f0
-
-1: ldd [%o1 + 0x08], %f2
- add %o1, 0x8, %o1
- sub %o2, 0x8, %o2
- subcc %g2, 0x8, %g2
- faligndata %f0, %f2, %f8
- EX(stda %f8, [%o0 + 0x00] %asi, add %o2, 0x8)
- be,pn %XCC, 2f
- add %o0, 0x8, %o0
- ldd [%o1 + 0x08], %f0
- add %o1, 0x8, %o1
- sub %o2, 0x8, %o2
- subcc %g2, 0x8, %g2
- faligndata %f2, %f0, %f8
- EX(stda %f8, [%o0 + 0x00] %asi, add %o2, 0x8)
- bne,pn %XCC, 1b
- add %o0, 0x8, %o0
+U3copy_to_user_loopend:
+ and %o2, 0x3f, %o2 ! A0 Group
+ andcc %o2, 0x38, %g2 ! A0 Group
+ be,pn %icc, U3copy_to_user_endcruft ! BR
+ subcc %g2, 0x8, %g2 ! A1
+ be,pn %icc, U3copy_to_user_endcruft ! BR Group
+ cmp %g1, 0 ! A0
+
+ be,a,pt %icc, 1f ! BR Group
+ ldd [%o1 + 0x00], %f0 ! MS
+
+1: ldd [%o1 + 0x08], %f2 ! MS Group
+ add %o1, 0x8, %o1 ! A0
+ sub %o2, 0x8, %o2 ! A1
+ subcc %g2, 0x8, %g2 ! A0 Group
+ faligndata %f0, %f2, %f8 ! FGA Group
+ EX(stda %f8, [%o0 + 0x00] %asi, add %o2, 0x8) ! MS (XXX does it stall here? XXX)
+ be,pn %icc, U3copy_to_user_endcruft ! BR
+ add %o0, 0x8, %o0 ! A0
+ ldd [%o1 + 0x08], %f0 ! MS Group
+ add %o1, 0x8, %o1 ! A0
+ sub %o2, 0x8, %o2 ! A1
+ subcc %g2, 0x8, %g2 ! A0 Group
+ faligndata %f2, %f0, %f8 ! FGA
+ EX(stda %f8, [%o0 + 0x00] %asi, add %o2, 0x8) ! MS (XXX does it stall here? XXX)
+ bne,pn %icc, 1b ! BR
+ add %o0, 0x8, %o0 ! A0 Group
/* If anything is left, we copy it one byte at a time.
* Note that %g1 is (src & 0x3) saved above before the
* alignaddr was performed.
*/
-2:
+U3copy_to_user_endcruft:
cmp %o2, 0
add %o1, %g1, %o1
VISExitHalf
- be,pn %XCC, 85f
- sub %o0, %o1, %o3
-
- andcc %g1, 0x7, %g0
- bne,pn %icc, 90f
- andcc %o2, 0x8, %g0
- be,pt %icc, 1f
+ be,pn %icc, U3copy_to_user_short_ret
nop
- ldx [%o1], %o5
- EXNV(stxa %o5, [%o1 + %o3] ASI_AIUS, add %o2, %g0)
- add %o1, 0x8, %o1
+ ba,a,pt %xcc, U3copy_to_user_short
-1: andcc %o2, 0x4, %g0
- be,pt %icc, 1f
- nop
- lduw [%o1], %o5
- EXNV(stwa %o5, [%o1 + %o3] ASI_AIUS, and %o2, 0x7)
- add %o1, 0x4, %o1
+ /* If we get here, then 32 <= len < (6 * 64) */
+U3copy_to_user_toosmall:
-1: andcc %o2, 0x2, %g0
- be,pt %icc, 1f
- nop
- lduh [%o1], %o5
- EXNV(stha %o5, [%o1 + %o3] ASI_AIUS, and %o2, 0x3)
- add %o1, 0x2, %o1
+#ifdef SMALL_COPY_USES_FPU
-1: andcc %o2, 0x1, %g0
- be,pt %icc, 85f
- nop
- ldub [%o1], %o5
- ba,pt %xcc, 85f
- EXNV(stba %o5, [%o1 + %o3] ASI_AIUS, and %o2, 0x1)
-
-70: /* 16 < len <= 64 */
- bne,pn %XCC, 90f
- sub %o0, %o1, %o3
-
- andn %o2, 0x7, %o4
- and %o2, 0x7, %o2
-1: subcc %o4, 0x8, %o4
- ldx [%o1], %o5
- EXNV4(stxa %o5, [%o1 + %o3] ASI_AIUS, add %o2, %o4)
- bgu,pt %XCC, 1b
- add %o1, 0x8, %o1
- andcc %o2, 0x4, %g0
- be,pt %XCC, 1f
- nop
- sub %o2, 0x4, %o2
- lduw [%o1], %o5
- EXNV3(stwa %o5, [%o1 + %o3] ASI_AIUS, add %o2, %g0)
- add %o1, 0x4, %o1
-1: cmp %o2, 0
- be,pt %XCC, 85f
- nop
- ba,pt %xcc, 90f
- nop
+ /* Is 'dst' already aligned on an 8-byte boundary? */
+ be,pt %xcc, 2f ! BR Group
-80: /* 0 < len <= 16 */
- andcc %o3, 0x3, %g0
- bne,pn %XCC, 90f
- sub %o0, %o1, %o3
+ /* Compute abs((dst & 7) - 8) into %g2. This is the number
+ * of bytes to copy to make 'dst' 8-byte aligned. We pre-
+ * subtract this from 'len'.
+ */
+ sub %g2, 0x8, %g2 ! A0
+ sub %g0, %g2, %g2 ! A0 Group (reg-dep)
+ sub %o2, %g2, %o2 ! A0 Group (reg-dep)
-1:
- subcc %o2, 4, %o2
- lduw [%o1], %g1
- EXNV3(stwa %g1, [%o1 + %o3] ASI_AIUS, add %o2, %g0)
- bgu,pt %XCC, 1b
- add %o1, 4, %o1
+ /* Copy %g2 bytes from src to dst, one byte at a time. */
+1: ldub [%o1 + 0x00], %o3 ! MS (Group) (%o3 in 3 cycles)
+ add %o1, 0x1, %o1 ! A1
+ add %o0, 0x1, %o0 ! A0 Group
+ subcc %g2, 0x1, %g2 ! A1
-85: retl
- clr %o0
+ bg,pt %icc, 1b ! BR Group
+ EXNV2(stba %o3, [%o0 + -1] %asi, add %o2, %g2) ! MS Group
- .align 32
-90:
- subcc %o2, 1, %o2
- ldub [%o1], %g1
- EXNV2(stba %g1, [%o1 + %o3] ASI_AIUS, add %o2, %g0)
- bgu,pt %XCC, 90b
- add %o1, 1, %o1
- retl
- clr %o0
+2: VISEntryHalf ! MS+MS
+
+ /* Compute (len - (len % 8)) into %g2. This is guaranteed
+ * to be nonzero.
+ */
+ andn %o2, 0x7, %g2 ! A0 Group
+
+ /* You may read this and believe that it allows reading
+ * one 8-byte longword past the end of src. It actually
+ * does not, as %g2 is subtracted as loads are done from
+ * src, so we always stop before running off the end.
+ * Also, we are guaranteed to have at least 0x10 bytes
+ * to move here.
+ */
+ sub %g2, 0x8, %g2 ! A0 Group (reg-dep)
+ alignaddr %o1, %g0, %g1 ! MS (Break-after)
+ ldd [%g1 + 0x00], %f0 ! MS Group (1-cycle stall)
+ add %g1, 0x8, %g1 ! A0
+
+1: ldd [%g1 + 0x00], %f2 ! MS Group
+ add %g1, 0x8, %g1 ! A0
+ sub %o2, 0x8, %o2 ! A1
+ subcc %g2, 0x8, %g2 ! A0 Group
+
+ faligndata %f0, %f2, %f8 ! FGA Group (1-cycle stall)
+ EX(stda %f8, [%o0 + 0x00] %asi, add %o2, 0x8) ! MS Group (2-cycle stall)
+ add %o1, 0x8, %o1 ! A0
+ be,pn %icc, 2f ! BR
+
+ add %o0, 0x8, %o0 ! A1
+ ldd [%g1 + 0x00], %f0 ! MS Group
+ add %g1, 0x8, %g1 ! A0
+ sub %o2, 0x8, %o2 ! A1
+
+ subcc %g2, 0x8, %g2 ! A0 Group
+ faligndata %f2, %f0, %f8 ! FGA Group (1-cycle stall)
+ EX(stda %f8, [%o0 + 0x00] %asi, add %o2, 0x8) ! MS Group (2-cycle stall)
+ add %o1, 0x8, %o1 ! A0
+
+ bne,pn %icc, 1b ! BR
+ add %o0, 0x8, %o0 ! A1
+
+ /* Nothing left to copy? */
+2: cmp %o2, 0 ! A0 Group
+ VISExitHalf ! A0+MS
+ be,pn %icc, U3copy_to_user_short_ret ! BR Group
+ nop ! A0
+ ba,a,pt %xcc, U3copy_to_user_short ! BR Group
+
+#else /* !(SMALL_COPY_USES_FPU) */
+
+ xor %o1, %o0, %g2
+ andcc %g2, 0x7, %g0
+ bne,pn %icc, U3copy_to_user_short
+ andcc %o1, 0x7, %g2
+
+ be,pt %xcc, 2f
+ sub %g2, 0x8, %g2
+ sub %g0, %g2, %g2
+ sub %o2, %g2, %o2
+
+1: ldub [%o1 + 0x00], %o3
+ add %o1, 0x1, %o1
+ add %o0, 0x1, %o0
+ subcc %g2, 0x1, %g2
+ bg,pt %icc, 1b
+ EXNV2(stba %o3, [%o0 + -1] %asi, add %o2, %g2)
+
+2: andn %o2, 0x7, %g2
+ sub %o2, %g2, %o2
+
+3: ldx [%o1 + 0x00], %o3
+ add %o1, 0x8, %o1
+ add %o0, 0x8, %o0
+ subcc %g2, 0x8, %g2
+ bg,pt %icc, 3b
+ EXNV3(stxa %o3, [%o0 + -8] %asi, add %o2, %g2)
+
+ cmp %o2, 0
+ bne,pn %icc, U3copy_to_user_short
+ nop
+ ba,a,pt %xcc, U3copy_to_user_short_ret
+
+#endif /* !(SMALL_COPY_USES_FPU) */
-/* U3memcpy.S: UltraSparc-III optimized memcpy.
+/* $Id: U3memcpy.S,v 1.2 2000/11/01 09:29:19 davem Exp $
+ * U3memcpy.S: UltraSparc-III optimized memcpy.
*
- * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1999, 2000 David S. Miller (davem@redhat.com)
*/
#ifdef __KERNEL__
#include <asm/asi.h>
#include <asm/dcu.h>
#include <asm/spitfire.h>
+#undef SMALL_COPY_USES_FPU
#else
#define ASI_BLK_P 0xf0
#define FPRS_FEF 0x04
#define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs
#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
+#define SMALL_COPY_USES_FPU
#endif
-#ifndef XCC
-#define XCC xcc
-#endif
-
- .register %g2,#scratch
- .register %g3,#scratch
-
/* Special/non-trivial issues of this code:
*
* 1) %o5 is preserved from VISEntryHalf to VISExitHalf
* of up to 2.4GB per second.
*/
- .globl U3memcpy
-U3memcpy: /* %o0=dst, %o1=src, %o2=len */
- mov %o0, %g5
- cmp %o2, 0
- be,pn %XCC, 85f
- or %o0, %o1, %o3
- cmp %o2, 16
- bleu,a,pn %XCC, 70f
- or %o3, %o2, %o3
-
- cmp %o2, 256
- blu,pt %XCC, 80f
- andcc %o3, 0x7, %g0
-
- ba,pt %xcc, 1f
- andcc %o0, 0x3f, %g2
+ .globl U3memcpy
+U3memcpy: /* %o0=dst, %o1=src, %o2=len */
+#ifndef __KERNEL__
+ /* Save away original 'dst' for memcpy return value. */
+ mov %o0, %g3 ! A0 Group
+#endif
+ /* Anything to copy at all? */
+ cmp %o2, 0 ! A1
+ ble,pn %icc, U3memcpy_short_ret ! BR
+
+ /* Extremely small copy? */
+ cmp %o2, 31 ! A0 Group
+ ble,pn %icc, U3memcpy_short ! BR
+
+ /* Large enough to use unrolled prefetch loops? */
+ cmp %o2, 0x100 ! A1
+ bge,a,pt %icc, U3memcpy_enter ! BR Group
+ andcc %o0, 0x3f, %g2 ! A0
+
+ ba,pt %xcc, U3memcpy_toosmall ! BR Group
+ andcc %o0, 0x7, %g2 ! A0
+
+ .align 32
+U3memcpy_short:
+ /* Copy %o2 bytes from src to dst, one byte at a time. */
+ ldub [%o1 + 0x00], %o3 ! MS Group
+ add %o1, 0x1, %o1 ! A0
+ add %o0, 0x1, %o0 ! A1
+ subcc %o2, 1, %o2 ! A0 Group
+
+ bg,pt %icc, U3memcpy_short ! BR
+ stb %o3, [%o0 + -1] ! MS Group (1-cycle stall)
+
+U3memcpy_short_ret:
+#ifdef __KERNEL__
+ retl ! BR Group (0-4 cycle stall)
+ clr %o0 ! A0
+#else
+ retl ! BR Group (0-4 cycle stall)
+ mov %g3, %o0 ! A0
+#endif
- /* Here len >= 256 and condition codes reflect execution
+ /* Here len >= (6 * 64) and condition codes reflect execution
* of "andcc %o0, 0x7, %g2", done by caller.
*/
.align 64
-1:
+U3memcpy_enter:
/* Is 'dst' already aligned on an 64-byte boundary? */
- be,pt %XCC, 2f
+ be,pt %xcc, 2f ! BR
/* Compute abs((dst & 0x3f) - 0x40) into %g2. This is the number
* of bytes to copy to make 'dst' 64-byte aligned. We pre-
* subtract this from 'len'.
*/
- sub %g2, 0x40, %g2
- sub %g0, %g2, %g2
- sub %o2, %g2, %o2
+ sub %g2, 0x40, %g2 ! A0 Group
+ sub %g0, %g2, %g2 ! A0 Group
+ sub %o2, %g2, %o2 ! A0 Group
/* Copy %g2 bytes from src to dst, one byte at a time. */
-1: ldub [%o1 + 0x00], %o3
- add %o1, 0x1, %o1
- add %o0, 0x1, %o0
- subcc %g2, 0x1, %g2
+1: ldub [%o1 + 0x00], %o3 ! MS (Group)
+ add %o1, 0x1, %o1 ! A1
+ add %o0, 0x1, %o0 ! A0 Group
+ subcc %g2, 0x1, %g2 ! A1
- bg,pt %XCC, 1b
- stb %o3, [%o0 + -1]
-
-2: VISEntryHalf
- and %o1, 0x7, %g1
- ba,pt %xcc, 1f
- alignaddr %o1, %g0, %o1
+ bg,pt %icc, 1b ! BR Group
+ stb %o3, [%o0 + -1] ! MS Group
- .align 64
-1:
- membar #StoreLoad | #StoreStore | #LoadStore
- prefetch [%o1 + 0x000], #one_read
- prefetch [%o1 + 0x040], #one_read
- andn %o2, (0x40 - 1), %o4
- prefetch [%o1 + 0x080], #one_read
- prefetch [%o1 + 0x0c0], #one_read
- ldd [%o1 + 0x000], %f0
- prefetch [%o1 + 0x100], #one_read
- ldd [%o1 + 0x008], %f2
- prefetch [%o1 + 0x140], #one_read
- ldd [%o1 + 0x010], %f4
- prefetch [%o1 + 0x180], #one_read
- faligndata %f0, %f2, %f16
- ldd [%o1 + 0x018], %f6
- faligndata %f2, %f4, %f18
- ldd [%o1 + 0x020], %f8
- faligndata %f4, %f6, %f20
- ldd [%o1 + 0x028], %f10
- faligndata %f6, %f8, %f22
-
- ldd [%o1 + 0x030], %f12
- faligndata %f8, %f10, %f24
- ldd [%o1 + 0x038], %f14
- faligndata %f10, %f12, %f26
- ldd [%o1 + 0x040], %f0
-
- sub %o4, 0x80, %o4
- add %o1, 0x40, %o1
- ba,pt %xcc, 1f
- srl %o4, 6, %o3
+2: VISEntryHalf ! MS+MS
+ and %o1, 0x7, %g1 ! A1
+ ba,pt %xcc, U3memcpy_begin ! BR
+ alignaddr %o1, %g0, %o1 ! MS (Break-after)
.align 64
-1:
- ldd [%o1 + 0x008], %f2
- faligndata %f12, %f14, %f28
- ldd [%o1 + 0x010], %f4
- faligndata %f14, %f0, %f30
- stda %f16, [%o0] ASI_BLK_P
- ldd [%o1 + 0x018], %f6
- faligndata %f0, %f2, %f16
-
- ldd [%o1 + 0x020], %f8
- faligndata %f2, %f4, %f18
- ldd [%o1 + 0x028], %f10
- faligndata %f4, %f6, %f20
- ldd [%o1 + 0x030], %f12
- faligndata %f6, %f8, %f22
- ldd [%o1 + 0x038], %f14
- faligndata %f8, %f10, %f24
-
- ldd [%o1 + 0x040], %f0
- prefetch [%o1 + 0x180], #one_read
- faligndata %f10, %f12, %f26
- subcc %o3, 0x01, %o3
- add %o1, 0x40, %o1
- bg,pt %XCC, 1b
- add %o0, 0x40, %o0
+U3memcpy_begin:
+#ifdef __KERNEL__
+ .globl U3memcpy_nop_1_6
+U3memcpy_nop_1_6:
+ ldxa [%g0] ASI_DCU_CONTROL_REG, %g3
+ sethi %uhi(DCU_PE), %o3
+ sllx %o3, 32, %o3
+ or %g3, %o3, %o3
+ stxa %o3, [%g0] ASI_DCU_CONTROL_REG ! Enable P-cache
+ membar #Sync
+#endif
+ prefetch [%o1 + 0x000], #one_read ! MS Group1
+ prefetch [%o1 + 0x040], #one_read ! MS Group2
+ andn %o2, (0x40 - 1), %o4 ! A0
+ prefetch [%o1 + 0x080], #one_read ! MS Group3
+ cmp %o4, 0x140 ! A0
+ prefetch [%o1 + 0x0c0], #one_read ! MS Group4
+ ldd [%o1 + 0x000], %f0 ! MS Group5 (%f0 results at G8)
+ bge,a,pt %icc, 1f ! BR
+
+ prefetch [%o1 + 0x100], #one_read ! MS Group6
+1: ldd [%o1 + 0x008], %f2 ! AX (%f2 results at G9)
+ cmp %o4, 0x180 ! A1
+ bge,a,pt %icc, 1f ! BR
+ prefetch [%o1 + 0x140], #one_read ! MS Group7
+1: ldd [%o1 + 0x010], %f4 ! AX (%f4 results at G10)
+ cmp %o4, 0x1c0 ! A1
+ bge,a,pt %icc, 1f ! BR
+
+ prefetch [%o1 + 0x180], #one_read ! MS Group8
+1: faligndata %f0, %f2, %f16 ! FGA Group9 (%f16 at G12)
+ ldd [%o1 + 0x018], %f6 ! AX (%f6 results at G12)
+ faligndata %f2, %f4, %f18 ! FGA Group10 (%f18 results at G13)
+ ldd [%o1 + 0x020], %f8 ! MS (%f8 results at G13)
+ faligndata %f4, %f6, %f20 ! FGA Group12 (1-cycle stall,%f20 at G15)
+ ldd [%o1 + 0x028], %f10 ! MS (%f10 results at G15)
+ faligndata %f6, %f8, %f22 ! FGA Group13 (%f22 results at G16)
+
+ ldd [%o1 + 0x030], %f12 ! MS (%f12 results at G16)
+ faligndata %f8, %f10, %f24 ! FGA Group15 (1-cycle stall,%f24 at G18)
+ ldd [%o1 + 0x038], %f14 ! MS (%f14 results at G18)
+ faligndata %f10, %f12, %f26 ! FGA Group16 (%f26 results at G19)
+ ldd [%o1 + 0x040], %f0 ! MS (%f0 results at G19)
+
+ /* We only use the first loop if len > (7 * 64). */
+ subcc %o4, 0x1c0, %o4 ! A0 Group17
+ bg,pt %icc, U3memcpy_loop1 ! BR
+ add %o1, 0x40, %o1 ! A1
+
+ add %o4, 0x140, %o4 ! A0 Group18
+ ba,pt %xcc, U3memcpy_loop2 ! BR
+ srl %o4, 6, %o3 ! A0 Group19
+ nop
+ nop
+ nop
+ nop
+ nop
+
+ nop
+ nop
+
+ /* This loop performs the copy and queues new prefetches.
+ * We drop into the second loop when len <= (5 * 64). Note
+ * that this (5 * 64) factor has been subtracted from len
+ * already.
+ */
+U3memcpy_loop1:
+ ldd [%o1 + 0x008], %f2 ! MS Group2 (%f2 results at G5)
+ faligndata %f12, %f14, %f28 ! FGA (%f28 results at G5)
+ ldd [%o1 + 0x010], %f4 ! MS Group3 (%f4 results at G6)
+ faligndata %f14, %f0, %f30 ! FGA Group4 (1-cycle stall, %f30 at G7)
+ stda %f16, [%o0] ASI_BLK_P ! MS
+ ldd [%o1 + 0x018], %f6 ! AX (%f6 results at G7)
+
+ faligndata %f0, %f2, %f16 ! FGA Group12 (7-cycle stall)
+ ldd [%o1 + 0x020], %f8 ! MS (%f8 results at G15)
+ faligndata %f2, %f4, %f18 ! FGA Group13 (%f18 results at G16)
+ ldd [%o1 + 0x028], %f10 ! MS (%f10 results at G16)
+ faligndata %f4, %f6, %f20 ! FGA Group14 (%f20 results at G17)
+ ldd [%o1 + 0x030], %f12 ! MS (%f12 results at G17)
+ faligndata %f6, %f8, %f22 ! FGA Group15 (%f22 results at G18)
+ ldd [%o1 + 0x038], %f14 ! MS (%f14 results at G18)
+
+ faligndata %f8, %f10, %f24 ! FGA Group16 (%f24 results at G19)
+ ldd [%o1 + 0x040], %f0 ! AX (%f0 results at G19)
+ prefetch [%o1 + 0x180], #one_read ! MS
+ faligndata %f10, %f12, %f26 ! FGA Group17 (%f26 results at G20)
+ subcc %o4, 0x40, %o4 ! A0
+ add %o1, 0x40, %o1 ! A1
+ bg,pt %xcc, U3memcpy_loop1 ! BR
+ add %o0, 0x40, %o0 ! A0 Group18
+
+U3memcpy_loop2_enter:
+ mov 5, %o3 ! A1
+
+ /* This loop performs on the copy, no new prefetches are
+ * queued. We do things this way so that we do not perform
+ * any spurious prefetches past the end of the src buffer.
+ */
+U3memcpy_loop2:
+ ldd [%o1 + 0x008], %f2 ! MS
+ faligndata %f12, %f14, %f28 ! FGA Group2
+ ldd [%o1 + 0x010], %f4 ! MS
+ faligndata %f14, %f0, %f30 ! FGA Group4 (1-cycle stall)
+ stda %f16, [%o0] ASI_BLK_P ! MS
+ ldd [%o1 + 0x018], %f6 ! AX
+ faligndata %f0, %f2, %f16 ! FGA Group12 (7-cycle stall)
+
+ ldd [%o1 + 0x020], %f8 ! MS
+ faligndata %f2, %f4, %f18 ! FGA Group13
+ ldd [%o1 + 0x028], %f10 ! MS
+ faligndata %f4, %f6, %f20 ! FGA Group14
+ ldd [%o1 + 0x030], %f12 ! MS
+ faligndata %f6, %f8, %f22 ! FGA Group15
+ ldd [%o1 + 0x038], %f14 ! MS
+ faligndata %f8, %f10, %f24 ! FGA Group16
+
+ ldd [%o1 + 0x040], %f0 ! AX
+ faligndata %f10, %f12, %f26 ! FGA Group17
+ subcc %o3, 0x01, %o3 ! A0
+ add %o1, 0x40, %o1 ! A1
+ bg,pt %xcc, U3memcpy_loop2 ! BR
+ add %o0, 0x40, %o0 ! A0 Group18
/* Finally we copy the last full 64-byte block. */
- ldd [%o1 + 0x008], %f2
- faligndata %f12, %f14, %f28
- ldd [%o1 + 0x010], %f4
- faligndata %f14, %f0, %f30
- stda %f16, [%o0] ASI_BLK_P
- ldd [%o1 + 0x018], %f6
- faligndata %f0, %f2, %f16
- ldd [%o1 + 0x020], %f8
- faligndata %f2, %f4, %f18
- ldd [%o1 + 0x028], %f10
- faligndata %f4, %f6, %f20
- ldd [%o1 + 0x030], %f12
- faligndata %f6, %f8, %f22
- ldd [%o1 + 0x038], %f14
- faligndata %f8, %f10, %f24
- cmp %g1, 0
- be,pt %XCC, 1f
- add %o0, 0x40, %o0
- ldd [%o1 + 0x040], %f0
-1: faligndata %f10, %f12, %f26
- faligndata %f12, %f14, %f28
- faligndata %f14, %f0, %f30
- stda %f16, [%o0] ASI_BLK_P
- add %o0, 0x40, %o0
- add %o1, 0x40, %o1
- membar #Sync
+U3memcpy_loopfini:
+ ldd [%o1 + 0x008], %f2 ! MS
+ faligndata %f12, %f14, %f28 ! FGA
+ ldd [%o1 + 0x010], %f4 ! MS Group19
+ faligndata %f14, %f0, %f30 ! FGA
+ stda %f16, [%o0] ASI_BLK_P ! MS Group20
+ ldd [%o1 + 0x018], %f6 ! AX
+ faligndata %f0, %f2, %f16 ! FGA Group11 (7-cycle stall)
+ ldd [%o1 + 0x020], %f8 ! MS
+ faligndata %f2, %f4, %f18 ! FGA Group12
+ ldd [%o1 + 0x028], %f10 ! MS
+ faligndata %f4, %f6, %f20 ! FGA Group13
+ ldd [%o1 + 0x030], %f12 ! MS
+ faligndata %f6, %f8, %f22 ! FGA Group14
+ ldd [%o1 + 0x038], %f14 ! MS
+ faligndata %f8, %f10, %f24 ! FGA Group15
+ cmp %g1, 0 ! A0
+ be,pt %icc, 1f ! BR
+ add %o0, 0x40, %o0 ! A1
+ ldd [%o1 + 0x040], %f0 ! MS
+1: faligndata %f10, %f12, %f26 ! FGA Group16
+ faligndata %f12, %f14, %f28 ! FGA Group17
+ faligndata %f14, %f0, %f30 ! FGA Group18
+ stda %f16, [%o0] ASI_BLK_P ! MS
+ add %o0, 0x40, %o0 ! A0
+ add %o1, 0x40, %o1 ! A1
+#ifdef __KERNEL__
+ .globl U3memcpy_nop_2_3
+U3memcpy_nop_2_3:
+ mov PRIMARY_CONTEXT, %o3
+ stxa %g0, [%o3] ASI_DMMU ! Flush P-cache
+ stxa %g3, [%g0] ASI_DCU_CONTROL_REG ! Disable P-cache
+#endif
+ membar #Sync ! MS Group26 (7-cycle stall)
/* Now we copy the (len modulo 64) bytes at the end.
* Note how we borrow the %f0 loaded above.
*
* Also notice how this code is careful not to perform a
- * load past the end of the src buffer.
+ * load past the end of the src buffer just like similar
+ * code found in U3memcpy_toosmall processing.
*/
- and %o2, 0x3f, %o2
- andcc %o2, 0x38, %g2
- be,pn %XCC, 2f
- subcc %g2, 0x8, %g2
- be,pn %XCC, 2f
- cmp %g1, 0
-
- be,a,pt %XCC, 1f
- ldd [%o1 + 0x00], %f0
-
-1: ldd [%o1 + 0x08], %f2
- add %o1, 0x8, %o1
- sub %o2, 0x8, %o2
- subcc %g2, 0x8, %g2
- faligndata %f0, %f2, %f8
- std %f8, [%o0 + 0x00]
- be,pn %XCC, 2f
- add %o0, 0x8, %o0
- ldd [%o1 + 0x08], %f0
- add %o1, 0x8, %o1
- sub %o2, 0x8, %o2
- subcc %g2, 0x8, %g2
- faligndata %f2, %f0, %f8
- std %f8, [%o0 + 0x00]
- bne,pn %XCC, 1b
- add %o0, 0x8, %o0
+U3memcpy_loopend:
+ and %o2, 0x3f, %o2 ! A0 Group
+ andcc %o2, 0x38, %g2 ! A0 Group
+ be,pn %icc, U3memcpy_endcruft ! BR
+ subcc %g2, 0x8, %g2 ! A1
+ be,pn %icc, U3memcpy_endcruft ! BR Group
+ cmp %g1, 0 ! A0
+
+ be,a,pt %icc, 1f ! BR Group
+ ldd [%o1 + 0x00], %f0 ! MS
+
+1: ldd [%o1 + 0x08], %f2 ! MS Group
+ add %o1, 0x8, %o1 ! A0
+ sub %o2, 0x8, %o2 ! A1
+ subcc %g2, 0x8, %g2 ! A0 Group
+ faligndata %f0, %f2, %f8 ! FGA Group
+ std %f8, [%o0 + 0x00] ! MS (XXX does it stall here? XXX)
+ be,pn %icc, U3memcpy_endcruft ! BR
+ add %o0, 0x8, %o0 ! A0
+ ldd [%o1 + 0x08], %f0 ! MS Group
+ add %o1, 0x8, %o1 ! A0
+ sub %o2, 0x8, %o2 ! A1
+ subcc %g2, 0x8, %g2 ! A0 Group
+ faligndata %f2, %f0, %f8 ! FGA
+ std %f8, [%o0 + 0x00] ! MS (XXX does it stall here? XXX)
+ bne,pn %icc, 1b ! BR
+ add %o0, 0x8, %o0 ! A0 Group
/* If anything is left, we copy it one byte at a time.
* Note that %g1 is (src & 0x3) saved above before the
* alignaddr was performed.
*/
-2:
+U3memcpy_endcruft:
cmp %o2, 0
add %o1, %g1, %o1
VISExitHalf
- be,pn %XCC, 85f
- sub %o0, %o1, %o3
-
- andcc %g1, 0x7, %g0
- bne,pn %icc, 90f
- andcc %o2, 0x8, %g0
- be,pt %icc, 1f
+ be,pn %icc, U3memcpy_short_ret
nop
- ldx [%o1], %o5
- stx %o5, [%o1 + %o3]
- add %o1, 0x8, %o1
+ ba,a,pt %xcc, U3memcpy_short
-1: andcc %o2, 0x4, %g0
- be,pt %icc, 1f
- nop
- lduw [%o1], %o5
- stw %o5, [%o1 + %o3]
- add %o1, 0x4, %o1
+ /* If we get here, then 32 <= len < (6 * 64) */
+U3memcpy_toosmall:
-1: andcc %o2, 0x2, %g0
- be,pt %icc, 1f
- nop
- lduh [%o1], %o5
- sth %o5, [%o1 + %o3]
- add %o1, 0x2, %o1
+#ifdef SMALL_COPY_USES_FPU
-1: andcc %o2, 0x1, %g0
- be,pt %icc, 85f
- nop
- ldub [%o1], %o5
- ba,pt %xcc, 85f
- stb %o5, [%o1 + %o3]
-
-70: /* 16 < len <= 64 */
- bne,pn %XCC, 90f
- sub %o0, %o1, %o3
-
- andn %o2, 0x7, %o4
- and %o2, 0x7, %o2
-1: subcc %o4, 0x8, %o4
- ldx [%o1], %o5
- stx %o5, [%o1 + %o3]
- bgu,pt %XCC, 1b
- add %o1, 0x8, %o1
- andcc %o2, 0x4, %g0
- be,pt %XCC, 1f
- nop
- sub %o2, 0x4, %o2
- lduw [%o1], %o5
- stw %o5, [%o1 + %o3]
- add %o1, 0x4, %o1
-1: cmp %o2, 0
- be,pt %XCC, 85f
- nop
- ba,pt %xcc, 90f
- nop
+ /* Is 'dst' already aligned on an 8-byte boundary? */
+ be,pt %xcc, 2f ! BR Group
+
+ /* Compute abs((dst & 7) - 8) into %g2. This is the number
+ * of bytes to copy to make 'dst' 8-byte aligned. We pre-
+ * subtract this from 'len'.
+ */
+ sub %g2, 0x8, %g2 ! A0
+ sub %g0, %g2, %g2 ! A0 Group (reg-dep)
+ sub %o2, %g2, %o2 ! A0 Group (reg-dep)
-80: /* 0 < len <= 16 */
- andcc %o3, 0x3, %g0
- bne,pn %XCC, 90f
- sub %o0, %o1, %o3
+ /* Copy %g2 bytes from src to dst, one byte at a time. */
+1: ldub [%o1 + 0x00], %o3 ! MS (Group) (%o3 in 3 cycles)
+ add %o1, 0x1, %o1 ! A1
+ add %o0, 0x1, %o0 ! A0 Group
+ subcc %g2, 0x1, %g2 ! A1
-1:
- subcc %o2, 4, %o2
- lduw [%o1], %g1
- stw %g1, [%o1 + %o3]
- bgu,pt %XCC, 1b
- add %o1, 4, %o1
+ bg,pt %icc, 1b ! BR Group
+ stb %o3, [%o0 + -1] ! MS Group
-85: retl
- mov %g5, %o0
+2: VISEntryHalf ! MS+MS
- .align 32
-90:
- subcc %o2, 1, %o2
- ldub [%o1], %g1
- stb %g1, [%o1 + %o3]
- bgu,pt %XCC, 90b
- add %o1, 1, %o1
- retl
- mov %g5, %o0
-
- /* Act like copy_{to,in}_user(), ie. return zero instead
- * of original destination pointer. This is invoked when
- * copy_{to,in}_user() finds that %asi is kernel space.
+ /* Compute (len - (len % 8)) into %g2. This is guaranteed
+ * to be nonzero.
*/
- .globl U3memcpy_user_stub
-U3memcpy_user_stub:
- save %sp, -192, %sp
- mov %i0, %o0
- mov %i1, %o1
- call U3memcpy
- mov %i2, %o2
- ret
- restore %g0, %g0, %o0
+ andn %o2, 0x7, %g2 ! A0 Group
+
+ /* You may read this and believe that it allows reading
+ * one 8-byte longword past the end of src. It actually
+ * does not, as %g2 is subtracted as loads are done from
+ * src, so we always stop before running off the end.
+ * Also, we are guaranteed to have at least 0x10 bytes
+ * to move here.
+ */
+ sub %g2, 0x8, %g2 ! A0 Group (reg-dep)
+ alignaddr %o1, %g0, %g1 ! MS (Break-after)
+ ldd [%g1 + 0x00], %f0 ! MS Group (1-cycle stall)
+ add %g1, 0x8, %g1 ! A0
+
+1: ldd [%g1 + 0x00], %f2 ! MS Group
+ add %g1, 0x8, %g1 ! A0
+ sub %o2, 0x8, %o2 ! A1
+ subcc %g2, 0x8, %g2 ! A0 Group
+
+ faligndata %f0, %f2, %f8 ! FGA Group (1-cycle stall)
+ std %f8, [%o0 + 0x00] ! MS Group (2-cycle stall)
+ add %o1, 0x8, %o1 ! A0
+ be,pn %icc, 2f ! BR
+
+ add %o0, 0x8, %o0 ! A1
+ ldd [%g1 + 0x00], %f0 ! MS Group
+ add %g1, 0x8, %g1 ! A0
+ sub %o2, 0x8, %o2 ! A1
+
+ subcc %g2, 0x8, %g2 ! A0 Group
+ faligndata %f2, %f0, %f8 ! FGA Group (1-cycle stall)
+ std %f8, [%o0 + 0x00] ! MS Group (2-cycle stall)
+ add %o1, 0x8, %o1 ! A0
+
+ bne,pn %icc, 1b ! BR
+ add %o0, 0x8, %o0 ! A1
+
+ /* Nothing left to copy? */
+2: cmp %o2, 0 ! A0 Group
+ VISExitHalf ! A0+MS
+ be,pn %icc, U3memcpy_short_ret ! BR Group
+ nop ! A0
+ ba,a,pt %xcc, U3memcpy_short ! BR Group
+
+#else /* !(SMALL_COPY_USES_FPU) */
+
+ xor %o1, %o0, %g2
+ andcc %g2, 0x7, %g0
+ bne,pn %icc, U3memcpy_short
+ andcc %o1, 0x7, %g2
+
+ be,pt %xcc, 2f
+ sub %g2, 0x8, %g2
+ sub %g0, %g2, %g2
+ sub %o2, %g2, %o2
+
+1: ldub [%o1 + 0x00], %o3
+ add %o1, 0x1, %o1
+ add %o0, 0x1, %o0
+ subcc %g2, 0x1, %g2
+ bg,pt %icc, 1b
+ stb %o3, [%o0 + -1]
+
+2: andn %o2, 0x7, %g2
+ sub %o2, %g2, %o2
+
+3: ldx [%o1 + 0x00], %o3
+ add %o1, 0x8, %o1
+ add %o0, 0x8, %o0
+ subcc %g2, 0x8, %g2
+ bg,pt %icc, 3b
+ stx %o3, [%o0 + -8]
+
+ cmp %o2, 0
+ bne,pn %icc, U3memcpy_short
+ nop
+ ba,a,pt %xcc, U3memcpy_short_ret
+
+#endif /* !(SMALL_COPY_USES_FPU) */
.text
.align 32
#ifdef __KERNEL__
+ .globl __bzero_begin
+__bzero_begin:
.globl __bzero, __bzero_noasi
__bzero_noasi:
rd %asi, %g5
ba,pt %xcc, VISbzerofixup_ret0
sub %o1, %g2, %o0
#endif
+ .globl __bzero_end
+__bzero_end:
.type bcopy,@function
#ifdef __KERNEL__
+ .globl __memcpy_begin
+__memcpy_begin:
+
+ .globl __memcpy
+ .type __memcpy,@function
+
memcpy_private:
+__memcpy:
memcpy: mov ASI_P, asi_src ! IEU0 Group
brnz,pt %o2, __memcpy_entry ! CTI
mov ASI_P, asi_dest ! IEU1
or %g3, %lo(NOP), %g3; \
stw %g3, [%g2 + 0x4]; \
flush %g2;
+#define ULTRA3_PCACHE_DO_NOP(symbol) \
+ sethi %hi(symbol##_nop_1_6), %g1; \
+ or %g1, %lo(symbol##_nop_1_6), %g1; \
+ sethi %hi(NOP), %g2; \
+ stw %g2, [%g1 + 0x00]; \
+ stw %g2, [%g1 + 0x04]; \
+ flush %g1 + 0x00; \
+ stw %g2, [%g1 + 0x08]; \
+ stw %g2, [%g1 + 0x0c]; \
+ flush %g1 + 0x08; \
+ stw %g2, [%g1 + 0x10]; \
+ stw %g2, [%g1 + 0x14]; \
+ flush %g1 + 0x10; \
+ sethi %hi(symbol##_nop_2_3), %g1; \
+ or %g1, %lo(symbol##_nop_2_3), %g1; \
+ stw %g2, [%g1 + 0x00]; \
+ stw %g2, [%g1 + 0x04]; \
+ flush %g1 + 0x00; \
+ stw %g2, [%g1 + 0x08]; \
+ flush %g1 + 0x08;
+
+#include <asm/dcu.h>
.globl cheetah_patch_copyops
cheetah_patch_copyops:
ULTRA3_DO_PATCH(__copy_from_user, U3copy_from_user)
ULTRA3_DO_PATCH(__copy_to_user, U3copy_to_user)
ULTRA3_DO_PATCH(__copy_in_user, U3copy_in_user)
+#if 0 /* Causes data corruption, nop out the optimization
+ * for now -DaveM
+ */
+ ldxa [%g0] ASI_DCU_CONTROL_REG, %g3
+ sethi %uhi(DCU_PE), %o3
+ sllx %o3, 32, %o3
+ andcc %g3, %o3, %g0
+ be,pn %xcc, pcache_disabled
+ nop
+#endif
+ ULTRA3_PCACHE_DO_NOP(U3memcpy)
+ ULTRA3_PCACHE_DO_NOP(U3copy_from_user)
+ ULTRA3_PCACHE_DO_NOP(U3copy_to_user)
+ ULTRA3_PCACHE_DO_NOP(cheetah_copy_user_page)
+#if 0
+pcache_disabled:
+#endif
retl
nop
#undef BRANCH_ALWAYS
FPU_RETL
#ifdef __KERNEL__
+ .globl __memcpy_end
+__memcpy_end:
+
.section .fixup
.align 4
VIScopyfixup_reto2:
.text
.align 64
+ .globl atomic_impl_begin, atomic_impl_end
+
.globl __atomic_add
+atomic_impl_begin:
__atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
lduw [%o1], %g5
add %g5, %o0, %g7
retl
sub %g7, %o0, %o0
+atomic_impl_end:
.text
.align 64
+ .globl __bitops_begin
+__bitops_begin:
+
.globl ___test_and_set_bit
___test_and_set_bit: /* %o0=nr, %o1=addr */
srlx %o0, 6, %g1
lduwa [%o1] ASI_PL, %g7
2: retl
membar #StoreLoad | #StoreStore
+
+ .globl __bitops_end
+__bitops_end:
--- /dev/null
+/* $Id: blockops.S,v 1.42 2002/02/09 19:49:30 davem Exp $
+ * blockops.S: UltraSparc block zero optimized routines.
+ *
+ * Copyright (C) 1996, 1998, 1999, 2000 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1997 Jakub Jelinek (jakub@redhat.com)
+ */
+
+#include "VIS.h"
+#include <asm/visasm.h>
+#include <asm/thread_info.h>
+#include <asm/page.h>
+#include <asm/dcu.h>
+#include <asm/spitfire.h>
+#include <asm/pgtable.h>
+
+#define TOUCH(reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7) \
+ fmovd %reg0, %f48; fmovd %reg1, %f50; \
+ fmovd %reg2, %f52; fmovd %reg3, %f54; \
+ fmovd %reg4, %f56; fmovd %reg5, %f58; \
+ fmovd %reg6, %f60; fmovd %reg7, %f62;
+
+#define DCACHE_SIZE (PAGE_SIZE * 2)
+#define TLBTEMP_ENT1 (60 << 3)
+#define TLBTEMP_ENT2 (61 << 3)
+#define TLBTEMP_ENTSZ (1 << 3)
+
+#if (PAGE_SHIFT == 13) || (PAGE_SHIFT == 19)
+#define PAGE_SIZE_REM 0x80
+#elif (PAGE_SHIFT == 16) || (PAGE_SHIFT == 22)
+#define PAGE_SIZE_REM 0x100
+#else
+#error Wrong PAGE_SHIFT specified
+#endif
+
+ .text
+
+ .align 32
+ .globl copy_user_page
+ .type copy_user_page,@function
+copy_user_page: /* %o0=dest, %o1=src, %o2=vaddr */
+ VISEntry
+ sethi %hi(PAGE_SIZE), %g3
+ sethi %uhi(PAGE_OFFSET), %g2
+ sllx %g2, 32, %g2
+ sub %o0, %g2, %g1
+ and %o2, %g3, %o0
+ sethi %hi(TLBTEMP_BASE), %o3
+ sethi %uhi(_PAGE_VALID | _PAGE_SZBITS), %g3
+ sub %o1, %g2, %g2
+ sllx %g3, 32, %g3
+ mov TLB_TAG_ACCESS, %o2
+ or %g3, (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W), %g3
+ sethi %hi(DCACHE_SIZE), %o1
+ or %g1, %g3, %g1
+ or %g2, %g3, %g2
+ add %o0, %o3, %o0
+ add %o0, %o1, %o1
+#define FIX_INSN_1 0x96102060 /* mov (12 << 3), %o3 */
+cheetah_patch_1:
+ mov TLBTEMP_ENT1, %o3
+ rdpr %pstate, %g3
+ wrpr %g3, PSTATE_IE, %pstate
+
+ /* Do this now, before loading the fixed TLB entries for copying,
+ * so we do not risk a multiple TLB match condition later when
+ * restoring those entries.
+ */
+ ldx [%g6 + TI_FLAGS], %g3
+
+ /* Spitfire Errata #32 workaround */
+ mov PRIMARY_CONTEXT, %o4
+ stxa %g0, [%o4] ASI_DMMU
+ membar #Sync
+
+ ldxa [%o3] ASI_DTLB_TAG_READ, %o4
+
+ /* Spitfire Errata #32 workaround */
+ mov PRIMARY_CONTEXT, %o5
+ stxa %g0, [%o5] ASI_DMMU
+ membar #Sync
+
+ ldxa [%o3] ASI_DTLB_DATA_ACCESS, %g0
+ ldxa [%o3] ASI_DTLB_DATA_ACCESS, %o5
+ stxa %o0, [%o2] ASI_DMMU
+ stxa %g1, [%o3] ASI_DTLB_DATA_ACCESS
+ membar #Sync
+ add %o3, (TLBTEMP_ENTSZ), %o3
+
+ /* Spitfire Errata #32 workaround */
+ mov PRIMARY_CONTEXT, %g5
+ stxa %g0, [%g5] ASI_DMMU
+ membar #Sync
+
+ ldxa [%o3] ASI_DTLB_TAG_READ, %g5
+
+ /* Spitfire Errata #32 workaround */
+ mov PRIMARY_CONTEXT, %g7
+ stxa %g0, [%g7] ASI_DMMU
+ membar #Sync
+
+ ldxa [%o3] ASI_DTLB_DATA_ACCESS, %g0
+ ldxa [%o3] ASI_DTLB_DATA_ACCESS, %g7
+ stxa %o1, [%o2] ASI_DMMU
+ stxa %g2, [%o3] ASI_DTLB_DATA_ACCESS
+ membar #Sync
+
+ andcc %g3, _TIF_BLKCOMMIT, %g0
+ bne,pn %xcc, copy_page_using_blkcommit
+ nop
+
+ BRANCH_IF_ANY_CHEETAH(g3,o2,cheetah_copy_user_page)
+ ba,pt %xcc, spitfire_copy_user_page
+ nop
+
+cheetah_copy_user_page:
+ .globl cheetah_copy_user_page_nop_1_6
+cheetah_copy_user_page_nop_1_6:
+ ldxa [%g0] ASI_DCU_CONTROL_REG, %g3
+ sethi %uhi(DCU_PE), %o2
+ sllx %o2, 32, %o2
+ or %g3, %o2, %o2
+ stxa %o2, [%g0] ASI_DCU_CONTROL_REG ! Enable P-cache
+ membar #Sync
+
+ sethi %hi((PAGE_SIZE/64)-7), %o2 ! A0 Group
+ prefetch [%o1 + 0x000], #one_read ! MS
+ or %o2, %lo((PAGE_SIZE/64)-7), %o2 ! A1 Group
+ prefetch [%o1 + 0x040], #one_read ! MS
+ prefetch [%o1 + 0x080], #one_read ! MS Group
+ prefetch [%o1 + 0x0c0], #one_read ! MS Group
+ ldd [%o1 + 0x000], %f0 ! MS Group
+ prefetch [%o1 + 0x100], #one_read ! MS Group
+ ldd [%o1 + 0x008], %f2 ! AX
+ prefetch [%o1 + 0x140], #one_read ! MS Group
+ ldd [%o1 + 0x010], %f4 ! AX
+ prefetch [%o1 + 0x180], #one_read ! MS Group
+ fmovd %f0, %f32 ! FGA Group
+ ldd [%o1 + 0x018], %f6 ! AX
+ fmovd %f2, %f34 ! FGA Group
+ ldd [%o1 + 0x020], %f8 ! MS
+ fmovd %f4, %f36 ! FGA Group
+ ldd [%o1 + 0x028], %f10 ! AX
+ membar #StoreStore ! MS
+ fmovd %f6, %f38 ! FGA Group
+ ldd [%o1 + 0x030], %f12 ! MS
+ fmovd %f8, %f40 ! FGA Group
+ ldd [%o1 + 0x038], %f14 ! AX
+ fmovd %f10, %f42 ! FGA Group
+ ldd [%o1 + 0x040], %f16 ! MS
+1: ldd [%o1 + 0x048], %f2 ! AX (Group)
+ fmovd %f12, %f44 ! FGA
+ ldd [%o1 + 0x050], %f4 ! MS
+ fmovd %f14, %f46 ! FGA Group
+ stda %f32, [%o0] ASI_BLK_P ! MS
+ ldd [%o1 + 0x058], %f6 ! AX
+ fmovd %f16, %f32 ! FGA Group (8-cycle stall)
+ ldd [%o1 + 0x060], %f8 ! MS
+ fmovd %f2, %f34 ! FGA Group
+ ldd [%o1 + 0x068], %f10 ! AX
+ fmovd %f4, %f36 ! FGA Group
+ ldd [%o1 + 0x070], %f12 ! MS
+ fmovd %f6, %f38 ! FGA Group
+ ldd [%o1 + 0x078], %f14 ! AX
+ fmovd %f8, %f40 ! FGA Group
+ ldd [%o1 + 0x080], %f16 ! AX
+ prefetch [%o1 + 0x180], #one_read ! MS
+ fmovd %f10, %f42 ! FGA Group
+ subcc %o2, 1, %o2 ! A0
+ add %o0, 0x40, %o0 ! A1
+ bne,pt %xcc, 1b ! BR
+ add %o1, 0x40, %o1 ! A0 Group
+
+ mov 5, %o2 ! A0 Group
+1: ldd [%o1 + 0x048], %f2 ! AX
+ fmovd %f12, %f44 ! FGA
+ ldd [%o1 + 0x050], %f4 ! MS
+ fmovd %f14, %f46 ! FGA Group
+ stda %f32, [%o0] ASI_BLK_P ! MS
+ ldd [%o1 + 0x058], %f6 ! AX
+ fmovd %f16, %f32 ! FGA Group (8-cycle stall)
+ ldd [%o1 + 0x060], %f8 ! MS
+ fmovd %f2, %f34 ! FGA Group
+ ldd [%o1 + 0x068], %f10 ! AX
+ fmovd %f4, %f36 ! FGA Group
+ ldd [%o1 + 0x070], %f12 ! MS
+ fmovd %f6, %f38 ! FGA Group
+ ldd [%o1 + 0x078], %f14 ! AX
+ fmovd %f8, %f40 ! FGA Group
+ ldd [%o1 + 0x080], %f16 ! MS
+ fmovd %f10, %f42 ! FGA Group
+ subcc %o2, 1, %o2 ! A0
+ add %o0, 0x40, %o0 ! A1
+ bne,pt %xcc, 1b ! BR
+ add %o1, 0x40, %o1 ! A0 Group
+
+ ldd [%o1 + 0x048], %f2 ! AX
+ fmovd %f12, %f44 ! FGA
+ ldd [%o1 + 0x050], %f4 ! MS
+ fmovd %f14, %f46 ! FGA Group
+ stda %f32, [%o0] ASI_BLK_P ! MS
+ ldd [%o1 + 0x058], %f6 ! AX
+ fmovd %f16, %f32 ! FGA Group (8-cycle stall)
+ ldd [%o1 + 0x060], %f8 ! MS
+ fmovd %f2, %f34 ! FGA Group
+ ldd [%o1 + 0x068], %f10 ! AX
+ fmovd %f4, %f36 ! FGA Group
+ ldd [%o1 + 0x070], %f12 ! MS
+ fmovd %f6, %f38 ! FGA Group
+ add %o0, 0x40, %o0 ! A0
+ ldd [%o1 + 0x078], %f14 ! AX
+ fmovd %f8, %f40 ! FGA Group
+ fmovd %f10, %f42 ! FGA Group
+ fmovd %f12, %f44 ! FGA Group
+ fmovd %f14, %f46 ! FGA Group
+ stda %f32, [%o0] ASI_BLK_P ! MS
+ .globl cheetah_copy_user_page_nop_2_3
+cheetah_copy_user_page_nop_2_3:
+ mov PRIMARY_CONTEXT, %o2
+ stxa %g0, [%o2] ASI_DMMU ! Flush P-cache
+ stxa %g3, [%g0] ASI_DCU_CONTROL_REG ! Disable P-cache
+ ba,a,pt %xcc, copy_user_page_continue
+
+spitfire_copy_user_page:
+ ldda [%o1] ASI_BLK_P, %f0
+ add %o1, 0x40, %o1
+ ldda [%o1] ASI_BLK_P, %f16
+ add %o1, 0x40, %o1
+ sethi %hi(PAGE_SIZE), %o2
+1: TOUCH(f0, f2, f4, f6, f8, f10, f12, f14)
+ ldda [%o1] ASI_BLK_P, %f32
+ stda %f48, [%o0] ASI_BLK_P
+ add %o1, 0x40, %o1
+ sub %o2, 0x40, %o2
+ add %o0, 0x40, %o0
+ TOUCH(f16, f18, f20, f22, f24, f26, f28, f30)
+ ldda [%o1] ASI_BLK_P, %f0
+ stda %f48, [%o0] ASI_BLK_P
+ add %o1, 0x40, %o1
+ sub %o2, 0x40, %o2
+ add %o0, 0x40, %o0
+ TOUCH(f32, f34, f36, f38, f40, f42, f44, f46)
+ ldda [%o1] ASI_BLK_P, %f16
+ stda %f48, [%o0] ASI_BLK_P
+ sub %o2, 0x40, %o2
+ add %o1, 0x40, %o1
+ cmp %o2, PAGE_SIZE_REM
+ bne,pt %xcc, 1b
+ add %o0, 0x40, %o0
+#if (PAGE_SHIFT == 16) || (PAGE_SHIFT == 22)
+ TOUCH(f0, f2, f4, f6, f8, f10, f12, f14)
+ ldda [%o1] ASI_BLK_P, %f32
+ stda %f48, [%o0] ASI_BLK_P
+ add %o1, 0x40, %o1
+ sub %o2, 0x40, %o2
+ add %o0, 0x40, %o0
+ TOUCH(f16, f18, f20, f22, f24, f26, f28, f30)
+ ldda [%o1] ASI_BLK_P, %f0
+ stda %f48, [%o0] ASI_BLK_P
+ add %o1, 0x40, %o1
+ sub %o2, 0x40, %o2
+ add %o0, 0x40, %o0
+ membar #Sync
+ stda %f32, [%o0] ASI_BLK_P
+ add %o0, 0x40, %o0
+ stda %f0, [%o0] ASI_BLK_P
+#else
+ membar #Sync
+ stda %f0, [%o0] ASI_BLK_P
+ add %o0, 0x40, %o0
+ stda %f16, [%o0] ASI_BLK_P
+#endif
+copy_user_page_continue:
+ membar #Sync
+ VISExit
+
+ mov TLB_TAG_ACCESS, %o2
+ stxa %g5, [%o2] ASI_DMMU
+ stxa %g7, [%o3] ASI_DTLB_DATA_ACCESS
+ membar #Sync
+ sub %o3, (TLBTEMP_ENTSZ), %o3
+ stxa %o4, [%o2] ASI_DMMU
+ stxa %o5, [%o3] ASI_DTLB_DATA_ACCESS
+ membar #Sync
+ rdpr %pstate, %g3
+ jmpl %o7 + 0x8, %g0
+ wrpr %g3, PSTATE_IE, %pstate
+
+copy_page_using_blkcommit:
+ membar #LoadStore | #StoreStore | #StoreLoad
+ ldda [%o1] ASI_BLK_P, %f0
+ add %o1, 0x40, %o1
+ ldda [%o1] ASI_BLK_P, %f16
+ add %o1, 0x40, %o1
+ sethi %hi(PAGE_SIZE), %o2
+1: TOUCH(f0, f2, f4, f6, f8, f10, f12, f14)
+ ldda [%o1] ASI_BLK_P, %f32
+ stda %f48, [%o0] ASI_BLK_COMMIT_P
+ add %o1, 0x40, %o1
+ sub %o2, 0x40, %o2
+ add %o0, 0x40, %o0
+ TOUCH(f16, f18, f20, f22, f24, f26, f28, f30)
+ ldda [%o1] ASI_BLK_P, %f0
+ stda %f48, [%o0] ASI_BLK_COMMIT_P
+ add %o1, 0x40, %o1
+ sub %o2, 0x40, %o2
+ add %o0, 0x40, %o0
+ TOUCH(f32, f34, f36, f38, f40, f42, f44, f46)
+ ldda [%o1] ASI_BLK_P, %f16
+ stda %f48, [%o0] ASI_BLK_COMMIT_P
+ sub %o2, 0x40, %o2
+ add %o1, 0x40, %o1
+ cmp %o2, PAGE_SIZE_REM
+ bne,pt %xcc, 1b
+ add %o0, 0x40, %o0
+#if (PAGE_SHIFT == 16) || (PAGE_SHIFT == 22)
+ TOUCH(f0, f2, f4, f6, f8, f10, f12, f14)
+ ldda [%o1] ASI_BLK_P, %f32
+ stda %f48, [%o0] ASI_BLK_COMMIT_P
+ add %o1, 0x40, %o1
+ sub %o2, 0x40, %o2
+ add %o0, 0x40, %o0
+ TOUCH(f16, f18, f20, f22, f24, f26, f28, f30)
+ ldda [%o1] ASI_BLK_P, %f0
+ stda %f48, [%o0] ASI_BLK_COMMIT_P
+ add %o1, 0x40, %o1
+ sub %o2, 0x40, %o2
+ add %o0, 0x40, %o0
+ membar #Sync
+ stda %f32, [%o0] ASI_BLK_COMMIT_P
+ add %o0, 0x40, %o0
+ ba,pt %xcc, copy_user_page_continue
+ stda %f0, [%o0] ASI_BLK_COMMIT_P
+#else
+ membar #Sync
+ stda %f0, [%o0] ASI_BLK_COMMIT_P
+ add %o0, 0x40, %o0
+ ba,pt %xcc, copy_user_page_continue
+ stda %f16, [%o0] ASI_BLK_COMMIT_P
+#endif
+
+ .align 32
+ .globl _clear_page
+ .type _clear_page,@function
+_clear_page: /* %o0=dest */
+ VISEntryHalf
+ ba,pt %xcc, clear_page_common
+ clr %o4
+
+ .align 32
+ .globl clear_user_page
+ .type clear_user_page,@function
+clear_user_page: /* %o0=dest, %o1=vaddr */
+ VISEntryHalf
+ sethi %hi(PAGE_SIZE), %g3
+ sethi %uhi(PAGE_OFFSET), %g2
+ sllx %g2, 32, %g2
+ sub %o0, %g2, %g1
+ and %o1, %g3, %o0
+ mov TLB_TAG_ACCESS, %o2
+ sethi %uhi(_PAGE_VALID | _PAGE_SZBITS), %g3
+ sethi %hi(TLBTEMP_BASE), %o3
+ sllx %g3, 32, %g3
+ or %g3, (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W), %g3
+ or %g1, %g3, %g1
+ add %o0, %o3, %o0
+#define FIX_INSN_2 0x96102068 /* mov (13 << 3), %o3 */
+cheetah_patch_2:
+ mov TLBTEMP_ENT2, %o3
+ rdpr %pstate, %g3
+ wrpr %g3, PSTATE_IE, %pstate
+
+ /* Spitfire Errata #32 workaround */
+ mov PRIMARY_CONTEXT, %g5
+ stxa %g0, [%g5] ASI_DMMU
+ membar #Sync
+
+ ldxa [%o3] ASI_DTLB_TAG_READ, %g5
+
+ /* Spitfire Errata #32 workaround */
+ mov PRIMARY_CONTEXT, %g7
+ stxa %g0, [%g7] ASI_DMMU
+ membar #Sync
+
+ ldxa [%o3] ASI_DTLB_DATA_ACCESS, %g0
+ ldxa [%o3] ASI_DTLB_DATA_ACCESS, %g7
+ stxa %o0, [%o2] ASI_DMMU
+ stxa %g1, [%o3] ASI_DTLB_DATA_ACCESS
+ membar #Sync
+
+ mov 1, %o4
+
+clear_page_common:
+ membar #StoreLoad | #StoreStore | #LoadStore ! LSU Group
+ fzero %f0 ! FPA Group
+ sethi %hi(PAGE_SIZE/256), %o1 ! IEU0
+ fzero %f2 ! FPA Group
+ or %o1, %lo(PAGE_SIZE/256), %o1 ! IEU0
+ faddd %f0, %f2, %f4 ! FPA Group
+ fmuld %f0, %f2, %f6 ! FPM
+ faddd %f0, %f2, %f8 ! FPA Group
+ fmuld %f0, %f2, %f10 ! FPM
+
+ faddd %f0, %f2, %f12 ! FPA Group
+ fmuld %f0, %f2, %f14 ! FPM
+1: stda %f0, [%o0 + %g0] ASI_BLK_P ! Store Group
+ add %o0, 0x40, %o0 ! IEU0
+ stda %f0, [%o0 + %g0] ASI_BLK_P ! Store Group
+ add %o0, 0x40, %o0 ! IEU0
+ stda %f0, [%o0 + %g0] ASI_BLK_P ! Store Group
+
+ add %o0, 0x40, %o0 ! IEU0 Group
+ stda %f0, [%o0 + %g0] ASI_BLK_P ! Store Group
+ subcc %o1, 1, %o1 ! IEU1
+ bne,pt %icc, 1b ! CTI
+ add %o0, 0x40, %o0 ! IEU0 Group
+ membar #Sync ! LSU Group
+ VISExitHalf
+
+ brnz,pt %o4, 1f
+ nop
+
+ retl
+ nop
+
+1:
+ stxa %g5, [%o2] ASI_DMMU
+ stxa %g7, [%o3] ASI_DTLB_DATA_ACCESS
+ membar #Sync
+ jmpl %o7 + 0x8, %g0
+ wrpr %g3, 0x0, %pstate
+
+ .globl cheetah_patch_pgcopyops
+cheetah_patch_pgcopyops:
+ sethi %hi(FIX_INSN_1), %g1
+ or %g1, %lo(FIX_INSN_1), %g1
+ sethi %hi(cheetah_patch_1), %g2
+ or %g2, %lo(cheetah_patch_1), %g2
+ stw %g1, [%g2]
+ flush %g2
+ sethi %hi(FIX_INSN_2), %g1
+ or %g1, %lo(FIX_INSN_2), %g1
+ sethi %hi(cheetah_patch_2), %g2
+ or %g2, %lo(cheetah_patch_2), %g2
+ stw %g1, [%g2]
+ flush %g2
+ retl
+ nop
+
+#undef FIX_INSN_1
+#undef FIX_INSN_2
+#undef PAGE_SIZE_REM
+++ /dev/null
-/* clear_page.S: UltraSparc optimized clear page.
- *
- * Copyright (C) 1996, 1998, 1999, 2000, 2004 David S. Miller (davem@redhat.com)
- * Copyright (C) 1997 Jakub Jelinek (jakub@redhat.com)
- */
-
-#include <asm/visasm.h>
-#include <asm/thread_info.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/spitfire.h>
-
- /* What we used to do was lock a TLB entry into a specific
- * TLB slot, clear the page with interrupts disabled, then
- * restore the original TLB entry. This was great for
- * disturbing the TLB as little as possible, but it meant
- * we had to keep interrupts disabled for a long time.
- *
- * Now, we simply use the normal TLB loading mechanism,
- * and this makes the cpu choose a slot all by itself.
- * Then we do a normal TLB flush on exit. We need only
- * disable preemption during the clear.
- */
-
-#define TTE_BITS_TOP (_PAGE_VALID | _PAGE_SZBITS)
-#define TTE_BITS_BOTTOM (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W)
-
- .text
-
- .globl _clear_page
-_clear_page: /* %o0=dest */
- ba,pt %xcc, clear_page_common
- clr %o4
-
- /* This thing is pretty important, it shows up
- * on the profiles via do_anonymous_page().
- */
- .align 32
- .globl clear_user_page
-clear_user_page: /* %o0=dest, %o1=vaddr */
- lduw [%g6 + TI_PRE_COUNT], %o2
- sethi %uhi(PAGE_OFFSET), %g2
- sethi %hi(PAGE_SIZE), %o4
-
- sllx %g2, 32, %g2
- sethi %uhi(TTE_BITS_TOP), %g3
-
- sllx %g3, 32, %g3
- sub %o0, %g2, %g1 ! paddr
-
- or %g3, TTE_BITS_BOTTOM, %g3
- and %o1, %o4, %o0 ! vaddr D-cache alias bit
-
- or %g1, %g3, %g1 ! TTE data
- sethi %hi(TLBTEMP_BASE), %o3
-
- add %o2, 1, %o4
- add %o0, %o3, %o0 ! TTE vaddr
-
- /* Disable preemption. */
- mov TLB_TAG_ACCESS, %g3
- stw %o4, [%g6 + TI_PRE_COUNT]
-
- /* Load TLB entry. */
- rdpr %pstate, %o4
- wrpr %o4, PSTATE_IE, %pstate
- stxa %o0, [%g3] ASI_DMMU
- stxa %g1, [%g0] ASI_DTLB_DATA_IN
- flush %g6
- wrpr %o4, 0x0, %pstate
-
- mov 1, %o4
-
-clear_page_common:
- VISEntryHalf
- membar #StoreLoad | #StoreStore | #LoadStore
- fzero %f0
- sethi %hi(PAGE_SIZE/64), %o1
- mov %o0, %g1 ! remember vaddr for tlbflush
- fzero %f2
- or %o1, %lo(PAGE_SIZE/64), %o1
- faddd %f0, %f2, %f4
- fmuld %f0, %f2, %f6
- faddd %f0, %f2, %f8
- fmuld %f0, %f2, %f10
-
- faddd %f0, %f2, %f12
- fmuld %f0, %f2, %f14
-1: stda %f0, [%o0 + %g0] ASI_BLK_P
- subcc %o1, 1, %o1
- bne,pt %icc, 1b
- add %o0, 0x40, %o0
- membar #Sync
- VISExitHalf
-
- brz,pn %o4, out
- nop
-
- stxa %g0, [%g1] ASI_DMMU_DEMAP
- membar #Sync
- stw %o2, [%g6 + TI_PRE_COUNT]
-
-out: retl
- nop
-
+++ /dev/null
-/* clear_page.S: UltraSparc optimized copy page.
- *
- * Copyright (C) 1996, 1998, 1999, 2000, 2004 David S. Miller (davem@redhat.com)
- * Copyright (C) 1997 Jakub Jelinek (jakub@redhat.com)
- */
-
-#include <asm/visasm.h>
-#include <asm/thread_info.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/spitfire.h>
-#include <asm/head.h>
-
- /* What we used to do was lock a TLB entry into a specific
- * TLB slot, clear the page with interrupts disabled, then
- * restore the original TLB entry. This was great for
- * disturbing the TLB as little as possible, but it meant
- * we had to keep interrupts disabled for a long time.
- *
- * Now, we simply use the normal TLB loading mechanism,
- * and this makes the cpu choose a slot all by itself.
- * Then we do a normal TLB flush on exit. We need only
- * disable preemption during the clear.
- */
-
-#define TTE_BITS_TOP (_PAGE_VALID | _PAGE_SZBITS)
-#define TTE_BITS_BOTTOM (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W)
-#define DCACHE_SIZE (PAGE_SIZE * 2)
-
-#if (PAGE_SHIFT == 13) || (PAGE_SHIFT == 19)
-#define PAGE_SIZE_REM 0x80
-#elif (PAGE_SHIFT == 16) || (PAGE_SHIFT == 22)
-#define PAGE_SIZE_REM 0x100
-#else
-#error Wrong PAGE_SHIFT specified
-#endif
-
-#define TOUCH(reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7) \
- fmovd %reg0, %f48; fmovd %reg1, %f50; \
- fmovd %reg2, %f52; fmovd %reg3, %f54; \
- fmovd %reg4, %f56; fmovd %reg5, %f58; \
- fmovd %reg6, %f60; fmovd %reg7, %f62;
-
- .text
-
- .align 32
- .globl copy_user_page
-copy_user_page: /* %o0=dest, %o1=src, %o2=vaddr */
- lduw [%g6 + TI_PRE_COUNT], %o4
- sethi %uhi(PAGE_OFFSET), %g2
- sethi %hi(PAGE_SIZE), %o3
-
- sllx %g2, 32, %g2
- sethi %uhi(TTE_BITS_TOP), %g3
-
- sllx %g3, 32, %g3
- sub %o0, %g2, %g1 ! dest paddr
-
- sub %o1, %g2, %g2 ! src paddr
- or %g3, TTE_BITS_BOTTOM, %g3
-
- and %o2, %o3, %o0 ! vaddr D-cache alias bit
- or %g1, %g3, %g1 ! dest TTE data
-
- or %g2, %g3, %g2 ! src TTE data
- sethi %hi(TLBTEMP_BASE), %o3
-
- sethi %hi(DCACHE_SIZE), %o1
- add %o0, %o3, %o0 ! dest TTE vaddr
-
- add %o4, 1, %o2
- add %o0, %o1, %o1 ! src TTE vaddr
-
- /* Disable preemption. */
- mov TLB_TAG_ACCESS, %g3
- stw %o2, [%g6 + TI_PRE_COUNT]
-
- /* Load TLB entries. */
- rdpr %pstate, %o2
- wrpr %o2, PSTATE_IE, %pstate
- stxa %o0, [%g3] ASI_DMMU
- stxa %g1, [%g0] ASI_DTLB_DATA_IN
- membar #Sync
- stxa %o1, [%g3] ASI_DMMU
- stxa %g2, [%g0] ASI_DTLB_DATA_IN
- membar #Sync
- wrpr %o2, 0x0, %pstate
-
- BRANCH_IF_ANY_CHEETAH(g3,o2,1f)
- ba,pt %xcc, 9f
- nop
-
-1:
- VISEntryHalf
- membar #StoreLoad | #StoreStore | #LoadStore
- sethi %hi((PAGE_SIZE/64)-2), %o2
- mov %o0, %g1
- prefetch [%o1 + 0x000], #one_read
- or %o2, %lo((PAGE_SIZE/64)-2), %o2
- prefetch [%o1 + 0x040], #one_read
- prefetch [%o1 + 0x080], #one_read
- prefetch [%o1 + 0x0c0], #one_read
- ldd [%o1 + 0x000], %f0
- prefetch [%o1 + 0x100], #one_read
- ldd [%o1 + 0x008], %f2
- prefetch [%o1 + 0x140], #one_read
- ldd [%o1 + 0x010], %f4
- prefetch [%o1 + 0x180], #one_read
- fmovd %f0, %f16
- ldd [%o1 + 0x018], %f6
- fmovd %f2, %f18
- ldd [%o1 + 0x020], %f8
- fmovd %f4, %f20
- ldd [%o1 + 0x028], %f10
- fmovd %f6, %f22
- ldd [%o1 + 0x030], %f12
- fmovd %f8, %f24
- ldd [%o1 + 0x038], %f14
- fmovd %f10, %f26
- ldd [%o1 + 0x040], %f0
-1: ldd [%o1 + 0x048], %f2
- fmovd %f12, %f28
- ldd [%o1 + 0x050], %f4
- fmovd %f14, %f30
- stda %f16, [%o0] ASI_BLK_P
- ldd [%o1 + 0x058], %f6
- fmovd %f0, %f16
- ldd [%o1 + 0x060], %f8
- fmovd %f2, %f18
- ldd [%o1 + 0x068], %f10
- fmovd %f4, %f20
- ldd [%o1 + 0x070], %f12
- fmovd %f6, %f22
- ldd [%o1 + 0x078], %f14
- fmovd %f8, %f24
- ldd [%o1 + 0x080], %f0
- prefetch [%o1 + 0x180], #one_read
- fmovd %f10, %f26
- subcc %o2, 1, %o2
- add %o0, 0x40, %o0
- bne,pt %xcc, 1b
- add %o1, 0x40, %o1
-
- ldd [%o1 + 0x048], %f2
- fmovd %f12, %f28
- ldd [%o1 + 0x050], %f4
- fmovd %f14, %f30
- stda %f16, [%o0] ASI_BLK_P
- ldd [%o1 + 0x058], %f6
- fmovd %f0, %f16
- ldd [%o1 + 0x060], %f8
- fmovd %f2, %f18
- ldd [%o1 + 0x068], %f10
- fmovd %f4, %f20
- ldd [%o1 + 0x070], %f12
- fmovd %f6, %f22
- add %o0, 0x40, %o0
- ldd [%o1 + 0x078], %f14
- fmovd %f8, %f24
- fmovd %f10, %f26
- fmovd %f12, %f28
- fmovd %f14, %f30
- stda %f16, [%o0] ASI_BLK_P
- membar #Sync
- VISExitHalf
- ba,pt %xcc, 5f
- nop
-
-9:
- VISEntry
- ldub [%g6 + TI_FAULT_CODE], %g3
- mov %o0, %g1
- cmp %g3, 0
- rd %asi, %g3
- be,a,pt %icc, 1f
- wr %g0, ASI_BLK_P, %asi
- wr %g0, ASI_BLK_COMMIT_P, %asi
-1: ldda [%o1] ASI_BLK_P, %f0
- add %o1, 0x40, %o1
- ldda [%o1] ASI_BLK_P, %f16
- add %o1, 0x40, %o1
- sethi %hi(PAGE_SIZE), %o2
-1: TOUCH(f0, f2, f4, f6, f8, f10, f12, f14)
- ldda [%o1] ASI_BLK_P, %f32
- stda %f48, [%o0] %asi
- add %o1, 0x40, %o1
- sub %o2, 0x40, %o2
- add %o0, 0x40, %o0
- TOUCH(f16, f18, f20, f22, f24, f26, f28, f30)
- ldda [%o1] ASI_BLK_P, %f0
- stda %f48, [%o0] %asi
- add %o1, 0x40, %o1
- sub %o2, 0x40, %o2
- add %o0, 0x40, %o0
- TOUCH(f32, f34, f36, f38, f40, f42, f44, f46)
- ldda [%o1] ASI_BLK_P, %f16
- stda %f48, [%o0] %asi
- sub %o2, 0x40, %o2
- add %o1, 0x40, %o1
- cmp %o2, PAGE_SIZE_REM
- bne,pt %xcc, 1b
- add %o0, 0x40, %o0
-#if (PAGE_SHIFT == 16) || (PAGE_SHIFT == 22)
- TOUCH(f0, f2, f4, f6, f8, f10, f12, f14)
- ldda [%o1] ASI_BLK_P, %f32
- stda %f48, [%o0] %asi
- add %o1, 0x40, %o1
- sub %o2, 0x40, %o2
- add %o0, 0x40, %o0
- TOUCH(f16, f18, f20, f22, f24, f26, f28, f30)
- ldda [%o1] ASI_BLK_P, %f0
- stda %f48, [%o0] %asi
- add %o1, 0x40, %o1
- sub %o2, 0x40, %o2
- add %o0, 0x40, %o0
- membar #Sync
- stda %f32, [%o0] %asi
- add %o0, 0x40, %o0
- stda %f0, [%o0] %asi
-#else
- membar #Sync
- stda %f0, [%o0] %asi
- add %o0, 0x40, %o0
- stda %f16, [%o0] %asi
-#endif
- membar #Sync
- wr %g3, 0x0, %asi
- VISExit
-
-5:
- stxa %g0, [%g1] ASI_DMMU_DEMAP
- membar #Sync
-
- sethi %hi(DCACHE_SIZE), %g2
- stxa %g0, [%g1 + %g2] ASI_DMMU_DEMAP
- membar #Sync
-
- retl
- stw %o4, [%g6 + TI_PRE_COUNT]
.text
.align 64
+ .globl rwlock_impl_begin, rwlock_impl_end
+
/* The non-contention read lock usage is 2 cache lines. */
.globl __read_lock, __read_unlock
+rwlock_impl_begin:
__read_lock: /* %o0 = lock_ptr */
ldsw [%o0], %g5
brlz,pn %g5, __read_wait_for_writer
__write_trylock_fail:
retl
mov 0, %o0
+rwlock_impl_end:
.text
.align 64
- .globl _raw_spin_lock
-_raw_spin_lock: /* %o0 = lock_ptr */
-1: ldstub [%o0], %g7
- brnz,pn %g7, 2f
- membar #StoreLoad | #StoreStore
- retl
- nop
-2: ldub [%o0], %g7
- brnz,pt %g7, 2b
- membar #LoadLoad
- ba,a,pt %xcc, 1b
-
.globl _raw_spin_lock_flags
_raw_spin_lock_flags: /* %o0 = lock_ptr, %o1 = irq_flags */
1: ldstub [%o0], %g7
EXTRA_AFLAGS := -ansi
EXTRA_CFLAGS := -Werror
-obj-y := ultra.o tlb.o fault.o init.o generic.o extable.o
+obj-y := ultra.o fault.o init.o generic.o extable.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
if (tlb_type == spitfire &&
(vma->vm_flags & VM_EXEC) != 0 &&
vma->vm_file != NULL)
- set_thread_fault_code(fault_code |
- FAULT_CODE_BLKCOMMIT);
+ set_thread_flag(TIF_BLKCOMMIT);
} else {
/* Allow reads even for write-only mappings */
if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
fault_done:
/* These values are no longer needed, clear them. */
set_thread_fault_code(0);
+ clear_thread_flag(TIF_BLKCOMMIT);
current_thread_info()->fault_address = 0;
}
#include <asm/spitfire.h>
#include <asm/sections.h>
+DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+
extern void device_scan(void);
struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];
put_cpu();
}
+/* When shared+writable mmaps of files go away, we lose all dirty
+ * page state, so we have to deal with D-cache aliasing here.
+ *
+ * This code relies on the fact that flush_cache_range() is always
+ * called for an area composed by a single VMA. It also assumes that
+ * the MM's page_table_lock is held.
+ */
+static inline void flush_cache_pte_range(struct mm_struct *mm, pmd_t *pmd, unsigned long address, unsigned long size)
+{
+ unsigned long offset;
+ pte_t *ptep;
+
+ if (pmd_none(*pmd))
+ return;
+ ptep = pte_offset_map(pmd, address);
+ offset = address & ~PMD_MASK;
+ if (offset + size > PMD_SIZE)
+ size = PMD_SIZE - offset;
+ size &= PAGE_MASK;
+ for (offset = 0; offset < size; ptep++, offset += PAGE_SIZE) {
+ pte_t pte = *ptep;
+
+ if (pte_none(pte))
+ continue;
+
+ if (pte_present(pte) && pte_dirty(pte)) {
+ struct page *page;
+ unsigned long pgaddr, uaddr;
+ unsigned long pfn = pte_pfn(pte);
+
+ if (!pfn_valid(pfn))
+ continue;
+ page = pfn_to_page(pfn);
+ if (PageReserved(page) || !page_mapping(page))
+ continue;
+ pgaddr = (unsigned long) page_address(page);
+ uaddr = address + offset;
+ if ((pgaddr ^ uaddr) & (1 << 13))
+ flush_dcache_page_all(mm, page);
+ }
+ }
+ pte_unmap(ptep - 1);
+}
+
+static inline void flush_cache_pmd_range(struct mm_struct *mm, pgd_t *dir, unsigned long address, unsigned long size)
+{
+ pmd_t *pmd;
+ unsigned long end;
+
+ if (pgd_none(*dir))
+ return;
+ pmd = pmd_offset(dir, address);
+ end = address + size;
+ if (end > ((address + PGDIR_SIZE) & PGDIR_MASK))
+ end = ((address + PGDIR_SIZE) & PGDIR_MASK);
+ do {
+ flush_cache_pte_range(mm, pmd, address, end - address);
+ address = (address + PMD_SIZE) & PMD_MASK;
+ pmd++;
+ } while (address < end);
+}
+
+void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ pgd_t *dir = pgd_offset(mm, start);
+
+ if (mm == current->mm)
+ flushw_user();
+
+ if (vma->vm_file == NULL ||
+ ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE)))
+ return;
+
+ do {
+ flush_cache_pmd_range(mm, dir, start, end - start);
+ start = (start + PGDIR_SIZE) & PGDIR_MASK;
+ dir++;
+ } while (start && (start < end));
+}
+
void flush_icache_range(unsigned long start, unsigned long end)
{
/* Cheetah has coherent I-cache. */
#else
#define DC_ALIAS_SHIFT 0
#endif
-pte_t *__pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
+pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
struct page *page;
unsigned long color;
+++ /dev/null
-/* arch/sparc64/mm/tlb.c
- *
- * Copyright (C) 2004 David S. Miller <davem@redhat.com>
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/percpu.h>
-#include <linux/mm.h>
-#include <linux/swap.h>
-
-#include <asm/pgtable.h>
-#include <asm/pgalloc.h>
-#include <asm/tlbflush.h>
-#include <asm/cacheflush.h>
-#include <asm/mmu_context.h>
-#include <asm/tlb.h>
-
-/* Heavily inspired by the ppc64 code. */
-
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers) =
- { NULL, 0, 0, 0, 0, 0, { 0 }, { NULL }, };
-
-void flush_tlb_pending(void)
-{
- struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);
-
- if (mp->tlb_nr) {
- unsigned long context = mp->mm->context;
-
- if (CTX_VALID(context)) {
-#ifdef CONFIG_SMP
- smp_flush_tlb_pending(mp->mm, mp->tlb_nr,
- &mp->vaddrs[0]);
-#else
- __flush_tlb_pending(CTX_HWBITS(context), mp->tlb_nr,
- &mp->vaddrs[0]);
-#endif
- }
- mp->tlb_nr = 0;
- }
-}
-
-void tlb_batch_add(pte_t *ptep, pte_t orig)
-{
- struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);
- struct page *ptepage;
- struct mm_struct *mm;
- unsigned long vaddr, nr;
-
- ptepage = virt_to_page(ptep);
- mm = (struct mm_struct *) ptepage->mapping;
-
- /* It is more efficient to let flush_tlb_kernel_range()
- * handle these cases.
- */
- if (mm == &init_mm)
- return;
-
- vaddr = ptepage->index +
- (((unsigned long)ptep & ~PAGE_MASK) * PTRS_PER_PTE);
- if (pte_exec(orig))
- vaddr |= 0x1UL;
-
- if (pte_dirty(orig)) {
- unsigned long paddr, pfn = pte_pfn(orig);
- struct address_space *mapping;
- struct page *page;
-
- if (!pfn_valid(pfn))
- goto no_cache_flush;
-
- page = pfn_to_page(pfn);
- if (PageReserved(page))
- goto no_cache_flush;
-
- /* A real file page? */
- mapping = page_mapping(page);
- if (!mapping)
- goto no_cache_flush;
-
- paddr = (unsigned long) page_address(page);
- if ((paddr ^ vaddr) & (1 << 13))
- flush_dcache_page_all(mm, page);
- }
-
-no_cache_flush:
- if (mp->tlb_frozen)
- return;
-
- nr = mp->tlb_nr;
-
- if (unlikely(nr != 0 && mm != mp->mm)) {
- flush_tlb_pending();
- nr = 0;
- }
-
- if (nr == 0)
- mp->mm = mm;
-
- mp->vaddrs[nr] = vaddr;
- mp->tlb_nr = ++nr;
- if (nr >= TLB_BATCH_NR)
- flush_tlb_pending();
-}
-
-void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end)
-{
- struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);
- unsigned long nr = mp->tlb_nr;
- long s = start, e = end, vpte_base;
-
- if (mp->tlb_frozen)
- return;
-
- /* Nobody should call us with start below VM hole and end above.
- * See if it is really true.
- */
- BUG_ON(s > e);
-
-#if 0
- /* Currently free_pgtables guarantees this. */
- s &= PMD_MASK;
- e = (e + PMD_SIZE - 1) & PMD_MASK;
-#endif
- vpte_base = (tlb_type == spitfire ?
- VPTE_BASE_SPITFIRE :
- VPTE_BASE_CHEETAH);
-
- if (unlikely(nr != 0 && mm != mp->mm)) {
- flush_tlb_pending();
- nr = 0;
- }
-
- if (nr == 0)
- mp->mm = mm;
-
- start = vpte_base + (s >> (PAGE_SHIFT - 3));
- end = vpte_base + (e >> (PAGE_SHIFT - 3));
- while (start < end) {
- mp->vaddrs[nr] = start;
- mp->tlb_nr = ++nr;
- if (nr >= TLB_BATCH_NR) {
- flush_tlb_pending();
- nr = 0;
- }
- start += PAGE_SIZE;
- }
- if (nr)
- flush_tlb_pending();
-}
-
-unsigned long __ptrs_per_pmd(void)
-{
- if (test_thread_flag(TIF_32BIT))
- return (1UL << (32 - (PAGE_SHIFT-3) - PAGE_SHIFT));
- return REAL_PTRS_PER_PMD;
-}
*/
.text
.align 32
- .globl __flush_tlb_mm
+ .globl __flush_tlb_page, __flush_tlb_mm, __flush_tlb_range
+__flush_tlb_page: /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=page&PAGE_MASK, %o2=SECONDARY_CONTEXT */
+ ldxa [%o2] ASI_DMMU, %g2
+ cmp %g2, %o0
+ bne,pn %icc, __spitfire_flush_tlb_page_slow
+ or %o1, 0x10, %g3
+ stxa %g0, [%g3] ASI_DMMU_DEMAP
+ stxa %g0, [%g3] ASI_IMMU_DEMAP
+ retl
+ flush %g6
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
__flush_tlb_mm: /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */
ldxa [%o1] ASI_DMMU, %g2
cmp %g2, %o0
nop
nop
- .align 32
- .globl __flush_tlb_pending
-__flush_tlb_pending:
- /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
- rdpr %pstate, %g5
- sllx %o1, 3, %o1
- andn %g5, PSTATE_IE, %g2
- wrpr %g2, %pstate
- mov SECONDARY_CONTEXT, %o4
- ldxa [%o4] ASI_DMMU, %g2
- stxa %o0, [%o4] ASI_DMMU
-1: sub %o1, (1 << 3), %o1
- ldx [%o2 + %o1], %o3
- andcc %o3, 1, %g0
- andn %o3, 1, %o3
- be,pn %icc, 2f
- or %o3, 0x10, %o3
- stxa %g0, [%o3] ASI_IMMU_DEMAP
-2: stxa %g0, [%o3] ASI_DMMU_DEMAP
- membar #Sync
- brnz,pt %o1, 1b
- nop
- stxa %g2, [%o4] ASI_DMMU
+__flush_tlb_range: /* %o0=(ctx&TAG_CONTEXT_BITS), %o1=start&PAGE_MASK, %o2=SECONDARY_CONTEXT,
+ * %o3=end&PAGE_MASK, %o4=PAGE_SIZE, %o5=(end - start)
+ */
+#define TLB_MAGIC 207 /* Students, do you know how I calculated this? -DaveM */
+ cmp %o5, %o4
+ bleu,pt %xcc, __flush_tlb_page
+ srlx %o5, PAGE_SHIFT, %g5
+ cmp %g5, TLB_MAGIC
+ bgeu,pn %icc, __spitfire_flush_tlb_range_constant_time
+ or %o1, 0x10, %g5
+ ldxa [%o2] ASI_DMMU, %g2
+ cmp %g2, %o0
+__spitfire_flush_tlb_range_page_by_page:
+ bne,pn %icc, __spitfire_flush_tlb_range_pbp_slow
+ sub %o5, %o4, %o5
+1: stxa %g0, [%g5 + %o5] ASI_DMMU_DEMAP
+ stxa %g0, [%g5 + %o5] ASI_IMMU_DEMAP
+ brnz,pt %o5, 1b
+ sub %o5, %o4, %o5
+ retl
+ flush %g6
+__spitfire_flush_tlb_range_constant_time: /* %o0=ctx, %o1=start, %o3=end */
+ rdpr %pstate, %g1
+ wrpr %g1, PSTATE_IE, %pstate
+ mov TLB_TAG_ACCESS, %g3
+ mov ((SPITFIRE_HIGHEST_LOCKED_TLBENT-1) << 3), %g2
+
+ /* Spitfire Errata #32 workaround. */
+ mov 0x8, %o4
+ stxa %g0, [%o4] ASI_DMMU
flush %g6
+
+1: ldxa [%g2] ASI_ITLB_TAG_READ, %o4
+ and %o4, TAG_CONTEXT_BITS, %o5
+ cmp %o5, %o0
+ bne,pt %icc, 2f
+ andn %o4, TAG_CONTEXT_BITS, %o4
+ cmp %o4, %o1
+ blu,pt %xcc, 2f
+ cmp %o4, %o3
+ blu,pn %xcc, 4f
+2: ldxa [%g2] ASI_DTLB_TAG_READ, %o4
+ and %o4, TAG_CONTEXT_BITS, %o5
+ cmp %o5, %o0
+ andn %o4, TAG_CONTEXT_BITS, %o4
+ bne,pt %icc, 3f
+ cmp %o4, %o1
+ blu,pt %xcc, 3f
+ cmp %o4, %o3
+ blu,pn %xcc, 5f
+ nop
+3: brnz,pt %g2, 1b
+ sub %g2, (1 << 3), %g2
retl
- wrpr %g5, 0x0, %pstate
+ wrpr %g1, 0x0, %pstate
+4: stxa %g0, [%g3] ASI_IMMU
+ stxa %g0, [%g2] ASI_ITLB_DATA_ACCESS
+ flush %g6
+
+ /* Spitfire Errata #32 workaround. */
+ mov 0x8, %o4
+ stxa %g0, [%o4] ASI_DMMU
+ flush %g6
+
+ ba,pt %xcc, 2b
+ nop
+
+5: stxa %g0, [%g3] ASI_DMMU
+ stxa %g0, [%g2] ASI_DTLB_DATA_ACCESS
+ flush %g6
+
+ /* Spitfire Errata #32 workaround. */
+ mov 0x8, %o4
+ stxa %g0, [%o4] ASI_DMMU
+ flush %g6
+
+ ba,pt %xcc, 3b
+ nop
.align 32
.globl __flush_tlb_kernel_range
retl
wrpr %g1, 0, %pstate
+__spitfire_flush_tlb_page_slow:
+ rdpr %pstate, %g1
+ wrpr %g1, PSTATE_IE, %pstate
+ stxa %o0, [%o2] ASI_DMMU
+ stxa %g0, [%g3] ASI_DMMU_DEMAP
+ stxa %g0, [%g3] ASI_IMMU_DEMAP
+ flush %g6
+ stxa %g2, [%o2] ASI_DMMU
+ flush %g6
+ retl
+ wrpr %g1, 0, %pstate
+
+__spitfire_flush_tlb_range_pbp_slow:
+ rdpr %pstate, %g1
+ wrpr %g1, PSTATE_IE, %pstate
+ stxa %o0, [%o2] ASI_DMMU
+
+2: stxa %g0, [%g5 + %o5] ASI_DMMU_DEMAP
+ stxa %g0, [%g5 + %o5] ASI_IMMU_DEMAP
+ brnz,pt %o5, 2b
+ sub %o5, %o4, %o5
+ flush %g6
+ stxa %g2, [%o2] ASI_DMMU
+ flush %g6
+ retl
+ wrpr %g1, 0x0, %pstate
+
/*
* The following code flushes one page_size worth.
*/
ba,a,pt %xcc, __prefill_itlb
/* Cheetah specific versions, patched at boot time. */
+__cheetah_flush_tlb_page: /* 14 insns */
+ rdpr %pstate, %g5
+ andn %g5, PSTATE_IE, %g2
+ wrpr %g2, 0x0, %pstate
+ wrpr %g0, 1, %tl
+ mov PRIMARY_CONTEXT, %o2
+ ldxa [%o2] ASI_DMMU, %g2
+ stxa %o0, [%o2] ASI_DMMU
+ stxa %g0, [%o1] ASI_DMMU_DEMAP
+ stxa %g0, [%o1] ASI_IMMU_DEMAP
+ stxa %g2, [%o2] ASI_DMMU
+ flush %g6
+ wrpr %g0, 0, %tl
+ retl
+ wrpr %g5, 0x0, %pstate
+
__cheetah_flush_tlb_mm: /* 15 insns */
rdpr %pstate, %g5
andn %g5, PSTATE_IE, %g2
retl
wrpr %g5, 0x0, %pstate
-__cheetah_flush_tlb_pending: /* 22 insns */
- /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
- rdpr %pstate, %g5
- sllx %o1, 3, %o1
+__cheetah_flush_tlb_range: /* 20 insns */
+ cmp %o5, %o4
+ blu,pt %xcc, 9f
+ rdpr %pstate, %g5
andn %g5, PSTATE_IE, %g2
wrpr %g2, 0x0, %pstate
wrpr %g0, 1, %tl
- mov PRIMARY_CONTEXT, %o4
- ldxa [%o4] ASI_DMMU, %g2
- stxa %o0, [%o4] ASI_DMMU
-1: sub %o1, (1 << 3), %o1
- ldx [%o2 + %o1], %o3
- andcc %o3, 1, %g0
- be,pn %icc, 2f
- andn %o3, 1, %o3
- stxa %g0, [%o3] ASI_IMMU_DEMAP
-2: stxa %g0, [%o3] ASI_DMMU_DEMAP
- brnz,pt %o1, 1b
- membar #Sync
- stxa %g2, [%o4] ASI_DMMU
+ mov PRIMARY_CONTEXT, %o2
+ sub %o5, %o4, %o5
+ ldxa [%o2] ASI_DMMU, %g2
+ stxa %o0, [%o2] ASI_DMMU
+1: stxa %g0, [%o1 + %o5] ASI_DMMU_DEMAP
+ stxa %g0, [%o1 + %o5] ASI_IMMU_DEMAP
+ membar #Sync
+ brnz,pt %o5, 1b
+ sub %o5, %o4, %o5
+ stxa %g2, [%o2] ASI_DMMU
flush %g6
wrpr %g0, 0, %tl
- retl
+9: retl
wrpr %g5, 0x0, %pstate
flush_dcpage_cheetah: /* 11 insns */
cheetah_patch_cachetlbops:
save %sp, -128, %sp
+ sethi %hi(__flush_tlb_page), %o0
+ or %o0, %lo(__flush_tlb_page), %o0
+ sethi %hi(__cheetah_flush_tlb_page), %o1
+ or %o1, %lo(__cheetah_flush_tlb_page), %o1
+ call cheetah_patch_one
+ mov 14, %o2
+
sethi %hi(__flush_tlb_mm), %o0
or %o0, %lo(__flush_tlb_mm), %o0
sethi %hi(__cheetah_flush_tlb_mm), %o1
call cheetah_patch_one
mov 15, %o2
- sethi %hi(__flush_tlb_pending), %o0
- or %o0, %lo(__flush_tlb_pending), %o0
- sethi %hi(__cheetah_flush_tlb_pending), %o1
- or %o1, %lo(__cheetah_flush_tlb_pending), %o1
+ sethi %hi(__flush_tlb_range), %o0
+ or %o0, %lo(__flush_tlb_range), %o0
+ sethi %hi(__cheetah_flush_tlb_range), %o1
+ or %o1, %lo(__cheetah_flush_tlb_range), %o1
call cheetah_patch_one
- mov 22, %o2
+ mov 20, %o2
sethi %hi(__flush_dcache_page), %o0
or %o0, %lo(__flush_dcache_page), %o0
* TODO: Make xcall TLB range flushes use the tricks above... -DaveM
*/
.align 32
- .globl xcall_flush_tlb_mm
+ .globl xcall_flush_tlb_page, xcall_flush_tlb_mm, xcall_flush_tlb_range
+xcall_flush_tlb_page:
+ mov PRIMARY_CONTEXT, %g2
+ ldxa [%g2] ASI_DMMU, %g3
+ stxa %g5, [%g2] ASI_DMMU
+ stxa %g0, [%g1] ASI_DMMU_DEMAP
+ stxa %g0, [%g1] ASI_IMMU_DEMAP
+ stxa %g3, [%g2] ASI_DMMU
+ retry
+ nop
+
xcall_flush_tlb_mm:
mov PRIMARY_CONTEXT, %g2
mov 0x40, %g4
stxa %g3, [%g2] ASI_DMMU
retry
- .globl xcall_flush_tlb_pending
-xcall_flush_tlb_pending:
- /* %g5=context, %g1=nr, %g7=vaddrs[] */
- sllx %g1, 3, %g1
- mov PRIMARY_CONTEXT, %g4
- ldxa [%g4] ASI_DMMU, %g2
+xcall_flush_tlb_range:
+ sethi %hi(PAGE_SIZE - 1), %g2
+ or %g2, %lo(PAGE_SIZE - 1), %g2
+ andn %g1, %g2, %g1
+ andn %g7, %g2, %g7
+ sub %g7, %g1, %g3
+ add %g2, 1, %g2
+ srlx %g3, PAGE_SHIFT, %g4
+ cmp %g4, 96
+
+ bgu,pn %icc, xcall_flush_tlb_mm
+ mov PRIMARY_CONTEXT, %g4
+ ldxa [%g4] ASI_DMMU, %g7
+ sub %g3, %g2, %g3
stxa %g5, [%g4] ASI_DMMU
-1: sub %g1, (1 << 3), %g1
- ldx [%g7 + %g1], %g5
- andcc %g5, 0x1, %g0
- be,pn %icc, 2f
-
- andn %g5, 0x1, %g5
- stxa %g0, [%g5] ASI_IMMU_DEMAP
-2: stxa %g0, [%g5] ASI_DMMU_DEMAP
+ nop
+ nop
+ nop
+
+1: stxa %g0, [%g1 + %g3] ASI_DMMU_DEMAP
+ stxa %g0, [%g1 + %g3] ASI_IMMU_DEMAP
membar #Sync
- brnz,pt %g1, 1b
- nop
- stxa %g2, [%g4] ASI_DMMU
+ brnz,pt %g3, 1b
+ sub %g3, %g2, %g3
+ stxa %g7, [%g4] ASI_DMMU
retry
+ nop
+ nop
.globl xcall_flush_tlb_kernel_range
xcall_flush_tlb_kernel_range:
retry
nop
nop
+ nop
/* This runs in a very controlled environment, so we do
* not need to worry about BH races etc.
__asm__ ("srl %0, 0, %0" \
: "=r" (__ret) \
: "0" (__x)); \
- (void __user *)__ret; \
+ __ret; \
})
extern unsigned sys_call_table[];
#define UFSMAGIC (((unsigned)'u'<<24)||((unsigned)'f'<<16)||((unsigned)'s'<<8))
-static inline int putstat(struct sol_stat __user *ubuf, struct kstat *kbuf)
+static inline int putstat(struct sol_stat *ubuf, struct kstat *kbuf)
{
if (kbuf->size > MAX_NON_LFS ||
!sysv_valid_dev(kbuf->dev) ||
__put_user (kbuf->ctime.tv_nsec, &ubuf->st_ctime.tv_nsec) ||
__put_user (kbuf->blksize, &ubuf->st_blksize) ||
__put_user (kbuf->blocks, &ubuf->st_blocks) ||
- __put_user (UFSMAGIC, (unsigned __user *)ubuf->st_fstype))
+ __put_user (UFSMAGIC, (unsigned *)ubuf->st_fstype))
return -EFAULT;
return 0;
}
-static inline int putstat64(struct sol_stat64 __user *ubuf, struct kstat *kbuf)
+static inline int putstat64(struct sol_stat64 *ubuf, struct kstat *kbuf)
{
if (!sysv_valid_dev(kbuf->dev) || !sysv_valid_dev(kbuf->rdev))
return -EOVERFLOW;
__put_user (kbuf->ctime.tv_nsec, &ubuf->st_ctime.tv_nsec) ||
__put_user (kbuf->blksize, &ubuf->st_blksize) ||
__put_user (kbuf->blocks, &ubuf->st_blocks) ||
- __put_user (UFSMAGIC, (unsigned __user *)ubuf->st_fstype))
+ __put_user (UFSMAGIC, (unsigned *)ubuf->st_fstype))
return -EFAULT;
return 0;
}
asmlinkage int solaris_stat(u32 filename, u32 statbuf)
{
+ int ret;
struct kstat s;
- int ret = vfs_stat(A(filename), &s);
- if (!ret)
- return putstat(A(statbuf), &s);
+ char *filenam;
+ mm_segment_t old_fs = get_fs();
+
+ filenam = getname ((char *)A(filename));
+ ret = PTR_ERR(filenam);
+ if (!IS_ERR(filenam)) {
+ set_fs (KERNEL_DS);
+ ret = vfs_stat(filenam, &s);
+ set_fs (old_fs);
+ putname (filenam);
+ return putstat((struct sol_stat *)A(statbuf), &s);
+ }
return ret;
}
asmlinkage int solaris_stat64(u32 filename, u32 statbuf)
{
+ int ret;
struct kstat s;
- int ret = vfs_stat(A(filename), &s);
- if (!ret)
- return putstat64(A(statbuf), &s);
+ char *filenam;
+ mm_segment_t old_fs = get_fs();
+
+ filenam = getname ((char *)A(filename));
+ ret = PTR_ERR(filenam);
+ if (!IS_ERR(filenam)) {
+ set_fs (KERNEL_DS);
+ ret = vfs_stat(filenam, &s);
+ set_fs (old_fs);
+ putname (filenam);
+ return putstat64((struct sol_stat64 *)A(statbuf), &s);
+ }
return ret;
}
asmlinkage int solaris_lstat(u32 filename, u32 statbuf)
{
+ int ret;
struct kstat s;
- int ret = vfs_lstat(A(filename), &s);
- if (!ret)
- return putstat(A(statbuf), &s);
+ char *filenam;
+ mm_segment_t old_fs = get_fs();
+
+ filenam = getname ((char *)A(filename));
+ ret = PTR_ERR(filenam);
+ if (!IS_ERR(filenam)) {
+ set_fs (KERNEL_DS);
+ ret = vfs_lstat(filenam, &s);
+ set_fs (old_fs);
+ putname (filenam);
+ return putstat((struct sol_stat *)A(statbuf), &s);
+ }
return ret;
}
asmlinkage int solaris_lstat64(u32 filename, u32 statbuf)
{
+ int ret;
struct kstat s;
- int ret = vfs_lstat(A(filename), &s);
- if (!ret)
- return putstat64(A(statbuf), &s);
+ char *filenam;
+ mm_segment_t old_fs = get_fs();
+
+ filenam = getname ((char *)A(filename));
+ ret = PTR_ERR(filenam);
+ if (!IS_ERR(filenam)) {
+ set_fs (KERNEL_DS);
+ ret = vfs_lstat(filenam, &s);
+ set_fs (old_fs);
+ putname (filenam);
+ return putstat64((struct sol_stat64 *)A(statbuf), &s);
+ }
return ret;
}
asmlinkage int solaris_fstat(unsigned int fd, u32 statbuf)
{
+ int ret;
struct kstat s;
- int ret = vfs_fstat(fd, &s);
+ ret = vfs_fstat(fd, &s);
if (!ret)
- return putstat(A(statbuf), &s);
+ return putstat((struct sol_stat *)A(statbuf), &s);
return ret;
}
asmlinkage int solaris_fstat64(unsigned int fd, u32 statbuf)
{
+ int ret;
struct kstat s;
- int ret = vfs_fstat(fd, &s);
+
+ ret = vfs_fstat(fd, &s);
if (!ret)
- return putstat64(A(statbuf), &s);
+ return putstat64((struct sol_stat64 *)A(statbuf), &s);
return ret;
}
asmlinkage int solaris_mknod(u32 path, u32 mode, s32 dev)
{
- int (*sys_mknod)(const char __user *,int,unsigned) =
- (int (*)(const char __user *,int,unsigned))SYS(mknod);
+ int (*sys_mknod)(const char *,int,unsigned) =
+ (int (*)(const char *,int,unsigned))SYS(mknod);
int major = sysv_major(dev);
int minor = sysv_minor(dev);
/* minor is guaranteed to be OK for MKDEV, major might be not */
if (major > 0xfff)
return -EINVAL;
- return sys_mknod(A(path), mode, new_encode_dev(MKDEV(major,minor)));
+ return sys_mknod((const char *)A(path), mode,
+ new_encode_dev(MKDEV(major,minor)));
}
asmlinkage int solaris_xmknod(int vers, u32 path, u32 mode, s32 dev)
return solaris_mknod(path, mode, dev);
}
-asmlinkage int solaris_getdents64(unsigned int fd, void __user *dirent, unsigned int count)
+asmlinkage int solaris_getdents64(unsigned int fd, void *dirent, unsigned int count)
{
- int (*sys_getdents)(unsigned int, void __user *, unsigned int) =
- (int (*)(unsigned int, void __user *, unsigned int))SYS(getdents);
+ int (*sys_getdents)(unsigned int, void *, unsigned int) =
+ (int (*)(unsigned int, void *, unsigned int))SYS(getdents);
return sys_getdents(fd, dirent, count);
}
int ret;
struct statfs s;
mm_segment_t old_fs = get_fs();
- int (*sys_statfs)(const char __user *,struct statfs __user *) =
- (int (*)(const char __user *,struct statfs __user *))SYS(statfs);
- struct sol_statfs __user *ss = A(buf);
+ int (*sys_statfs)(const char *,struct statfs *) =
+ (int (*)(const char *,struct statfs *))SYS(statfs);
+ struct sol_statfs *ss = (struct sol_statfs *)A(buf);
if (len != sizeof(struct sol_statfs)) return -EINVAL;
if (!fstype) {
- /* FIXME: mixing userland and kernel pointers */
set_fs (KERNEL_DS);
- ret = sys_statfs(A(path), &s);
+ ret = sys_statfs((const char *)A(path), &s);
set_fs (old_fs);
if (!ret) {
if (put_user (s.f_type, &ss->f_type) ||
int ret;
struct statfs s;
mm_segment_t old_fs = get_fs();
- int (*sys_fstatfs)(unsigned,struct statfs __user *) =
- (int (*)(unsigned,struct statfs __user *))SYS(fstatfs);
- struct sol_statfs __user *ss = A(buf);
+ int (*sys_fstatfs)(unsigned,struct statfs *) =
+ (int (*)(unsigned,struct statfs *))SYS(fstatfs);
+ struct sol_statfs *ss = (struct sol_statfs *)A(buf);
if (len != sizeof(struct sol_statfs)) return -EINVAL;
if (!fstype) {
{
struct kstatfs s;
int error;
- struct sol_statvfs __user *ss = A(buf);
+ struct sol_statvfs *ss = (struct sol_statvfs *)A(buf);
error = vfs_statfs(mnt->mnt_sb, &s);
if (!error) {
int j = strlen (p);
if (j > 15) j = 15;
- if (IS_RDONLY(inode) || (mnt && MNT_IS_RDONLY(mnt))) i = 1;
+ if (IS_RDONLY(inode)) i = 1;
if (mnt->mnt_flags & MNT_NOSUID) i |= 2;
if (!sysv_valid_dev(inode->i_sb->s_dev))
return -EOVERFLOW;
__put_user (s.f_ffree, &ss->f_favail) ||
__put_user (sysv_encode_dev(inode->i_sb->s_dev), &ss->f_fsid) ||
__copy_to_user (ss->f_basetype,p,j) ||
- __put_user (0, (char __user *)&ss->f_basetype[j]) ||
+ __put_user (0, (char *)&ss->f_basetype[j]) ||
__put_user (s.f_namelen, &ss->f_namemax) ||
__put_user (i, &ss->f_flag) ||
__clear_user (&ss->f_fstr, 32))
{
struct kstatfs s;
int error;
- struct sol_statvfs64 __user *ss = A(buf);
+ struct sol_statvfs64 *ss = (struct sol_statvfs64 *)A(buf);
error = vfs_statfs(mnt->mnt_sb, &s);
if (!error) {
int j = strlen (p);
if (j > 15) j = 15;
- if (IS_RDONLY(inode) || (mnt && MNT_IS_RDONLY(mnt))) i = 1;
+ if (IS_RDONLY(inode)) i = 1;
if (mnt->mnt_flags & MNT_NOSUID) i |= 2;
if (!sysv_valid_dev(inode->i_sb->s_dev))
return -EOVERFLOW;
__put_user (s.f_ffree, &ss->f_favail) ||
__put_user (sysv_encode_dev(inode->i_sb->s_dev), &ss->f_fsid) ||
__copy_to_user (ss->f_basetype,p,j) ||
- __put_user (0, (char __user *)&ss->f_basetype[j]) ||
+ __put_user (0, (char *)&ss->f_basetype[j]) ||
__put_user (s.f_namelen, &ss->f_namemax) ||
__put_user (i, &ss->f_flag) ||
__clear_user (&ss->f_fstr, 32))
struct nameidata nd;
int error;
- error = user_path_walk(A(path),&nd);
+ error = user_path_walk((const char *)A(path),&nd);
if (!error) {
struct inode * inode = nd.dentry->d_inode;
error = report_statvfs(nd.mnt, inode, buf);
int error;
lock_kernel();
- error = user_path_walk(A(path), &nd);
+ error = user_path_walk((const char *)A(path), &nd);
if (!error) {
struct inode * inode = nd.dentry->d_inode;
error = report_statvfs64(nd.mnt, inode, buf);
case SOL_F_SETLKW:
{
struct flock f;
- struct sol_flock __user *p = A(arg);
mm_segment_t old_fs = get_fs();
switch (cmd) {
case SOL_F_SETLKW: cmd = F_SETLKW; break;
}
- if (get_user (f.l_type, &p->l_type) ||
- __get_user (f.l_whence, &p->l_whence) ||
- __get_user (f.l_start, &p->l_start) ||
- __get_user (f.l_len, &p->l_len) ||
- __get_user (f.l_pid, &p->l_sysid))
+ if (get_user (f.l_type, &((struct sol_flock *)A(arg))->l_type) ||
+ __get_user (f.l_whence, &((struct sol_flock *)A(arg))->l_whence) ||
+ __get_user (f.l_start, &((struct sol_flock *)A(arg))->l_start) ||
+ __get_user (f.l_len, &((struct sol_flock *)A(arg))->l_len) ||
+ __get_user (f.l_pid, &((struct sol_flock *)A(arg))->l_sysid))
return -EFAULT;
set_fs(KERNEL_DS);
ret = sys_fcntl(fd, cmd, (unsigned long)&f);
set_fs(old_fs);
- if (__put_user (f.l_type, &p->l_type) ||
- __put_user (f.l_whence, &p->l_whence) ||
- __put_user (f.l_start, &p->l_start) ||
- __put_user (f.l_len, &p->l_len) ||
- __put_user (f.l_pid, &p->l_pid) ||
- __put_user (0, &p->l_sysid))
+ if (__put_user (f.l_type, &((struct sol_flock *)A(arg))->l_type) ||
+ __put_user (f.l_whence, &((struct sol_flock *)A(arg))->l_whence) ||
+ __put_user (f.l_start, &((struct sol_flock *)A(arg))->l_start) ||
+ __put_user (f.l_len, &((struct sol_flock *)A(arg))->l_len) ||
+ __put_user (f.l_pid, &((struct sol_flock *)A(arg))->l_pid) ||
+ __put_user (0, &((struct sol_flock *)A(arg))->l_sysid))
return -EFAULT;
return ret;
int (*sys_newftruncate)(unsigned int, unsigned long)=
(int (*)(unsigned int, unsigned long))SYS(ftruncate);
- if (get_user(length, &((struct sol_flock __user *)A(arg))->l_start))
+ if (get_user(length, &((struct sol_flock*)A(arg))->l_start))
return -EFAULT;
return sys_newftruncate(fd, length);
return -ENOSYS;
}
-asmlinkage int solaris_pread(unsigned int fd, char __user *buf, u32 count, u32 pos)
+asmlinkage int solaris_pread(unsigned int fd, char *buf, u32 count, u32 pos)
{
- ssize_t (*sys_pread64)(unsigned int, char __user *, size_t, loff_t) =
- (ssize_t (*)(unsigned int, char __user *, size_t, loff_t))SYS(pread64);
+ ssize_t (*sys_pread64)(unsigned int, char *, size_t, loff_t) =
+ (ssize_t (*)(unsigned int, char *, size_t, loff_t))SYS(pread64);
return sys_pread64(fd, buf, count, (loff_t)pos);
}
-asmlinkage int solaris_pwrite(unsigned int fd, char __user *buf, u32 count, u32 pos)
+asmlinkage int solaris_pwrite(unsigned int fd, char *buf, u32 count, u32 pos)
{
- ssize_t (*sys_pwrite64)(unsigned int, char __user *, size_t, loff_t) =
- (ssize_t (*)(unsigned int, char __user *, size_t, loff_t))SYS(pwrite64);
+ ssize_t (*sys_pwrite64)(unsigned int, char *, size_t, loff_t) =
+ (ssize_t (*)(unsigned int, char *, size_t, loff_t))SYS(pwrite64);
return sys_pwrite64(fd, buf, count, (loff_t)pos);
}
/* solaris_llseek returns long long - quite difficult */
asmlinkage long solaris_llseek(struct pt_regs *regs, u32 off_hi, u32 off_lo, int whence)
{
- int (*sys_llseek)(unsigned int, unsigned long, unsigned long, loff_t __user *, unsigned int) =
- (int (*)(unsigned int, unsigned long, unsigned long, loff_t __user *, unsigned int))SYS(_llseek);
+ int (*sys_llseek)(unsigned int, unsigned long, unsigned long, loff_t *, unsigned int) =
+ (int (*)(unsigned int, unsigned long, unsigned long, loff_t *, unsigned int))SYS(_llseek);
int ret;
mm_segment_t old_fs = get_fs();
loff_t retval;
/* Have to mask out all but lower 3 bits */
asmlinkage int solaris_access(u32 filename, long mode)
{
- int (*sys_access)(const char __user *, int) =
- (int (*)(const char __user *, int))SYS(access);
+ int (*sys_access)(const char *, int) =
+ (int (*)(const char *, int))SYS(access);
- return sys_access(A(filename), mode & 7);
+ return sys_access((const char *)A(filename), mode & 7);
}
u32 arg);
asmlinkage int solaris_ioctl(unsigned int fd, unsigned int cmd, u32 arg);
-extern int timod_putmsg(unsigned int fd, char __user *ctl_buf, int ctl_len,
- char __user *data_buf, int data_len, int flags);
-extern int timod_getmsg(unsigned int fd, char __user *ctl_buf, int ctl_maxlen, int __user *ctl_len,
- char __user *data_buf, int data_maxlen, int __user *data_len, int *flags);
+extern int timod_putmsg(unsigned int fd, char *ctl_buf, int ctl_len,
+ char *data_buf, int data_len, int flags);
+extern int timod_getmsg(unsigned int fd, char *ctl_buf, int ctl_maxlen, int *ctl_len,
+ char *data_buf, int data_maxlen, int *data_len, int *flags);
/* termio* stuff {{{ */
static inline int linux_to_solaris_termio(unsigned int fd, unsigned int cmd, u32 arg)
{
- struct solaris_termio __user *p = A(arg);
int ret;
- ret = sys_ioctl(fd, cmd, (unsigned long)p);
+ ret = sys_ioctl(fd, cmd, A(arg));
if (!ret) {
u32 cflag;
- if (__get_user (cflag, &p->c_cflag))
+ if (__get_user (cflag, &((struct solaris_termio *)A(arg))->c_cflag))
return -EFAULT;
cflag = linux_to_solaris_cflag(cflag);
- if (__put_user (cflag, &p->c_cflag))
+ if (__put_user (cflag, &((struct solaris_termio *)A(arg))->c_cflag))
return -EFAULT;
}
return ret;
struct solaris_termio s;
mm_segment_t old_fs = get_fs();
- if (copy_from_user (&s, (struct solaris_termio __user *)A(arg), sizeof(struct solaris_termio)))
+ if (copy_from_user (&s, (struct solaris_termio *)A(arg), sizeof(struct solaris_termio)))
return -EFAULT;
s.c_cflag = solaris_to_linux_cflag(s.c_cflag);
set_fs(KERNEL_DS);
ret = sys_ioctl(fd, cmd, (unsigned long)&s);
set_fs(old_fs);
if (!ret) {
- struct solaris_termios __user *p = A(arg);
- if (put_user (s.c_iflag, &p->c_iflag) ||
- __put_user (s.c_oflag, &p->c_oflag) ||
- __put_user (linux_to_solaris_cflag(s.c_cflag), &p->c_cflag) ||
- __put_user (s.c_lflag, &p->c_lflag) ||
- __copy_to_user (p->c_cc, s.c_cc, 16) ||
- __clear_user (p->c_cc + 16, 2))
+ if (put_user (s.c_iflag, &((struct solaris_termios *)A(arg))->c_iflag) ||
+ __put_user (s.c_oflag, &((struct solaris_termios *)A(arg))->c_oflag) ||
+ __put_user (linux_to_solaris_cflag(s.c_cflag), &((struct solaris_termios *)A(arg))->c_cflag) ||
+ __put_user (s.c_lflag, &((struct solaris_termios *)A(arg))->c_lflag) ||
+ __copy_to_user (((struct solaris_termios *)A(arg))->c_cc, s.c_cc, 16) ||
+ __clear_user (((struct solaris_termios *)A(arg))->c_cc + 16, 2))
return -EFAULT;
}
return ret;
{
int ret;
struct solaris_termios s;
- struct solaris_termios __user *p = A(arg);
mm_segment_t old_fs = get_fs();
set_fs(KERNEL_DS);
ret = sys_ioctl(fd, TCGETS, (unsigned long)&s);
set_fs(old_fs);
if (ret) return ret;
- if (put_user (s.c_iflag, &p->c_iflag) ||
- __put_user (s.c_oflag, &p->c_oflag) ||
- __put_user (s.c_cflag, &p->c_cflag) ||
- __put_user (s.c_lflag, &p->c_lflag) ||
- __copy_from_user (s.c_cc, p->c_cc, 16))
+ if (put_user (s.c_iflag, &((struct solaris_termios *)A(arg))->c_iflag) ||
+ __put_user (s.c_oflag, &((struct solaris_termios *)A(arg))->c_oflag) ||
+ __put_user (s.c_cflag, &((struct solaris_termios *)A(arg))->c_cflag) ||
+ __put_user (s.c_lflag, &((struct solaris_termios *)A(arg))->c_lflag) ||
+ __copy_from_user (s.c_cc, ((struct solaris_termios *)A(arg))->c_cc, 16))
return -EFAULT;
s.c_cflag = solaris_to_linux_cflag(s.c_cflag);
set_fs(KERNEL_DS);
case 109: /* SI_SOCKPARAMS */
{
struct solaris_si_sockparams si;
- if (copy_from_user (&si, A(arg), sizeof(si)))
+ if (copy_from_user (&si, (struct solaris_si_sockparams *) A(arg), sizeof(si)))
return (EFAULT << 8) | TSYSERR;
/* Should we modify socket ino->socket_i.ops and type? */
case 110: /* SI_GETUDATA */
{
int etsdusize, servtype;
- struct solaris_si_udata __user *p = A(arg);
switch (SOCKET_I(ino)->type) {
case SOCK_STREAM:
etsdusize = 1;
servtype = 3;
break;
}
- if (put_user(16384, &p->tidusize) ||
- __put_user(sizeof(struct sockaddr), &p->addrsize) ||
- __put_user(-1, &p->optsize) ||
- __put_user(etsdusize, &p->etsdusize) ||
- __put_user(servtype, &p->servtype) ||
- __put_user(0, &p->so_state) ||
- __put_user(0, &p->so_options) ||
- __put_user(16384, &p->tsdusize) ||
- __put_user(SOCKET_I(ino)->ops->family, &p->sockparams.sp_family) ||
- __put_user(SOCKET_I(ino)->type, &p->sockparams.sp_type) ||
- __put_user(SOCKET_I(ino)->ops->family, &p->sockparams.sp_protocol))
+ if (put_user(16384, &((struct solaris_si_udata *)A(arg))->tidusize) ||
+ __put_user(sizeof(struct sockaddr), &((struct solaris_si_udata *)A(arg))->addrsize) ||
+ __put_user(-1, &((struct solaris_si_udata *)A(arg))->optsize) ||
+ __put_user(etsdusize, &((struct solaris_si_udata *)A(arg))->etsdusize) ||
+ __put_user(servtype, &((struct solaris_si_udata *)A(arg))->servtype) ||
+ __put_user(0, &((struct solaris_si_udata *)A(arg))->so_state) ||
+ __put_user(0, &((struct solaris_si_udata *)A(arg))->so_options) ||
+ __put_user(16384, &((struct solaris_si_udata *)A(arg))->tsdusize) ||
+ __put_user(SOCKET_I(ino)->ops->family, &((struct solaris_si_udata *)A(arg))->sockparams.sp_family) ||
+ __put_user(SOCKET_I(ino)->type, &((struct solaris_si_udata *)A(arg))->sockparams.sp_type) ||
+ __put_user(SOCKET_I(ino)->ops->family, &((struct solaris_si_udata *)A(arg))->sockparams.sp_protocol))
return (EFAULT << 8) | TSYSERR;
return 0;
}
case 101: /* O_SI_GETUDATA */
{
int etsdusize, servtype;
- struct solaris_o_si_udata __user *p = A(arg);
switch (SOCKET_I(ino)->type) {
case SOCK_STREAM:
etsdusize = 1;
servtype = 3;
break;
}
- if (put_user(16384, &p->tidusize) ||
- __put_user(sizeof(struct sockaddr), &p->addrsize) ||
- __put_user(-1, &p->optsize) ||
- __put_user(etsdusize, &p->etsdusize) ||
- __put_user(servtype, &p->servtype) ||
- __put_user(0, &p->so_state) ||
- __put_user(0, &p->so_options) ||
- __put_user(16384, &p->tsdusize))
+ if (put_user(16384, &((struct solaris_o_si_udata *)A(arg))->tidusize) ||
+ __put_user(sizeof(struct sockaddr), &((struct solaris_o_si_udata *)A(arg))->addrsize) ||
+ __put_user(-1, &((struct solaris_o_si_udata *)A(arg))->optsize) ||
+ __put_user(etsdusize, &((struct solaris_o_si_udata *)A(arg))->etsdusize) ||
+ __put_user(servtype, &((struct solaris_o_si_udata *)A(arg))->servtype) ||
+ __put_user(0, &((struct solaris_o_si_udata *)A(arg))->so_state) ||
+ __put_user(0, &((struct solaris_o_si_udata *)A(arg))->so_options) ||
+ __put_user(16384, &((struct solaris_o_si_udata *)A(arg))->tsdusize))
return (EFAULT << 8) | TSYSERR;
return 0;
}
}
static inline int solaris_timod(unsigned int fd, unsigned int cmd, u32 arg,
- int len, int __user *len_p)
+ int len, int *len_p)
{
int ret;
int i;
u32 prim;
SOLD("TI_OPMGMT entry");
- ret = timod_putmsg(fd, A(arg), len, NULL, -1, 0);
+ ret = timod_putmsg(fd, (char *)A(arg), len, NULL, -1, 0);
SOLD("timod_putmsg() returned");
if (ret)
return (-ret << 8) | TSYSERR;
i = MSG_HIPRI;
SOLD("calling timod_getmsg()");
- ret = timod_getmsg(fd, A(arg), len, len_p, NULL, -1, NULL, &i);
+ ret = timod_getmsg(fd, (char *)A(arg), len, len_p, NULL, -1, NULL, &i);
SOLD("timod_getmsg() returned");
if (ret)
return (-ret << 8) | TSYSERR;
SOLD("ret ok");
- if (get_user(prim, (u32 __user *)A(arg)))
+ if (get_user(prim, (u32 *)A(arg)))
return (EFAULT << 8) | TSYSERR;
SOLD("got prim");
if (prim == T_ERROR_ACK) {
u32 tmp, tmp2;
SOLD("prim is T_ERROR_ACK");
- if (get_user(tmp, (u32 __user *)A(arg)+3) ||
- get_user(tmp2, (u32 __user *)A(arg)+2))
+ if (get_user(tmp, (u32 *)A(arg)+3) ||
+ get_user(tmp2, (u32 *)A(arg)+2))
return (EFAULT << 8) | TSYSERR;
return (tmp2 << 8) | tmp;
}
int i;
u32 prim;
SOLD("TI_BIND entry");
- ret = timod_putmsg(fd, A(arg), len, NULL, -1, 0);
+ ret = timod_putmsg(fd, (char *)A(arg), len, NULL, -1, 0);
SOLD("timod_putmsg() returned");
if (ret)
return (-ret << 8) | TSYSERR;
len = 1024; /* Solaris allows arbitrary return size */
i = MSG_HIPRI;
SOLD("calling timod_getmsg()");
- ret = timod_getmsg(fd, A(arg), len, len_p, NULL, -1, NULL, &i);
+ ret = timod_getmsg(fd, (char *)A(arg), len, len_p, NULL, -1, NULL, &i);
SOLD("timod_getmsg() returned");
if (ret)
return (-ret << 8) | TSYSERR;
SOLD("ret ok");
- if (get_user(prim, (u32 __user *)A(arg)))
+ if (get_user(prim, (u32 *)A(arg)))
return (EFAULT << 8) | TSYSERR;
SOLD("got prim");
if (prim == T_ERROR_ACK) {
u32 tmp, tmp2;
SOLD("prim is T_ERROR_ACK");
- if (get_user(tmp, (u32 __user *)A(arg)+3) ||
- get_user(tmp2, (u32 __user *)A(arg)+2))
+ if (get_user(tmp, (u32 *)A(arg)+3) ||
+ get_user(tmp2, (u32 *)A(arg)+2))
return (EFAULT << 8) | TSYSERR;
return (tmp2 << 8) | tmp;
}
SOLD("OK_ACK requested");
i = MSG_HIPRI;
SOLD("calling timod_getmsg()");
- ret = timod_getmsg(fd, A(arg), len, len_p, NULL, -1, NULL, &i);
+ ret = timod_getmsg(fd, (char *)A(arg), len, len_p, NULL, -1, NULL, &i);
SOLD("timod_getmsg() returned");
if (ret)
return (-ret << 8) | TSYSERR;
return -ENOSYS;
case 2: /* I_PUSH */
{
- p = getname (A(arg));
+ p = getname ((char *)A(arg));
if (IS_ERR (p))
return PTR_ERR(p);
ret = -EINVAL;
const char *p;
if (sock->modcount <= 0) return -EINVAL;
p = module_table[(unsigned)sock->module[sock->modcount]].name;
- if (copy_to_user (A(arg), p, strlen(p)))
+ if (copy_to_user ((char *)A(arg), p, strlen(p)))
return -EFAULT;
return 0;
}
case 5: /* I_FLUSH */
return 0;
case 8: /* I_STR */
- if (copy_from_user(&si, A(arg), sizeof(struct strioctl)))
+ if (copy_from_user(&si, (struct strioctl *)A(arg), sizeof(struct strioctl)))
return -EFAULT;
/* We ignore what module is actually at the top of stack. */
switch ((si.cmd >> 8) & 0xff) {
return solaris_sockmod(fd, si.cmd, si.data);
case 'T':
return solaris_timod(fd, si.cmd, si.data, si.len,
- &((struct strioctl __user *)A(arg))->len);
+ &((struct strioctl*)A(arg))->len);
default:
return solaris_ioctl(fd, si.cmd, si.data);
}
case 11: /* I_FIND */
{
int i;
- p = getname (A(arg));
+ p = getname ((char *)A(arg));
if (IS_ERR (p))
return PTR_ERR(p);
ret = 0;
return 0; /* We don't support them */
case 1: /* SIOCGHIWAT */
case 3: /* SIOCGLOWAT */
- if (put_user (0, (u32 __user *)A(arg)))
+ if (put_user (0, (u32 *)A(arg)))
return -EFAULT;
return 0; /* Lie */
case 7: /* SIOCATMARK */
args);
set_fs(old_fs);
if (ret >= 0) {
- if (copy_to_user(A(arg), &uaddr, uaddr_len))
+ if (copy_to_user((char *)A(arg), &uaddr, uaddr_len))
return -EFAULT;
}
return ret;
for (d = dev_base; d; d = d->next) i++;
read_unlock_bh(&dev_base_lock);
- if (put_user (i, (int __user *)A(arg)))
+ if (put_user (i, (int *)A(arg)))
return -EFAULT;
return 0;
}
asmlinkage long solaris_shmsys(int cmd, u32 arg1, u32 arg2, u32 arg3)
{
- int (*sys_ipc)(unsigned,int,int,unsigned long,void __user *,long) =
- (int (*)(unsigned,int,int,unsigned long,void __user *,long))SYS(ipc);
+ int (*sys_ipc)(unsigned,int,int,unsigned long,void *,long) =
+ (int (*)(unsigned,int,int,unsigned long,void *,long))SYS(ipc);
mm_segment_t old_fs;
unsigned long raddr;
int ret;
case 0: /* shmat */
old_fs = get_fs();
set_fs(KERNEL_DS);
- ret = sys_ipc(SHMAT, arg1, arg3 & ~0x4000, (unsigned long)&raddr, A(arg2), 0);
+ ret = sys_ipc(SHMAT, arg1, arg3 & ~0x4000, (unsigned long)&raddr, (void *)A(arg2), 0);
set_fs(old_fs);
if (ret >= 0) return (u32)raddr;
else return ret;
case 11: /* IPC_SET */
{
struct shmid_ds s;
- struct solaris_shmid_ds __user *p = A(arg3);
- if (get_user (s.shm_perm.uid, &p->shm_perm.uid) ||
- __get_user (s.shm_perm.gid, &p->shm_perm.gid) ||
- __get_user (s.shm_perm.mode, &p->shm_perm.mode))
+ if (get_user (s.shm_perm.uid, &(((struct solaris_shmid_ds *)A(arg3))->shm_perm.uid)) ||
+ __get_user (s.shm_perm.gid, &(((struct solaris_shmid_ds *)A(arg3))->shm_perm.gid)) ||
+ __get_user (s.shm_perm.mode, &(((struct solaris_shmid_ds *)A(arg3))->shm_perm.mode)))
return -EFAULT;
old_fs = get_fs();
set_fs(KERNEL_DS);
case 12: /* IPC_STAT */
{
struct shmid_ds s;
- struct solaris_shmid_ds __user *p = A(arg3);
old_fs = get_fs();
set_fs(KERNEL_DS);
ret = sys_ipc(SHMCTL, arg1, IPC_SET, 0, &s, 0);
set_fs(old_fs);
- if (put_user (s.shm_perm.uid, &(p->shm_perm.uid)) ||
- __put_user (s.shm_perm.gid, &(p->shm_perm.gid)) ||
- __put_user (s.shm_perm.cuid, &(p->shm_perm.cuid)) ||
- __put_user (s.shm_perm.cgid, &(p->shm_perm.cgid)) ||
- __put_user (s.shm_perm.mode, &(p->shm_perm.mode)) ||
- __put_user (s.shm_perm.seq, &(p->shm_perm.seq)) ||
- __put_user (s.shm_perm.key, &(p->shm_perm.key)) ||
- __put_user (s.shm_segsz, &(p->shm_segsz)) ||
- __put_user (s.shm_lpid, &(p->shm_lpid)) ||
- __put_user (s.shm_cpid, &(p->shm_cpid)) ||
- __put_user (s.shm_nattch, &(p->shm_nattch)) ||
- __put_user (s.shm_atime, &(p->shm_atime)) ||
- __put_user (s.shm_dtime, &(p->shm_dtime)) ||
- __put_user (s.shm_ctime, &(p->shm_ctime)))
+ if (get_user (s.shm_perm.uid, &(((struct solaris_shmid_ds *)A(arg3))->shm_perm.uid)) ||
+ __get_user (s.shm_perm.gid, &(((struct solaris_shmid_ds *)A(arg3))->shm_perm.gid)) ||
+ __get_user (s.shm_perm.cuid, &(((struct solaris_shmid_ds *)A(arg3))->shm_perm.cuid)) ||
+ __get_user (s.shm_perm.cgid, &(((struct solaris_shmid_ds *)A(arg3))->shm_perm.cgid)) ||
+ __get_user (s.shm_perm.mode, &(((struct solaris_shmid_ds *)A(arg3))->shm_perm.mode)) ||
+ __get_user (s.shm_perm.seq, &(((struct solaris_shmid_ds *)A(arg3))->shm_perm.seq)) ||
+ __get_user (s.shm_perm.key, &(((struct solaris_shmid_ds *)A(arg3))->shm_perm.key)) ||
+ __get_user (s.shm_segsz, &(((struct solaris_shmid_ds *)A(arg3))->shm_segsz)) ||
+ __get_user (s.shm_lpid, &(((struct solaris_shmid_ds *)A(arg3))->shm_lpid)) ||
+ __get_user (s.shm_cpid, &(((struct solaris_shmid_ds *)A(arg3))->shm_cpid)) ||
+ __get_user (s.shm_nattch, &(((struct solaris_shmid_ds *)A(arg3))->shm_nattch)) ||
+ __get_user (s.shm_atime, &(((struct solaris_shmid_ds *)A(arg3))->shm_atime)) ||
+ __get_user (s.shm_dtime, &(((struct solaris_shmid_ds *)A(arg3))->shm_dtime)) ||
+ __get_user (s.shm_ctime, &(((struct solaris_shmid_ds *)A(arg3))->shm_ctime)))
return -EFAULT;
return ret;
}
default: return -EINVAL;
}
case 2: /* shmdt */
- return sys_ipc(SHMDT, 0, 0, 0, A(arg1), 0);
+ return sys_ipc(SHMDT, 0, 0, 0, (void *)A(arg1), 0);
case 3: /* shmget */
return sys_ipc(SHMGET, arg1, arg2, arg3, NULL, 0);
}
u32 offlo;
if (regs->u_regs[UREG_G1]) {
- if (get_user (offlo, (u32 __user *)(long)((u32)regs->u_regs[UREG_I6] + 0x5c)))
+ if (get_user (offlo, (u32 *)(long)((u32)regs->u_regs[UREG_I6] + 0x5c)))
return -EFAULT;
} else {
- if (get_user (offlo, (u32 __user *)(long)((u32)regs->u_regs[UREG_I6] + 0x60)))
+ if (get_user (offlo, (u32 *)(long)((u32)regs->u_regs[UREG_I6] + 0x60)))
return -EFAULT;
}
return do_solaris_mmap((u32)regs->u_regs[UREG_I0], len, prot, flags, fd, (((u64)offhi)<<32)|offlo);
for (p=from,i=0; *p && *p != '.' && --len; p++,i++); \
else \
i = len - 1; \
- if (__put_user('\0', (char __user *)((to)+i))) \
+ if (__put_user('\0', (char *)(to+i))) \
return -EFAULT; \
}
asmlinkage int solaris_utssys(u32 buf, u32 flags, int which, u32 buf2)
{
- struct sol_uname __user *v = A(buf);
switch (which) {
case 0: /* old uname */
/* Let's cheat */
- set_utsfield(v->sysname, "SunOS", 1, 0);
+ set_utsfield(((struct sol_uname *)A(buf))->sysname,
+ "SunOS", 1, 0);
down_read(&uts_sem);
- set_utsfield(v->nodename, system_utsname.nodename, 1, 1);
+ set_utsfield(((struct sol_uname *)A(buf))->nodename,
+ system_utsname.nodename, 1, 1);
up_read(&uts_sem);
- set_utsfield(v->release, "2.6", 0, 0);
- set_utsfield(v->version, "Generic", 0, 0);
- set_utsfield(v->machine, machine(), 0, 0);
+ set_utsfield(((struct sol_uname *)A(buf))->release,
+ "2.6", 0, 0);
+ set_utsfield(((struct sol_uname *)A(buf))->version,
+ "Generic", 0, 0);
+ set_utsfield(((struct sol_uname *)A(buf))->machine,
+ machine(), 0, 0);
return 0;
case 2: /* ustat */
return -ENOSYS;
asmlinkage int solaris_utsname(u32 buf)
{
- struct sol_utsname __user *v = A(buf);
/* Why should we not lie a bit? */
down_read(&uts_sem);
- set_utsfield(v->sysname, "SunOS", 0, 0);
- set_utsfield(v->nodename, system_utsname.nodename, 1, 1);
- set_utsfield(v->release, "5.6", 0, 0);
- set_utsfield(v->version, "Generic", 0, 0);
- set_utsfield(v->machine, machine(), 0, 0);
+ set_utsfield(((struct sol_utsname *)A(buf))->sysname,
+ "SunOS", 0, 0);
+ set_utsfield(((struct sol_utsname *)A(buf))->nodename,
+ system_utsname.nodename, 1, 1);
+ set_utsfield(((struct sol_utsname *)A(buf))->release,
+ "5.6", 0, 0);
+ set_utsfield(((struct sol_utsname *)A(buf))->version,
+ "Generic", 0, 0);
+ set_utsfield(((struct sol_utsname *)A(buf))->machine,
+ machine(), 0, 0);
up_read(&uts_sem);
return 0;
}
}
len = strlen(r) + 1;
if (count < len) {
- if (copy_to_user(A(buf), r, count - 1) ||
- __put_user(0, (char __user *)A(buf) + count - 1))
+ if (copy_to_user((char *)A(buf), r, count - 1) ||
+ __put_user(0, (char *)A(buf) + count - 1))
return -EFAULT;
} else {
- if (copy_to_user(A(buf), r, len))
+ if (copy_to_user((char *)A(buf), r, len))
return -EFAULT;
}
return len;
u32 rlim_max;
};
-asmlinkage int solaris_getrlimit(unsigned int resource, struct rlimit32 __user *rlim)
+asmlinkage int solaris_getrlimit(unsigned int resource, struct rlimit32 *rlim)
{
struct rlimit r;
int ret;
return ret;
}
-asmlinkage int solaris_setrlimit(unsigned int resource, struct rlimit32 __user *rlim)
+asmlinkage int solaris_setrlimit(unsigned int resource, struct rlimit32 *rlim)
{
struct rlimit r, rold;
int ret;
mm_segment_t old_fs = get_fs ();
- int (*sys_getrlimit)(unsigned int, struct rlimit __user *) =
- (int (*)(unsigned int, struct rlimit __user *))SYS(getrlimit);
- int (*sys_setrlimit)(unsigned int, struct rlimit __user *) =
- (int (*)(unsigned int, struct rlimit __user *))SYS(setrlimit);
+ int (*sys_getrlimit)(unsigned int, struct rlimit *) =
+ (int (*)(unsigned int, struct rlimit *))SYS(getrlimit);
+ int (*sys_setrlimit)(unsigned int, struct rlimit *) =
+ (int (*)(unsigned int, struct rlimit *))SYS(setrlimit);
if (resource > RLIMIT_SOL_VMEM)
return -EINVAL;
return ret;
}
-asmlinkage int solaris_getrlimit64(unsigned int resource, struct rlimit __user *rlim)
+asmlinkage int solaris_getrlimit64(unsigned int resource, struct rlimit *rlim)
{
struct rlimit r;
int ret;
mm_segment_t old_fs = get_fs ();
- int (*sys_getrlimit)(unsigned int, struct rlimit __user *) =
- (int (*)(unsigned int, struct rlimit __user *))SYS(getrlimit);
+ int (*sys_getrlimit)(unsigned int, struct rlimit *) =
+ (int (*)(unsigned int, struct rlimit *))SYS(getrlimit);
if (resource > RLIMIT_SOL_VMEM)
return -EINVAL;
return ret;
}
-asmlinkage int solaris_setrlimit64(unsigned int resource, struct rlimit __user *rlim)
+asmlinkage int solaris_setrlimit64(unsigned int resource, struct rlimit *rlim)
{
struct rlimit r, rold;
int ret;
mm_segment_t old_fs = get_fs ();
- int (*sys_getrlimit)(unsigned int, struct rlimit __user *) =
- (int (*)(unsigned int, struct rlimit __user *))SYS(getrlimit);
- int (*sys_setrlimit)(unsigned int, struct rlimit __user *) =
- (int (*)(unsigned int, struct rlimit __user *))SYS(setrlimit);
+ int (*sys_getrlimit)(unsigned int, struct rlimit *) =
+ (int (*)(unsigned int, struct rlimit *))SYS(getrlimit);
+ int (*sys_setrlimit)(unsigned int, struct rlimit *) =
+ (int (*)(unsigned int, struct rlimit *))SYS(setrlimit);
if (resource > RLIMIT_SOL_VMEM)
return -EINVAL;
s32 stbcnt;
};
-asmlinkage int solaris_ntp_gettime(struct sol_ntptimeval __user *ntp)
+asmlinkage int solaris_ntp_gettime(struct sol_ntptimeval *ntp)
{
- int (*sys_adjtimex)(struct timex __user *) =
- (int (*)(struct timex __user *))SYS(adjtimex);
+ int (*sys_adjtimex)(struct timex *) =
+ (int (*)(struct timex *))SYS(adjtimex);
struct timex t;
int ret;
mm_segment_t old_fs = get_fs();
return ret;
}
-asmlinkage int solaris_ntp_adjtime(struct sol_timex __user *txp)
+asmlinkage int solaris_ntp_adjtime(struct sol_timex *txp)
{
- int (*sys_adjtimex)(struct timex __user *) =
- (int (*)(struct timex __user *))SYS(adjtimex);
+ int (*sys_adjtimex)(struct timex *) =
+ (int (*)(struct timex *))SYS(adjtimex);
struct timex t;
int ret, err;
mm_segment_t old_fs = get_fs();
struct sigaction sa, old;
int ret;
mm_segment_t old_fs = get_fs();
- int (*sys_sigaction)(int,struct sigaction __user *,struct sigaction __user *) =
- (int (*)(int,struct sigaction __user *,struct sigaction __user *))SYS(sigaction);
+ int (*sys_sigaction)(int,struct sigaction *,struct sigaction *) =
+ (int (*)(int,struct sigaction *,struct sigaction *))SYS(sigaction);
sigemptyset(&sa.sa_mask);
sa.sa_restorer = NULL;
sa.sa_flags = 0;
if (one_shot) sa.sa_flags = SA_ONESHOT | SA_NOMASK;
set_fs (KERNEL_DS);
- ret = sys_sigaction(sig, (void __user *)&sa, (void __user *)&old);
+ ret = sys_sigaction(sig, &sa, &old);
set_fs (old_fs);
if (ret < 0) return ret;
- return (u32)(unsigned long)old.sa_handler;
+ return (u32)(long)old.sa_handler;
}
static inline long solaris_signal(int sig, u32 arg)
static inline long solaris_sigignore(int sig)
{
- return sig_handler(sig, (u32)(unsigned long)SIG_IGN, 0);
+ return sig_handler (sig, (u32)SIG_IGN, 0);
}
static inline long solaris_sigpause(int sig)
sigset_t in_s, *ins, out_s, *outs;
mm_segment_t old_fs = get_fs();
int ret;
- int (*sys_sigprocmask)(int,sigset_t __user *,sigset_t __user *) =
- (int (*)(int,sigset_t __user *,sigset_t __user *))SYS(sigprocmask);
+ int (*sys_sigprocmask)(int,sigset_t *,sigset_t *) =
+ (int (*)(int,sigset_t *,sigset_t *))SYS(sigprocmask);
ins = NULL; outs = NULL;
if (in) {
u32 tmp[2];
- if (copy_from_user (tmp, (void __user *)A(in), 2*sizeof(u32)))
+ if (copy_from_user (tmp, (sol_sigset_t *)A(in), 2*sizeof(u32)))
return -EFAULT;
ins = &in_s;
if (mapin (tmp, ins)) return -EINVAL;
}
if (out) outs = &out_s;
set_fs (KERNEL_DS);
- ret = sys_sigprocmask((how == 3) ? SIG_SETMASK : how,
- (void __user *)ins, (void __user *)outs);
+ ret = sys_sigprocmask((how == 3) ? SIG_SETMASK : how, ins, outs);
set_fs (old_fs);
if (ret) return ret;
if (out) {
tmp[2] = 0; tmp[3] = 0;
if (mapout (outs, tmp)) return -EINVAL;
- if (copy_to_user((void __user *)A(out), tmp, 4*sizeof(u32)))
+ if (copy_to_user((sol_sigset_t *)A(out), tmp, 4*sizeof(u32)))
return -EFAULT;
}
return 0;
sigset_t s;
u32 tmp[2];
- if (copy_from_user (tmp, (sol_sigset_t __user *)A(mask), 2*sizeof(u32)))
+ if (copy_from_user (tmp, (sol_sigset_t *)A(mask), 2*sizeof(u32)))
return -EFAULT;
if (mapin (tmp, &s)) return -EINVAL;
return (long)s.sig[0];
struct sigaction s, s2;
int ret;
mm_segment_t old_fs = get_fs();
- struct sol_sigaction __user *p = (void __user *)A(old);
- int (*sys_sigaction)(int,struct sigaction __user *,struct sigaction __user *) =
- (int (*)(int,struct sigaction __user *,struct sigaction __user *))SYS(sigaction);
+ int (*sys_sigaction)(int,struct sigaction *,struct sigaction *) =
+ (int (*)(int,struct sigaction *,struct sigaction *))SYS(sigaction);
sig = mapsig(sig);
if (sig < 0) {
/* We cheat a little bit for Solaris only signals */
- if (old && clear_user(p, sizeof(struct sol_sigaction)))
+ if (old && clear_user((struct sol_sigaction *)A(old), sizeof(struct sol_sigaction)))
return -EFAULT;
return 0;
}
if (act) {
- if (get_user (tmp, &p->sa_flags))
+ if (get_user (tmp, &((struct sol_sigaction *)A(act))->sa_flags))
return -EFAULT;
s.sa_flags = 0;
if (tmp & SOLARIS_SA_ONSTACK) s.sa_flags |= SA_STACK;
if (tmp & SOLARIS_SA_NODEFER) s.sa_flags |= SA_NOMASK;
if (tmp & SOLARIS_SA_RESETHAND) s.sa_flags |= SA_ONESHOT;
if (tmp & SOLARIS_SA_NOCLDSTOP) s.sa_flags |= SA_NOCLDSTOP;
- if (get_user (tmp, &p->sa_handler) ||
- copy_from_user (tmp2, &p->sa_mask, 2*sizeof(u32)))
+ if (get_user (tmp, &((struct sol_sigaction *)A(act))->sa_handler) ||
+ copy_from_user (tmp2, &((struct sol_sigaction *)A(act))->sa_mask, 2*sizeof(u32)))
return -EFAULT;
s.sa_handler = (__sighandler_t)A(tmp);
if (mapin (tmp2, &s.sa_mask)) return -EINVAL;
- s.sa_restorer = NULL;
+ s.sa_restorer = 0;
}
set_fs(KERNEL_DS);
- ret = sys_sigaction(sig, act ? (void __user *)&s : NULL,
- old ? (void __user *)&s2 : NULL);
+ ret = sys_sigaction(sig, act ? &s : NULL, old ? &s2 : NULL);
set_fs(old_fs);
if (ret) return ret;
if (old) {
if (s2.sa_flags & SA_NOMASK) tmp |= SOLARIS_SA_NODEFER;
if (s2.sa_flags & SA_ONESHOT) tmp |= SOLARIS_SA_RESETHAND;
if (s2.sa_flags & SA_NOCLDSTOP) tmp |= SOLARIS_SA_NOCLDSTOP;
- if (put_user (tmp, &p->sa_flags) ||
- __put_user ((u32)(unsigned long)s2.sa_handler, &p->sa_handler) ||
- copy_to_user (&p->sa_mask, tmp2, 4*sizeof(u32)))
+ if (put_user (tmp, &((struct sol_sigaction *)A(old))->sa_flags) ||
+ __put_user ((u32)(long)s2.sa_handler, &((struct sol_sigaction *)A(old))->sa_handler) ||
+ copy_to_user (&((struct sol_sigaction *)A(old))->sa_mask, tmp2, 4*sizeof(u32)))
return -EFAULT;
}
return 0;
}
if (mapout (&s, tmp)) return -EINVAL;
tmp[2] = 0; tmp[3] = 0;
- if (copy_to_user ((u32 __user *)A(set), tmp, sizeof(tmp)))
+ if (copy_to_user ((u32 *)A(set), tmp, sizeof(tmp)))
return -EFAULT;
return 0;
}
asmlinkage int solaris_wait(u32 stat_loc)
{
- unsigned __user *p = (unsigned __user *)A(stat_loc);
- int (*sys_wait4)(pid_t,unsigned __user *, int, struct rusage __user *) =
- (int (*)(pid_t,unsigned __user *, int, struct rusage __user *))SYS(wait4);
+ int (*sys_wait4)(pid_t,unsigned int *, int, struct rusage *) =
+ (int (*)(pid_t,unsigned int *, int, struct rusage *))SYS(wait4);
int ret, status;
- ret = sys_wait4(-1, p, WUNTRACED, NULL);
+ ret = sys_wait4(-1, (unsigned int *)A(stat_loc), WUNTRACED, NULL);
if (ret >= 0 && stat_loc) {
- if (get_user (status, p))
+ if (get_user (status, (unsigned int *)A(stat_loc)))
return -EFAULT;
if (((status - 1) & 0xffff) < 0xff)
status = linux_to_solaris_signals[status & 0x7f] & 0x7f;
else if ((status & 0xff) == 0x7f)
status = (linux_to_solaris_signals[(status >> 8) & 0xff] << 8) | 0x7f;
- if (__put_user (status, p))
+ if (__put_user (status, (unsigned int *)A(stat_loc)))
return -EFAULT;
}
return ret;
asmlinkage int solaris_waitid(int idtype, s32 pid, u32 info, int options)
{
- int (*sys_wait4)(pid_t,unsigned __user *, int, struct rusage __user *) =
- (int (*)(pid_t,unsigned __user *, int, struct rusage __user *))SYS(wait4);
+ int (*sys_wait4)(pid_t,unsigned int *, int, struct rusage *) =
+ (int (*)(pid_t,unsigned int *, int, struct rusage *))SYS(wait4);
int opts, status, ret;
switch (idtype) {
if (options & SOLARIS_WUNTRACED) opts |= WUNTRACED;
if (options & SOLARIS_WNOHANG) opts |= WNOHANG;
current->state = TASK_RUNNING;
- ret = sys_wait4(pid, (unsigned int __user *)A(info), opts, NULL);
+ ret = sys_wait4(pid, (unsigned int *)A(info), opts, NULL);
if (ret < 0) return ret;
if (info) {
- struct sol_siginfo __user *s = (void __user *)A(info);
+ struct sol_siginfo *s = (struct sol_siginfo *)A(info);
- if (get_user (status, (unsigned int __user *)A(info)))
+ if (get_user (status, (unsigned int *)A(info)))
return -EFAULT;
if (__put_user (SOLARIS_SIGCLD, &s->si_signo) ||
return sunos_getsockopt(fd, level, optname, optval, optlen);
}
-asmlinkage int solaris_connect(int fd, struct sockaddr __user *addr, int addrlen)
+asmlinkage int solaris_connect(int fd, struct sockaddr *addr, int addrlen)
{
- int (*sys_connect)(int, struct sockaddr __user *, int) =
- (int (*)(int, struct sockaddr __user *, int))SYS(connect);
+ int (*sys_connect)(int, struct sockaddr *, int) =
+ (int (*)(int, struct sockaddr *, int))SYS(connect);
return sys_connect(fd, addr, addrlen);
}
-asmlinkage int solaris_accept(int fd, struct sockaddr __user *addr, int __user *addrlen)
+asmlinkage int solaris_accept(int fd, struct sockaddr *addr, int *addrlen)
{
- int (*sys_accept)(int, struct sockaddr __user *, int __user *) =
- (int (*)(int, struct sockaddr __user *, int __user *))SYS(accept);
+ int (*sys_accept)(int, struct sockaddr *, int *) =
+ (int (*)(int, struct sockaddr *, int *))SYS(accept);
return sys_accept(fd, addr, addrlen);
}
return fl;
}
-asmlinkage int solaris_recvfrom(int s, char __user *buf, int len, int flags, u32 from, u32 fromlen)
+asmlinkage int solaris_recvfrom(int s, char *buf, int len, int flags, u32 from, u32 fromlen)
{
- int (*sys_recvfrom)(int, void __user *, size_t, unsigned, struct sockaddr __user *, int __user *) =
- (int (*)(int, void __user *, size_t, unsigned, struct sockaddr __user *, int __user *))SYS(recvfrom);
+ int (*sys_recvfrom)(int, void *, size_t, unsigned, struct sockaddr *, int *) =
+ (int (*)(int, void *, size_t, unsigned, struct sockaddr *, int *))SYS(recvfrom);
- return sys_recvfrom(s, buf, len, solaris_to_linux_msgflags(flags), A(from), A(fromlen));
+ return sys_recvfrom(s, buf, len, solaris_to_linux_msgflags(flags), (struct sockaddr *)A(from), (int *)A(fromlen));
}
-asmlinkage int solaris_recv(int s, char __user *buf, int len, int flags)
+asmlinkage int solaris_recv(int s, char *buf, int len, int flags)
{
- int (*sys_recvfrom)(int, void __user *, size_t, unsigned, struct sockaddr __user *, int __user *) =
- (int (*)(int, void __user *, size_t, unsigned, struct sockaddr __user *, int __user *))SYS(recvfrom);
+ int (*sys_recvfrom)(int, void *, size_t, unsigned, struct sockaddr *, int *) =
+ (int (*)(int, void *, size_t, unsigned, struct sockaddr *, int *))SYS(recvfrom);
return sys_recvfrom(s, buf, len, solaris_to_linux_msgflags(flags), NULL, NULL);
}
-asmlinkage int solaris_sendto(int s, char __user *buf, int len, int flags, u32 to, u32 tolen)
+asmlinkage int solaris_sendto(int s, char *buf, int len, int flags, u32 to, u32 tolen)
{
- int (*sys_sendto)(int, void __user *, size_t, unsigned, struct sockaddr __user *, int __user *) =
- (int (*)(int, void __user *, size_t, unsigned, struct sockaddr __user *, int __user *))SYS(sendto);
+ int (*sys_sendto)(int, void *, size_t, unsigned, struct sockaddr *, int *) =
+ (int (*)(int, void *, size_t, unsigned, struct sockaddr *, int *))SYS(sendto);
- return sys_sendto(s, buf, len, solaris_to_linux_msgflags(flags), A(to), A(tolen));
+ return sys_sendto(s, buf, len, solaris_to_linux_msgflags(flags), (struct sockaddr *)A(to), (int *)A(tolen));
}
asmlinkage int solaris_send(int s, char *buf, int len, int flags)
};
static inline int msghdr_from_user32_to_kern(struct msghdr *kmsg,
- struct sol_nmsghdr __user *umsg)
+ struct sol_nmsghdr *umsg)
{
u32 tmp1, tmp2, tmp3;
int err;
if (err)
return -EFAULT;
- kmsg->msg_name = A(tmp1);
- kmsg->msg_iov = A(tmp2);
- kmsg->msg_control = A(tmp3);
+ kmsg->msg_name = (void *)A(tmp1);
+ kmsg->msg_iov = (struct iovec *)A(tmp2);
+ kmsg->msg_control = (void *)A(tmp3);
err = get_user(kmsg->msg_namelen, &umsg->msg_namelen);
err |= get_user(kmsg->msg_controllen, &umsg->msg_controllen);
return err;
}
-asmlinkage int solaris_sendmsg(int fd, struct sol_nmsghdr __user *user_msg, unsigned user_flags)
+asmlinkage int solaris_sendmsg(int fd, struct sol_nmsghdr *user_msg, unsigned user_flags)
{
struct socket *sock;
char address[MAX_SOCK_ADDR];
total_len = err;
if(kern_msg.msg_controllen) {
- struct sol_cmsghdr __user *ucmsg = kern_msg.msg_control;
+ struct sol_cmsghdr *ucmsg = (struct sol_cmsghdr *)kern_msg.msg_control;
unsigned long *kcmsg;
compat_size_t cmlen;
return err;
}
-asmlinkage int solaris_recvmsg(int fd, struct sol_nmsghdr __user *user_msg, unsigned int user_flags)
+asmlinkage int solaris_recvmsg(int fd, struct sol_nmsghdr *user_msg, unsigned int user_flags)
{
struct iovec iovstack[UIO_FASTIOV];
struct msghdr kern_msg;
char addr[MAX_SOCK_ADDR];
struct socket *sock;
struct iovec *iov = iovstack;
- struct sockaddr __user *uaddr;
- int __user *uaddr_len;
+ struct sockaddr *uaddr;
+ int *uaddr_len;
unsigned long cmsg_ptr;
int err, total_len, len = 0;
SOLD("done");
}
-static int timod_optmgmt(unsigned int fd, int flag, char __user *opt_buf, int opt_len, int do_ret)
+static int timod_optmgmt(unsigned int fd, int flag, char *opt_buf, int opt_len, int do_ret)
{
int error, failed;
int ret_space, ret_len;
return 0;
}
-int timod_putmsg(unsigned int fd, char __user *ctl_buf, int ctl_len,
- char __user *data_buf, int data_len, int flags)
+int timod_putmsg(unsigned int fd, char *ctl_buf, int ctl_len,
+ char *data_buf, int data_len, int flags)
{
int ret, error, terror;
char *buf;
struct sol_socket_struct *sock;
mm_segment_t old_fs = get_fs();
long args[6];
- int (*sys_socketcall)(int, unsigned long __user *) =
- (int (*)(int, unsigned long __user *))SYS(socketcall);
- int (*sys_sendto)(int, void __user *, size_t, unsigned, struct sockaddr __user *, int) =
- (int (*)(int, void __user *, size_t, unsigned, struct sockaddr __user *, int))SYS(sendto);
+ int (*sys_socketcall)(int, unsigned long *) =
+ (int (*)(int, unsigned long *))SYS(socketcall);
+ int (*sys_sendto)(int, void *, size_t, unsigned, struct sockaddr *, int) =
+ (int (*)(int, void *, size_t, unsigned, struct sockaddr *, int))SYS(sendto);
filp = current->files->fd[fd];
ino = filp->f_dentry->d_inode;
sock = (struct sol_socket_struct *)filp->private_data;
SOLD("entry");
- if (get_user(ret, (int __user *)A(ctl_buf)))
+ if (get_user(ret, (int *)A(ctl_buf)))
return -EFAULT;
switch (ret) {
case T_BIND_REQ:
printk("\n");
}
#endif
- err = sys_sendto(fd, data_buf, data_len, 0, req.DEST_length > 0 ? (struct sockaddr __user *)(ctl_buf+req.DEST_offset) : NULL, req.DEST_length);
+ err = sys_sendto(fd, data_buf, data_len, 0, req.DEST_length > 0 ? (struct sockaddr*)(ctl_buf+req.DEST_offset) : NULL, req.DEST_length);
if (err == data_len)
return 0;
if(err >= 0) {
return -EINVAL;
}
-int timod_getmsg(unsigned int fd, char __user *ctl_buf, int ctl_maxlen, s32 __user *ctl_len,
- char __user *data_buf, int data_maxlen, s32 __user *data_len, int *flags_p)
+int timod_getmsg(unsigned int fd, char *ctl_buf, int ctl_maxlen, s32 *ctl_len,
+ char *data_buf, int data_maxlen, s32 *data_len, int *flags_p)
{
int error;
int oldflags;
struct T_unitdata_ind udi;
mm_segment_t old_fs = get_fs();
long args[6];
- char __user *tmpbuf;
+ char *tmpbuf;
int tmplen;
- int (*sys_socketcall)(int, unsigned long __user *) =
- (int (*)(int, unsigned long __user *))SYS(socketcall);
- int (*sys_recvfrom)(int, void __user *, size_t, unsigned, struct sockaddr __user *, int __user *);
+ int (*sys_socketcall)(int, unsigned long *) =
+ (int (*)(int, unsigned long *))SYS(socketcall);
+ int (*sys_recvfrom)(int, void *, size_t, unsigned, struct sockaddr *, int *);
SOLD("entry");
SOLDD(("%u %p %d %p %p %d %p %d\n", fd, ctl_buf, ctl_maxlen, ctl_len, data_buf, data_maxlen, data_len, *flags_p));
oldflags = filp->f_flags;
filp->f_flags |= O_NONBLOCK;
SOLD("calling recvfrom");
- sys_recvfrom = (int (*)(int, void __user *, size_t, unsigned, struct sockaddr __user *, int __user *))SYS(recvfrom);
- error = sys_recvfrom(fd, data_buf, data_maxlen, 0, (struct sockaddr __user *)tmpbuf, ctl_len);
+ sys_recvfrom = (int (*)(int, void *, size_t, unsigned, struct sockaddr *, int *))SYS(recvfrom);
+ error = sys_recvfrom(fd, data_buf, data_maxlen, 0, (struct sockaddr*)tmpbuf, ctl_len);
filp->f_flags = oldflags;
if (error < 0)
return error;
{
struct file *filp;
struct inode *ino;
- struct strbuf __user *ctlptr;
- struct strbuf __user *datptr;
+ struct strbuf *ctlptr, *datptr;
struct strbuf ctl, dat;
- int __user *flgptr;
+ int *flgptr;
int flags;
int error = -EBADF;
if (!ino->i_sock)
goto out;
- ctlptr = (struct strbuf __user *)A(arg1);
- datptr = (struct strbuf __user *)A(arg2);
- flgptr = (int __user *)A(arg3);
+ ctlptr = (struct strbuf *)A(arg1);
+ datptr = (struct strbuf *)A(arg2);
+ flgptr = (int *)A(arg3);
error = -EFAULT;
goto out;
}
- error = timod_getmsg(fd,A(ctl.buf),ctl.maxlen,&ctlptr->len,
- A(dat.buf),dat.maxlen,&datptr->len,&flags);
+ error = timod_getmsg(fd,(char*)A(ctl.buf),ctl.maxlen,&ctlptr->len,
+ (char*)A(dat.buf),dat.maxlen,&datptr->len,&flags);
if (!error && put_user(flags,flgptr))
error = -EFAULT;
{
struct file *filp;
struct inode *ino;
- struct strbuf __user *ctlptr;
- struct strbuf __user *datptr;
+ struct strbuf *ctlptr, *datptr;
struct strbuf ctl, dat;
int flags = (int) arg3;
int error = -EBADF;
(imajor(ino) != 30 || iminor(ino) != 1))
goto out;
- ctlptr = A(arg1);
- datptr = A(arg2);
+ ctlptr = (struct strbuf *)A(arg1);
+ datptr = (struct strbuf *)A(arg2);
error = -EFAULT;
dat.buf = 0;
}
- error = timod_putmsg(fd,A(ctl.buf),ctl.len,
- A(dat.buf),dat.len,flags);
+ error = timod_putmsg(fd,(char*)A(ctl.buf),ctl.len,
+ (char*)A(dat.buf),dat.len,flags);
out:
unlock_kernel();
SOLD("done");
source "fs/Kconfig.binfmt"
-config EXTERNFS
- tristate "Support for host-based filesystems"
-
config HOSTFS
tristate "Host filesystem"
help
If you'd like to be able to work with files stored on the host,
say Y or M here; otherwise say N.
-config HUMFS
- tristate 'Usable host filesystem'
- depends on EXTERNFS
-
config HPPFS
tristate "HoneyPot ProcFS"
help
config UNIX98_PTYS
bool "Unix98 PTY support"
- ---help---
- A pseudo terminal (PTY) is a software device consisting of two
- halves: a master and a slave. The slave device behaves identical to
- a physical terminal; the master device is used by a process to
- read data from and write data to the slave, thereby emulating a
- terminal. Typical programs for the master side are telnet servers
- and xterms.
-
- Linux has traditionally used the BSD-like names /dev/ptyxx for
- masters and /dev/ttyxx for slaves of pseudo terminals. This scheme
- has a number of problems. The GNU C library glibc 2.1 and later,
- however, supports the Unix98 naming standard: in order to acquire a
- pseudo terminal, a process opens /dev/ptmx; the number of the pseudo
- terminal is then made available to the process and the pseudo
- terminal slave can be accessed as /dev/pts/<number>. What was
- traditionally /dev/ttyp2 will then be /dev/pts/2, for example.
-
- All modern Linux systems use the Unix98 ptys. Say Y unless
- you're on an embedded system and want to conserve memory.
-
-config LEGACY_PTYS
- bool "Legacy (BSD) PTY support"
- default y
- ---help---
- A pseudo terminal (PTY) is a software device consisting of two
- halves: a master and a slave. The slave device behaves identical to
- a physical terminal; the master device is used by a process to
- read data from and write data to the slave, thereby emulating a
- terminal. Typical programs for the master side are telnet servers
- and xterms.
-
- Linux has traditionally used the BSD-like names /dev/ptyxx
- for masters and /dev/ttyxx for slaves of pseudo
- terminals. This scheme has a number of problems, including
- security. This option enables these legacy devices; on most
- systems, it is safe to say N.
-
-
-config LEGACY_PTY_COUNT
- int "Maximum number of legacy PTY in use"
- depends on LEGACY_PTYS
- default "256"
- ---help---
- The maximum number of legacy PTYs that can be used at any one time.
- The default is 256, and should be more than enough. Embedded
- systems may want to reduce this to save memory.
- When not in use, each legacy PTY occupies 12 bytes on 32-bit
- architectures and 24 bytes on 64-bit architectures.
+config UNIX98_PTY_COUNT
+ int "Maximum number of Unix98 PTYs in use (0-2048)"
+ depends on UNIX98_PTYS
+ default "256"
config WATCHDOG
bool "Watchdog Timer Support"
-D_LARGEFILE64_SOURCE $(ARCH_INCLUDE) -Derrno=kernel_errno \
-Dsigprocmask=kernel_sigprocmask $(MODE_INCLUDE)
-check_gcc = $(shell if $(CC) $(1) -S -o /dev/null -xc /dev/null > /dev/null 2>&1; then echo "$(1)"; else echo "$(2)"; fi)
-
-CFLAGS += $(call check_gcc,-fno-unit-at-a-time,)
-
LINK_WRAPS = -Wl,--wrap,malloc -Wl,--wrap,free -Wl,--wrap,calloc
# These are needed for clean and mrproper, since in that case .config is not
# included; the values here are meaningless
CONFIG_NEST_LEVEL ?= 0
-CONFIG_KERNEL_HALF_GIGS ?= 0
+CONFIG_KERNEL_HALF_GIGS ?= 0
SIZE = (($(CONFIG_NEST_LEVEL) + $(CONFIG_KERNEL_HALF_GIGS)) * 0x20000000)
CONFIG_KERNEL_STACK_ORDER ?= 2
STACK_SIZE := $(shell echo $$[ 4096 * (1 << $(CONFIG_KERNEL_STACK_ORDER)) ] )
-ifndef START
- START = $$(($(TOP_ADDR) - $(SIZE)))
-endif
-
AFLAGS_vmlinux.lds.o = $(shell echo -U$(SUBARCH) \
- -DSTART=$(START) -DELF_ARCH=$(ELF_ARCH) \
+ -DSTART=$$(($(TOP_ADDR) - $(SIZE))) -DELF_ARCH=$(ELF_ARCH) \
-DELF_FORMAT=\"$(ELF_FORMAT)\" $(CPP_MODE_TT) \
-DKERNEL_STACK_SIZE=$(STACK_SIZE))
$(Q)$(MAKE) $(build)=$@
export SUBARCH USER_CFLAGS OS
-
-all: linux
-
-define archhelp
- echo '* linux - Binary kernel image (./linux)'
-endef
TOP_ADDR = 0xc0000000
endif
-ifeq ($(CONFIG_MODE_SKAS),y)
- ifneq ($(CONFIG_MODE_TT),y)
- START = 0x8048000
- endif
-endif
-
CFLAGS += -U__$(SUBARCH)__ -U$(SUBARCH)
-
-ifneq ($(CONFIG_GPROF),y)
-ARCH_CFLAGS += -DUM_FASTCALL
-endif
-
ELF_ARCH = $(SUBARCH)
ELF_FORMAT = elf32-$(SUBARCH)
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
CONFIG_NET=y
CONFIG_BINFMT_ELF=y
CONFIG_BINFMT_MISC=y
-CONFIG_EXTERNFS=y
CONFIG_HOSTFS=y
-CONFIG_HUMFS=y
CONFIG_HPPFS=y
CONFIG_MCONSOLE=y
CONFIG_MAGIC_SYSRQ=y
#
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
-# CONFIG_POSIX_MQUEUE is not set
+CONFIG_POSIX_MQUEUE=y
CONFIG_BSD_PROCESS_ACCT=y
-
-#
-# Class Based Kernel Resource Management
-#
-# CONFIG_CKRM is not set
-# CONFIG_BSD_PROCESS_ACCT_V3 is not set
CONFIG_SYSCTL=y
-# CONFIG_AUDIT is not set
+CONFIG_AUDIT=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_HOTPLUG is not set
# CONFIG_IKCONFIG is not set
# CONFIG_EMBEDDED is not set
-# CONFIG_DELAY_ACCT is not set
CONFIG_KALLSYMS=y
-# CONFIG_KALLSYMS_EXTRA_PASS is not set
CONFIG_FUTEX=y
CONFIG_EPOLL=y
CONFIG_IOSCHED_NOOP=y
#
# Generic Driver Options
#
-CONFIG_PREVENT_FIRMWARE_BUILD=y
#
# Character Devices
CONFIG_CON_CHAN="xterm"
CONFIG_SSL_CHAN="pty"
CONFIG_UNIX98_PTYS=y
-CONFIG_LEGACY_PTYS=y
-CONFIG_LEGACY_PTY_COUNT=256
+CONFIG_UNIX98_PTY_COUNT=256
# CONFIG_WATCHDOG is not set
CONFIG_UML_SOUND=y
CONFIG_SOUND=y
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# QoS and/or fair queueing
#
# CONFIG_NET_SCHED is not set
-# CONFIG_NET_CLS_ROUTE is not set
#
# Network testing
# CONFIG_HAMRADIO is not set
# CONFIG_IRDA is not set
# CONFIG_BT is not set
+# CONFIG_TUX is not set
CONFIG_DUMMY=y
# CONFIG_BONDING is not set
# CONFIG_EQUALIZER is not set
CONFIG_REISERFS_FS=y
# CONFIG_REISERFS_CHECK is not set
# CONFIG_REISERFS_PROC_INFO is not set
-# CONFIG_REISERFS_FS_XATTR is not set
+CONFIG_REISERFS_FS_XATTR=y
+CONFIG_REISERFS_FS_POSIX_ACL=y
+CONFIG_REISERFS_FS_SECURITY=y
# CONFIG_JFS_FS is not set
+CONFIG_FS_POSIX_ACL=y
# CONFIG_XFS_FS is not set
CONFIG_MINIX_FS=y
# CONFIG_ROMFS_FS is not set
CONFIG_FAT_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
-CONFIG_FAT_DEFAULT_CODEPAGE=437
-CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
# CONFIG_NTFS_FS is not set
#
CONFIG_TMPFS=y
# CONFIG_HUGETLB_PAGE is not set
CONFIG_RAMFS=y
-# CONFIG_RELAYFS_FS is not set
#
# Miscellaneous filesystems
# CONFIG_EFS_FS is not set
CONFIG_JFFS_FS=y
CONFIG_JFFS_FS_VERBOSE=0
-# CONFIG_JFFS_PROC_FS is not set
# CONFIG_JFFS2_FS is not set
-# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
# CONFIG_CRAMFS is not set
# CONFIG_VXFS_FS is not set
# CONFIG_HPFS_FS is not set
# CONFIG_NLS_ISO8859_8 is not set
# CONFIG_NLS_CODEPAGE_1250 is not set
# CONFIG_NLS_CODEPAGE_1251 is not set
-# CONFIG_NLS_ASCII is not set
# CONFIG_NLS_ISO8859_1 is not set
# CONFIG_NLS_ISO8859_2 is not set
# CONFIG_NLS_ISO8859_3 is not set
#
# Library routines
#
-# CONFIG_CRC_CCITT is not set
# CONFIG_CRC32 is not set
# CONFIG_LIBCRC32C is not set
#
# CONFIG_MTD_CFI is not set
# CONFIG_MTD_JEDECPROBE is not set
-CONFIG_MTD_MAP_BANK_WIDTH_1=y
-CONFIG_MTD_MAP_BANK_WIDTH_2=y
-CONFIG_MTD_MAP_BANK_WIDTH_4=y
-# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
-# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
-# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
-CONFIG_MTD_CFI_I1=y
-CONFIG_MTD_CFI_I2=y
-# CONFIG_MTD_CFI_I4 is not set
-# CONFIG_MTD_CFI_I8 is not set
# CONFIG_MTD_RAM is not set
# CONFIG_MTD_ROM is not set
# CONFIG_MTD_ABSENT is not set
# Self-contained MTD device drivers
#
# CONFIG_MTD_SLRAM is not set
-# CONFIG_MTD_PHRAM is not set
# CONFIG_MTD_MTDRAM is not set
CONFIG_MTD_BLKMTD=y
#pcap-objs := pcap_kern.o pcap_user.o $(PCAP)
net-objs := net_kern.o net_user.o
mconsole-objs := mconsole_kern.o mconsole_user.o
-hostaudio-objs := hostaudio_kern.o
+hostaudio-objs := hostaudio_kern.o hostaudio_user.o
ubd-objs := ubd_kern.o ubd_user.o
port-objs := port_kern.o port_user.o
harddog-objs := harddog_kern.o harddog_user.o
#include "irq_user.h"
#include "sigio.h"
#include "line.h"
-#include "os.h"
static void *not_configged_init(char *str, int device, struct chan_opts *opts)
{
.winch = 0,
};
-void generic_close(int fd, void *unused)
-{
- os_close_file(fd);
-}
-
-int generic_read(int fd, char *c_out, void *unused)
-{
- int n;
-
- n = os_read_file(fd, c_out, sizeof(*c_out));
-
- if(n == -EAGAIN)
- return(0);
- else if(n == 0)
- return(-EIO);
- return(n);
-}
-
-/* XXX Trivial wrapper around os_write_file */
-
-int generic_write(int fd, const char *buf, int n, void *unused)
-{
- return(os_write_file(fd, buf, n));
-}
-
-int generic_window_size(int fd, void *unused, unsigned short *rows_out,
- unsigned short *cols_out)
-{
- int rows, cols;
- int ret;
-
- ret = os_window_size(fd, &rows, &cols);
- if(ret < 0)
- return(ret);
-
- ret = ((*rows_out != rows) || (*cols_out != cols));
-
- *rows_out = rows;
- *cols_out = cols;
-
- return(ret);
-}
-
-void generic_free(void *data)
-{
- kfree(data);
-}
-
static void tty_receive_char(struct tty_struct *tty, char ch)
{
if(tty == NULL) return;
#include "choose-mode.h"
#include "mode.h"
-static void winch_handler(int sig)
+void generic_close(int fd, void *unused)
{
+ os_close_file(fd);
}
-struct winch_data {
- int pty_fd;
- int pipe_fd;
- int close_me;
-};
+int generic_read(int fd, char *c_out, void *unused)
+{
+ int n;
+
+ n = os_read_file(fd, c_out, sizeof(*c_out));
+
+ if(n == -EAGAIN)
+ return(0);
+ else if(n == 0)
+ return(-EIO);
+ return(n);
+}
+
+/* XXX Trivial wrapper around os_write_file */
+
+int generic_write(int fd, const char *buf, int n, void *unused)
+{
+ return(os_write_file(fd, buf, n));
+}
-/* XXX This breaks horribly (by hanging UML) when moved to chan_kern.c -
- * needs investigation
- */
int generic_console_write(int fd, const char *buf, int n, void *unused)
{
struct termios save, new;
return(err);
}
+int generic_window_size(int fd, void *unused, unsigned short *rows_out,
+ unsigned short *cols_out)
+{
+ int rows, cols;
+ int ret;
+
+ ret = os_window_size(fd, &rows, &cols);
+ if(ret < 0)
+ return(ret);
+
+ ret = ((*rows_out != rows) || (*cols_out != cols));
+
+ *rows_out = rows;
+ *cols_out = cols;
+
+ return(ret);
+}
+
+void generic_free(void *data)
+{
+ kfree(data);
+}
+
+static void winch_handler(int sig)
+{
+}
+
+struct winch_data {
+ int pty_fd;
+ int pipe_fd;
+ int close_me;
+};
+
static int winch_thread(void *arg)
{
struct winch_data *data = arg;
#define PATH_LEN_V2 MAXPATHLEN
struct cow_header_v2 {
- __u32 magic;
- __u32 version;
+ unsigned long magic;
+ unsigned long version;
char backing_file[PATH_LEN_V2];
time_t mtime;
__u64 size;
#include <stdlib.h>
#include <unistd.h>
#include <termios.h>
-#include <errno.h>
#include "user.h"
#include "user_util.h"
#include "chan_user.h"
int fd_open(int input, int output, int primary, void *d, char **dev_out)
{
struct fd_chan *data = d;
- int err;
if(data->raw && isatty(data->fd)){
- CATCH_EINTR(err = tcgetattr(data->fd, &data->tt));
- if(err)
- return(err);
-
- err = raw(data->fd);
- if(err)
- return(err);
+ tcgetattr(data->fd, &data->tt);
+ raw(data->fd, 0);
}
sprintf(data->str, "%d", data->fd);
*dev_out = data->str;
void fd_close(int fd, void *d)
{
struct fd_chan *data = d;
- int err;
if(data->raw && isatty(fd)){
- CATCH_EINTR(err = tcsetattr(fd, TCSAFLUSH, &data->tt));
- if(err)
- printk("Failed to restore terminal state - "
- "errno = %d\n", -err);
+ tcsetattr(fd, TCSAFLUSH, &data->tt);
data->raw = 0;
}
}
timer_alive = 1;
unlock_kernel();
- return nonseekable_open(inode, file);
+ return 0;
}
extern void stop_watchdog(int in_fd, int out_fd);
static ssize_t harddog_write(struct file *file, const char *data, size_t len,
loff_t *ppos)
{
+ /* Can't seek (pwrite) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
/*
* Refresh the timer.
*/
#include "asm/uaccess.h"
#include "kern_util.h"
#include "init.h"
-#include "os.h"
-
-struct hostaudio_state {
- int fd;
-};
-
-struct hostmixer_state {
- int fd;
-};
-
-#define HOSTAUDIO_DEV_DSP "/dev/sound/dsp"
-#define HOSTAUDIO_DEV_MIXER "/dev/sound/mixer"
+#include "hostaudio.h"
/* Only changed from linux_main at boot time */
char *dsp = HOSTAUDIO_DEV_DSP;
" The default is \"" HOSTAUDIO_DEV_DSP "\".\n\n"
#define MIXER_HELP \
-" This is used to specify the host mixer device to the hostaudio driver.\n"\
+" This is used to specify the host mixer device to the hostaudio driver.\n" \
" The default is \"" HOSTAUDIO_DEV_MIXER "\".\n\n"
#ifndef MODULE
if(kbuf == NULL)
return(-ENOMEM);
- err = os_read_file(state->fd, kbuf, count);
+ err = hostaudio_read_user(state, kbuf, count, ppos);
if(err < 0)
goto out;
if(copy_from_user(kbuf, buffer, count))
goto out;
- err = os_write_file(state->fd, kbuf, count);
+ err = hostaudio_write_user(state, kbuf, count, ppos);
if(err < 0)
goto out;
- *ppos += err;
out:
kfree(kbuf);
break;
}
- err = os_ioctl_generic(state->fd, cmd, (unsigned long) &data);
+ err = hostaudio_ioctl_user(state, cmd, (unsigned long) &data);
switch(cmd){
case SNDCTL_DSP_SPEED:
#endif
state = kmalloc(sizeof(struct hostaudio_state), GFP_KERNEL);
- if(state == NULL)
- return(-ENOMEM);
+ if(state == NULL) return(-ENOMEM);
if(file->f_mode & FMODE_READ) r = 1;
if(file->f_mode & FMODE_WRITE) w = 1;
- ret = os_open_file(dsp, of_set_rw(OPENFLAGS(), r, w), 0);
+ ret = hostaudio_open_user(state, r, w, dsp);
if(ret < 0){
kfree(state);
return(ret);
}
- state->fd = ret;
file->private_data = state;
return(0);
}
static int hostaudio_release(struct inode *inode, struct file *file)
{
struct hostaudio_state *state = file->private_data;
+ int ret;
#ifdef DEBUG
printk("hostaudio: release called\n");
#endif
- os_close_file(state->fd);
+ ret = hostaudio_release_user(state);
kfree(state);
- return(0);
+ return(ret);
}
/* /dev/mixer file operations */
printk("hostmixer: ioctl called\n");
#endif
- return(os_ioctl_generic(state->fd, cmd, arg));
+ return(hostmixer_ioctl_mixdev_user(state, cmd, arg));
}
static int hostmixer_open_mixdev(struct inode *inode, struct file *file)
if(file->f_mode & FMODE_READ) r = 1;
if(file->f_mode & FMODE_WRITE) w = 1;
- ret = os_open_file(mixer, of_set_rw(OPENFLAGS(), r, w), 0);
+ ret = hostmixer_open_mixdev_user(state, r, w, mixer);
if(ret < 0){
- printk("hostaudio_open_mixdev failed to open '%s', err = %d\n",
- dsp, -ret);
kfree(state);
return(ret);
}
static int hostmixer_release(struct inode *inode, struct file *file)
{
struct hostmixer_state *state = file->private_data;
+ int ret;
#ifdef DEBUG
printk("hostmixer: release called\n");
#endif
- os_close_file(state->fd);
+ ret = hostmixer_release_mixdev_user(state);
kfree(state);
- return(0);
+ return(ret);
}
LIST_HEAD(mc_requests);
-static void mc_work_proc(void *unused)
+void mc_work_proc(void *unused)
{
struct mconsole_entry *req;
unsigned long flags;
+ int done;
- while(!list_empty(&mc_requests)){
+ do {
local_save_flags(flags);
req = list_entry(mc_requests.next, struct mconsole_entry,
list);
list_del(&req->list);
+ done = list_empty(&mc_requests);
local_irq_restore(flags);
req->request.cmd->handler(&req->request);
kfree(req);
- }
+ } while(!done);
}
DECLARE_WORK(mconsole_work, mc_work_proc, NULL);
-static irqreturn_t mconsole_interrupt(int irq, void *dev_id,
- struct pt_regs *regs)
+irqreturn_t mconsole_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
int fd;
struct mconsole_entry *new;
}
}
}
- if(!list_empty(&mc_requests))
- schedule_work(&mconsole_work);
+ if(!list_empty(&mc_requests)) schedule_work(&mconsole_work);
reactivate_fd(fd, MCONSOLE_IRQ);
return(IRQ_HANDLED);
}
ptr += strlen("sysrq");
while(isspace(*ptr)) ptr++;
- mconsole_reply(req, "", 0, 0);
handle_sysrq(*ptr, ¤t->thread.regs, NULL);
+ mconsole_reply(req, "", 0, 0);
}
#else
void mconsole_sysrq(struct mc_request *req)
#include "linux/inetdevice.h"
#include "linux/ctype.h"
#include "linux/bootmem.h"
-#include "linux/ethtool.h"
-#include "asm/uaccess.h"
#include "user_util.h"
#include "kern_util.h"
#include "net_kern.h"
static int uml_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
- static const struct ethtool_drvinfo info = {
- .cmd = ETHTOOL_GDRVINFO,
- .driver = "uml virtual ethernet",
- .version = "42",
- };
- void *useraddr;
- u32 ethcmd;
-
- switch (cmd) {
- case SIOCETHTOOL:
- useraddr = ifr->ifr_data;
- if (copy_from_user(ðcmd, useraddr, sizeof(ethcmd)))
- return -EFAULT;
- switch (ethcmd) {
- case ETHTOOL_GDRVINFO:
- if (copy_to_user(useraddr, &info, sizeof(info)))
- return -EFAULT;
- return 0;
- default:
- return -EOPNOTSUPP;
- }
- default:
- return -EINVAL;
- }
+ return(-EINVAL);
}
void uml_net_user_timer_expire(unsigned long _conn)
--- /dev/null
+/*
+ * Copyright (C) 2001 Lennert Buytenhek (buytenh@gnu.org) and
+ * James Leu (jleu@mindspring.net).
+ * Copyright (C) 2001 by various other people who didn't put their name here.
+ * Licensed under the GPL.
+ */
+
+#include "linux/config.h"
+#include "linux/kernel.h"
+#include "linux/netdevice.h"
+#include "linux/rtnetlink.h"
+#include "linux/skbuff.h"
+#include "linux/socket.h"
+#include "linux/spinlock.h"
+#include "linux/module.h"
+#include "linux/init.h"
+#include "linux/etherdevice.h"
+#include "linux/list.h"
+#include "linux/inetdevice.h"
+#include "linux/ctype.h"
+#include "linux/bootmem.h"
+#include "user_util.h"
+#include "kern_util.h"
+#include "net_kern.h"
+#include "net_user.h"
+#include "mconsole_kern.h"
+#include "init.h"
+#include "irq_user.h"
+
+static spinlock_t opened_lock = SPIN_LOCK_UNLOCKED;
+LIST_HEAD(opened);
+
+static int uml_net_rx(struct net_device *dev)
+{
+ struct uml_net_private *lp = dev->priv;
+ int pkt_len;
+ struct sk_buff *skb;
+
+ /* If we can't allocate memory, try again next round. */
+ if ((skb = dev_alloc_skb(dev->mtu)) == NULL) {
+ lp->stats.rx_dropped++;
+ return 0;
+ }
+
+ skb->dev = dev;
+ skb_put(skb, dev->mtu);
+ skb->mac.raw = skb->data;
+ pkt_len = (*lp->read)(lp->fd, &skb, lp);
+
+ if (pkt_len > 0) {
+ skb_trim(skb, pkt_len);
+ skb->protocol = (*lp->protocol)(skb);
+ netif_rx(skb);
+
+ lp->stats.rx_bytes += skb->len;
+ lp->stats.rx_packets++;
+ return pkt_len;
+ }
+
+ kfree_skb(skb);
+ return pkt_len;
+}
+
+void uml_net_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = dev_id;
+ struct uml_net_private *lp = dev->priv;
+ int err;
+
+ if(!netif_running(dev))
+ return;
+
+ spin_lock(&lp->lock);
+ while((err = uml_net_rx(dev)) > 0) ;
+ if(err < 0) {
+ printk(KERN_ERR
+ "Device '%s' read returned %d, shutting it down\n",
+ dev->name, err);
+ dev_close(dev);
+ goto out;
+ }
+ reactivate_fd(lp->fd, UM_ETH_IRQ);
+
+ out:
+ spin_unlock(&lp->lock);
+}
+
+static int uml_net_open(struct net_device *dev)
+{
+ struct uml_net_private *lp = dev->priv;
+ char addr[sizeof("255.255.255.255\0")];
+ int err;
+
+ spin_lock(&lp->lock);
+
+ if(lp->fd >= 0){
+ err = -ENXIO;
+ goto out;
+ }
+
+ if(!lp->have_mac){
+ dev_ip_addr(dev, addr, &lp->mac[2]);
+ set_ether_mac(dev, lp->mac);
+ }
+
+ lp->fd = (*lp->open)(&lp->user);
+ if(lp->fd < 0){
+ err = lp->fd;
+ goto out;
+ }
+
+ err = um_request_irq(dev->irq, lp->fd, IRQ_READ, uml_net_interrupt,
+ SA_INTERRUPT | SA_SHIRQ, dev->name, dev);
+ if(err != 0){
+ printk(KERN_ERR "uml_net_open: failed to get irq(%d)\n", err);
+ if(lp->close != NULL) (*lp->close)(lp->fd, &lp->user);
+ lp->fd = -1;
+ err = -ENETUNREACH;
+ }
+
+ lp->tl.data = (unsigned long) &lp->user;
+ netif_start_queue(dev);
+
+ spin_lock(&opened_lock);
+ list_add(&lp->list, &opened);
+ spin_unlock(&opened_lock);
+ MOD_INC_USE_COUNT;
+ out:
+ spin_unlock(&lp->lock);
+ return(err);
+}
+
+static int uml_net_close(struct net_device *dev)
+{
+ struct uml_net_private *lp = dev->priv;
+
+ netif_stop_queue(dev);
+ spin_lock(&lp->lock);
+
+ free_irq(dev->irq, dev);
+ if(lp->close != NULL) (*lp->close)(lp->fd, &lp->user);
+ lp->fd = -1;
+ spin_lock(&opened_lock);
+ list_del(&lp->list);
+ spin_unlock(&opened_lock);
+
+ MOD_DEC_USE_COUNT;
+ spin_unlock(&lp->lock);
+ return 0;
+}
+
+static int uml_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct uml_net_private *lp = dev->priv;
+ unsigned long flags;
+ int len;
+
+ netif_stop_queue(dev);
+
+ spin_lock_irqsave(&lp->lock, flags);
+
+ len = (*lp->write)(lp->fd, &skb, lp);
+
+ if(len == skb->len) {
+ lp->stats.tx_packets++;
+ lp->stats.tx_bytes += skb->len;
+ dev->trans_start = jiffies;
+ netif_start_queue(dev);
+
+ /* this is normally done in the interrupt when tx finishes */
+ netif_wake_queue(dev);
+ }
+ else if(len == 0){
+ netif_start_queue(dev);
+ lp->stats.tx_dropped++;
+ }
+ else {
+ netif_start_queue(dev);
+ printk(KERN_ERR "uml_net_start_xmit: failed(%d)\n", len);
+ }
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+static struct net_device_stats *uml_net_get_stats(struct net_device *dev)
+{
+ struct uml_net_private *lp = dev->priv;
+ return &lp->stats;
+}
+
+static void uml_net_set_multicast_list(struct net_device *dev)
+{
+ if (dev->flags & IFF_PROMISC) return;
+ else if (dev->mc_count) dev->flags |= IFF_ALLMULTI;
+ else dev->flags &= ~IFF_ALLMULTI;
+}
+
+static void uml_net_tx_timeout(struct net_device *dev)
+{
+ dev->trans_start = jiffies;
+ netif_wake_queue(dev);
+}
+
+static int uml_net_set_mac(struct net_device *dev, void *addr)
+{
+ struct uml_net_private *lp = dev->priv;
+ struct sockaddr *hwaddr = addr;
+
+ spin_lock(&lp->lock);
+ memcpy(dev->dev_addr, hwaddr->sa_data, ETH_ALEN);
+ spin_unlock(&lp->lock);
+
+ return(0);
+}
+
+static int uml_net_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct uml_net_private *lp = dev->priv;
+ int err = 0;
+
+ spin_lock(&lp->lock);
+
+ new_mtu = (*lp->set_mtu)(new_mtu, &lp->user);
+ if(new_mtu < 0){
+ err = new_mtu;
+ goto out;
+ }
+
+ dev->mtu = new_mtu;
+
+ out:
+ spin_unlock(&lp->lock);
+ return err;
+}
+
+static int uml_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ return(-EINVAL);
+}
+
+void uml_net_user_timer_expire(unsigned long _conn)
+{
+#ifdef undef
+ struct connection *conn = (struct connection *)_conn;
+
+ dprintk(KERN_INFO "uml_net_user_timer_expire [%p]\n", conn);
+ do_connect(conn);
+#endif
+}
+
+/*
+ * default do nothing hard header packet routines for struct net_device init.
+ * real ethernet transports will overwrite with real routines.
+ */
+static int uml_net_hard_header(struct sk_buff *skb, struct net_device *dev,
+ unsigned short type, void *daddr, void *saddr, unsigned len)
+{
+ return(0); /* no change */
+}
+
+static int uml_net_rebuild_header(struct sk_buff *skb)
+{
+ return(0); /* ignore */
+}
+
+static int uml_net_header_cache(struct neighbour *neigh, struct hh_cache *hh)
+{
+ return(-1); /* fail */
+}
+
+static void uml_net_header_cache_update(struct hh_cache *hh,
+ struct net_device *dev, unsigned char * haddr)
+{
+ /* ignore */
+}
+
+static int uml_net_header_parse(struct sk_buff *skb, unsigned char *haddr)
+{
+ return(0); /* nothing */
+}
+
+static spinlock_t devices_lock = SPIN_LOCK_UNLOCKED;
+static struct list_head devices = LIST_HEAD_INIT(devices);
+
+static int eth_configure(int n, void *init, char *mac,
+ struct transport *transport)
+{
+ struct uml_net *device;
+ struct net_device *dev;
+ struct uml_net_private *lp;
+ int err, size;
+
+ size = transport->private_size + sizeof(struct uml_net_private) +
+ sizeof(((struct uml_net_private *) 0)->user);
+
+ device = kmalloc(sizeof(*device), GFP_KERNEL);
+ if (device == NULL) {
+ printk(KERN_ERR "eth_configure failed to allocate uml_net\n");
+ return(1);
+ }
+
+ memset(device, 0, sizeof(*device));
+ INIT_LIST_HEAD(&device->list);
+ device->index = n;
+
+ spin_lock(&devices_lock);
+ list_add(&device->list, &devices);
+ spin_unlock(&devices_lock);
+
+ if (setup_etheraddr(mac, device->mac))
+ device->have_mac = 1;
+
+ printk(KERN_INFO "Netdevice %d ", n);
+ if (device->have_mac)
+ printk("(%02x:%02x:%02x:%02x:%02x:%02x) ",
+ device->mac[0], device->mac[1],
+ device->mac[2], device->mac[3],
+ device->mac[4], device->mac[5]);
+ printk(": ");
+ dev = alloc_etherdev(size);
+ if (dev == NULL) {
+ printk(KERN_ERR "eth_configure: failed to allocate device\n");
+ return 1;
+ }
+
+ /* If this name ends up conflicting with an existing registered
+ * netdevice, that is OK, register_netdev{,ice}() will notice this
+ * and fail.
+ */
+ snprintf(dev->name, sizeof(dev->name), "eth%d", n);
+ device->dev = dev;
+
+ dev->hard_header = uml_net_hard_header;
+ dev->rebuild_header = uml_net_rebuild_header;
+ dev->hard_header_cache = uml_net_header_cache;
+ dev->header_cache_update= uml_net_header_cache_update;
+ dev->hard_header_parse = uml_net_header_parse;
+
+ (*transport->kern->init)(dev, init);
+
+ dev->mtu = transport->user->max_packet;
+ dev->open = uml_net_open;
+ dev->hard_start_xmit = uml_net_start_xmit;
+ dev->stop = uml_net_close;
+ dev->get_stats = uml_net_get_stats;
+ dev->set_multicast_list = uml_net_set_multicast_list;
+ dev->tx_timeout = uml_net_tx_timeout;
+ dev->set_mac_address = uml_net_set_mac;
+ dev->change_mtu = uml_net_change_mtu;
+ dev->do_ioctl = uml_net_ioctl;
+ dev->watchdog_timeo = (HZ >> 1);
+ dev->irq = UM_ETH_IRQ;
+
+ rtnl_lock();
+ err = register_netdevice(dev);
+ rtnl_unlock();
+ if (err) {
+ device->dev = NULL;
+ /* XXX: should we call ->remove() here? */
+ free_netdev(dev);
+ return 1;
+ }
+ lp = dev->priv;
+
+ INIT_LIST_HEAD(&lp->list);
+ spin_lock_init(&lp->lock);
+ lp->dev = dev;
+ lp->fd = -1;
+ lp->mac = { 0xfe, 0xfd, 0x0, 0x0, 0x0, 0x0 };
+ lp->have_mac = device->have_mac;
+ lp->protocol = transport->kern->protocol;
+ lp->open = transport->user->open;
+ lp->close = transport->user->close;
+ lp->remove = transport->user->remove;
+ lp->read = transport->kern->read;
+ lp->write = transport->kern->write;
+ lp->add_address = transport->user->add_address;
+ lp->delete_address = transport->user->delete_address;
+ lp->set_mtu = transport->user->set_mtu;
+
+ init_timer(&lp->tl);
+ lp->tl.function = uml_net_user_timer_expire;
+ if (lp->have_mac)
+ memcpy(lp->mac, device->mac, sizeof(lp->mac));
+
+ if (transport->user->init)
+ (*transport->user->init)(&lp->user, dev);
+
+ if (device->have_mac)
+ set_ether_mac(dev, device->mac);
+ return(0);
+}
+
+static struct uml_net *find_device(int n)
+{
+ struct uml_net *device;
+ struct list_head *ele;
+
+ spin_lock(&devices_lock);
+ list_for_each(ele, &devices){
+ device = list_entry(ele, struct uml_net, list);
+ if(device->index == n)
+ goto out;
+ }
+ device = NULL;
+ out:
+ spin_unlock(&devices_lock);
+ return(device);
+}
+
+static int eth_parse(char *str, int *index_out, char **str_out)
+{
+ char *end;
+ int n;
+
+ n = simple_strtoul(str, &end, 0);
+ if(end == str){
+ printk(KERN_ERR "eth_setup: Failed to parse '%s'\n", str);
+ return(1);
+ }
+ if(n < 0){
+ printk(KERN_ERR "eth_setup: device %d is negative\n", n);
+ return(1);
+ }
+ str = end;
+ if(*str != '='){
+ printk(KERN_ERR
+ "eth_setup: expected '=' after device number\n");
+ return(1);
+ }
+ str++;
+ if(find_device(n)){
+ printk(KERN_ERR "eth_setup: Device %d already configured\n",
+ n);
+ return(1);
+ }
+ if(index_out) *index_out = n;
+ *str_out = str;
+ return(0);
+}
+
+struct eth_init {
+ struct list_head list;
+ char *init;
+ int index;
+};
+
+/* Filled in at boot time. Will need locking if the transports become
+ * modular.
+ */
+struct list_head transports = LIST_HEAD_INIT(transports);
+
+/* Filled in during early boot */
+struct list_head eth_cmd_line = LIST_HEAD_INIT(eth_cmd_line);
+
+static int check_transport(struct transport *transport, char *eth, int n,
+ void **init_out, char **mac_out)
+{
+ int len;
+
+ len = strlen(transport->name);
+ if(strncmp(eth, transport->name, len))
+ return(0);
+
+ eth += len;
+ if(*eth == ',')
+ eth++;
+ else if(*eth != '\0')
+ return(0);
+
+ *init_out = kmalloc(transport->setup_size, GFP_KERNEL);
+ if(*init_out == NULL)
+ return(1);
+
+ if(!transport->setup(eth, mac_out, *init_out)){
+ kfree(*init_out);
+ *init_out = NULL;
+ }
+ return(1);
+}
+
+void register_transport(struct transport *new)
+{
+ struct list_head *ele, *next;
+ struct eth_init *eth;
+ void *init;
+ char *mac = NULL;
+ int match;
+
+ list_add(&new->list, &transports);
+
+ list_for_each_safe(ele, next, ð_cmd_line){
+ eth = list_entry(ele, struct eth_init, list);
+ match = check_transport(new, eth->init, eth->index, &init,
+ &mac);
+ if(!match)
+ continue;
+ else if(init != NULL){
+ eth_configure(eth->index, init, mac, new);
+ kfree(init);
+ }
+ list_del(ð->list);
+ }
+}
+
+static int eth_setup_common(char *str, int index)
+{
+ struct list_head *ele;
+ struct transport *transport;
+ void *init;
+ char *mac = NULL;
+
+ list_for_each(ele, &transports){
+ transport = list_entry(ele, struct transport, list);
+ if(!check_transport(transport, str, index, &init, &mac))
+ continue;
+ if(init != NULL){
+ eth_configure(index, init, mac, transport);
+ kfree(init);
+ }
+ return(1);
+ }
+ return(0);
+}
+
+static int eth_setup(char *str)
+{
+ struct eth_init *new;
+ int n, err;
+
+ err = eth_parse(str, &n, &str);
+ if(err) return(1);
+
+ new = alloc_bootmem(sizeof(new));
+ if (new == NULL){
+ printk("eth_init : alloc_bootmem failed\n");
+ return(1);
+ }
+
+ INIT_LIST_HEAD(&new->list);
+ new->index = n;
+ new->init = str;
+
+ list_add_tail(&new->list, ð_cmd_line);
+ return(1);
+}
+
+__setup("eth", eth_setup);
+__uml_help(eth_setup,
+"eth[0-9]+=<transport>,<options>\n"
+" Configure a network device.\n\n"
+);
+
+static int eth_init(void)
+{
+ struct list_head *ele, *next;
+ struct eth_init *eth;
+
+ list_for_each_safe(ele, next, ð_cmd_line){
+ eth = list_entry(ele, struct eth_init, list);
+
+ if(eth_setup_common(eth->init, eth->index))
+ list_del(ð->list);
+ }
+
+ return(1);
+}
+
+__initcall(eth_init);
+
+static int net_config(char *str)
+{
+ int n, err;
+
+ err = eth_parse(str, &n, &str);
+ if(err) return(err);
+
+ str = uml_strdup(str);
+ if(str == NULL){
+ printk(KERN_ERR "net_config failed to strdup string\n");
+ return(-1);
+ }
+ err = !eth_setup_common(str, n);
+ if(err)
+ kfree(str);
+ return(err);
+}
+
+static int net_remove(char *str)
+{
+ struct uml_net *device;
+ struct net_device *dev;
+ struct uml_net_private *lp;
+ char *end;
+ int n;
+
+ n = simple_strtoul(str, &end, 0);
+ if((*end != '\0') || (end == str))
+ return(-1);
+
+ device = find_device(n);
+ if(device == NULL)
+ return(0);
+
+ dev = device->dev;
+ lp = dev->priv;
+ if(lp->fd > 0) return(-1);
+ if(lp->remove != NULL) (*lp->remove)(&lp->user);
+ unregister_netdev(dev);
+
+ list_del(&device->list);
+ free_netdev(device);
+ return(0);
+}
+
+static struct mc_device net_mc = {
+ .name = "eth",
+ .config = net_config,
+ .get_config = NULL,
+ .remove = net_remove,
+};
+
+static int uml_inetaddr_event(struct notifier_block *this, unsigned long event,
+ void *ptr)
+{
+ struct in_ifaddr *ifa = ptr;
+ u32 addr = ifa->ifa_address;
+ u32 netmask = ifa->ifa_mask;
+ struct net_device *dev = ifa->ifa_dev->dev;
+ struct uml_net_private *lp;
+ void (*proc)(unsigned char *, unsigned char *, void *);
+ unsigned char addr_buf[4], netmask_buf[4];
+
+ if(dev->open != uml_net_open) return(NOTIFY_DONE);
+
+ lp = dev->priv;
+
+ proc = NULL;
+ switch (event){
+ case NETDEV_UP:
+ proc = lp->add_address;
+ break;
+ case NETDEV_DOWN:
+ proc = lp->delete_address;
+ break;
+ }
+ if(proc != NULL){
+ addr_buf[0] = addr & 0xff;
+ addr_buf[1] = (addr >> 8) & 0xff;
+ addr_buf[2] = (addr >> 16) & 0xff;
+ addr_buf[3] = addr >> 24;
+ netmask_buf[0] = netmask & 0xff;
+ netmask_buf[1] = (netmask >> 8) & 0xff;
+ netmask_buf[2] = (netmask >> 16) & 0xff;
+ netmask_buf[3] = netmask >> 24;
+ (*proc)(addr_buf, netmask_buf, &lp->user);
+ }
+ return(NOTIFY_DONE);
+}
+
+struct notifier_block uml_inetaddr_notifier = {
+ .notifier_call = uml_inetaddr_event,
+};
+
+static int uml_net_init(void)
+{
+ struct list_head *ele;
+ struct uml_net_private *lp;
+ struct in_device *ip;
+ struct in_ifaddr *in;
+
+ mconsole_register_dev(&net_mc);
+ register_inetaddr_notifier(¨_inetaddr_notifier);
+
+ /* Devices may have been opened already, so the uml_inetaddr_notifier
+ * didn't get a chance to run for them. This fakes it so that
+ * addresses which have already been set up get handled properly.
+ */
+ list_for_each(ele, &opened){
+ lp = list_entry(ele, struct uml_net_private, list);
+ ip = lp->dev->ip_ptr;
+ if(ip == NULL) continue;
+ in = ip->ifa_list;
+ while(in != NULL){
+ uml_inetaddr_event(NULL, NETDEV_UP, in);
+ in = in->ifa_next;
+ }
+ }
+
+ return(0);
+}
+
+__initcall(uml_net_init);
+
+static void close_devices(void)
+{
+ struct list_head *ele;
+ struct uml_net_private *lp;
+
+ list_for_each(ele, &opened){
+ lp = list_entry(ele, struct uml_net_private, list);
+ if(lp->close != NULL) (*lp->close)(lp->fd, &lp->user);
+ if(lp->remove != NULL) (*lp->remove)(&lp->user);
+ }
+}
+
+__uml_exitcall(close_devices);
+
+int setup_etheraddr(char *str, unsigned char *addr)
+{
+ char *end;
+ int i;
+
+ if(str == NULL)
+ return(0);
+ for(i=0;i<6;i++){
+ addr[i] = simple_strtoul(str, &end, 16);
+ if((end == str) ||
+ ((*end != ':') && (*end != ',') && (*end != '\0'))){
+ printk(KERN_ERR
+ "setup_etheraddr: failed to parse '%s' "
+ "as an ethernet address\n", str);
+ return(0);
+ }
+ str = end + 1;
+ }
+ if(addr[0] & 1){
+ printk(KERN_ERR
+ "Attempt to assign a broadcast ethernet address to a "
+ "device disallowed\n");
+ return(0);
+ }
+ return(1);
+}
+
+void dev_ip_addr(void *d, char *buf, char *bin_buf)
+{
+ struct net_device *dev = d;
+ struct in_device *ip = dev->ip_ptr;
+ struct in_ifaddr *in;
+ u32 addr;
+
+ if((ip == NULL) || ((in = ip->ifa_list) == NULL)){
+ printk(KERN_WARNING "dev_ip_addr - device not assigned an "
+ "IP address\n");
+ return;
+ }
+ addr = in->ifa_address;
+ sprintf(buf, "%d.%d.%d.%d", addr & 0xff, (addr >> 8) & 0xff,
+ (addr >> 16) & 0xff, addr >> 24);
+ if(bin_buf){
+ bin_buf[0] = addr & 0xff;
+ bin_buf[1] = (addr >> 8) & 0xff;
+ bin_buf[2] = (addr >> 16) & 0xff;
+ bin_buf[3] = addr >> 24;
+ }
+}
+
+void set_ether_mac(void *d, unsigned char *addr)
+{
+ struct net_device *dev = d;
+
+ memcpy(dev->dev_addr, addr, ETH_ALEN);
+}
+
+struct sk_buff *ether_adjust_skb(struct sk_buff *skb, int extra)
+{
+ if((skb != NULL) && (skb_tailroom(skb) < extra)){
+ struct sk_buff *skb2;
+
+ skb2 = skb_copy_expand(skb, 0, extra, GFP_ATOMIC);
+ dev_kfree_skb(skb);
+ skb = skb2;
+ }
+ if(skb != NULL) skb_put(skb, extra);
+ return(skb);
+}
+
+void iter_addresses(void *d, void (*cb)(unsigned char *, unsigned char *,
+ void *),
+ void *arg)
+{
+ struct net_device *dev = d;
+ struct in_device *ip = dev->ip_ptr;
+ struct in_ifaddr *in;
+ unsigned char address[4], netmask[4];
+
+ if(ip == NULL) return;
+ in = ip->ifa_list;
+ while(in != NULL){
+ address[0] = in->ifa_address & 0xff;
+ address[1] = (in->ifa_address >> 8) & 0xff;
+ address[2] = (in->ifa_address >> 16) & 0xff;
+ address[3] = in->ifa_address >> 24;
+ netmask[0] = in->ifa_mask & 0xff;
+ netmask[1] = (in->ifa_mask >> 8) & 0xff;
+ netmask[2] = (in->ifa_mask >> 16) & 0xff;
+ netmask[3] = in->ifa_mask >> 24;
+ (*cb)(address, netmask, arg);
+ in = in->ifa_next;
+ }
+}
+
+int dev_netmask(void *d, void *m)
+{
+ struct net_device *dev = d;
+ struct in_device *ip = dev->ip_ptr;
+ struct in_ifaddr *in;
+ __u32 *mask_out = m;
+
+ if(ip == NULL)
+ return(1);
+
+ in = ip->ifa_list;
+ if(in == NULL)
+ return(1);
+
+ *mask_out = in->ifa_mask;
+ return(0);
+}
+
+void *get_output_buffer(int *len_out)
+{
+ void *ret;
+
+ ret = (void *) __get_free_pages(GFP_KERNEL, 0);
+ if(ret) *len_out = PAGE_SIZE;
+ else *len_out = 0;
+ return(ret);
+}
+
+void free_output_buffer(void *buffer)
+{
+ free_pages((unsigned long) buffer, 0);
+}
+
+int tap_setup_common(char *str, char *type, char **dev_name, char **mac_out,
+ char **gate_addr)
+{
+ char *remain;
+
+ remain = split_if_spec(str, dev_name, mac_out, gate_addr, NULL);
+ if(remain != NULL){
+ printk("tap_setup_common - Extra garbage on specification : "
+ "'%s'\n", remain);
+ return(1);
+ }
+
+ return(0);
+}
+
+unsigned short eth_protocol(struct sk_buff *skb)
+{
+ return(eth_type_trans(skb, skb->dev));
+}
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-file-style: "linux"
+ * End:
+ */
pe_data.stdout = fds[1];
pid = run_helper(change_pre_exec, &pe_data, argv, NULL);
- read_output(fds[0], output, output_len);
- os_close_file(fds[0]);
os_close_file(fds[1]);
- CATCH_EINTR(waitpid(pid, NULL, 0));
+ read_output(fds[0], output, output_len);
+ waitpid(pid, NULL, 0);
return(pid);
}
int port_open(int input, int output, int primary, void *d, char **dev_out)
{
struct port_chan *data = d;
- int fd, err;
+ int fd;
fd = port_wait(data->kernel_data);
if((fd >= 0) && data->raw){
- CATCH_EINTR(err = tcgetattr(fd, &data->tt));
- if(err)
- return(err);
-
- err = raw(fd);
- if(err)
- return(err);
+ tcgetattr(fd, &data->tt);
+ raw(fd, 0);
}
*dev_out = data->dev;
return(fd);
int port_listen_fd(int port)
{
struct sockaddr_in addr;
- int fd, err, arg;
+ int fd, err;
fd = socket(PF_INET, SOCK_STREAM, 0);
if(fd == -1)
return(-errno);
- arg = 1;
- if(setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &arg, sizeof(arg)) < 0){
- err = -errno;
- goto out;
- }
-
addr.sin_family = AF_INET;
addr.sin_port = htons(port);
addr.sin_addr.s_addr = htonl(INADDR_ANY);
{
struct pty_chan *data = d;
char *dev;
- int fd, err;
+ int fd;
fd = get_pty();
if(fd < 0){
return(-errno);
}
if(data->raw){
- CATCH_EINTR(err = tcgetattr(fd, &data->tt));
- if(err)
- return(err);
-
- err = raw(fd);
- if(err)
- return(err);
+ tcgetattr(fd, &data->tt);
+ raw(fd, 0);
}
dev = ptsname(fd);
int pty_open(int input, int output, int primary, void *d, char **dev_out)
{
struct pty_chan *data = d;
- int fd, err;
+ int fd;
char dev[sizeof("/dev/ptyxx\0")] = "/dev/ptyxx";
fd = getmaster(dev);
- if(fd < 0)
- return(-errno);
-
- if(data->raw){
- err = raw(fd);
- if(err)
- return(err);
- }
+ if(fd < 0) return(-errno);
+ if(data->raw) raw(fd, 0);
if(data->announce) (*data->announce)(dev, data->dev);
sprintf(data->dev_name, "%s", dev);
#include <stddef.h>
#include <sched.h>
#include <string.h>
-#include <errno.h>
+#include <sys/errno.h>
#include <sys/termios.h>
#include <sys/wait.h>
#include <sys/signal.h>
printk("%s", output);
kfree(output);
}
- CATCH_EINTR(err = waitpid(pid, &status, 0));
- if(err < 0)
- err = errno;
+ if(waitpid(pid, &status, 0) < 0) err = errno;
else if(!WIFEXITED(status) || (WEXITSTATUS(status) != 0)){
printk("'%s' didn't exit with status 0\n", argv[0]);
err = -EINVAL;
#include <stddef.h>
#include <sched.h>
#include <string.h>
-#include <errno.h>
+#include <sys/errno.h>
#include <sys/wait.h>
#include <sys/signal.h>
#include "user_util.h"
}
#endif
- CATCH_EINTR(err = waitpid(pri->pid, &status, WNOHANG));
- if(err < 0) {
+ err = waitpid(pri->pid, &status, WNOHANG);
+ if(err<0) {
printk("slirp_close: waitpid returned %d\n", errno);
return;
}
- if(err == 0) {
+ if(err==0) {
printk("slirp_close: process %d has not exited\n");
return;
}
late_initcall(stdio_init);
-static void uml_console_write(struct console *console, const char *string,
- unsigned len)
+static void console_write(struct console *console, const char *string,
+ unsigned len)
{
struct line *line = &vts[console->index];
up(&line->sem);
}
-static struct tty_driver *uml_console_device(struct console *c, int *index)
+static struct tty_driver *console_device(struct console *c, int *index)
{
*index = c->index;
return console_driver;
}
-static int uml_console_setup(struct console *co, char *options)
+static int console_setup(struct console *co, char *options)
{
return(0);
}
static struct console stdiocons = {
name: "tty",
- write: uml_console_write,
- device: uml_console_device,
- setup: uml_console_setup,
+ write: console_write,
+ device: console_device,
+ setup: console_setup,
flags: CON_PRINTBUFFER,
index: -1,
};
int tty_open(int input, int output, int primary, void *d, char **dev_out)
{
struct tty_chan *data = d;
- int fd, err;
+ int fd;
fd = os_open_file(data->dev, of_set_rw(OPENFLAGS(), input, output), 0);
if(fd < 0) return(fd);
if(data->raw){
- CATCH_EINTR(err = tcgetattr(fd, &data->tt));
- if(err)
- return(err);
-
- err = raw(fd);
- if(err)
- return(err);
+ tcgetattr(fd, &data->tt);
+ raw(fd, 0);
}
*dev_out = data->dev;
* to write the data to disk first, then we can map the disk
* page in and continue normally from there.
*/
- if((rq_data_dir(req) == WRITE) && !is_remapped(req->buffer, dev->fd, io_req->offset + dev->cow.data_offset)){
+ if((rq_data_dir(req) == WRITE) && !is_remapped(req->buffer)){
io_req->map_fd = dev->fd;
io_req->map_offset = io_req->offset +
dev->cow.data_offset;
}
static int ubd_check_remapped(int fd, unsigned long address, int is_write,
- __u64 offset, int is_user)
+ __u64 offset)
{
__u64 bitmap_offset;
unsigned long new_bitmap[2];
int i, err, n;
- /* This can only fix kernelspace faults */
- if(is_user)
- return(0);
-
- /* ubd-mmap is only enabled in skas mode */
- if(CHOOSE_MODE(1, 0))
- return(0);
-
/* If it's not a write access, we can't do anything about it */
if(!is_write)
return(0);
printk("Couldn't stat '%s', err = %d\n", from_cow, -err);
return(1);
}
- if((buf1.ust_major == buf2.ust_major) &&
- (buf1.ust_minor == buf2.ust_minor) &&
- (buf1.ust_ino == buf2.ust_ino))
+ if((buf1.ust_dev == buf2.ust_dev) && (buf1.ust_ino == buf2.ust_ino))
return(1);
printk("Backing file mismatch - \"%s\" requested,\n"
data_offset_out);
if(!err)
return(fd);
-
os_close_file(fd);
out:
return(err);
" are 'xterm=gnome-terminal,-t,-x'.\n\n"
);
-/* XXX This badly needs some cleaning up in the error paths */
int xterm_open(int input, int output, int primary, void *d, char **dev_out)
{
struct xterm_chan *data = d;
goto out;
}
- CATCH_EINTR(err = tcgetattr(new, &data->tt));
- if(err){
- new = err;
- goto out;
- }
-
- if(data->raw){
- err = raw(new);
- if(err){
- new = err;
- goto out;
- }
- }
+ tcgetattr(new, &data->tt);
+ if(data->raw) raw(new, 0);
data->pid = pid;
*dev_out = NULL;
ENTRY(_start)
jiffies = jiffies_64;
+SEARCH_DIR("/usr/local/i686-pc-linux-gnu/lib"); SEARCH_DIR("/usr/local/lib"); SEARCH_DIR("/lib"); SEARCH_DIR("/usr/lib");
+/* Do we need any of these for elf?
+ __DYNAMIC = 0; */
SECTIONS
{
. = START + SIZEOF_HEADERS;
+++ /dev/null
-/*
- * Copyright (C) 2004 Jeff Dike (jdike@karaya.com)
- * Licensed under the GPL
- */
-
-#ifndef AIO_H__
-#define AIO_H__
-
-enum aio_type { AIO_READ, AIO_WRITE, AIO_MMAP };
-
-struct aio_thread_reply {
- void *data;
- int err;
-};
-
-struct aio_context {
- int reply_fd;
-};
-
-#define INIT_AIO_CONTEXT { .reply_fd = -1 }
-
-extern int submit_aio(enum aio_type type, int fd, char *buf, int len,
- unsigned long long offset, int reply_fd, void *data);
-
-#endif
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
+++ /dev/null
-/*
- * Copyright (C) 2004 Jeff Dike (jdike@addtoit.com)
- * Licensed under the GPL
- */
-
-#ifndef __FILEHANDLE_H__
-#define __FILEHANDLE_H__
-
-#include "linux/list.h"
-#include "linux/fs.h"
-#include "os.h"
-
-struct file_handle {
- struct list_head list;
- int fd;
- char *(*get_name)(struct inode *);
- struct inode *inode;
- struct openflags flags;
-};
-
-extern struct file_handle bad_filehandle;
-
-extern int open_file(char *name, struct openflags flags, int mode);
-extern void *open_dir(char *file);
-extern int open_filehandle(char *name, struct openflags flags, int mode,
- struct file_handle *fh);
-extern int read_file(struct file_handle *fh, unsigned long long offset,
- char *buf, int len);
-extern int write_file(struct file_handle *fh, unsigned long long offset,
- const char *buf, int len);
-extern int truncate_file(struct file_handle *fh, unsigned long long size);
-extern int close_file(struct file_handle *fh);
-extern void not_reclaimable(struct file_handle *fh);
-extern void is_reclaimable(struct file_handle *fh,
- char *(name_proc)(struct inode *),
- struct inode *inode);
-extern int filehandle_fd(struct file_handle *fh);
-extern int make_pipe(struct file_handle *fhs);
-
-#endif
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
#define __uml_postsetup_call __attribute__ ((unused,__section__ (".uml.postsetup.init")))
#define __uml_exit_call __attribute__ ((unused,__section__ (".uml.exitcall.exit")))
-#ifndef __KERNEL__
-
-#define __initcall(fn) static initcall_t __initcall_##fn __init_call = fn
-#define __exitcall(fn) static exitcall_t __exitcall_##fn __exit_call = fn
-
-#define __init_call __attribute__ ((unused,__section__ (".initcall.init")))
-#define __exit_call __attribute__ ((unused,__section__ (".exitcall.exit")))
-
-#endif
-
#endif /* _LINUX_UML_INIT_H */
/*
extern void free_irq_by_fd(int fd);
extern void reactivate_fd(int fd, int irqnum);
extern void deactivate_fd(int fd, int irqnum);
-extern int deactivate_all_fds(void);
extern void forward_interrupts(int pid);
extern void init_irq_signals(int on_sigstack);
extern void forward_ipi(int fd, int pid);
extern int phys_mapping(unsigned long phys, __u64 *offset_out);
extern int physmem_subst_mapping(void *virt, int fd, __u64 offset, int w);
-extern int is_remapped(const void *virt, int fd, __u64 offset);
+extern int is_remapped(void *virt);
extern int physmem_remove_mapping(void *virt);
extern void physmem_forget_descriptor(int fd);
struct remapper {
struct list_head list;
- int (*proc)(int, unsigned long, int, __u64, int);
+ int (*proc)(int, unsigned long, int, __u64);
};
extern void register_remapper(struct remapper *info);
* (if they are wrong here, they are wrong there...).
*/
struct uml_stat {
- int ust_major; /* device */
- int ust_minor;
+ int ust_dev; /* device */
unsigned long long ust_ino; /* inode */
int ust_mode; /* protection */
int ust_nlink; /* number of hard links */
unsigned long ust_atime; /* time of last access */
unsigned long ust_mtime; /* time of last modification */
unsigned long ust_ctime; /* time of last change */
- int ust_rmajor;
- int ust_rminor;
};
struct openflags {
unsigned int a : 1; /* O_APPEND */
unsigned int e : 1; /* O_EXCL */
unsigned int cl : 1; /* FD_CLOEXEC */
- unsigned int d : 1; /* O_DIRECT */
};
#define OPENFLAGS() ((struct openflags) { .r = 0, .w = 0, .s = 0, .c = 0, \
- .t = 0, .a = 0, .e = 0, .cl = 0, \
- .d = 0 })
+ .t = 0, .a = 0, .e = 0, .cl = 0 })
static inline struct openflags of_read(struct openflags flags)
{
return(flags);
}
-static inline struct openflags of_direct(struct openflags flags)
-{
- flags.d = 1;
- return(flags);
-}
-
extern int os_stat_file(const char *file_name, struct uml_stat *buf);
-extern int os_lstat_file(const char *file_name, struct uml_stat *ubuf);
extern int os_stat_fd(const int fd, struct uml_stat *buf);
extern int os_access(const char *file, int mode);
-extern int os_set_file_time(const char *file, unsigned long access,
- unsigned long mod);
-extern int os_set_file_perms(const char *file, int mode);
-extern int os_set_file_owner(const char *file, int owner, int group);
extern void os_print_error(int error, const char* str);
extern int os_get_exec_close(int fd, int *close_on_exec);
extern int os_set_exec_close(int fd, int close_on_exec);
extern int os_seek_file(int fd, __u64 offset);
extern int os_open_file(char *file, struct openflags flags, int mode);
-extern void *os_open_dir(char *dir, int *err_out);
-extern int os_seek_dir(void *stream, unsigned long long pos);
-extern int os_read_dir(void *stream, unsigned long long *ino_out,
- char **name_out);
-extern int os_tell_dir(void *stream);
-extern int os_close_dir(void *stream);
-extern int os_remove_file(const char *file);
-extern int os_move_file(const char *from, const char *to);
-extern int os_truncate_file(const char *file, unsigned long long len);
-extern int os_truncate_fd(int fd, unsigned long long len);
extern int os_read_file(int fd, void *buf, int len);
extern int os_write_file(int fd, const void *buf, int count);
extern int os_file_size(char *file, long long *size_out);
-extern int os_fd_size(int fd, long long *size_out);
extern int os_file_modtime(char *file, unsigned long *modtime);
extern int os_pipe(int *fd, int stream, int close_on_exec);
extern int os_set_fd_async(int fd, int owner);
-extern int os_clear_fd_async(int fd);
extern int os_set_fd_block(int fd, int blocking);
extern int os_accept_connection(int fd);
extern int os_create_unix_socket(char *file, int len, int close_on_exec);
-extern int os_make_symlink(const char *to, const char *from);
-extern int os_read_symlink(const char *file, char *buf, int size);
-extern int os_link_file(const char *to, const char *from);
-extern int os_make_dir(const char *dir, int mode);
-extern int os_remove_dir(const char *dir);
-extern int os_make_dev(const char *name, int mode, int major, int minor);
extern int os_shutdown_socket(int fd, int r, int w);
extern void os_close_file(int fd);
extern int os_rcv_fd(int fd, int *helper_pid_out);
int r, int w, int x);
extern int os_unmap_memory(void *addr, int len);
extern void os_flush_stdout(void);
-extern int os_stat_filesystem(char *path, long *bsize_out,
- long long *blocks_out, long long *bfree_out,
- long long *bavail_out, long long *files_out,
- long long *ffree_out, void *fsid_out,
- int fsid_size, long *namelen_out,
- long *spare_out);
-extern unsigned long long os_usecs(void);
#endif
+++ /dev/null
-/* Automatically generated by arch/um/kernel/skas/util/mk_ptregs */
-
-#ifndef __SKAS_PT_REGS_
-#define __SKAS_PT_REGS_
-
-#define HOST_FRAME_SIZE 17
-#define HOST_FP_SIZE 27
-#define HOST_XFP_SIZE 128
-#define HOST_IP 12
-#define HOST_SP 15
-#define HOST_EFLAGS 14
-#define HOST_EAX 6
-#define HOST_EBX 0
-#define HOST_ECX 1
-#define HOST_EDX 2
-#define HOST_ESI 3
-#define HOST_EDI 4
-#define HOST_EBP 5
-#define HOST_CS 13
-#define HOST_SS 16
-#define HOST_DS 7
-#define HOST_FS 9
-#define HOST_ES 8
-#define HOST_GS 10
-
-#endif
#ifndef __UM_SYSDEP_CHECKSUM_H
#define __UM_SYSDEP_CHECKSUM_H
-#include "linux/in6.h"
#include "linux/string.h"
/*
extern void set_interval(int timer_type);
extern void idle_sleep(int secs);
extern void enable_timer(void);
-extern void disable_timer(void);
extern unsigned long time_lock(void);
extern void time_unlock(unsigned long);
extern int in_aton(char *str);
extern int open_gdb_chan(void);
extern int strlcpy(char *, const char *, int);
-extern void *um_vmalloc(int size);
-extern void vfree(void *ptr);
#endif
#include "sysdep/ptrace.h"
-#define CATCH_EINTR(expr) while (((expr) < 0) && (errno == EINTR))
-
extern int mode_tt;
extern int grantpt(int __fd);
extern void input_cb(void (*proc)(void *), void *arg, int arg_len);
extern int get_pty(void);
extern void *um_kmalloc(int size);
+extern int raw(int fd, int complain);
extern int switcheroo(int fd, int prot, void *from, void *to, int size);
extern void setup_machinename(char *machine_out);
extern void setup_hostinfo(void);
extern void tracer_panic(char *msg, ...);
extern char *get_umid(int only_if_set);
extern void do_longjmp(void *p, int val);
+extern void suspend_new_thread(int fd);
extern int detach(int pid, int sig);
extern int attach(int pid);
extern void kill_child_dead(int pid);
extern void forward_pending_sigio(int target);
extern int can_do_skas(void);
extern void arch_init_thread(void);
-extern int setjmp_wrapper(void (*proc)(void *, void *), ...);
-extern int raw(int fd);
#endif
extra-y := vmlinux.lds.s
-# Descend into ../util for make clean. This is here because it doesn't work
-# in arch/um/Makefile.
-
-subdir- = ../util
-
-obj-y = checksum.o config.o exec_kern.o exitcode.o filehandle.o frame_kern.o \
- frame.o helper.o init_task.o irq.o irq_user.o ksyms.o mem.o \
- mem_user.o physmem.o process.o process_kern.o ptrace.o reboot.o \
- resource.o sigio_user.o sigio_kern.o signal_kern.o signal_user.o \
- smp.o syscall_kern.o syscall_user.o sysrq.o sys_call_table.o \
- tempfile.o time.o time_kern.o tlb.o trap_kern.o trap_user.o \
- uaccess_user.o um_arch.o umid.o user_util.o
+obj-y = checksum.o config.o exec_kern.o exitcode.o frame_kern.o frame.o \
+ helper.o init_task.o irq.o irq_user.o ksyms.o mem.o mem_user.o \
+ physmem.o process.o process_kern.o ptrace.o reboot.o resource.o \
+ sigio_user.o sigio_kern.o signal_kern.o signal_user.o smp.o \
+ syscall_kern.o syscall_user.o sysrq.o sys_call_table.o tempfile.o \
+ time.o time_kern.o tlb.o trap_kern.o trap_user.o uaccess_user.o \
+ um_arch.o umid.o user_util.o
obj-$(CONFIG_BLK_DEV_INITRD) += initrd_kern.o initrd_user.o
obj-$(CONFIG_GPROF) += gprof_syms.o
+++ /dev/null
-/*
- * Copyright (C) 2004 Jeff Dike (jdike@karaya.com)
- * Licensed under the GPL
- */
-
-#include "linux/slab.h"
-#include "linux/list.h"
-#include "linux/spinlock.h"
-#include "linux/fs.h"
-#include "linux/errno.h"
-#include "filehandle.h"
-#include "os.h"
-#include "kern_util.h"
-
-static spinlock_t open_files_lock = SPIN_LOCK_UNLOCKED;
-static struct list_head open_files = LIST_HEAD_INIT(open_files);
-
-#define NUM_RECLAIM 128
-
-static void reclaim_fds(void)
-{
- struct file_handle *victim;
- int closed = NUM_RECLAIM;
-
- spin_lock(&open_files_lock);
- while(!list_empty(&open_files) && closed--){
- victim = list_entry(open_files.prev, struct file_handle, list);
- os_close_file(victim->fd);
- victim->fd = -1;
- list_del_init(&victim->list);
- }
- spin_unlock(&open_files_lock);
-}
-
-int open_file(char *name, struct openflags flags, int mode)
-{
- int fd;
-
- fd = os_open_file(name, flags, mode);
- if(fd != -EMFILE)
- return(fd);
-
- reclaim_fds();
- fd = os_open_file(name, flags, mode);
-
- return(fd);
-}
-
-void *open_dir(char *file)
-{
- void *dir;
- int err;
-
- dir = os_open_dir(file, &err);
- if(dir != NULL)
- return(dir);
- if(err != -EMFILE)
- return(ERR_PTR(err));
-
- reclaim_fds();
-
- dir = os_open_dir(file, &err);
- if(dir == NULL)
- dir = ERR_PTR(err);
-
- return(dir);
-}
-
-void not_reclaimable(struct file_handle *fh)
-{
- char *name;
-
- if(fh->get_name == NULL)
- return;
-
- if(list_empty(&fh->list)){
- name = (*fh->get_name)(fh->inode);
- if(name != NULL){
- fh->fd = open_file(name, fh->flags, 0);
- kfree(name);
- }
- else printk("File descriptor %d has no name\n", fh->fd);
- }
- else {
- spin_lock(&open_files_lock);
- list_del_init(&fh->list);
- spin_unlock(&open_files_lock);
- }
-}
-
-void is_reclaimable(struct file_handle *fh, char *(name_proc)(struct inode *),
- struct inode *inode)
-{
- fh->get_name = name_proc;
- fh->inode = inode;
-
- spin_lock(&open_files_lock);
- list_add(&fh->list, &open_files);
- spin_unlock(&open_files_lock);
-}
-
-static int active_handle(struct file_handle *fh)
-{
- int fd;
- char *name;
-
- if(!list_empty(&fh->list))
- list_move(&fh->list, &open_files);
-
- if(fh->fd != -1)
- return(0);
-
- if(fh->inode == NULL)
- return(-ENOENT);
-
- name = (*fh->get_name)(fh->inode);
- if(name == NULL)
- return(-ENOMEM);
-
- fd = open_file(name, fh->flags, 0);
- kfree(name);
- if(fd < 0)
- return(fd);
-
- fh->fd = fd;
- is_reclaimable(fh, fh->get_name, fh->inode);
-
- return(0);
-}
-
-int filehandle_fd(struct file_handle *fh)
-{
- int err;
-
- err = active_handle(fh);
- if(err)
- return(err);
-
- return(fh->fd);
-}
-
-static void init_fh(struct file_handle *fh, int fd, struct openflags flags)
-{
- flags.c = 0;
- *fh = ((struct file_handle) { .list = LIST_HEAD_INIT(fh->list),
- .fd = fd,
- .get_name = NULL,
- .inode = NULL,
- .flags = flags });
-}
-
-int open_filehandle(char *name, struct openflags flags, int mode,
- struct file_handle *fh)
-{
- int fd;
-
- fd = open_file(name, flags, mode);
- if(fd < 0)
- return(fd);
-
- init_fh(fh, fd, flags);
- return(0);
-}
-
-int close_file(struct file_handle *fh)
-{
- spin_lock(&open_files_lock);
- list_del(&fh->list);
- spin_unlock(&open_files_lock);
-
- os_close_file(fh->fd);
-
- fh->fd = -1;
- return(0);
-}
-
-int read_file(struct file_handle *fh, unsigned long long offset, char *buf,
- int len)
-{
- int err;
-
- err = active_handle(fh);
- if(err)
- return(err);
-
- err = os_seek_file(fh->fd, offset);
- if(err)
- return(err);
-
- return(os_read_file(fh->fd, buf, len));
-}
-
-int write_file(struct file_handle *fh, unsigned long long offset,
- const char *buf, int len)
-{
- int err;
-
- err = active_handle(fh);
- if(err)
- return(err);
-
- if(offset != -1)
- err = os_seek_file(fh->fd, offset);
- if(err)
- return(err);
-
- return(os_write_file(fh->fd, buf, len));
-}
-
-int truncate_file(struct file_handle *fh, unsigned long long size)
-{
- int err;
-
- err = active_handle(fh);
- if(err)
- return(err);
-
- return(os_truncate_fd(fh->fd, size));
-}
-
-int make_pipe(struct file_handle *fhs)
-{
- int fds[2], err;
-
- err = os_pipe(fds, 1, 1);
- if(err && (err != -EMFILE))
- return(err);
-
- if(err){
- reclaim_fds();
- err = os_pipe(fds, 1, 1);
- }
- if(err)
- return(err);
-
- init_fh(&fhs[0], fds[0], OPENFLAGS());
- init_fh(&fhs[1], fds[1], OPENFLAGS());
- return(0);
-}
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
#include "sysdep/sigcontext.h"
#include "frame_user.h"
#include "kern_util.h"
-#include "user_util.h"
#include "ptrace_user.h"
#include "os.h"
/* Wait for it to stop itself and continue it with a SIGUSR1 to force
* it into the signal handler.
*/
- CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED));
+ n = waitpid(pid, &status, WUNTRACED);
if(n < 0){
printf("capture_stack : waitpid failed - errno = %d\n", errno);
exit(1);
* At this point, the handler has stuffed the addresses of
* sig, sc, and SA_RESTORER in raw.
*/
- CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED));
+ n = waitpid(pid, &status, WUNTRACED);
if(n < 0){
printf("capture_stack : waitpid failed - errno = %d\n", errno);
exit(1);
errno);
exit(1);
}
- CATCH_EINTR(n = waitpid(pid, &status, 0));
- if(n < 0){
+ if(waitpid(pid, &status, 0) < 0){
printf("capture_stack : waitpid failed - errno = %d\n", errno);
exit(1);
}
#include <sys/wait.h>
#include "user.h"
#include "kern_util.h"
-#include "user_util.h"
#include "os.h"
struct helper_data {
if(n < 0){
printk("run_helper : read on pipe failed, err = %d\n", -n);
err = n;
- os_kill_process(pid, 1);
+ goto out_kill;
}
else if(n != 0){
- CATCH_EINTR(n = waitpid(pid, NULL, 0));
+ waitpid(pid, NULL, 0);
pid = -errno;
}
- err = pid;
+ if(stack_out == NULL) free_stack(stack, 0);
+ else *stack_out = stack;
+ return(pid);
+
+ out_kill:
+ os_kill_process(pid, 1);
out_close:
os_close_file(fds[0]);
+ os_close_file(fds[1]);
out_free:
- if(stack_out == NULL)
- free_stack(stack, 0);
- else *stack_out = stack;
+ free_stack(stack, 0);
return(err);
}
#include "linux/module.h"
#include "linux/sched.h"
#include "linux/init_task.h"
+#include "linux/version.h"
#include "linux/mqueue.h"
#include "asm/uaccess.h"
#include "asm/pgtable.h"
irq_unlock(flags);
}
-int deactivate_all_fds(void)
-{
- struct irq_fd *irq;
- int err;
-
- for(irq=active_fds;irq != NULL;irq = irq->next){
- err = os_clear_fd_async(irq->fd);
- if(err)
- return(err);
- }
-
- return(0);
-}
-
void forward_ipi(int fd, int pid)
{
int err;
EXPORT_SYMBOL(find_iomem);
#ifdef CONFIG_MODE_TT
-EXPORT_SYMBOL(strncpy_from_user_tt);
EXPORT_SYMBOL(copy_from_user_tt);
EXPORT_SYMBOL(copy_to_user_tt);
#endif
#ifdef CONFIG_MODE_SKAS
-EXPORT_SYMBOL(strncpy_from_user_skas);
EXPORT_SYMBOL(copy_to_user_skas);
EXPORT_SYMBOL(copy_from_user_skas);
#endif
struct iomem_region *new;
struct uml_stat buf;
char *file, *driver;
- int fd, err, size;
+ int fd, err;
driver = str;
file = strchr(str,',');
goto out_close;
}
- size = (buf.ust_size + UM_KERN_PAGE_SIZE) & ~(UM_KERN_PAGE_SIZE - 1);
-
*new = ((struct iomem_region) { .next = iomem_regions,
.driver = driver,
.fd = fd,
- .size = size,
+ .size = buf.ust_size,
.phys = 0,
.virt = 0 });
iomem_regions = new;
return(0);
}
-#if 0
-/* Debugging facility for dumping stuff out to the host, avoiding the timing
- * problems that come with printf and breakpoints.
- * Enable in case of emergency.
- */
-
-int logging = 1;
-int logging_fd = -1;
-
-int logging_line = 0;
-char logging_buf[512];
-
-void log(char *fmt, ...)
-{
- va_list ap;
- struct timeval tv;
- struct openflags flags;
-
- if(logging == 0) return;
- if(logging_fd < 0){
- flags = of_create(of_trunc(of_rdwr(OPENFLAGS())));
- logging_fd = os_open_file("log", flags, 0644);
- }
- gettimeofday(&tv, NULL);
- sprintf(logging_buf, "%d\t %u.%u ", logging_line++, tv.tv_sec,
- tv.tv_usec);
- va_start(ap, fmt);
- vsprintf(&logging_buf[strlen(logging_buf)], fmt, ap);
- va_end(ap);
- write(logging_fd, logging_buf, strlen(logging_buf));
-}
-#endif
-
/*
* Overrides for Emacs so that we follow Linus's tabbing style.
* Emacs will notice this stuff at the end of the file and automatically
unsigned long phys;
int err;
- phys = __pa(virt);
- desc = find_virtmem_hash(&virtmem_hash, (void *) virt);
- if(desc != NULL){
- if((virt != desc->virt) || (fd != desc->fd) ||
- (offset != desc->offset))
- panic("Address 0x%p is already substituted\n", virt);
- return(0);
- }
-
fd_maps = descriptor_mapping(fd);
if(fd_maps == NULL)
return(-ENOMEM);
+ phys = __pa(virt);
+ if(find_virtmem_hash(&virtmem_hash, virt) != NULL)
+ panic("Address 0x%p is already substituted\n", virt);
+
err = -ENOMEM;
desc = kmalloc(sizeof(*desc), GFP_ATOMIC);
if(desc == NULL)
if(desc == NULL)
return;
- if(!list_empty(&desc->pages))
- printk("Still have mapped pages on fd %d\n", fd);
-
list_for_each_safe(ele, next, &desc->pages){
page = list_entry(ele, struct phys_desc, list);
offset = page->offset;
}
}
-int is_remapped(const void *virt, int fd, __u64 offset)
+int is_remapped(void *virt)
{
- struct phys_desc *desc;
-
- desc = find_virtmem_hash(&virtmem_hash, (void *) virt);
- if(desc == NULL)
- return(0);
- if(offset != desc->offset)
- printk("offset mismatch\n");
- return(find_virtmem_hash(&virtmem_hash, (void *) virt) != NULL);
+ return(find_virtmem_hash(&virtmem_hash, virt) != NULL);
}
/* Changed during early boot */
int flags = 0, pages;
if(sig_stack != NULL){
- pages = (1 << UML_CONFIG_KERNEL_STACK_ORDER);
+ pages = (1 << UML_CONFIG_KERNEL_STACK_ORDER) - 2;
set_sigstack(sig_stack, pages * page_size());
flags = SA_ONSTACK;
}
{
int flags = altstack ? SA_ONSTACK : 0;
- set_handler(SIGSEGV, (__sighandler_t) sig_handler, flags,
+ /* NODEFER is set here because SEGV isn't turned back on when the
+ * handler is ready to receive signals. This causes any segfault
+ * during a copy_user to kill the process because the fault is blocked.
+ */
+ set_handler(SIGSEGV, (__sighandler_t) sig_handler, flags | SA_NODEFER,
SIGUSR1, SIGIO, SIGWINCH, SIGALRM, SIGVTALRM, -1);
set_handler(SIGTRAP, (__sighandler_t) sig_handler, flags,
SIGUSR1, SIGIO, SIGWINCH, SIGALRM, SIGVTALRM, -1);
set_handler(SIGWINCH, (__sighandler_t) sig_handler, flags,
SIGUSR1, SIGIO, SIGWINCH, SIGALRM, SIGVTALRM, -1);
set_handler(SIGUSR2, (__sighandler_t) sig_handler,
- flags, SIGUSR1, SIGIO, SIGWINCH, SIGALRM, SIGVTALRM, -1);
+ SA_NOMASK | flags, -1);
signal(SIGHUP, SIG_IGN);
init_irq_signals(altstack);
/* Start the process and wait for it to kill itself */
new_pid = clone(outer_tramp, (void *) sp, clone_flags, &arg);
- if(new_pid < 0)
- return(new_pid);
-
- CATCH_EINTR(err = waitpid(new_pid, &status, 0));
- if(err < 0)
- panic("Waiting for outer trampoline failed - errno = %d",
- errno);
-
+ if(new_pid < 0) return(-errno);
+ while(((err = waitpid(new_pid, &status, 0)) < 0) && (errno == EINTR)) ;
+ if(err < 0) panic("Waiting for outer trampoline failed - errno = %d",
+ errno);
if(!WIFSIGNALED(status) || (WTERMSIG(status) != SIGKILL))
panic("outer trampoline didn't exit with SIGKILL, "
"status = %d", status);
return(arg.pid);
}
+void suspend_new_thread(int fd)
+{
+ char c;
+
+ os_stop_process(os_getpid());
+
+ if(os_read_file(fd, &c, sizeof(c)) != sizeof(c))
+ panic("read failed in suspend_new_thread");
+}
+
static int ptrace_child(void *arg)
{
int pid = os_getpid();
pid = clone(ptrace_child, (void *) sp, SIGCHLD, NULL);
if(pid < 0)
panic("check_ptrace : clone failed, errno = %d", errno);
- CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED));
+ n = waitpid(pid, &status, WUNTRACED);
if(n < 0)
panic("check_ptrace : wait failed, errno = %d", errno);
if(!WIFSTOPPED(status) || (WSTOPSIG(status) != SIGSTOP))
if(ptrace(PTRACE_CONT, pid, 0, 0) < 0)
panic("check_ptrace : ptrace failed, errno = %d", errno);
- CATCH_EINTR(n = waitpid(pid, &status, 0));
+ n = waitpid(pid, &status, 0);
if(!WIFEXITED(status) || (WEXITSTATUS(status) != exitcode))
panic("check_ptrace : child exited with status 0x%x", status);
if(ptrace(PTRACE_SYSCALL, pid, 0, 0) < 0)
panic("check_ptrace : ptrace failed, errno = %d",
errno);
- CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED));
+ n = waitpid(pid, &status, WUNTRACED);
if(n < 0)
panic("check_ptrace : wait failed, errno = %d", errno);
if(!WIFSTOPPED(status) || (WSTOPSIG(status) != SIGTRAP))
int run_kernel_thread(int (*fn)(void *), void *arg, void **jmp_ptr)
{
- sigjmp_buf buf;
+ jmp_buf buf;
int n;
*jmp_ptr = &buf;
#include "linux/module.h"
#include "linux/init.h"
#include "linux/capability.h"
-#include "linux/vmalloc.h"
#include "linux/spinlock.h"
#include "asm/unistd.h"
#include "asm/mman.h"
struct pt_regs *regs)
{
p->thread = (struct thread_struct) INIT_THREAD;
+ p->thread.kernel_stack =
+ (unsigned long) p->thread_info + 2 * PAGE_SIZE;
return(CHOOSE_MODE_PROC(copy_thread_tt, copy_thread_skas, nr,
clone_flags, sp, stack_top, p, regs));
}
return(kmalloc(size, GFP_ATOMIC));
}
-void *um_vmalloc(int size)
-{
- return(vmalloc(size));
-}
-
unsigned long get_fault_addr(void)
{
return((unsigned long) current->thread.fault_addr);
unsigned long stack;
stack = sp & (PAGE_MASK << CONFIG_KERNEL_STACK_ORDER);
- return(stack != (unsigned long) current_thread);
+ stack += 2 * PAGE_SIZE;
+ return(stack != current->thread.kernel_stack);
}
extern void remove_umid_dir(void);
/* Protected by sigio_lock() called from write_sigio_workaround */
static int sigio_irq_fd = -1;
-static irqreturn_t sigio_interrupt(int irq, void *data, struct pt_regs *unused)
+irqreturn_t sigio_interrupt(int irq, void *data, struct pt_regs *unused)
{
read_sigio_fd(sigio_irq_fd);
reactivate_fd(sigio_irq_fd, SIGIO_WRITE_IRQ);
int write_sigio_irq(int fd)
{
- int err;
-
- err = um_request_irq(SIGIO_WRITE_IRQ, fd, IRQ_READ, sigio_interrupt,
- SA_INTERRUPT | SA_SAMPLE_RANDOM, "write sigio",
- NULL);
- if(err){
- printk("write_sigio_irq : um_request_irq failed, err = %d\n",
- err);
+ if(um_request_irq(SIGIO_WRITE_IRQ, fd, IRQ_READ, sigio_interrupt,
+ SA_INTERRUPT | SA_SAMPLE_RANDOM, "write sigio",
+ NULL)){
+ printk("write_sigio_irq : um_request_irq failed\n");
return(-1);
}
sigio_irq_fd = fd;
#include "init.h"
#include "user.h"
#include "kern_util.h"
-#include "user_util.h"
#include "sigio.h"
#include "helper.h"
#include "os.h"
void __init check_one_sigio(void (*proc)(int, int))
{
struct sigaction old, new;
+ struct termios tt;
struct openpty_arg pty = { .master = -1, .slave = -1 };
int master, slave, err;
return;
}
- /* Not now, but complain so we now where we failed. */
- err = raw(master);
- if (err < 0)
- panic("check_sigio : __raw failed, errno = %d\n", -err);
+ /* XXX These can fail with EINTR */
+ if(tcgetattr(master, &tt) < 0)
+ panic("check_sigio : tcgetattr failed, errno = %d\n", errno);
+ cfmakeraw(&tt);
+ if(tcsetattr(master, TCSADRAIN, &tt) < 0)
+ panic("check_sigio : tcsetattr failed, errno = %d\n", errno);
err = os_sigio_async(master, slave);
if(err < 0)
#include <sys/ptrace.h>
#include "user.h"
#include "kern_util.h"
-#include "user_util.h"
#include "os.h"
#include "time_user.h"
int user_thread(unsigned long stack, int flags)
{
- int pid, status, err;
+ int pid, status;
pid = clone(user_thread_tramp, (void *) stack_sp(stack),
flags | CLONE_FILES | SIGCHLD, NULL);
return(pid);
}
- CATCH_EINTR(err = waitpid(pid, &status, WUNTRACED));
- if(err < 0){
+ if(waitpid(pid, &status, WUNTRACED) < 0){
printk("user_thread - waitpid failed, errno = %d\n", errno);
return(-errno);
}
#ifndef __MODE_SKAS_H__
#define __MODE_SKAS_H__
-#include <sysdep/ptrace.h>
-
extern unsigned long exec_regs[];
extern unsigned long exec_fp_regs[];
extern unsigned long exec_fpx_regs[];
panic("handle_trap - continuing to end of syscall failed, "
"errno = %d\n", errno);
- CATCH_EINTR(err = waitpid(pid, &status, WUNTRACED));
+ err = waitpid(pid, &status, WUNTRACED);
if((err < 0) || !WIFSTOPPED(status) || (WSTOPSIG(status) != SIGTRAP))
panic("handle_trap - failed to wait at end of syscall, "
"errno = %d, status = %d\n", errno, status);
panic("start_userspace : clone failed, errno = %d", errno);
do {
- CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED));
+ n = waitpid(pid, &status, WUNTRACED);
if(n < 0)
panic("start_userspace : wait failed, errno = %d",
errno);
panic("userspace - PTRACE_SYSCALL failed, errno = %d\n",
errno);
while(1){
- CATCH_EINTR(err = waitpid(pid, &status, WUNTRACED));
+ err = waitpid(pid, &status, WUNTRACED);
if(err < 0)
panic("userspace - waitpid failed, errno = %d\n",
errno);
void (*handler)(int))
{
unsigned long flags;
- sigjmp_buf switch_buf, fork_buf;
+ jmp_buf switch_buf, fork_buf;
*switch_buf_ptr = &switch_buf;
*fork_buf_ptr = &fork_buf;
void thread_wait(void *sw, void *fb)
{
- sigjmp_buf buf, **switch_buf = sw, *fork_buf;
+ jmp_buf buf, **switch_buf = sw, *fork_buf;
*switch_buf = &buf;
fork_buf = fb;
void switch_threads(void *me, void *next)
{
- sigjmp_buf my_buf, **me_ptr = me, *next_buf = next;
+ jmp_buf my_buf, **me_ptr = me, *next_buf = next;
*me_ptr = &my_buf;
if(sigsetjmp(my_buf, 1) == 0)
siglongjmp(*next_buf, 1);
}
-static sigjmp_buf initial_jmpbuf;
+static jmp_buf initial_jmpbuf;
/* XXX Make these percpu */
static void (*cb_proc)(void *arg);
static void *cb_arg;
-static sigjmp_buf *cb_back;
+static jmp_buf *cb_back;
int start_idle_thread(void *stack, void *switch_buf_ptr, void **fork_buf_ptr)
{
- sigjmp_buf **switch_buf = switch_buf_ptr;
+ jmp_buf **switch_buf = switch_buf_ptr;
int n;
*fork_buf_ptr = &initial_jmpbuf;
void initial_thread_cb_skas(void (*proc)(void *), void *arg)
{
- sigjmp_buf here;
+ jmp_buf here;
cb_proc = proc;
cb_arg = arg;
siglongjmp(initial_jmpbuf, 4);
}
+int new_mm(int from)
+{
+ struct proc_mm_op copy;
+ int n, fd = os_open_file("/proc/mm",
+ of_cloexec(of_write(OPENFLAGS())), 0);
+
+ if(fd < 0)
+ return(fd);
+
+ if(from != -1){
+ copy = ((struct proc_mm_op) { .op = MM_COPY_SEGMENTS,
+ .u =
+ { .copy_segments = from } } );
+ n = os_write_file(fd, ©, sizeof(copy));
+ if(n != sizeof(copy))
+ printk("new_mm : /proc/mm copy_segments failed, "
+ "err = %d\n", -n);
+ }
+
+ return(fd);
+}
+
void switch_mm_skas(int mm_fd)
{
int err;
#include "frame.h"
#include "kern.h"
#include "mode.h"
-#include "filehandle.h"
-#include "proc_mm.h"
int singlestepping_skas(void)
{
handler = new_thread_handler;
}
- new_thread(p->thread_info, &p->thread.mode.skas.switch_buf,
+ new_thread((void *) p->thread.kernel_stack,
+ &p->thread.mode.skas.switch_buf,
&p->thread.mode.skas.fork_buf, handler);
return(0);
}
-int new_mm(int from)
-{
- struct proc_mm_op copy;
- int n, fd;
-
- fd = open_file("/proc/mm", of_cloexec(of_write(OPENFLAGS())), 0);
- if(fd < 0)
- return(fd);
-
- if(from != -1){
- copy = ((struct proc_mm_op) { .op = MM_COPY_SEGMENTS,
- .u =
- { .copy_segments = from } } );
- n = os_write_file(fd, ©, sizeof(copy));
- if(n != sizeof(copy))
- printk("new_mm : /proc/mm copy_segments failed, "
- "err = %d\n", -n);
- }
-
- return(fd);
-}
-
void init_idle_skas(void)
{
cpu_tasks[current_thread->cpu].pid = os_getpid();
{
start_userspace(0);
capture_signal_stack();
+ uml_idle_timer();
init_new_thread_signals(1);
- uml_idle_timer();
init_task.thread.request.u.thread.proc = start_kernel_proc;
init_task.thread.request.u.thread.arg = NULL;
- return(start_idle_thread(init_task.thread_info,
+ return(start_idle_thread((void *) init_task.thread.kernel_stack,
&init_task.thread.mode.skas.switch_buf,
&init_task.thread.mode.skas.fork_buf));
}
struct skas_regs *r;
struct signal_info *info;
int save_errno = errno;
- int save_user;
r = &TASK_REGS(get_current())->skas;
- save_user = r->is_user;
r->is_user = 0;
r->fault_addr = SC_FAULT_ADDR(sc);
r->fault_type = SC_FAULT_TYPE(sc);
(*info->handler)(sig, (union uml_pt_regs *) r);
errno = save_errno;
- r->is_user = save_user;
}
void user_signal(int sig, union uml_pt_regs *regs)
#include "asm/pgtable.h"
#include "asm/uaccess.h"
#include "kern_util.h"
-#include "user_util.h"
extern void *um_virt_to_phys(struct task_struct *task, unsigned long addr,
pte_t *pte_out);
int dummy_code;
if(IS_ERR(phys) || (is_write && !pte_write(pte))){
- err = handle_page_fault(virt, 0, is_write, 1, &dummy_code);
+ err = handle_page_fault(virt, 0, is_write, 0, &dummy_code);
if(err)
return(0);
phys = um_virt_to_phys(current, virt, NULL);
return(n);
}
-static void do_buffer_op(void *jmpbuf, void *arg_ptr)
+static int buffer_op(unsigned long addr, int len, int is_write,
+ int (*op)(unsigned long addr, int len, void *arg),
+ void *arg)
{
- va_list args = *((va_list *) arg_ptr);
- unsigned long addr = va_arg(args, unsigned long);
- int len = va_arg(args, int);
- int is_write = va_arg(args, int);
- int (*op)(unsigned long, int, void *) = va_arg(args, void *);
- void *arg = va_arg(args, void *);
- int *res = va_arg(args, int *);
int size = min(PAGE_ALIGN(addr) - addr, (unsigned long) len);
int remain = len, n;
- current->thread.fault_catcher = jmpbuf;
n = do_op(addr, size, is_write, op, arg);
- if(n != 0){
- *res = (n < 0 ? remain : 0);
- goto out;
- }
+ if(n != 0)
+ return(n < 0 ? remain : 0);
addr += size;
remain -= size;
- if(remain == 0){
- *res = 0;
- goto out;
- }
+ if(remain == 0)
+ return(0);
while(addr < ((addr + remain) & PAGE_MASK)){
n = do_op(addr, PAGE_SIZE, is_write, op, arg);
- if(n != 0){
- *res = (n < 0 ? remain : 0);
- goto out;
- }
+ if(n != 0)
+ return(n < 0 ? remain : 0);
addr += PAGE_SIZE;
remain -= PAGE_SIZE;
}
- if(remain == 0){
- *res = 0;
- goto out;
- }
+ if(remain == 0)
+ return(0);
n = do_op(addr, remain, is_write, op, arg);
if(n != 0)
- *res = (n < 0 ? remain : 0);
- else *res = 0;
- out:
- current->thread.fault_catcher = NULL;
-}
-
-static int buffer_op(unsigned long addr, int len, int is_write,
- int (*op)(unsigned long addr, int len, void *arg),
- void *arg)
-{
- int faulted, res;
-
- faulted = setjmp_wrapper(do_buffer_op, addr, len, is_write, op, arg,
- &res);
- if(!faulted)
- return(res);
-
- return(addr + len - (unsigned long) current->thread.fault_addr);
+ return(n < 0 ? remain : 0);
+ return(0);
}
static int copy_chunk_from_user(unsigned long from, int len, void *arg)
*/
struct cpuinfo_um cpu_data[NR_CPUS];
+spinlock_t um_bh_lock = SPIN_LOCK_UNLOCKED;
+
+atomic_t global_bh_count;
+
+/* Not used by UML */
+unsigned char global_irq_holder = NO_PROC_ID;
+unsigned volatile long global_irq_lock;
+
/* Set when the idlers are all forked */
int smp_threads_ready = 0;
num_reschedules_sent++;
}
+static void show(char * str)
+{
+ int cpu = smp_processor_id();
+
+ printk(KERN_INFO "\n%s, CPU %d:\n", str, cpu);
+}
+
+#define MAXCOUNT 100000000
+
+static inline void wait_on_bh(void)
+{
+ int count = MAXCOUNT;
+ do {
+ if (!--count) {
+ show("wait_on_bh");
+ count = ~0;
+ }
+ /* nothing .. wait for the other bh's to go away */
+ } while (atomic_read(&global_bh_count) != 0);
+}
+
+/*
+ * This is called when we want to synchronize with
+ * bottom half handlers. We need to wait until
+ * no other CPU is executing any bottom half handler.
+ *
+ * Don't wait if we're already running in an interrupt
+ * context or are inside a bh handler.
+ */
+void synchronize_bh(void)
+{
+ if (atomic_read(&global_bh_count) && !in_interrupt())
+ wait_on_bh();
+}
+
void smp_send_stop(void)
{
int i;
.task = new_task } );
idle_threads[cpu] = new_task;
CHOOSE_MODE(os_write_file(new_task->thread.mode.tt.switch_pipe[1], &c,
- sizeof(c)),
+ sizeof(c)),
+ ({ panic("skas mode doesn't support SMP"); }));
wake_up_forked_process(new_task);
return(new_task);
}
--- /dev/null
+/*
+ * Copyright (C) 2000 Jeff Dike (jdike@karaya.com)
+ * Licensed under the GPL
+ */
+
+#include "linux/config.h"
+
+#ifdef CONFIG_SMP
+
+#include "linux/sched.h"
+#include "linux/module.h"
+#include "linux/threads.h"
+#include "linux/interrupt.h"
+#include "linux/err.h"
+#include "asm/smp.h"
+#include "asm/processor.h"
+#include "asm/spinlock.h"
+#include "asm/hardirq.h"
+#include "user_util.h"
+#include "kern_util.h"
+#include "kern.h"
+#include "irq_user.h"
+#include "os.h"
+
+/* CPU online map, set by smp_boot_cpus */
+unsigned long cpu_online_map = cpumask_of_cpu(0);
+
+EXPORT_SYMBOL(cpu_online_map);
+
+/* Per CPU bogomips and other parameters
+ * The only piece used here is the ipi pipe, which is set before SMP is
+ * started and never changed.
+ */
+struct cpuinfo_um cpu_data[NR_CPUS];
+
+spinlock_t um_bh_lock = SPIN_LOCK_UNLOCKED;
+
+atomic_t global_bh_count;
+
+/* Not used by UML */
+unsigned char global_irq_holder = NO_PROC_ID;
+unsigned volatile long global_irq_lock;
+
+/* Set when the idlers are all forked */
+int smp_threads_ready = 0;
+
+/* A statistic, can be a little off */
+int num_reschedules_sent = 0;
+
+/* Small, random number, never changed */
+unsigned long cache_decay_ticks = 5;
+
+/* Not changed after boot */
+struct task_struct *idle_threads[NR_CPUS];
+
+void smp_send_reschedule(int cpu)
+{
+ write(cpu_data[cpu].ipi_pipe[1], "R", 1);
+ num_reschedules_sent++;
+}
+
+static void show(char * str)
+{
+ int cpu = smp_processor_id();
+
+ printk(KERN_INFO "\n%s, CPU %d:\n", str, cpu);
+}
+
+#define MAXCOUNT 100000000
+
+static inline void wait_on_bh(void)
+{
+ int count = MAXCOUNT;
+ do {
+ if (!--count) {
+ show("wait_on_bh");
+ count = ~0;
+ }
+ /* nothing .. wait for the other bh's to go away */
+ } while (atomic_read(&global_bh_count) != 0);
+}
+
+/*
+ * This is called when we want to synchronize with
+ * bottom half handlers. We need to wait until
+ * no other CPU is executing any bottom half handler.
+ *
+ * Don't wait if we're already running in an interrupt
+ * context or are inside a bh handler.
+ */
+void synchronize_bh(void)
+{
+ if (atomic_read(&global_bh_count) && !in_interrupt())
+ wait_on_bh();
+}
+
+void smp_send_stop(void)
+{
+ int i;
+
+ printk(KERN_INFO "Stopping all CPUs...");
+ for(i = 0; i < num_online_cpus(); i++){
+ if(i == current->thread_info->cpu)
+ continue;
+ write(cpu_data[i].ipi_pipe[1], "S", 1);
+ }
+ printk("done\n");
+}
+
+static cpumask_t smp_commenced_mask;
+static cpumask_t smp_callin_map = CPU_MASK_NONE;
+
+static int idle_proc(void *cpup)
+{
+ int cpu = (int) cpup, err;
+
+ err = os_pipe(cpu_data[cpu].ipi_pipe, 1, 1);
+ if(err)
+ panic("CPU#%d failed to create IPI pipe, errno = %d", cpu,
+ -err);
+
+ activate_ipi(cpu_data[cpu].ipi_pipe[0],
+ current->thread.mode.tt.extern_pid);
+
+ wmb();
+ if (cpu_test_and_set(cpu, &smp_callin_map)) {
+ printk("huh, CPU#%d already present??\n", cpu);
+ BUG();
+ }
+
+ while (!cpu_isset(cpu, &smp_commenced_mask))
+ cpu_relax();
+
+ cpu_set(cpu, cpu_online_map);
+ default_idle();
+ return(0);
+}
+
+static struct task_struct *idle_thread(int cpu)
+{
+ struct task_struct *new_task;
+ unsigned char c;
+
+ current->thread.request.u.thread.proc = idle_proc;
+ current->thread.request.u.thread.arg = (void *) cpu;
+ new_task = do_fork(CLONE_VM | CLONE_IDLETASK, 0, NULL, 0, NULL, NULL);
+ if(IS_ERR(new_task)) panic("do_fork failed in idle_thread");
+
+ cpu_tasks[cpu] = ((struct cpu_task)
+ { .pid = new_task->thread.mode.tt.extern_pid,
+ .task = new_task } );
+ idle_threads[cpu] = new_task;
+ CHOOSE_MODE(write(new_task->thread.mode.tt.switch_pipe[1], &c,
+ sizeof(c)),
+ ({ panic("skas mode doesn't support SMP"); }));
+ return(new_task);
+}
+
+void smp_prepare_cpus(unsigned int maxcpus)
+{
+ struct task_struct *idle;
+ unsigned long waittime;
+ int err, cpu;
+
+ cpu_set(0, cpu_online_map);
+ cpu_set(0, smp_callin_map);
+
+ err = os_pipe(cpu_data[0].ipi_pipe, 1, 1);
+ if(err) panic("CPU#0 failed to create IPI pipe, errno = %d", -err);
+
+ activate_ipi(cpu_data[0].ipi_pipe[0],
+ current->thread.mode.tt.extern_pid);
+
+ for(cpu = 1; cpu < ncpus; cpu++){
+ printk("Booting processor %d...\n", cpu);
+
+ idle = idle_thread(cpu);
+
+ init_idle(idle, cpu);
+ unhash_process(idle);
+
+ waittime = 200000000;
+ while (waittime-- && !cpu_isset(cpu, smp_callin_map))
+ cpu_relax();
+
+ if (cpu_isset(cpu, smp_callin_map))
+ printk("done\n");
+ else printk("failed\n");
+ }
+}
+
+void smp_prepare_boot_cpu(void)
+{
+ cpu_set(smp_processor_id(), cpu_online_map);
+}
+
+int __cpu_up(unsigned int cpu)
+{
+ cpu_set(cpu, smp_commenced_mask);
+ while (!cpu_isset(cpu, cpu_online_map))
+ mb();
+ return(0);
+}
+
+int setup_profiling_timer(unsigned int multiplier)
+{
+ printk(KERN_INFO "setup_profiling_timer\n");
+ return(0);
+}
+
+void smp_call_function_slave(int cpu);
+
+void IPI_handler(int cpu)
+{
+ unsigned char c;
+ int fd;
+
+ fd = cpu_data[cpu].ipi_pipe[0];
+ while (read(fd, &c, 1) == 1) {
+ switch (c) {
+ case 'C':
+ smp_call_function_slave(cpu);
+ break;
+
+ case 'R':
+ set_tsk_need_resched(current);
+ break;
+
+ case 'S':
+ printk("CPU#%d stopping\n", cpu);
+ while(1)
+ pause();
+ break;
+
+ default:
+ printk("CPU#%d received unknown IPI [%c]!\n", cpu, c);
+ break;
+ }
+ }
+}
+
+int hard_smp_processor_id(void)
+{
+ return(pid_to_processor_id(os_getpid()));
+}
+
+static spinlock_t call_lock = SPIN_LOCK_UNLOCKED;
+static atomic_t scf_started;
+static atomic_t scf_finished;
+static void (*func)(void *info);
+static void *info;
+
+void smp_call_function_slave(int cpu)
+{
+ atomic_inc(&scf_started);
+ (*func)(info);
+ atomic_inc(&scf_finished);
+}
+
+int smp_call_function(void (*_func)(void *info), void *_info, int nonatomic,
+ int wait)
+{
+ int cpus = num_online_cpus() - 1;
+ int i;
+
+ if (!cpus)
+ return 0;
+
+ spin_lock_bh(&call_lock);
+ atomic_set(&scf_started, 0);
+ atomic_set(&scf_finished, 0);
+ func = _func;
+ info = _info;
+
+ for (i=0;i<NR_CPUS;i++)
+ if((i != current->thread_info->cpu) &&
+ cpu_isset(i, cpu_online_map))
+ write(cpu_data[i].ipi_pipe[1], "C", 1);
+
+ while (atomic_read(&scf_started) != cpus)
+ barrier();
+
+ if (wait)
+ while (atomic_read(&scf_finished) != cpus)
+ barrier();
+
+ spin_unlock_bh(&call_lock);
+ return 0;
+}
+
+#endif
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-file-style: "linux"
+ * End:
+ */
extern syscall_handler_t old_select;
extern syscall_handler_t sys_modify_ldt;
extern syscall_handler_t sys_rt_sigsuspend;
-
extern syscall_handler_t sys_vserver;
syscall_handler_t *sys_call_table[] = {
#include "linux/unistd.h"
#include "linux/slab.h"
#include "linux/utime.h"
-#include <linux/vs_cvirt.h>
-
#include "asm/mman.h"
#include "asm/uaccess.h"
#include "asm/ipc.h"
errno);
}
-void disable_timer(void)
-{
- struct itimerval disable = ((struct itimerval) { { 0, 0 }, { 0, 0 }});
- if((setitimer(ITIMER_VIRTUAL, &disable, NULL) < 0) ||
- (setitimer(ITIMER_REAL, &disable, NULL) < 0))
- printk("disnable_timer - setitimer failed, errno = %d\n",
- errno);
-}
-
void switch_timers(int to_real)
{
struct itimerval disable = ((struct itimerval) { { 0, 0 }, { 0, 0 }});
set_interval(ITIMER_REAL);
}
+static unsigned long long get_host_hz(void)
+{
+ char mhzline[16], *end;
+ unsigned long long mhz;
+ int ret, mult, rest, len;
+
+ ret = cpu_feature("cpu MHz", mhzline,
+ sizeof(mhzline) / sizeof(mhzline[0]));
+ if(!ret)
+ panic ("Could not get host MHZ");
+
+ mhz = strtoul(mhzline, &end, 10);
+
+ /* This business is to parse a floating point number without using
+ * floating types.
+ */
+
+ rest = 0;
+ mult = 0;
+ if(*end == '.'){
+ end++;
+ len = strlen(end);
+ if(len < 6)
+ mult = 6 - len;
+ else if(len > 6)
+ end[6] = '\0';
+ rest = strtoul(end, NULL, 10);
+ while(mult-- > 0)
+ rest *= 10;
+ }
+
+ return(1000000 * mhz + rest);
+}
+
+unsigned long long host_hz = 0;
+
extern int do_posix_clock_monotonic_gettime(struct timespec *tp);
void time_init(void)
{
struct timespec now;
+ host_hz = get_host_hz();
if(signal(SIGVTALRM, boot_timer_handler) == SIG_ERR)
panic("Couldn't set SIGVTALRM handler");
set_interval(ITIMER_VIRTUAL);
#include "user_util.h"
#include "time_user.h"
#include "mode.h"
-#include "os.h"
u64 jiffies_64;
int timer_irq_inited = 0;
static int first_tick;
-static unsigned long long prev_usecs;
+static unsigned long long prev_tsc;
static long long delta; /* Deviation per interval */
-#define MILLION 1000000
+extern unsigned long long host_hz;
void timer_irq(union uml_pt_regs *regs)
{
if(first_tick){
#if defined(CONFIG_UML_REAL_TIME_CLOCK)
+ unsigned long long tsc;
/* We've had 1 tick */
- unsigned long long usecs = os_usecs();
+ tsc = time_stamp();
- delta += usecs - prev_usecs;
- prev_usecs = usecs;
+ delta += tsc - prev_tsc;
+ prev_tsc = tsc;
- /* Protect against the host clock being set backwards */
- if(delta < 0)
- delta = 0;
-
- ticks += (delta * HZ) / MILLION;
- delta -= (ticks * MILLION) / HZ;
+ ticks += (delta * HZ) / host_hz;
+ delta -= (ticks * host_hz) / HZ;
#else
ticks = 1;
#endif
}
else {
- prev_usecs = os_usecs();
+ prev_tsc = time_stamp();
first_tick = 1;
}
{
int i, n;
- n = (loops_per_jiffy * HZ * usecs) / MILLION;
+ n = (loops_per_jiffy * HZ * usecs) / 1000000;
for(i=0;i<n;i++) ;
}
{
int i, n;
- n = (loops_per_jiffy * HZ * usecs) / MILLION;
+ n = (loops_per_jiffy * HZ * usecs) / 1000000;
for(i=0;i<n;i++) ;
}
err = -ENOMEM;
goto out_of_memory;
default:
- BUG();
+ if (current->pid == 1) {
+ up_read(&mm->mmap_sem);
+ yield();
+ down_read(&mm->mmap_sem);
+ goto survive;
+ }
+ goto out;
}
pte = pte_offset_kernel(pmd, page);
} while(!pte_present(*pte));
down_read(&mm->mmap_sem);
goto survive;
}
+ err = -ENOMEM;
goto out;
}
list_add(&info->list, &physmem_remappers);
}
-static int check_remapped_addr(unsigned long address, int is_write, int is_user)
+static int check_remapped_addr(unsigned long address, int is_write)
{
struct remapper *remapper;
struct list_head *ele;
list_for_each(ele, &physmem_remappers){
remapper = list_entry(ele, struct remapper, list);
- if((*remapper->proc)(fd, address, is_write, offset, is_user))
+ if((*remapper->proc)(fd, address, is_write, offset))
return(1);
}
flush_tlb_kernel_vm();
return(0);
}
- else if(check_remapped_addr(address & PAGE_MASK, is_write, is_user))
+ else if(check_remapped_addr(address & PAGE_MASK, is_write))
return(0);
else if(current->mm == NULL)
panic("Segfault with no mm");
{
kill(pid, SIGKILL);
kill(pid, SIGCONT);
- do {
- int n;
- CATCH_EINTR(n = waitpid(pid, NULL, 0));
- if (n > 0)
- kill(pid, SIGCONT);
- else
- break;
- } while(1);
+ while(waitpid(pid, NULL, 0) > 0) kill(pid, SIGCONT);
}
/* Unlocked - don't care if this is a bit off */
void do_longjmp(void *b, int val)
{
- sigjmp_buf *buf = b;
+ jmp_buf *buf = b;
siglongjmp(*buf, val);
}
#include "kern_util.h"
#include "irq_user.h"
#include "time_user.h"
-#include "signal_user.h"
#include "mem_user.h"
#include "os.h"
#include "tlb.h"
do_exit(SIGKILL);
}
- new_pid = start_fork_tramp(current->thread_info, stack, 0, exec_tramp);
+ new_pid = start_fork_tramp((void *) current->thread.kernel_stack,
+ stack, 0, exec_tramp);
if(new_pid < 0){
printk(KERN_ERR
"flush_thread : new thread failed, errno = %d\n",
current->thread.request.u.exec.pid = new_pid;
unprotect_stack((unsigned long) current_thread);
os_usr1_process(os_getpid());
- change_sig(SIGUSR1, 1);
- change_sig(SIGUSR1, 0);
enable_timer();
free_page(stack);
protect_memory(uml_reserved, high_physmem - uml_reserved, 1, 1, 0, 1);
void do_exec(int old_pid, int new_pid)
{
unsigned long regs[FRAME_SIZE];
- int err;
if((ptrace(PTRACE_ATTACH, new_pid, 0, 0) < 0) ||
- (ptrace(PTRACE_CONT, new_pid, 0, 0) < 0))
+ (ptrace(PTRACE_CONT, new_pid, 0, 0) < 0) ||
+ (waitpid(new_pid, 0, WUNTRACED) < 0))
tracer_panic("do_exec failed to attach proc - errno = %d",
errno);
- CATCH_EINTR(err = waitpid(new_pid, 0, WUNTRACED));
- if (err < 0)
- tracer_panic("do_exec failed to attach proc in waitpid - errno = %d",
- errno);
-
if(ptrace_getregs(old_pid, regs) < 0)
tracer_panic("do_exec failed to get registers - errno = %d",
errno);
if(!jail || debug)
remap_data(UML_ROUND_DOWN(&_stext), UML_ROUND_UP(&_etext), 1);
remap_data(UML_ROUND_DOWN(&_sdata), UML_ROUND_UP(&_edata), 1);
- remap_data(UML_ROUND_DOWN(&__bss_start), UML_ROUND_UP(&_end), 1);
+ remap_data(UML_ROUND_DOWN(&__bss_start), UML_ROUND_UP(brk_start), 1);
}
#ifdef CONFIG_HOST_2G_2G
os_close_file(current->thread.mode.tt.switch_pipe[1]);
}
-void suspend_new_thread(int fd)
-{
- int err;
- char c;
-
- os_stop_process(os_getpid());
- err = os_read_file(fd, &c, sizeof(c));
- if(err != sizeof(c))
- panic("read failed in suspend_new_thread, err = %d", -err);
-}
-
void schedule_tail(task_t *prev);
static void new_thread_handler(int sig)
local_irq_enable();
if(!run_kernel_thread(fn, arg, ¤t->thread.exec_buf))
do_exit(0);
-
- /* XXX No set_user_mode here because a newly execed process will
- * immediately segfault on its non-existent IP, coming straight back
- * to the signal handler, which will call set_user_mode on its way
- * out. This should probably change since it's confusing.
- */
}
static int new_thread_proc(void *stack)
local_irq_disable();
init_new_thread_stack(stack, new_thread_handler);
os_usr1_process(os_getpid());
- change_sig(SIGUSR1, 1);
return(0);
}
init_new_thread_stack(stack, finish_fork_handler);
os_usr1_process(os_getpid());
- change_sig(SIGUSR1, 1);
return(0);
}
err = os_pipe(p->thread.mode.tt.switch_pipe, 1, 1);
if(err < 0){
printk("copy_thread : pipe failed, err = %d\n", -err);
- goto out;
+ return(err);
}
stack = alloc_stack(0, 0);
clone_flags &= CLONE_VM;
p->thread.temp_stack = stack;
- new_pid = start_fork_tramp(p->thread_info, stack, clone_flags, tramp);
+ new_pid = start_fork_tramp((void *) p->thread.kernel_stack, stack,
+ clone_flags, tramp);
if(new_pid < 0){
printk(KERN_ERR "copy_thread : clone failed - errno = %d\n",
-new_pid);
current->thread.request.op = OP_FORK;
current->thread.request.u.fork.pid = new_pid;
os_usr1_process(os_getpid());
-
- /* Enable the signal and then disable it to ensure that it is handled
- * here, and nowhere else.
- */
- change_sig(SIGUSR1, 1);
-
- change_sig(SIGUSR1, 0);
- err = 0;
- out:
- return(err);
+ return(0);
}
void reboot_tt(void)
{
current->thread.request.op = OP_REBOOT;
os_usr1_process(os_getpid());
- change_sig(SIGUSR1, 1);
}
void halt_tt(void)
{
current->thread.request.op = OP_HALT;
os_usr1_process(os_getpid());
- change_sig(SIGUSR1, 1);
}
void kill_off_processes_tt(void)
current->thread.request.u.cb.proc = proc;
current->thread.request.u.cb.arg = arg;
os_usr1_process(os_getpid());
- change_sig(SIGUSR1, 1);
-
- change_sig(SIGUSR1, 0);
}
}
init_task.thread.mode.tt.extern_pid = pid;
err = os_pipe(init_task.thread.mode.tt.switch_pipe, 1, 1);
- if(err)
+ if(err)
panic("Can't create switch pipe for init_task, errno = %d",
-err);
}
void *sp;
int pages;
- pages = (1 << CONFIG_KERNEL_STACK_ORDER);
- sp = (void *) ((unsigned long) init_task.thread_info) +
- pages * PAGE_SIZE - sizeof(unsigned long);
+ pages = (1 << CONFIG_KERNEL_STACK_ORDER) - 2;
+ sp = (void *) init_task.thread.kernel_stack + pages * PAGE_SIZE -
+ sizeof(unsigned long);
return(tracer(start_kernel_proc, sp));
}
child_proxy(1, W_EXITCODE(0, 0));
while(debugger.waiting == 1){
- CATCH_EINTR(pid = waitpid(debugger.pid, &status, WUNTRACED));
+ pid = waitpid(debugger.pid, &status, WUNTRACED);
if(pid != debugger.pid){
printk("fake_child_exit - waitpid failed, "
"errno = %d\n", errno);
}
debugger_proxy(status, debugger.pid);
}
- CATCH_EINTR(pid = waitpid(debugger.pid, &status, WUNTRACED));
+ pid = waitpid(debugger.pid, &status, WUNTRACED);
if(pid != debugger.pid){
printk("fake_child_exit - waitpid failed, "
"errno = %d\n", errno);
printf("tracing thread pid = %d\n", tracing_pid);
pid = clone(signal_tramp, sp, CLONE_FILES | SIGCHLD, init_proc);
- CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED));
+ n = waitpid(pid, &status, WUNTRACED);
if(n < 0){
printf("waitpid on idle thread failed, errno = %d\n", errno);
exit(1);
}
set_cmdline("(tracing thread)");
while(1){
- CATCH_EINTR(pid = waitpid(-1, &status, WUNTRACED));
+ pid = waitpid(-1, &status, WUNTRACED);
if(pid <= 0){
if(errno != ECHILD){
printf("wait failed - errno = %d\n", errno);
unprotect_kernel_mem();
- /* This is done because to allow SIGSEGV to be delivered inside a SEGV
- * handler. This can happen in copy_user, and if SEGV is disabled,
- * the process will die.
- */
- if(sig == SIGSEGV)
- change_sig(SIGSEGV, 1);
-
r = &TASK_REGS(get_current())->tt;
save_regs = *r;
is_user = user_context(SC_SP(sc));
if(sig != SIGUSR2)
r->syscall = -1;
+ change_sig(SIGUSR1, 1);
info = &sig_info[sig];
if(!info->is_irq) unblock_signals();
if(is_user){
interrupt_end();
block_signals();
+ change_sig(SIGUSR1, 0);
set_user_mode(NULL);
}
*r = save_regs;
struct tt_regs save = TASK_REGS(get_current())->tt;
int ret;
unsigned long *faddrp = (unsigned long *)fault_addr;
- sigjmp_buf jbuf;
+ jmp_buf jbuf;
*fault_catcher = &jbuf;
if(sigsetjmp(jbuf, 1) == 0)
printf("set_tty_log_fd - strtoul failed on '%s'\n", name);
tty_log_fd = -1;
}
-
- *add = 0;
return 0;
}
{
unsigned long *faddrp = (unsigned long *) fault_addr, ret;
- sigjmp_buf jbuf;
+ jmp_buf jbuf;
*fault_catcher = &jbuf;
if(sigsetjmp(jbuf, 1) == 0){
(*op)(to, from, n);
#include "user_util.h"
#include "kern_util.h"
#include "kern.h"
+#include "mprot.h"
#include "mem_user.h"
#include "mem.h"
#include "umid.h"
int linux_main(int argc, char **argv)
{
- unsigned long avail, diff;
+ unsigned long avail;
unsigned long virtmem_size, max_physmem;
unsigned int i, add;
brk_start = (unsigned long) sbrk(0);
CHOOSE_MODE_PROC(before_mem_tt, before_mem_skas, brk_start);
- /* Increase physical memory size for exec-shield users
- so they actually get what they asked for. This should
- add zero for non-exec shield users */
-
- diff = UML_ROUND_UP(brk_start) - UML_ROUND_UP(&_end);
- if(diff > 1024 * 1024){
- printf("Adding %ld bytes to physical memory to account for "
- "exec-shield gap\n", diff);
- physmem_size += UML_ROUND_UP(brk_start) - UML_ROUND_UP(&_end);
- }
uml_physmem = uml_start;
uml_postsetup();
+ init_task.thread.kernel_stack = (unsigned long) &init_thread_info +
+ 2 * PAGE_SIZE;
+
task_protections((unsigned long) &init_thread_info);
os_flush_stdout();
}
if(strlen(name) > UMID_LEN - 1)
- (*printer)("Unique machine name is being truncated to %d "
+ (*printer)("Unique machine name is being truncated to %s "
"characters\n", UMID_LEN);
strlcpy(umid, name, sizeof(umid));
static int __init set_umid_arg(char *name, int *add)
{
- *add = 0;
return(set_umid(name, 0, printf));
}
static int __init set_uml_dir(char *name, int *add)
{
if((strlen(name) > 0) && (name[strlen(name) - 1] != '/')){
- uml_dir = malloc(strlen(name) + 2);
+ uml_dir = malloc(strlen(name) + 1);
if(uml_dir == NULL){
printf("Failed to malloc uml_dir - error = %d\n",
errno);
uml_dir = name;
- /* Return 0 here because do_initcalls doesn't look at
- * the return value.
- */
return(0);
}
sprintf(uml_dir, "%s/", name);
}
else uml_dir = name;
- return(0);
+ return 0;
}
static int __init make_uml_dir(void)
#include <stdlib.h>
#include <unistd.h>
#include <limits.h>
-#include <setjmp.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/ptrace.h>
int status, ret;
while(1){
- CATCH_EINTR(ret = waitpid(pid, &status, WUNTRACED));
+ ret = waitpid(pid, &status, WUNTRACED);
if((ret < 0) ||
!WIFSTOPPED(status) || (WSTOPSIG(status) != sig)){
if(ret < 0){
+ if(errno == EINTR) continue;
printk("wait failed, errno = %d\n",
errno);
}
else if(WIFEXITED(status))
- printk("process %d exited with status %d\n",
- pid, WEXITSTATUS(status));
+ printk("process exited with status %d\n",
+ WEXITSTATUS(status));
else if(WIFSIGNALED(status))
- printk("process %d exited with signal %d\n",
- pid, WTERMSIG(status));
+ printk("process exited with signal %d\n",
+ WTERMSIG(status));
else if((WSTOPSIG(status) == SIGVTALRM) ||
(WSTOPSIG(status) == SIGALRM) ||
(WSTOPSIG(status) == SIGIO) ||
ptrace(cont_type, pid, 0, WSTOPSIG(status));
continue;
}
- else printk("process %d stopped with signal %d\n",
- pid, WSTOPSIG(status));
+ else printk("process stopped with signal %d\n",
+ WSTOPSIG(status));
panic("wait_for_stop failed to wait for %d to stop "
"with %d\n", pid, sig);
}
}
}
-int raw(int fd)
+int raw(int fd, int complain)
{
struct termios tt;
int err;
- CATCH_EINTR(err = tcgetattr(fd, &tt));
- if (err < 0) {
- printk("tcgetattr failed, errno = %d\n", errno);
- return(-errno);
- }
-
+ tcgetattr(fd, &tt);
cfmakeraw(&tt);
-
- CATCH_EINTR(err = tcsetattr(fd, TCSADRAIN, &tt));
- if (err < 0) {
+ err = tcsetattr(fd, TCSANOW, &tt);
+ if((err < 0) && complain){
printk("tcsetattr failed, errno = %d\n", errno);
return(-errno);
}
-
- /* XXX tcsetattr could have applied only some changes
- * (and cfmakeraw() is a set of changes) */
return(0);
}
host.release, host.version, host.machine);
}
-int setjmp_wrapper(void (*proc)(void *, void *), ...)
-{
- va_list args;
- sigjmp_buf buf;
- int n;
-
- n = sigsetjmp(buf, 1);
- if(n == 0){
- va_start(args, proc);
- (*proc)(&buf, &args);
- }
- va_end(args);
- return(n);
-}
-
/*
* Overrides for Emacs so that we follow Linus's tabbing style.
* Emacs will notice this stuff at the end of the file and automatically
#include "kern_util.h"
#include "mem_user.h"
#include "signal_user.h"
-#include "time_user.h"
-#include "irq_user.h"
#include "user.h"
#include "init.h"
#include "mode.h"
/* Reboot */
if(ret){
- int err;
-
printf("\n");
-
- /* Let any pending signals fire, then disable them. This
- * ensures that they won't be delivered after the exec, when
- * they are definitely not expected.
- */
- unblock_signals();
- disable_timer();
- err = deactivate_all_fds();
- if(err)
- printf("deactivate_all_fds failed, errno = %d\n", -err);
-
execvp(new_argv[0], new_argv);
perror("Failed to exec kernel");
ret = 1;
void *__wrap_malloc(int size)
{
- void *ret;
-
- if(!CAN_KMALLOC())
+ if(CAN_KMALLOC())
+ return(um_kmalloc(size));
+ else
return(__real_malloc(size));
- else if(size <= PAGE_SIZE) /* finding contiguos pages is hard */
- ret = um_kmalloc(size);
- else ret = um_vmalloc(size);
-
- /* glibc people insist that if malloc fails, errno should be
- * set by malloc as well. So we do.
- */
- if(ret == NULL)
- errno = ENOMEM;
-
- return(ret);
}
void *__wrap_calloc(int n, int size)
extern void __real_free(void *);
-extern unsigned long high_physmem;
-
void __wrap_free(void *ptr)
{
- unsigned long addr = (unsigned long) ptr;
-
- /* We need to know how the allocation happened, so it can be correctly
- * freed. This is done by seeing what region of memory the pointer is
- * in -
- * physical memory - kmalloc/kfree
- * kernel virtual memory - vmalloc/vfree
- * anywhere else - malloc/free
- * If kmalloc is not yet possible, then the kernel memory regions
- * may not be set up yet, and the variables not set up. So,
- * free is called.
- *
- * CAN_KMALLOC is checked because it would be bad to free a buffer
- * with kmalloc/vmalloc after they have been turned off during
- * shutdown.
- */
-
- if((addr >= uml_physmem) && (addr < high_physmem)){
- if(CAN_KMALLOC())
- kfree(ptr);
- }
- else if((addr >= start_vm) && (addr < end_vm)){
- if(CAN_KMALLOC())
- vfree(ptr);
- }
+ if(CAN_KMALLOC()) kfree(ptr);
else __real_free(ptr);
}
# Licensed under the GPL
#
-obj-y = aio.o file.o process.o time.o tty.o user_syms.o drivers/
+obj-y = file.o process.o tty.o user_syms.o drivers/
-USER_OBJS := $(foreach file,aio.o file.o process.o time.o tty.o,$(obj)/$(file))
+USER_OBJS := $(foreach file,file.o process.o tty.o,$(obj)/$(file))
$(USER_OBJS) : %.o: %.c
$(CC) $(CFLAGS_$(notdir $@)) $(USER_CFLAGS) -c -o $@ $<
-
-HAVE_AIO_ABI = $(shell [ -e /usr/include/linux/aio_abi.h ] && \
- echo -DHAVE_AIO_ABI)
-HAVE_AIO_LIBC = $(shell objdump -T /lib/libc-*.so | grep io_submit && \
- echo -DHAVE_AIO_LIBC)
-CFLAGS_aio.o = $(HAVE_AIO_ABI) $(HAVE_AIO_LIBC)
+++ /dev/null
-/*
- * Copyright (C) 2004 Jeff Dike (jdike@addtoit.com)
- * Licensed under the GPL
- */
-
-#include <stdlib.h>
-#include <unistd.h>
-#include <signal.h>
-#include <errno.h>
-#include <sched.h>
-#include <sys/syscall.h>
-#include "os.h"
-#include "helper.h"
-#include "aio.h"
-#include "init.h"
-#include "user.h"
-#include "mode.h"
-
-struct aio_thread_req {
- enum aio_type type;
- int io_fd;
- unsigned long long offset;
- char *buf;
- int len;
- int reply_fd;
- void *data;
-};
-
-static int aio_req_fd_r = -1;
-static int aio_req_fd_w = -1;
-
-#if defined(HAVE_AIO_ABI)
-#include <linux/aio_abi.h>
-
-/* If we have the headers, we are going to build with AIO enabled.
- * If we don't have aio in libc, we define the necessary stubs here.
- */
-
-#if !defined(HAVE_AIO_LIBC)
-
-#define __NR_io_setup 245
-#define __NR_io_getevents 247
-#define __NR_io_submit 248
-
-static long io_setup(int n, aio_context_t *ctxp)
-{
- return(syscall(__NR_io_setup, n, ctxp));
-}
-
-static long io_submit(aio_context_t ctx, long nr, struct iocb **iocbpp)
-{
- return(syscall(__NR_io_submit, ctx, nr, iocbpp));
-}
-
-static long io_getevents(aio_context_t ctx_id, long min_nr, long nr,
- struct io_event *events, struct timespec *timeout)
-{
- return(syscall(__NR_io_getevents, ctx_id, min_nr, nr, events, timeout));
-}
-
-#endif
-
-/* The AIO_MMAP cases force the mmapped page into memory here
- * rather than in whatever place first touches the data. I used
- * to do this by touching the page, but that's delicate because
- * gcc is prone to optimizing that away. So, what's done here
- * is we read from the descriptor from which the page was
- * mapped. The caller is required to pass an offset which is
- * inside the page that was mapped. Thus, when the read
- * returns, we know that the page is in the page cache, and
- * that it now backs the mmapped area.
- */
-
-static int do_aio(aio_context_t ctx, enum aio_type type, int fd, char *buf,
- int len, unsigned long long offset, void *data)
-{
- struct iocb iocb, *iocbp = &iocb;
- char c;
- int err;
-
- iocb = ((struct iocb) { .aio_data = (unsigned long) data,
- .aio_reqprio = 0,
- .aio_fildes = fd,
- .aio_buf = (unsigned long) buf,
- .aio_nbytes = len,
- .aio_offset = offset,
- .aio_reserved1 = 0,
- .aio_reserved2 = 0,
- .aio_reserved3 = 0 });
-
- switch(type){
- case AIO_READ:
- iocb.aio_lio_opcode = IOCB_CMD_PREAD;
- err = io_submit(ctx, 1, &iocbp);
- break;
- case AIO_WRITE:
- iocb.aio_lio_opcode = IOCB_CMD_PWRITE;
- err = io_submit(ctx, 1, &iocbp);
- break;
- case AIO_MMAP:
- iocb.aio_lio_opcode = IOCB_CMD_PREAD;
- iocb.aio_buf = (unsigned long) &c;
- iocb.aio_nbytes = sizeof(c);
- err = io_submit(ctx, 1, &iocbp);
- break;
- default:
- printk("Bogus op in do_aio - %d\n", type);
- err = -EINVAL;
- break;
- }
- if(err > 0)
- err = 0;
-
- return(err);
-}
-
-static aio_context_t ctx = 0;
-
-static int aio_thread(void *arg)
-{
- struct aio_thread_reply reply;
- struct io_event event;
- int err, n, reply_fd;
-
- signal(SIGWINCH, SIG_IGN);
-
- while(1){
- n = io_getevents(ctx, 1, 1, &event, NULL);
- if(n < 0){
- if(errno == EINTR)
- continue;
- printk("aio_thread - io_getevents failed, "
- "errno = %d\n", errno);
- }
- else {
- reply = ((struct aio_thread_reply)
- { .data = (void *) event.data,
- .err = event.res });
- reply_fd =
- ((struct aio_context *) event.data)->reply_fd;
- err = os_write_file(reply_fd, &reply, sizeof(reply));
- if(err != sizeof(reply))
- printk("not_aio_thread - write failed, "
- "fd = %d, err = %d\n",
- aio_req_fd_r, -err);
- }
- }
- return(0);
-}
-
-#endif
-
-static int do_not_aio(struct aio_thread_req *req)
-{
- char c;
- int err;
-
- switch(req->type){
- case AIO_READ:
- err = os_seek_file(req->io_fd, req->offset);
- if(err)
- goto out;
-
- err = os_read_file(req->io_fd, req->buf, req->len);
- break;
- case AIO_WRITE:
- err = os_seek_file(req->io_fd, req->offset);
- if(err)
- goto out;
-
- err = os_write_file(req->io_fd, req->buf, req->len);
- break;
- case AIO_MMAP:
- err = os_seek_file(req->io_fd, req->offset);
- if(err)
- goto out;
-
- err = os_read_file(req->io_fd, &c, sizeof(c));
- break;
- default:
- printk("do_not_aio - bad request type : %d\n", req->type);
- err = -EINVAL;
- break;
- }
-
- out:
- return(err);
-}
-
-static int not_aio_thread(void *arg)
-{
- struct aio_thread_req req;
- struct aio_thread_reply reply;
- int err;
-
- signal(SIGWINCH, SIG_IGN);
- while(1){
- err = os_read_file(aio_req_fd_r, &req, sizeof(req));
- if(err != sizeof(req)){
- if(err < 0)
- printk("not_aio_thread - read failed, fd = %d, "
- "err = %d\n", aio_req_fd_r, -err);
- else {
- printk("not_aio_thread - short read, fd = %d, "
- "length = %d\n", aio_req_fd_r, err);
- }
- continue;
- }
- err = do_not_aio(&req);
- reply = ((struct aio_thread_reply) { .data = req.data,
- .err = err });
- err = os_write_file(req.reply_fd, &reply, sizeof(reply));
- if(err != sizeof(reply))
- printk("not_aio_thread - write failed, fd = %d, "
- "err = %d\n", aio_req_fd_r, -err);
- }
-}
-
-static int aio_pid = -1;
-
-static int init_aio_24(void)
-{
- unsigned long stack;
- int fds[2], err;
-
- err = os_pipe(fds, 1, 1);
- if(err)
- goto out;
-
- aio_req_fd_w = fds[0];
- aio_req_fd_r = fds[1];
- err = run_helper_thread(not_aio_thread, NULL,
- CLONE_FILES | CLONE_VM | SIGCHLD, &stack, 0);
- if(err < 0)
- goto out_close_pipe;
-
- aio_pid = err;
- goto out;
-
- out_close_pipe:
- os_close_file(fds[0]);
- os_close_file(fds[1]);
- aio_req_fd_w = -1;
- aio_req_fd_r = -1;
- out:
- return(0);
-}
-
-#ifdef HAVE_AIO_ABI
-#define DEFAULT_24_AIO 0
-static int init_aio_26(void)
-{
- unsigned long stack;
- int err;
-
- if(io_setup(256, &ctx)){
- printk("aio_thread failed to initialize context, err = %d\n",
- errno);
- return(-errno);
- }
-
- err = run_helper_thread(aio_thread, NULL,
- CLONE_FILES | CLONE_VM | SIGCHLD, &stack, 0);
- if(err < 0)
- return(-errno);
-
- aio_pid = err;
- err = 0;
- out:
- return(err);
-}
-
-int submit_aio_26(enum aio_type type, int io_fd, char *buf, int len,
- unsigned long long offset, int reply_fd, void *data)
-{
- struct aio_thread_reply reply;
- int err;
-
- ((struct aio_context *) data)->reply_fd = reply_fd;
-
- err = do_aio(ctx, type, io_fd, buf, len, offset, data);
- if(err){
- reply = ((struct aio_thread_reply) { .data = data,
- .err = err });
- err = os_write_file(reply_fd, &reply, sizeof(reply));
- if(err != sizeof(reply))
- printk("submit_aio_26 - write failed, "
- "fd = %d, err = %d\n", reply_fd, -err);
- else err = 0;
- }
-
- return(err);
-}
-
-#else
-#define DEFAULT_24_AIO 1
-static int init_aio_26(void)
-{
- return(-ENOSYS);
-}
-
-int submit_aio_26(enum aio_type type, int io_fd, char *buf, int len,
- unsigned long long offset, int reply_fd, void *data)
-{
- return(-ENOSYS);
-}
-#endif
-
-static int aio_24 = DEFAULT_24_AIO;
-
-static int __init set_aio_24(char *name, int *add)
-{
- aio_24 = 1;
- return(0);
-}
-
-__uml_setup("aio=2.4", set_aio_24,
-"aio=2.4\n"
-" This is used to force UML to use 2.4-style AIO even when 2.6 AIO is\n"
-" available. 2.4 AIO is a single thread that handles one request at a\n"
-" time, synchronously. 2.6 AIO is a thread which uses 2.5 AIO interface\n"
-" to handle an arbitrary number of pending requests. 2.6 AIO is not\n"
-" available in tt mode, on 2.4 hosts, or when UML is built with\n"
-" /usr/include/linux/aio_abi no available.\n\n"
-);
-
-static int init_aio(void)
-{
- int err;
-
- CHOOSE_MODE(({
- if(!aio_24){
- printk("Disabling 2.6 AIO in tt mode\n");
- aio_24 = 1;
- } }), (void) 0);
-
- if(!aio_24){
- err = init_aio_26();
- if(err && (errno == ENOSYS)){
- printk("2.6 AIO not supported on the host - "
- "reverting to 2.4 AIO\n");
- aio_24 = 1;
- }
- else return(err);
- }
-
- if(aio_24)
- return(init_aio_24());
-
- return(0);
-}
-
-__initcall(init_aio);
-
-static void exit_aio(void)
-{
- if(aio_pid != -1)
- os_kill_process(aio_pid, 1);
-}
-
-__uml_exitcall(exit_aio);
-
-int submit_aio_24(enum aio_type type, int io_fd, char *buf, int len,
- unsigned long long offset, int reply_fd, void *data)
-{
- struct aio_thread_req req = { .type = type,
- .io_fd = io_fd,
- .offset = offset,
- .buf = buf,
- .len = len,
- .reply_fd = reply_fd,
- .data = data,
- };
- int err;
-
- err = os_write_file(aio_req_fd_w, &req, sizeof(req));
- if(err == sizeof(req))
- err = 0;
-
- return(err);
-}
-
-int submit_aio(enum aio_type type, int io_fd, char *buf, int len,
- unsigned long long offset, int reply_fd, void *data)
-{
- if(aio_24)
- return(submit_aio_24(type, io_fd, buf, len, offset, reply_fd,
- data));
- else {
- return(submit_aio_26(type, io_fd, buf, len, offset, reply_fd,
- data));
- }
-}
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
#include <net/if.h>
#include "user.h"
#include "kern_util.h"
-#include "user_util.h"
#include "net_user.h"
#include "etap.h"
#include "helper.h"
if(c != 1){
printk("etap_tramp : uml_net failed\n");
err = -EINVAL;
- CATCH_EINTR(n = waitpid(pid, &status, 0));
- if(n < 0)
+ if(waitpid(pid, &status, 0) < 0)
err = -errno;
else if(!WIFEXITED(status) || (WEXITSTATUS(status) != 1))
printk("uml_net didn't exit with status 1\n");
#include "net_user.h"
#include "tuntap.h"
#include "kern_util.h"
-#include "user_util.h"
#include "user.h"
#include "helper.h"
#include "os.h"
errno);
return(-errno);
}
- CATCH_EINTR(waitpid(pid, NULL, 0));
+ waitpid(pid, NULL, 0);
cmsg = CMSG_FIRSTHDR(&msg);
if(cmsg == NULL){
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
-#include <utime.h>
-#include <dirent.h>
-#include <linux/kdev_t.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/mount.h>
#include <sys/uio.h>
-#include <sys/utsname.h>
-#include <sys/vfs.h>
#include "os.h"
#include "user.h"
#include "kern_util.h"
static void copy_stat(struct uml_stat *dst, struct stat64 *src)
{
*dst = ((struct uml_stat) {
- .ust_major = MAJOR(src->st_dev), /* device */
- .ust_minor = MINOR(src->st_dev),
+ .ust_dev = src->st_dev, /* device */
.ust_ino = src->st_ino, /* inode */
.ust_mode = src->st_mode, /* protection */
.ust_nlink = src->st_nlink, /* number of hard links */
.ust_atime = src->st_atime, /* time of last access */
.ust_mtime = src->st_mtime, /* time of last modification */
.ust_ctime = src->st_ctime, /* time of last change */
- .ust_rmajor = MAJOR(src->st_rdev),
- .ust_rminor = MINOR(src->st_rdev),
});
}
return(err);
}
-int os_lstat_file(const char *file_name, struct uml_stat *ubuf)
-{
- struct stat64 sbuf;
- int err;
-
- do {
- err = lstat64(file_name, &sbuf);
- } while((err < 0) && (errno == EINTR)) ;
-
- if(err < 0)
- return(-errno);
-
- if(ubuf != NULL)
- copy_stat(ubuf, &sbuf);
- return(err);
-}
-
-int os_access(const char *file, int mode)
+int os_access(const char* file, int mode)
{
int amode, err;
- amode=(mode& OS_ACC_R_OK ? R_OK : 0) | (mode& OS_ACC_W_OK ? W_OK : 0) |
- (mode& OS_ACC_X_OK ? X_OK : 0) | (mode& OS_ACC_F_OK ? F_OK : 0) ;
+ amode=(mode&OS_ACC_R_OK ? R_OK : 0) | (mode&OS_ACC_W_OK ? W_OK : 0) |
+ (mode&OS_ACC_X_OK ? X_OK : 0) | (mode&OS_ACC_F_OK ? F_OK : 0) ;
err = access(file, amode);
if(err < 0)
return(0);
}
-int os_set_file_time(const char *file, unsigned long access, unsigned long mod)
-{
- struct utimbuf buf = ((struct utimbuf){ .actime = access,
- .modtime = mod });
- int err;
-
- err = utime(file, &buf);
- if(err < 0)
- return(-errno);
-
- return(0);
-}
-
-int os_set_file_perms(const char *file, int mode)
-{
- int err;
-
- err = chmod(file, mode);
- if(err < 0)
- return(-errno);
-
- return(0);
-}
-
-int os_set_file_owner(const char *file, int owner, int group)
-{
- int err;
-
- err = chown(file, owner, group);
- if(err < 0)
- return(-errno);
-
- return(0);
-}
-
void os_print_error(int error, const char* str)
{
errno = error < 0 ? -error : error;
if((fcntl(master, F_SETFL, flags | O_NONBLOCK | O_ASYNC) < 0) ||
(fcntl(master, F_SETOWN, os_getpid()) < 0)){
- printk("fcntl F_SETFL or F_SETOWN failed, errno = %d\n",
- errno);
+ printk("fcntl F_SETFL or F_SETOWN failed, errno = %d\n", errno);
return(-errno);
}
struct uml_stat buf;
int err;
- err = os_lstat_file(file, &buf);
+ err = os_stat_file(file, &buf);
if(err < 0)
return(err);
if(flags.c) f |= O_CREAT;
if(flags.t) f |= O_TRUNC;
if(flags.e) f |= O_EXCL;
- if(flags.d) f |= O_DIRECT;
fd = open64(file, f, mode);
if(fd < 0)
return(fd);
}
-void *os_open_dir(char *path, int *err_out)
-{
- void *dir;
-
- dir = opendir(path);
- *err_out = -errno;
- return(dir);
-}
-
-int os_seek_dir(void *stream, unsigned long long pos)
-{
- seekdir(stream, pos);
- return(0);
-}
-
-int os_read_dir(void *stream, unsigned long long *ino_out, char **name_out)
-{
- struct dirent *ent;
-
- errno = 0;
- ent = readdir(stream);
- if(ent == NULL){
- if(errno != 0)
- return(-errno);
- *name_out = NULL;
- return(0);
- }
-
- *ino_out = ent->d_ino;
- *name_out = ent->d_name;
- return(0);
-}
-
-int os_tell_dir(void *stream)
-{
- return(telldir(stream));
-}
-
-int os_close_dir(void *stream)
-{
- int err;
-
- err = closedir(stream);
- if(err < 0)
- return(-errno);
- return(0);
-}
-
-int os_remove_file(const char *file)
-{
- int err;
-
- err = unlink(file);
- if(err)
- return(-errno);
-
- return(0);
-}
-
-int os_move_file(const char *from, const char *to)
-{
- int err;
-
- err = rename(from, to);
- if(err)
- return(-errno);
-
- return(0);
-}
-
-int os_truncate_fd(int fd, unsigned long long len)
-{
- int err;
-
- err = ftruncate(fd, len);
- if(err)
- return(-errno);
- return(0);
-}
-
-int os_truncate_file(const char *file, unsigned long long len)
-{
- int err;
-
- err = truncate(file, len);
- if(err)
- return(-errno);
- return(0);
-}
-
int os_connect_socket(char *name)
{
struct sockaddr_un sock;
__u64 actual;
actual = lseek64(fd, offset, SEEK_SET);
- if(actual != offset)
- return(-errno);
+ if(actual != offset) return(-errno);
return(0);
}
return(0);
}
-int os_fd_size(int fd, long long *size_out)
-{
- struct stat buf;
- int err;
-
- err = fstat(fd, &buf);
- if(err)
- return(-errno);
-
- *size_out = buf.st_size;
- return(0);
-}
-
int os_file_modtime(char *file, unsigned long *modtime)
{
struct uml_stat buf;
return(0);
}
-int os_clear_fd_async(int fd)
-{
- int flags = fcntl(fd, F_GETFL);
-
- flags &= ~(O_ASYNC | O_NONBLOCK);
- if(fcntl(fd, F_SETFL, flags) < 0)
- return(-errno);
- return(0);
-}
-
int os_set_fd_block(int fd, int blocking)
{
int flags;
return(sock);
}
-int os_make_symlink(const char *to, const char *from)
-{
- int err;
-
- err = symlink(to, from);
- if(err)
- return(-errno);
-
- return(0);
-}
-
-int os_read_symlink(const char *file, char *buf, int size)
-{
- int err;
-
- err = readlink(file, buf, size);
- if(err < 0)
- return(-errno);
-
- return(err);
-}
-
-int os_link_file(const char *to, const char *from)
-{
- int err;
-
- err = link(to, from);
- if(err)
- return(-errno);
-
- return(0);
-}
-
-int os_make_dir(const char *dir, int mode)
-{
- int err;
-
- err = mkdir(dir, mode);
- if(err)
- return(-errno);
-
- return(0);
-}
-
-int os_make_dev(const char *name, int mode, int major, int minor)
-{
- int err;
-
- err = mknod(name, mode, MKDEV(major, minor));
- if(err)
- return(-errno);
-
- return(0);
-}
-
-int os_remove_dir(const char *dir)
-{
- int err;
-
- err = rmdir(dir);
- if(err)
- return(-errno);
-
- return(0);
-}
-
void os_flush_stdout(void)
{
fflush(stdout);
return(err);
}
-int os_stat_filesystem(char *path, long *bsize_out, long long *blocks_out,
- long long *bfree_out, long long *bavail_out,
- long long *files_out, long long *ffree_out,
- void *fsid_out, int fsid_size, long *namelen_out,
- long *spare_out)
-{
- struct statfs64 buf;
- int err;
-
- err = statfs64(path, &buf);
- if(err < 0)
- return(-errno);
-
- *bsize_out = buf.f_bsize;
- *blocks_out = buf.f_blocks;
- *bfree_out = buf.f_bfree;
- *bavail_out = buf.f_bavail;
- *files_out = buf.f_files;
- *ffree_out = buf.f_ffree;
- memcpy(fsid_out, &buf.f_fsid,
- sizeof(buf.f_fsid) > fsid_size ? fsid_size :
- sizeof(buf.f_fsid));
- *namelen_out = buf.f_namelen;
- spare_out[0] = buf.f_spare[0];
- spare_out[1] = buf.f_spare[1];
- spare_out[2] = buf.f_spare[2];
- spare_out[3] = buf.f_spare[3];
- spare_out[4] = buf.f_spare[4];
- spare_out[5] = buf.f_spare[5];
- return(0);
-}
-
/*
* Overrides for Emacs so that we follow Linus's tabbing style.
* Emacs will notice this stuff at the end of the file and automatically
#include <sys/wait.h>
#include "os.h"
#include "user.h"
-#include "user_util.h"
#define ARBITRARY_ADDR -1
#define FAILURE_PID -1
-#define STAT_PATH_LEN sizeof("/proc/#######/stat\0")
-#define COMM_SCANF "%*[^)])"
-
unsigned long os_process_pc(int pid)
{
- char proc_stat[STAT_PATH_LEN], buf[256];
+ char proc_stat[sizeof("/proc/#####/stat\0")], buf[256];
unsigned long pc;
int fd, err;
}
os_close_file(fd);
pc = ARBITRARY_ADDR;
- if(sscanf(buf, "%*d " COMM_SCANF " %*c %*d %*d %*d %*d %*d %*d %*d "
+ if(sscanf(buf, "%*d %*s %*c %*d %*d %*d %*d %*d %*d %*d %*d "
"%*d %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d "
- "%*d %*d %*d %*d %*d %lu", &pc) != 1){
+ "%*d %*d %*d %*d %ld", &pc) != 1){
printk("os_process_pc - couldn't find pc in '%s'\n", buf);
}
return(pc);
int os_process_parent(int pid)
{
- char stat[STAT_PATH_LEN];
+ char stat[sizeof("/proc/nnnnn/stat\0")];
char data[256];
int parent, n, fd;
}
parent = FAILURE_PID;
- n = sscanf(data, "%*d " COMM_SCANF " %*c %d", &parent);
+ /* XXX This will break if there is a space in the command */
+ n = sscanf(data, "%*d %*s %*c %d", &parent);
if(n != 1)
printk("Failed to scan '%s'\n", data);
{
kill(pid, SIGKILL);
if(reap_child)
- CATCH_EINTR(waitpid(pid, NULL, 0));
+ waitpid(pid, NULL, 0);
}
void os_usr1_process(int pid)
{
- kill(pid, SIGUSR1);
+ syscall(__NR_tkill, pid, SIGUSR1);
+ /* kill(pid, SIGUSR1); */
}
int os_getpid(void)
-obj-y = bitops.o bugs.o checksum.o fault.o ksyms.o ldt.o ptrace.o \
- ptrace_user.o semaphore.o sigcontext.o syscalls.o sysrq.o
+obj-y = bugs.o checksum.o fault.o ksyms.o ldt.o ptrace.o ptrace_user.o \
+ semaphore.o sigcontext.o syscalls.o sysrq.o time.o
obj-$(CONFIG_HIGHMEM) += highmem.o
obj-$(CONFIG_MODULES) += module.o
USER_OBJS := bugs.o ptrace_user.o sigcontext.o fault.o
USER_OBJS := $(foreach file,$(USER_OBJS),$(obj)/$(file))
-SYMLINKS = bitops.c semaphore.c highmem.c module.c
+SYMLINKS = semaphore.c highmem.c module.c
SYMLINKS := $(foreach f,$(SYMLINKS),$(src)/$f)
clean-files := $(SYMLINKS)
-bitops.c-dir = lib
semaphore.c-dir = kernel
highmem.c-dir = mm
module.c-dir = kernel
+++ /dev/null
-#include <linux/bitops.h>
-#include <linux/module.h>
-
-/**
- * find_next_bit - find the first set bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The maximum size to search
- */
-int find_next_bit(const unsigned long *addr, int size, int offset)
-{
- const unsigned long *p = addr + (offset >> 5);
- int set = 0, bit = offset & 31, res;
-
- if (bit) {
- /*
- * Look for nonzero in the first 32 bits:
- */
- __asm__("bsfl %1,%0\n\t"
- "jne 1f\n\t"
- "movl $32, %0\n"
- "1:"
- : "=r" (set)
- : "r" (*p >> bit));
- if (set < (32 - bit))
- return set + offset;
- set = 32 - bit;
- p++;
- }
- /*
- * No set bit yet, search remaining full words for a bit
- */
- res = find_first_bit (p, size - 32 * (p - addr));
- return (offset + set + res);
-}
-EXPORT_SYMBOL(find_next_bit);
-
-/**
- * find_next_zero_bit - find the first zero bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The maximum size to search
- */
-int find_next_zero_bit(const unsigned long *addr, int size, int offset)
-{
- unsigned long * p = ((unsigned long *) addr) + (offset >> 5);
- int set = 0, bit = offset & 31, res;
-
- if (bit) {
- /*
- * Look for zero in the first 32 bits.
- */
- __asm__("bsfl %1,%0\n\t"
- "jne 1f\n\t"
- "movl $32, %0\n"
- "1:"
- : "=r" (set)
- : "r" (~(*p >> bit)));
- if (set < (32 - bit))
- return set + offset;
- set = 32 - bit;
- p++;
- }
- /*
- * No zero yet, search remaining full bytes for a zero
- */
- res = find_first_zero_bit (p, size - 32 * (p - (unsigned long *) addr));
- return (offset + set + res);
-}
-EXPORT_SYMBOL(find_next_zero_bit);
int arch_handle_signal(int sig, union uml_pt_regs *regs)
{
- unsigned char tmp[2];
+ unsigned long ip;
/* This is testing for a cmov (0x0f 0x4x) instruction causing a
* SIGILL in init.
*/
if((sig != SIGILL) || (TASK_PID(get_current()) != 1)) return(0);
- if (copy_from_user_proc(tmp, (void *) UPT_IP(regs), 2))
- panic("SIGILL in init, could not read instructions!\n");
- if((tmp[0] != 0x0f) || ((tmp[1] & 0xf0) != 0x40))
+ ip = UPT_IP(regs);
+ if((*((char *) ip) != 0x0f) || ((*((char *) (ip + 1)) & 0xf0) != 0x40))
return(0);
if(host_has_cmov == 0)
#ifdef CONFIG_MODE_TT
extern int modify_ldt(int func, void *ptr, unsigned long bytecount);
-/* XXX this needs copy_to_user and copy_from_user */
-
int sys_modify_ldt_tt(int func, void *ptr, unsigned long bytecount)
{
if(verify_area(VERIFY_READ, ptr, bytecount)) return(-EFAULT);
if(ptrace(PTRACE_POKEUSER, pid, &dummy->u_debugreg[i],
regs[i]) < 0)
printk("write_debugregs - ptrace failed on "
- "register %d, value = 0x%x, errno = %d\n", i,
- regs[i], errno);
+ "register %d, errno = %d\n", errno);
}
}
+++ /dev/null
-/*
- * i386 semaphore implementation.
- *
- * (C) Copyright 1999 Linus Torvalds
- *
- * Portions Copyright 1999 Red Hat, Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- * rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@redhat.com>
- */
-#include <linux/config.h>
-#include <linux/sched.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <asm/semaphore.h>
-
-/*
- * Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to acquire the semaphore, while the "sleeping"
- * variable is a count of such acquires.
- *
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation.
- *
- * "sleeping" and the contention routine ordering is protected
- * by the spinlock in the semaphore's waitqueue head.
- *
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
- */
-
-/*
- * Logic:
- * - only on a boundary condition do we need to care. When we go
- * from a negative count to a non-negative, we wake people up.
- * - when we go from a non-negative count to a negative do we
- * (a) synchronize with the "sleeper" count and (b) make sure
- * that we're on the wakeup list before we synchronize so that
- * we cannot lose wakeup events.
- */
-
-asmlinkage void __up(struct semaphore *sem)
-{
- wake_up(&sem->wait);
-}
-
-asmlinkage void __sched __down(struct semaphore * sem)
-{
- struct task_struct *tsk = current;
- DECLARE_WAITQUEUE(wait, tsk);
- unsigned long flags;
-
- tsk->state = TASK_UNINTERRUPTIBLE;
- spin_lock_irqsave(&sem->wait.lock, flags);
- add_wait_queue_exclusive_locked(&sem->wait, &wait);
-
- sem->sleepers++;
- for (;;) {
- int sleepers = sem->sleepers;
-
- /*
- * Add "everybody else" into it. They aren't
- * playing, because we own the spinlock in
- * the wait_queue_head.
- */
- if (!atomic_add_negative(sleepers - 1, &sem->count)) {
- sem->sleepers = 0;
- break;
- }
- sem->sleepers = 1; /* us - see -1 above */
- spin_unlock_irqrestore(&sem->wait.lock, flags);
-
- schedule();
-
- spin_lock_irqsave(&sem->wait.lock, flags);
- tsk->state = TASK_UNINTERRUPTIBLE;
- }
- remove_wait_queue_locked(&sem->wait, &wait);
- wake_up_locked(&sem->wait);
- spin_unlock_irqrestore(&sem->wait.lock, flags);
- tsk->state = TASK_RUNNING;
-}
-
-asmlinkage int __sched __down_interruptible(struct semaphore * sem)
-{
- int retval = 0;
- struct task_struct *tsk = current;
- DECLARE_WAITQUEUE(wait, tsk);
- unsigned long flags;
-
- tsk->state = TASK_INTERRUPTIBLE;
- spin_lock_irqsave(&sem->wait.lock, flags);
- add_wait_queue_exclusive_locked(&sem->wait, &wait);
-
- sem->sleepers++;
- for (;;) {
- int sleepers = sem->sleepers;
-
- /*
- * With signals pending, this turns into
- * the trylock failure case - we won't be
- * sleeping, and we* can't get the lock as
- * it has contention. Just correct the count
- * and exit.
- */
- if (signal_pending(current)) {
- retval = -EINTR;
- sem->sleepers = 0;
- atomic_add(sleepers, &sem->count);
- break;
- }
-
- /*
- * Add "everybody else" into it. They aren't
- * playing, because we own the spinlock in
- * wait_queue_head. The "-1" is because we're
- * still hoping to get the semaphore.
- */
- if (!atomic_add_negative(sleepers - 1, &sem->count)) {
- sem->sleepers = 0;
- break;
- }
- sem->sleepers = 1; /* us - see -1 above */
- spin_unlock_irqrestore(&sem->wait.lock, flags);
-
- schedule();
-
- spin_lock_irqsave(&sem->wait.lock, flags);
- tsk->state = TASK_INTERRUPTIBLE;
- }
- remove_wait_queue_locked(&sem->wait, &wait);
- wake_up_locked(&sem->wait);
- spin_unlock_irqrestore(&sem->wait.lock, flags);
-
- tsk->state = TASK_RUNNING;
- return retval;
-}
-
-/*
- * Trylock failed - make sure we correct for
- * having decremented the count.
- *
- * We could have done the trylock with a
- * single "cmpxchg" without failure cases,
- * but then it wouldn't work on a 386.
- */
-asmlinkage int __down_trylock(struct semaphore * sem)
-{
- int sleepers;
- unsigned long flags;
-
- spin_lock_irqsave(&sem->wait.lock, flags);
- sleepers = sem->sleepers + 1;
- sem->sleepers = 0;
-
- /*
- * Add "everybody else" and us into it. They aren't
- * playing, because we own the spinlock in the
- * wait_queue_head.
- */
- if (!atomic_add_negative(sleepers, &sem->count)) {
- wake_up_locked(&sem->wait);
- }
-
- spin_unlock_irqrestore(&sem->wait.lock, flags);
- return 1;
-}
-
-
-/*
- * The semaphore operations have a special calling sequence that
- * allow us to do a simpler in-line version of them. These routines
- * need to convert that sequence back into the C sequence when
- * there is contention on the semaphore.
- *
- * %ecx contains the semaphore pointer on entry. Save the C-clobbered
- * registers (%eax, %edx and %ecx) except %eax when used as a return
- * value..
- */
-asm(
-".section .sched.text\n"
-".align 4\n"
-".globl __down_failed\n"
-"__down_failed:\n\t"
-#if defined(CONFIG_FRAME_POINTER)
- "pushl %ebp\n\t"
- "movl %esp,%ebp\n\t"
-#endif
- "pushl %eax\n\t"
- "pushl %edx\n\t"
- "pushl %ecx\n\t"
- "call __down\n\t"
- "popl %ecx\n\t"
- "popl %edx\n\t"
- "popl %eax\n\t"
-#if defined(CONFIG_FRAME_POINTER)
- "movl %ebp,%esp\n\t"
- "popl %ebp\n\t"
-#endif
- "ret"
-);
-
-asm(
-".section .sched.text\n"
-".align 4\n"
-".globl __down_failed_interruptible\n"
-"__down_failed_interruptible:\n\t"
-#if defined(CONFIG_FRAME_POINTER)
- "pushl %ebp\n\t"
- "movl %esp,%ebp\n\t"
-#endif
- "pushl %edx\n\t"
- "pushl %ecx\n\t"
- "call __down_interruptible\n\t"
- "popl %ecx\n\t"
- "popl %edx\n\t"
-#if defined(CONFIG_FRAME_POINTER)
- "movl %ebp,%esp\n\t"
- "popl %ebp\n\t"
-#endif
- "ret"
-);
-
-asm(
-".section .sched.text\n"
-".align 4\n"
-".globl __down_failed_trylock\n"
-"__down_failed_trylock:\n\t"
-#if defined(CONFIG_FRAME_POINTER)
- "pushl %ebp\n\t"
- "movl %esp,%ebp\n\t"
-#endif
- "pushl %edx\n\t"
- "pushl %ecx\n\t"
- "call __down_trylock\n\t"
- "popl %ecx\n\t"
- "popl %edx\n\t"
-#if defined(CONFIG_FRAME_POINTER)
- "movl %ebp,%esp\n\t"
- "popl %ebp\n\t"
-#endif
- "ret"
-);
-
-asm(
-".section .sched.text\n"
-".align 4\n"
-".globl __up_wakeup\n"
-"__up_wakeup:\n\t"
- "pushl %eax\n\t"
- "pushl %edx\n\t"
- "pushl %ecx\n\t"
- "call __up\n\t"
- "popl %ecx\n\t"
- "popl %edx\n\t"
- "popl %eax\n\t"
- "ret"
-);
-
-/*
- * rw spinlock fallbacks
- */
-#if defined(CONFIG_SMP)
-asm(
-".section .sched.text\n"
-".align 4\n"
-".globl __write_lock_failed\n"
-"__write_lock_failed:\n\t"
- LOCK "addl $" RW_LOCK_BIAS_STR ",(%eax)\n"
-"1: rep; nop\n\t"
- "cmpl $" RW_LOCK_BIAS_STR ",(%eax)\n\t"
- "jne 1b\n\t"
- LOCK "subl $" RW_LOCK_BIAS_STR ",(%eax)\n\t"
- "jnz __write_lock_failed\n\t"
- "ret"
-);
-
-asm(
-".section .sched.text\n"
-".align 4\n"
-".globl __read_lock_failed\n"
-"__read_lock_failed:\n\t"
- LOCK "incl (%eax)\n"
-"1: rep; nop\n\t"
- "cmpl $1,(%eax)\n\t"
- "js 1b\n\t"
- LOCK "decl (%eax)\n\t"
- "js __read_lock_failed\n\t"
- "ret"
-);
-#endif
-#include <stdlib.h>
-#include <sys/time.h>
+/*
+ * sys-i386/time.c
+ * Created 25.9.2002 Sapan Bhatia
+ *
+ */
-unsigned long long os_usecs(void)
+unsigned long long time_stamp(void)
{
- struct timeval tv;
+ unsigned long low, high;
- gettimeofday(&tv, NULL);
- return((unsigned long long) tv.tv_sec * 1000000 + tv.tv_usec);
+ asm("rdtsc" : "=a" (low), "=d" (high));
+ return((((unsigned long long) high) << 32) + low);
}
/*
HOSTCFLAGS_mk_task_kern.o := $(CFLAGS) $(CPPFLAGS)
HOSTCFLAGS_mk_constants_kern.o := $(CFLAGS) $(CPPFLAGS)
-
-clean:
- $(RM) -f $(host-progs)
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
#include <linux/binfmts.h>
#include <linux/personality.h>
#include <linux/init.h>
-#include <linux/vs_memory.h>
#include <asm/system.h>
#include <asm/uaccess.h>
.quad sys_tgkill /* 270 */
.quad compat_sys_utimes
.quad sys32_fadvise64_64
- .quad sys_vserver
+ .quad quiet_ni_syscall /* sys_vserver */
.quad sys_mbind
.quad compat_get_mempolicy /* 275 */
.quad sys_set_mempolicy
#include <linux/ptrace.h>
#include <linux/highuid.h>
#include <linux/vmalloc.h>
-#include <linux/vs_cvirt.h>
#include <asm/mman.h>
#include <asm/types.h>
#include <asm/uaccess.h>
oldvalp = (void *) A(a32.oldval);
newvalp = (void *) A(a32.newval);
- if ((oldvalp && get_user(oldlen, (int __user *)compat_ptr(a32.oldlenp)))
+ if ((oldvalp && get_user(oldlen, (int *) A(a32.oldlenp)))
|| !access_ok(VERIFY_WRITE, namep, 0)
|| !access_ok(VERIFY_WRITE, oldvalp, 0)
|| !access_ok(VERIFY_WRITE, newvalp, 0))
unlock_kernel();
set_fs(old_fs);
- if (oldvalp && put_user (oldlen, (int __user *)compat_ptr(a32.oldlenp)))
+ if (oldvalp && put_user (oldlen, (int *) A(a32.oldlenp)))
return -EFAULT;
return ret;
long
sys32_timer_create(u32 clock, struct sigevent32 __user *se32, timer_t __user *timer_id)
{
- struct sigevent __user *p = NULL;
+ struct sigevent se;
+ mm_segment_t oldfs;
+ long err;
+
if (se32) {
- struct sigevent se;
- p = compat_alloc_user_space(sizeof(struct sigevent));
memset(&se, 0, sizeof(struct sigevent));
if (get_user(se.sigev_value.sival_int, &se32->sigev_value) ||
__get_user(se.sigev_signo, &se32->sigev_signo) ||
__get_user(se.sigev_notify, &se32->sigev_notify) ||
__copy_from_user(&se._sigev_un._pad, &se32->payload,
- sizeof(se32->payload)) ||
- copy_to_user(p, &se, sizeof(se)))
+ sizeof(se32->payload)))
return -EFAULT;
}
- return sys_timer_create(clock, p, timer_id);
+ if (!access_ok(VERIFY_WRITE,timer_id,sizeof(timer_t)))
+ return -EFAULT;
+
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ err = sys_timer_create(clock, se32 ? &se : NULL, timer_id);
+ set_fs(oldfs);
+
+ return err;
}
long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high,
BUILD_16_IRQS(0x8) BUILD_16_IRQS(0x9) BUILD_16_IRQS(0xa) BUILD_16_IRQS(0xb)
BUILD_16_IRQS(0xc) BUILD_16_IRQS(0xd)
-#ifdef CONFIG_PCI_MSI
+#ifdef CONFIG_PCI_USE_VECTOR
BUILD_14_IRQS(0xe)
#endif
IRQLIST_16(0x8), IRQLIST_16(0x9), IRQLIST_16(0xa), IRQLIST_16(0xb),
IRQLIST_16(0xc), IRQLIST_16(0xd)
-#ifdef CONFIG_PCI_MSI
+#ifdef CONFIG_PCI_USE_VECTOR
, IRQLIST_14(0xe)
#endif
} irq_2_pin[PIN_MAP_SIZE];
int vector_irq[NR_VECTORS] = { [0 ... NR_VECTORS - 1] = -1};
-#ifdef CONFIG_PCI_MSI
+#ifdef CONFIG_PCI_USE_VECTOR
#define vector_to_irq(vector) \
(platform_legacy_irq(vector) ? vector : vector_irq[vector])
#else
/* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */
u8 irq_vector[NR_IRQ_VECTORS] = { FIRST_DEVICE_VECTOR , 0 };
-#ifdef CONFIG_PCI_MSI
+#ifdef CONFIG_PCI_USE_VECTOR
int assign_irq_vector(int irq)
#else
int __init assign_irq_vector(int irq)
spin_unlock_irqrestore(&ioapic_lock, flags);
}
-#ifdef CONFIG_PCI_MSI
+#ifdef CONFIG_PCI_USE_VECTOR
static unsigned int startup_edge_ioapic_vector(unsigned int vector)
{
int irq = vector_to_irq(vector);
return;
}
printk(" failed :(.\n");
- panic("IO-APIC + timer doesn't work! Try using the 'noapic' kernel parameter\n");
+ panic("IO-APIC + timer doesn't work! pester mingo@redhat.com");
}
/*
int apic_version [MAX_APICS];
unsigned char mp_bus_id_to_type [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
-cpumask_t pci_bus_to_cpumask [256] = { [0 ... 255] = CPU_MASK_ALL };
+cpumask_t mp_bus_to_cpumask [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = CPU_MASK_ALL };
int mp_current_pci_id = 0;
/* I/O APIC entries */
static spinlock_t iommu_bitmap_lock = SPIN_LOCK_UNLOCKED;
static unsigned long *iommu_gart_bitmap; /* guarded by iommu_bitmap_lock */
-static u32 gart_unmapped_entry;
-
#define GPTE_VALID 1
#define GPTE_COHERENT 2
#define GPTE_ENCODE(x) \
static void flush_gart(struct pci_dev *dev)
{
unsigned long flags;
+ int bus = dev ? dev->bus->number : -1;
+ cpumask_t bus_cpumask = pcibus_to_cpumask(bus);
int flushed = 0;
int i;
u32 w;
if (!northbridges[i])
continue;
+ if (bus >= 0 && !(cpu_isset(i, bus_cpumask)))
+ continue;
pci_write_config_dword(northbridges[i], 0x9c,
northbridge_flush_word[i] | 1);
/* Make sure the hardware actually executed the flush. */
flushed++;
}
if (!flushed)
- printk("nothing to flush?\n");
+ printk("nothing to flush? %d\n", bus);
need_flush = 0;
}
spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
unsigned long pages = 0;
int need = 0, nextneed;
-#ifdef CONFIG_SWIOTLB
- if (swiotlb)
- return swiotlb_map_sg(&dev->dev,sg,nents,dir);
-#endif
-
BUG_ON(dir == PCI_DMA_NONE);
if (nents == 0)
return 0;
iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
npages = to_pages(dma_addr, size);
for (i = 0; i < npages; i++) {
- iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
+ iommu_gatt_base[iommu_page + i] = 0;
CLEAR_LEAK(iommu_page + i);
}
free_iommu(iommu_page, npages);
unsigned long aper_size;
unsigned long iommu_start;
struct pci_dev *dev;
- unsigned long scratch;
- long i;
+
#ifndef CONFIG_AGP_AMD64
no_agp = 1;
return -1;
}
}
-
+
aper_size = info.aper_size * 1024 * 1024;
iommu_size = check_iommu_size(info.aper_base, aper_size);
iommu_pages = iommu_size >> PAGE_SHIFT;
*/
clear_kernel_mapping((unsigned long)__va(iommu_bus_base), iommu_size);
- /*
- * Try to workaround a bug (thanks to BenH)
- * Set unmapped entries to a scratch page instead of 0.
- * Any prefetches that hit unmapped entries won't get an bus abort
- * then.
- */
- scratch = get_zeroed_page(GFP_KERNEL);
- if (!scratch)
- panic("Cannot allocate iommu scratch page");
- gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
- for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
- iommu_gatt_base[i] = gart_unmapped_entry;
-
for_all_nb(dev) {
u32 flag;
int cpu = PCI_SLOT(dev->devfn) - 24;
if (me->used_math) {
fp = get_stack(ka, regs, sizeof(struct _fpstate));
- frame = (void __user *)round_down((unsigned long)fp - sizeof(struct rt_sigframe), 16) - 8;
+ frame = (void __user *)round_down((u64)fp - sizeof(struct rt_sigframe), 16) - 8;
if (!access_ok(VERIFY_WRITE, fp, sizeof(struct _fpstate))) {
goto give_sigsegv;
Dprintk("CPU has booted.\n");
} else {
boot_error = 1;
- if (*((volatile unsigned char *)phys_to_virt(SMP_TRAMPOLINE_BASE))
+ if (*((volatile unsigned char *)phys_to_virt(8192))
== 0xA5)
/* trampoline started but...? */
printk("Stuck ??\n");
clear_bit(cpu, &cpu_initialized); /* was set by cpu_init() */
cpucount--;
}
+
+ /* mark "stuck" area as not stuck */
+ *((volatile unsigned *)phys_to_virt(8192)) = 0;
}
cycles_t cacheflush_time;
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/personality.h>
-#include <linux/vs_cvirt.h>
#include <asm/uaccess.h>
#include <asm/ipc.h>
# Makefile for the linux x86_64-specific parts of the memory manager.
#
-obj-y := init.o fault.o ioremap.o extable.o pageattr.o mmap.o
+obj-y := init.o fault.o ioremap.o extable.o pageattr.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_DISCONTIGMEM) += numa.o
obj-$(CONFIG_K8_NUMA) += k8topology.o
__flush_tlb_all();
}
-static inline int page_is_ram (unsigned long pagenr)
+int page_is_ram (unsigned long pagenr)
{
int i;
return 0;
}
-/*
- * devmem_is_allowed() checks to see if /dev/mem access to a certain address is
- * valid. The argument is a physical page number.
- *
- *
- * On x86-64, access has to be given to the first megabyte of ram because that area
- * contains bios code and data regions used by X and dosemu and similar apps.
- * Access has to be given to non-kernel-ram areas as well, these contain the PCI
- * mmio resources as well as potential bios/acpi data regions.
- */
-int devmem_is_allowed(unsigned long pagenr)
-{
- if (pagenr <= 256)
- return 1;
- if (!page_is_ram(pagenr))
- return 1;
- return 0;
-}
-
-
static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
kcore_vsyscall;
+++ /dev/null
-/*
- * linux/arch/x86-64/mm/mmap.c
- *
- * flexible mmap layout support
- *
- * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- *
- * Started by Ingo Molnar <mingo@elte.hu>
- */
-
-#include <linux/personality.h>
-#include <linux/mm.h>
-
-/*
- * Top of mmap area (just below the process stack).
- *
- * Leave an at least ~128 MB hole.
- */
-#define MIN_GAP (128*1024*1024)
-#define MAX_GAP (TASK_SIZE/6*5)
-
-static inline unsigned long mmap_base(void)
-{
- unsigned long gap = current->rlim[RLIMIT_STACK].rlim_cur;
-
- if (gap < MIN_GAP)
- gap = MIN_GAP;
- else if (gap > MAX_GAP)
- gap = MAX_GAP;
-
- return TASK_SIZE - (gap & PAGE_MASK);
-}
-
-static inline int mmap_is_legacy(void)
-{
- /*
- * Force standard allocation for 64 bit programs.
- */
- if (!test_thread_flag(TIF_IA32))
- return 1;
-
- if (current->personality & ADDR_COMPAT_LAYOUT)
- return 1;
-
- if (current->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY)
- return 1;
-
- return sysctl_legacy_va_layout;
-}
-
-/*
- * This function, called very early during the creation of a new
- * process VM image, sets up which VM layout function to use:
- */
-void arch_pick_mmap_layout(struct mm_struct *mm)
-{
- /*
- * Fall back to the standard layout if the personality
- * bit is set, or if the expected stack growth is unlimited:
- */
- if (mmap_is_legacy()) {
- mm->mmap_base = TASK_UNMAPPED_BASE;
- mm->get_unmapped_area = arch_get_unmapped_area;
- mm->unmap_area = arch_unmap_area;
- } else {
- mm->mmap_base = mmap_base();
- mm->get_unmapped_area = arch_get_unmapped_area_topdown;
- mm->get_unmapped_exec_area = arch_get_unmapped_exec_area;
- mm->unmap_area = arch_unmap_area_topdown;
- }
-}
--- /dev/null
+#
+# Automatically generated make config: don't edit
+#
+CONFIG_X86=y
+CONFIG_MMU=y
+CONFIG_UID16=y
+CONFIG_GENERIC_ISA_DMA=y
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_CLEAN_COMPILE=y
+CONFIG_STANDALONE=y
+CONFIG_BROKEN_ON_SMP=y
+
+#
+# General setup
+#
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_BSD_PROCESS_ACCT=y
+
+#
+# Class Based Kernel Resource Management
+#
+CONFIG_CKRM=y
+CONFIG_RCFS_FS=y
+CONFIG_CKRM_TYPE_TASKCLASS=y
+CONFIG_CKRM_RES_NUMTASKS=y
+CONFIG_CKRM_TYPE_SOCKETCLASS=y
+CONFIG_CKRM_RES_LISTENAQ=m
+CONFIG_SYSCTL=y
+CONFIG_AUDIT=y
+CONFIG_AUDITSYSCALL=y
+CONFIG_LOG_BUF_SHIFT=17
+CONFIG_HOTPLUG=y
+# CONFIG_IKCONFIG is not set
+# CONFIG_EMBEDDED is not set
+CONFIG_DELAY_ACCT=y
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+CONFIG_OBSOLETE_MODPARM=y
+# CONFIG_MODVERSIONS is not set
+CONFIG_KMOD=y
+
+#
+# Processor type and features
+#
+CONFIG_X86_PC=y
+# CONFIG_X86_ELAN is not set
+# CONFIG_X86_VOYAGER is not set
+# CONFIG_X86_NUMAQ is not set
+# CONFIG_X86_SUMMIT is not set
+# CONFIG_X86_BIGSMP is not set
+# CONFIG_X86_VISWS is not set
+# CONFIG_X86_GENERICARCH is not set
+# CONFIG_X86_ES7000 is not set
+# CONFIG_M386 is not set
+# CONFIG_M486 is not set
+# CONFIG_M586 is not set
+# CONFIG_M586TSC is not set
+# CONFIG_M586MMX is not set
+CONFIG_M686=y
+# CONFIG_MPENTIUMII is not set
+# CONFIG_MPENTIUMIII is not set
+# CONFIG_MPENTIUMM is not set
+# CONFIG_MPENTIUM4 is not set
+# CONFIG_MK6 is not set
+# CONFIG_MK7 is not set
+# CONFIG_MK8 is not set
+# CONFIG_MCRUSOE is not set
+# CONFIG_MWINCHIPC6 is not set
+# CONFIG_MWINCHIP2 is not set
+# CONFIG_MWINCHIP3D is not set
+# CONFIG_MCYRIXIII is not set
+# CONFIG_MVIAC3_2 is not set
+CONFIG_X86_GENERIC=y
+CONFIG_X86_CMPXCHG=y
+CONFIG_X86_XADD=y
+CONFIG_X86_L1_CACHE_SHIFT=7
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_X86_PPRO_FENCE=y
+CONFIG_X86_WP_WORKS_OK=y
+CONFIG_X86_INVLPG=y
+CONFIG_X86_BSWAP=y
+CONFIG_X86_POPAD_OK=y
+CONFIG_X86_GOOD_APIC=y
+CONFIG_X86_INTEL_USERCOPY=y
+CONFIG_X86_USE_PPRO_CHECKSUM=y
+CONFIG_X86_4G=y
+CONFIG_X86_SWITCH_PAGETABLES=y
+CONFIG_X86_4G_VM_LAYOUT=y
+CONFIG_X86_UACCESS_INDIRECT=y
+CONFIG_X86_HIGH_ENTRY=y
+CONFIG_HPET_TIMER=y
+CONFIG_HPET_EMULATE_RTC=y
+# CONFIG_SMP is not set
+# CONFIG_PREEMPT is not set
+# CONFIG_X86_UP_APIC is not set
+CONFIG_X86_TSC=y
+CONFIG_X86_MCE=y
+# CONFIG_X86_MCE_NONFATAL is not set
+CONFIG_TOSHIBA=m
+CONFIG_I8K=m
+CONFIG_MICROCODE=m
+CONFIG_X86_MSR=m
+CONFIG_X86_CPUID=m
+
+#
+# Firmware Drivers
+#
+CONFIG_EDD=m
+# CONFIG_NOHIGHMEM is not set
+CONFIG_HIGHMEM4G=y
+# CONFIG_HIGHMEM64G is not set
+CONFIG_HIGHMEM=y
+CONFIG_HIGHPTE=y
+# CONFIG_MATH_EMULATION is not set
+CONFIG_MTRR=y
+# CONFIG_EFI is not set
+CONFIG_REGPARM=y
+
+#
+# Power management options (ACPI, APM)
+#
+CONFIG_PM=y
+# CONFIG_SOFTWARE_SUSPEND is not set
+# CONFIG_PM_DISK is not set
+
+#
+# ACPI (Advanced Configuration and Power Interface) Support
+#
+CONFIG_ACPI=y
+CONFIG_ACPI_BOOT=y
+CONFIG_ACPI_INTERPRETER=y
+CONFIG_ACPI_SLEEP=y
+CONFIG_ACPI_SLEEP_PROC_FS=y
+CONFIG_ACPI_AC=m
+CONFIG_ACPI_BATTERY=m
+CONFIG_ACPI_BUTTON=m
+CONFIG_ACPI_FAN=y
+CONFIG_ACPI_PROCESSOR=y
+CONFIG_ACPI_THERMAL=y
+CONFIG_ACPI_ASUS=m
+CONFIG_ACPI_TOSHIBA=m
+# CONFIG_ACPI_DEBUG is not set
+CONFIG_ACPI_BUS=y
+CONFIG_ACPI_EC=y
+CONFIG_ACPI_POWER=y
+CONFIG_ACPI_PCI=y
+CONFIG_ACPI_SYSTEM=y
+CONFIG_X86_PM_TIMER=y
+
+#
+# APM (Advanced Power Management) BIOS Support
+#
+CONFIG_APM=y
+# CONFIG_APM_IGNORE_USER_SUSPEND is not set
+# CONFIG_APM_DO_ENABLE is not set
+CONFIG_APM_CPU_IDLE=y
+# CONFIG_APM_DISPLAY_BLANK is not set
+CONFIG_APM_RTC_IS_GMT=y
+# CONFIG_APM_ALLOW_INTS is not set
+# CONFIG_APM_REAL_MODE_POWER_OFF is not set
+
+#
+# CPU Frequency scaling
+#
+CONFIG_CPU_FREQ=y
+# CONFIG_CPU_FREQ_PROC_INTF is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set
+CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=m
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+# CONFIG_CPU_FREQ_24_API is not set
+CONFIG_CPU_FREQ_TABLE=y
+
+#
+# CPUFreq processor drivers
+#
+CONFIG_X86_ACPI_CPUFREQ=m
+# CONFIG_X86_ACPI_CPUFREQ_PROC_INTF is not set
+CONFIG_X86_POWERNOW_K6=m
+CONFIG_X86_POWERNOW_K7=y
+# CONFIG_X86_POWERNOW_K8 is not set
+# CONFIG_X86_GX_SUSPMOD is not set
+CONFIG_X86_SPEEDSTEP_CENTRINO=y
+CONFIG_X86_SPEEDSTEP_CENTRINO_TABLE=y
+CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI=y
+CONFIG_X86_SPEEDSTEP_ICH=y
+CONFIG_X86_SPEEDSTEP_SMI=m
+CONFIG_X86_P4_CLOCKMOD=m
+CONFIG_X86_SPEEDSTEP_LIB=y
+# CONFIG_X86_SPEEDSTEP_RELAXED_CAP_CHECK is not set
+CONFIG_X86_LONGRUN=y
+CONFIG_X86_LONGHAUL=y
+
+#
+# Bus options (PCI, PCMCIA, EISA, MCA, ISA)
+#
+CONFIG_PCI=y
+# CONFIG_PCI_GOBIOS is not set
+# CONFIG_PCI_GOMMCONFIG is not set
+# CONFIG_PCI_GODIRECT is not set
+CONFIG_PCI_GOANY=y
+CONFIG_PCI_BIOS=y
+CONFIG_PCI_DIRECT=y
+CONFIG_PCI_MMCONFIG=y
+CONFIG_PCI_LEGACY_PROC=y
+# CONFIG_PCI_NAMES is not set
+CONFIG_ISA=y
+# CONFIG_EISA is not set
+# CONFIG_MCA is not set
+# CONFIG_SCx200 is not set
+
+#
+# PCMCIA/CardBus support
+#
+CONFIG_PCMCIA=m
+# CONFIG_PCMCIA_DEBUG is not set
+CONFIG_YENTA=m
+CONFIG_CARDBUS=y
+CONFIG_I82092=m
+CONFIG_I82365=m
+CONFIG_TCIC=m
+CONFIG_PCMCIA_PROBE=y
+
+#
+# PCI Hotplug Support
+#
+CONFIG_HOTPLUG_PCI=y
+# CONFIG_HOTPLUG_PCI_FAKE is not set
+CONFIG_HOTPLUG_PCI_COMPAQ=m
+# CONFIG_HOTPLUG_PCI_COMPAQ_NVRAM is not set
+# CONFIG_HOTPLUG_PCI_ACPI is not set
+# CONFIG_HOTPLUG_PCI_CPCI is not set
+CONFIG_HOTPLUG_PCI_PCIE=m
+CONFIG_HOTPLUG_PCI_PCIE_POLL_EVENT_MODE=y
+CONFIG_HOTPLUG_PCI_SHPC=m
+CONFIG_HOTPLUG_PCI_SHPC_POLL_EVENT_MODE=y
+
+#
+# Executable file formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_AOUT is not set
+CONFIG_BINFMT_MISC=m
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_FW_LOADER=y
+# CONFIG_DEBUG_DRIVER is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+CONFIG_MTD=m
+# CONFIG_MTD_DEBUG is not set
+CONFIG_MTD_PARTITIONS=m
+CONFIG_MTD_CONCAT=m
+CONFIG_MTD_REDBOOT_PARTS=m
+CONFIG_MTD_CMDLINE_PARTS=m
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=m
+CONFIG_MTD_BLOCK=m
+CONFIG_MTD_BLOCK_RO=m
+CONFIG_FTL=m
+CONFIG_NFTL=m
+CONFIG_NFTL_RW=y
+CONFIG_INFTL=m
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=m
+CONFIG_MTD_JEDECPROBE=m
+CONFIG_MTD_GEN_PROBE=m
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_CFI_INTELEXT=m
+CONFIG_MTD_CFI_AMDSTD=m
+CONFIG_MTD_CFI_STAA=m
+CONFIG_MTD_RAM=m
+CONFIG_MTD_ROM=m
+CONFIG_MTD_ABSENT=m
+# CONFIG_MTD_OBSOLETE_CHIPS is not set
+
+#
+# Mapping drivers for chip access
+#
+CONFIG_MTD_COMPLEX_MAPPINGS=y
+# CONFIG_MTD_PHYSMAP is not set
+# CONFIG_MTD_PNC2000 is not set
+CONFIG_MTD_SC520CDP=m
+CONFIG_MTD_NETSC520=m
+CONFIG_MTD_SBC_GXX=m
+CONFIG_MTD_ELAN_104NC=m
+CONFIG_MTD_SCx200_DOCFLASH=m
+CONFIG_MTD_AMD76XROM=m
+CONFIG_MTD_ICH2ROM=m
+CONFIG_MTD_SCB2_FLASH=m
+# CONFIG_MTD_NETtel is not set
+# CONFIG_MTD_DILNETPC is not set
+CONFIG_MTD_L440GX=m
+CONFIG_MTD_PCI=m
+
+#
+# Self-contained MTD device drivers
+#
+CONFIG_MTD_PMC551=m
+# CONFIG_MTD_PMC551_BUGFIX is not set
+# CONFIG_MTD_PMC551_DEBUG is not set
+# CONFIG_MTD_SLRAM is not set
+CONFIG_MTD_MTDRAM=m
+CONFIG_MTDRAM_TOTAL_SIZE=4096
+CONFIG_MTDRAM_ERASE_SIZE=128
+# CONFIG_MTD_BLKMTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+CONFIG_MTD_DOC2000=m
+# CONFIG_MTD_DOC2001 is not set
+CONFIG_MTD_DOC2001PLUS=m
+CONFIG_MTD_DOCPROBE=m
+# CONFIG_MTD_DOCPROBE_ADVANCED is not set
+CONFIG_MTD_DOCPROBE_ADDRESS=0
+
+#
+# NAND Flash Device Drivers
+#
+CONFIG_MTD_NAND=m
+# CONFIG_MTD_NAND_VERIFY_WRITE is not set
+CONFIG_MTD_NAND_IDS=m
+
+#
+# Parallel port support
+#
+CONFIG_PARPORT=m
+CONFIG_PARPORT_PC=m
+CONFIG_PARPORT_PC_CML1=m
+CONFIG_PARPORT_SERIAL=m
+# CONFIG_PARPORT_PC_FIFO is not set
+# CONFIG_PARPORT_PC_SUPERIO is not set
+CONFIG_PARPORT_PC_PCMCIA=m
+# CONFIG_PARPORT_OTHER is not set
+CONFIG_PARPORT_1284=y
+
+#
+# Plug and Play support
+#
+CONFIG_PNP=y
+# CONFIG_PNP_DEBUG is not set
+
+#
+# Protocols
+#
+CONFIG_ISAPNP=y
+# CONFIG_PNPBIOS is not set
+
+#
+# Block devices
+#
+CONFIG_BLK_DEV_FD=m
+# CONFIG_BLK_DEV_XD is not set
+# CONFIG_PARIDE is not set
+CONFIG_BLK_CPQ_DA=m
+CONFIG_BLK_CPQ_CISS_DA=m
+CONFIG_CISS_SCSI_TAPE=y
+CONFIG_BLK_DEV_DAC960=m
+CONFIG_BLK_DEV_UMEM=m
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_CARMEL=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=16384
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_LBD=y
+
+#
+# ATA/ATAPI/MFM/RLL support
+#
+CONFIG_IDE=y
+CONFIG_BLK_DEV_IDE=y
+
+#
+# Please see Documentation/ide.txt for help/info on IDE drives
+#
+# CONFIG_BLK_DEV_HD_IDE is not set
+CONFIG_BLK_DEV_IDEDISK=y
+CONFIG_IDEDISK_MULTI_MODE=y
+# CONFIG_IDEDISK_STROKE is not set
+CONFIG_BLK_DEV_IDECS=m
+CONFIG_BLK_DEV_IDECD=y
+CONFIG_BLK_DEV_IDETAPE=m
+CONFIG_BLK_DEV_IDEFLOPPY=y
+# CONFIG_BLK_DEV_IDESCSI is not set
+# CONFIG_IDE_TASK_IOCTL is not set
+# CONFIG_IDE_TASKFILE_IO is not set
+
+#
+# IDE chipset support/bugfixes
+#
+CONFIG_IDE_GENERIC=y
+# CONFIG_BLK_DEV_CMD640 is not set
+CONFIG_BLK_DEV_IDEPNP=y
+CONFIG_BLK_DEV_IDEPCI=y
+CONFIG_IDEPCI_SHARE_IRQ=y
+# CONFIG_BLK_DEV_OFFBOARD is not set
+CONFIG_BLK_DEV_GENERIC=y
+# CONFIG_BLK_DEV_OPTI621 is not set
+CONFIG_BLK_DEV_RZ1000=y
+CONFIG_BLK_DEV_IDEDMA_PCI=y
+# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
+CONFIG_IDEDMA_PCI_AUTO=y
+# CONFIG_IDEDMA_ONLYDISK is not set
+CONFIG_BLK_DEV_ADMA=y
+CONFIG_BLK_DEV_AEC62XX=y
+CONFIG_BLK_DEV_ALI15X3=y
+# CONFIG_WDC_ALI15X3 is not set
+CONFIG_BLK_DEV_AMD74XX=y
+CONFIG_BLK_DEV_ATIIXP=y
+CONFIG_BLK_DEV_CMD64X=y
+CONFIG_BLK_DEV_TRIFLEX=y
+CONFIG_BLK_DEV_CY82C693=y
+CONFIG_BLK_DEV_CS5520=y
+CONFIG_BLK_DEV_CS5530=y
+CONFIG_BLK_DEV_HPT34X=y
+# CONFIG_HPT34X_AUTODMA is not set
+CONFIG_BLK_DEV_HPT366=y
+# CONFIG_BLK_DEV_SC1200 is not set
+CONFIG_BLK_DEV_PIIX=y
+# CONFIG_BLK_DEV_NS87415 is not set
+CONFIG_BLK_DEV_PDC202XX_OLD=y
+# CONFIG_PDC202XX_BURST is not set
+CONFIG_BLK_DEV_PDC202XX_NEW=y
+CONFIG_PDC202XX_FORCE=y
+CONFIG_BLK_DEV_SVWKS=y
+CONFIG_BLK_DEV_SIIMAGE=y
+CONFIG_BLK_DEV_SIS5513=y
+CONFIG_BLK_DEV_SLC90E66=y
+# CONFIG_BLK_DEV_TRM290 is not set
+CONFIG_BLK_DEV_VIA82CXXX=y
+# CONFIG_IDE_ARM is not set
+# CONFIG_IDE_CHIPSETS is not set
+CONFIG_BLK_DEV_IDEDMA=y
+# CONFIG_IDEDMA_IVB is not set
+CONFIG_IDEDMA_AUTO=y
+# CONFIG_BLK_DEV_HD is not set
+
+#
+# SCSI device support
+#
+CONFIG_SCSI=m
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=m
+CONFIG_CHR_DEV_ST=m
+CONFIG_CHR_DEV_OSST=m
+CONFIG_BLK_DEV_SR=m
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=m
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+# CONFIG_SCSI_MULTI_LUN is not set
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+
+#
+# SCSI Transport Attributes
+#
+CONFIG_SCSI_SPI_ATTRS=m
+CONFIG_SCSI_FC_ATTRS=m
+
+#
+# SCSI low-level drivers
+#
+CONFIG_BLK_DEV_3W_XXXX_RAID=m
+# CONFIG_SCSI_7000FASST is not set
+# CONFIG_SCSI_ACARD is not set
+CONFIG_SCSI_AHA152X=m
+CONFIG_SCSI_AHA1542=m
+CONFIG_SCSI_AACRAID=m
+CONFIG_SCSI_AIC7XXX=m
+CONFIG_AIC7XXX_CMDS_PER_DEVICE=4
+CONFIG_AIC7XXX_RESET_DELAY_MS=15000
+# CONFIG_AIC7XXX_BUILD_FIRMWARE is not set
+# CONFIG_AIC7XXX_DEBUG_ENABLE is not set
+CONFIG_AIC7XXX_DEBUG_MASK=0
+# CONFIG_AIC7XXX_REG_PRETTY_PRINT is not set
+CONFIG_SCSI_AIC7XXX_OLD=m
+CONFIG_SCSI_AIC79XX=m
+CONFIG_AIC79XX_CMDS_PER_DEVICE=4
+CONFIG_AIC79XX_RESET_DELAY_MS=15000
+# CONFIG_AIC79XX_BUILD_FIRMWARE is not set
+# CONFIG_AIC79XX_ENABLE_RD_STRM is not set
+# CONFIG_AIC79XX_DEBUG_ENABLE is not set
+CONFIG_AIC79XX_DEBUG_MASK=0
+# CONFIG_AIC79XX_REG_PRETTY_PRINT is not set
+# CONFIG_SCSI_DPT_I2O is not set
+CONFIG_SCSI_ADVANSYS=m
+CONFIG_SCSI_IN2000=m
+CONFIG_SCSI_MEGARAID=m
+CONFIG_SCSI_SATA=y
+CONFIG_SCSI_SATA_SVW=m
+CONFIG_SCSI_ATA_PIIX=m
+CONFIG_SCSI_SATA_PROMISE=m
+# CONFIG_SCSI_SATA_SX4 is not set
+CONFIG_SCSI_SATA_SIL=m
+CONFIG_SCSI_SATA_SIS=m
+CONFIG_SCSI_SATA_VIA=m
+CONFIG_SCSI_SATA_VITESSE=m
+CONFIG_SCSI_BUSLOGIC=m
+# CONFIG_SCSI_OMIT_FLASHPOINT is not set
+# CONFIG_SCSI_CPQFCTS is not set
+# CONFIG_SCSI_DMX3191D is not set
+# CONFIG_SCSI_DTC3280 is not set
+# CONFIG_SCSI_EATA is not set
+# CONFIG_SCSI_EATA_PIO is not set
+CONFIG_SCSI_FUTURE_DOMAIN=m
+CONFIG_SCSI_GDTH=m
+# CONFIG_SCSI_GENERIC_NCR5380 is not set
+# CONFIG_SCSI_GENERIC_NCR5380_MMIO is not set
+CONFIG_SCSI_IPS=m
+CONFIG_SCSI_INIA100=m
+CONFIG_SCSI_PPA=m
+CONFIG_SCSI_IMM=m
+# CONFIG_SCSI_IZIP_EPP16 is not set
+# CONFIG_SCSI_IZIP_SLOW_CTR is not set
+# CONFIG_SCSI_NCR53C406A is not set
+CONFIG_SCSI_SYM53C8XX_2=m
+CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1
+CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
+CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
+# CONFIG_SCSI_SYM53C8XX_IOMAPPED is not set
+# CONFIG_SCSI_IPR is not set
+# CONFIG_SCSI_PAS16 is not set
+# CONFIG_SCSI_PSI240I is not set
+CONFIG_SCSI_QLOGIC_FAS=m
+CONFIG_SCSI_QLOGIC_ISP=m
+# CONFIG_SCSI_QLOGIC_FC is not set
+CONFIG_SCSI_QLOGIC_1280=m
+CONFIG_SCSI_QLA2XXX=m
+CONFIG_SCSI_QLA21XX=m
+CONFIG_SCSI_QLA22XX=m
+CONFIG_SCSI_QLA2300=m
+CONFIG_SCSI_QLA2322=m
+CONFIG_SCSI_QLA6312=m
+CONFIG_SCSI_QLA6322=m
+# CONFIG_SCSI_SYM53C416 is not set
+# CONFIG_SCSI_DC395x is not set
+# CONFIG_SCSI_DC390T is not set
+# CONFIG_SCSI_T128 is not set
+# CONFIG_SCSI_U14_34F is not set
+# CONFIG_SCSI_ULTRASTOR is not set
+# CONFIG_SCSI_NSP32 is not set
+# CONFIG_SCSI_DEBUG is not set
+
+#
+# PCMCIA SCSI adapter support
+#
+CONFIG_PCMCIA_AHA152X=m
+CONFIG_PCMCIA_FDOMAIN=m
+CONFIG_PCMCIA_NINJA_SCSI=m
+CONFIG_PCMCIA_QLOGIC=m
+# CONFIG_PCMCIA_SYM53C500 is not set
+
+#
+# Old CD-ROM drivers (not SCSI, not IDE)
+#
+# CONFIG_CD_NO_IDESCSI is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+CONFIG_MD_RAID5=m
+CONFIG_MD_RAID6=m
+CONFIG_MD_MULTIPATH=m
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_CRYPT=m
+
+#
+# Fusion MPT device support
+#
+CONFIG_FUSION=m
+CONFIG_FUSION_MAX_SGE=40
+# CONFIG_FUSION_ISENSE is not set
+CONFIG_FUSION_CTL=m
+CONFIG_FUSION_LAN=m
+
+#
+# IEEE 1394 (FireWire) support
+#
+# CONFIG_IEEE1394 is not set
+
+#
+# I2O device support
+#
+CONFIG_I2O=m
+CONFIG_I2O_CONFIG=m
+CONFIG_I2O_BLOCK=m
+CONFIG_I2O_SCSI=m
+CONFIG_I2O_PROC=m
+
+#
+# Networking support
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+CONFIG_PACKET_MMAP=y
+CONFIG_NETLINK_DEV=y
+CONFIG_UNIX=y
+CONFIG_NET_KEY=m
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_FWMARK=y
+CONFIG_IP_ROUTE_NAT=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_TOS=y
+CONFIG_IP_ROUTE_VERBOSE=y
+# CONFIG_IP_PNP is not set
+CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE=m
+CONFIG_NET_IPGRE_BROADCAST=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+# CONFIG_ARPD is not set
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_ACCEPT_QUEUES=y
+
+#
+# IP: Virtual Server Configuration
+#
+CONFIG_IP_VS=m
+# CONFIG_IP_VS_DEBUG is not set
+CONFIG_IP_VS_TAB_BITS=12
+
+#
+# IPVS transport protocol load balancing support
+#
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_PROTO_ESP=y
+CONFIG_IP_VS_PROTO_AH=y
+
+#
+# IPVS scheduler
+#
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_WRR=m
+CONFIG_IP_VS_LC=m
+CONFIG_IP_VS_WLC=m
+CONFIG_IP_VS_LBLC=m
+CONFIG_IP_VS_LBLCR=m
+CONFIG_IP_VS_DH=m
+CONFIG_IP_VS_SH=m
+CONFIG_IP_VS_SED=m
+CONFIG_IP_VS_NQ=m
+
+#
+# IPVS application helper
+#
+CONFIG_IP_VS_FTP=m
+CONFIG_IPV6=m
+CONFIG_IPV6_PRIVACY=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+CONFIG_BRIDGE_NETFILTER=y
+
+#
+# IP: Netfilter Configuration
+#
+CONFIG_IP_NF_CONNTRACK=m
+CONFIG_IP_NF_FTP=m
+CONFIG_IP_NF_IRC=m
+CONFIG_IP_NF_TFTP=m
+CONFIG_IP_NF_AMANDA=m
+CONFIG_IP_NF_QUEUE=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_LIMIT=m
+CONFIG_IP_NF_MATCH_IPRANGE=m
+CONFIG_IP_NF_MATCH_MAC=m
+CONFIG_IP_NF_MATCH_PKTTYPE=m
+CONFIG_IP_NF_MATCH_MARK=m
+CONFIG_IP_NF_MATCH_MULTIPORT=m
+CONFIG_IP_NF_MATCH_TOS=m
+CONFIG_IP_NF_MATCH_RECENT=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_DSCP=m
+CONFIG_IP_NF_MATCH_AH_ESP=m
+CONFIG_IP_NF_MATCH_LENGTH=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_MATCH_TCPMSS=m
+CONFIG_IP_NF_MATCH_HELPER=m
+CONFIG_IP_NF_MATCH_STATE=m
+CONFIG_IP_NF_MATCH_CONNTRACK=m
+CONFIG_IP_NF_MATCH_OWNER=m
+CONFIG_IP_NF_MATCH_PHYSDEV=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_NAT_NEEDED=y
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
+CONFIG_IP_NF_TARGET_NETMAP=m
+CONFIG_IP_NF_TARGET_SAME=m
+CONFIG_IP_NF_NAT_LOCAL=y
+CONFIG_IP_NF_NAT_SNMP_BASIC=m
+CONFIG_IP_NF_NAT_IRC=m
+CONFIG_IP_NF_NAT_FTP=m
+CONFIG_IP_NF_NAT_TFTP=m
+CONFIG_IP_NF_NAT_AMANDA=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_TOS=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_DSCP=m
+CONFIG_IP_NF_TARGET_MARK=m
+CONFIG_IP_NF_TARGET_CLASSIFY=m
+CONFIG_IP_NF_TARGET_LOG=m
+CONFIG_IP_NF_TARGET_ULOG=m
+CONFIG_IP_NF_TARGET_TCPMSS=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+# CONFIG_IP_NF_COMPAT_IPCHAINS is not set
+# CONFIG_IP_NF_COMPAT_IPFWADM is not set
+CONFIG_IP_NF_TARGET_NOTRACK=m
+CONFIG_IP_NF_RAW=m
+
+#
+# IPv6: Netfilter Configuration
+#
+# CONFIG_IP6_NF_QUEUE is not set
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_MATCH_LIMIT=m
+CONFIG_IP6_NF_MATCH_MAC=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_MULTIPORT=m
+CONFIG_IP6_NF_MATCH_OWNER=m
+CONFIG_IP6_NF_MATCH_MARK=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_AHESP=m
+CONFIG_IP6_NF_MATCH_LENGTH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_LOG=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_TARGET_MARK=m
+CONFIG_IP6_NF_RAW=m
+
+#
+# Bridge: Netfilter Configuration
+#
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_XFRM=y
+CONFIG_XFRM_USER=y
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+CONFIG_IP_SCTP=m
+# CONFIG_SCTP_DBG_MSG is not set
+# CONFIG_SCTP_DBG_OBJCNT is not set
+# CONFIG_SCTP_HMAC_NONE is not set
+# CONFIG_SCTP_HMAC_SHA1 is not set
+CONFIG_SCTP_HMAC_MD5=y
+# CONFIG_ATM is not set
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+# CONFIG_DECNET is not set
+CONFIG_LLC=m
+# CONFIG_LLC2 is not set
+CONFIG_IPX=m
+# CONFIG_IPX_INTERN is not set
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=y
+CONFIG_LTPC=m
+CONFIG_COPS=m
+CONFIG_COPS_DAYNA=y
+CONFIG_COPS_TANGENT=y
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_IPDDP_DECAP=y
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+CONFIG_NET_DIVERT=y
+# CONFIG_ECONET is not set
+CONFIG_WAN_ROUTER=m
+# CONFIG_NET_FASTROUTE is not set
+# CONFIG_NET_HW_FLOWCONTROL is not set
+
+#
+# QoS and/or fair queueing
+#
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_CSZ=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_DELAY=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_QOS=y
+CONFIG_NET_ESTIMATOR=y
+CONFIG_NET_CLS=y
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_ROUTE=y
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_POLICE=y
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+CONFIG_NETPOLL=y
+# CONFIG_NETPOLL_RX is not set
+# CONFIG_NETPOLL_TRAP is not set
+CONFIG_NET_POLL_CONTROLLER=y
+# CONFIG_HAMRADIO is not set
+CONFIG_IRDA=m
+
+#
+# IrDA protocols
+#
+CONFIG_IRLAN=m
+CONFIG_IRNET=m
+CONFIG_IRCOMM=m
+# CONFIG_IRDA_ULTRA is not set
+
+#
+# IrDA options
+#
+CONFIG_IRDA_CACHE_LAST_LSAP=y
+CONFIG_IRDA_FAST_RR=y
+# CONFIG_IRDA_DEBUG is not set
+
+#
+# Infrared-port device drivers
+#
+
+#
+# SIR device drivers
+#
+CONFIG_IRTTY_SIR=m
+
+#
+# Dongle support
+#
+CONFIG_DONGLE=y
+CONFIG_ESI_DONGLE=m
+CONFIG_ACTISYS_DONGLE=m
+CONFIG_TEKRAM_DONGLE=m
+CONFIG_LITELINK_DONGLE=m
+CONFIG_MA600_DONGLE=m
+CONFIG_GIRBIL_DONGLE=m
+CONFIG_MCP2120_DONGLE=m
+CONFIG_OLD_BELKIN_DONGLE=m
+CONFIG_ACT200L_DONGLE=m
+
+#
+# Old SIR device drivers
+#
+CONFIG_IRPORT_SIR=m
+
+#
+# Old Serial dongle support
+#
+# CONFIG_DONGLE_OLD is not set
+
+#
+# FIR device drivers
+#
+CONFIG_USB_IRDA=m
+CONFIG_SIGMATEL_FIR=m
+CONFIG_NSC_FIR=m
+# CONFIG_WINBOND_FIR is not set
+# CONFIG_TOSHIBA_FIR is not set
+# CONFIG_SMC_IRCC_FIR is not set
+# CONFIG_ALI_FIR is not set
+# CONFIG_VLSI_FIR is not set
+# CONFIG_VIA_FIR is not set
+CONFIG_BT=m
+CONFIG_BT_L2CAP=m
+CONFIG_BT_SCO=m
+CONFIG_BT_RFCOMM=m
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=m
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_CMTP=m
+
+#
+# Bluetooth device drivers
+#
+CONFIG_BT_HCIUSB=m
+CONFIG_BT_HCIUSB_SCO=y
+CONFIG_BT_HCIUART=m
+CONFIG_BT_HCIUART_H4=y
+CONFIG_BT_HCIUART_BCSP=y
+CONFIG_BT_HCIUART_BCSP_TXCRC=y
+CONFIG_BT_HCIBCM203X=m
+CONFIG_BT_HCIBFUSB=m
+CONFIG_BT_HCIDTL1=m
+CONFIG_BT_HCIBT3C=m
+CONFIG_BT_HCIBLUECARD=m
+CONFIG_BT_HCIBTUART=m
+CONFIG_BT_HCIVHCI=m
+CONFIG_TUX=m
+
+#
+# TUX options
+#
+CONFIG_TUX_EXTCGI=y
+# CONFIG_TUX_EXTENDED_LOG is not set
+# CONFIG_TUX_DEBUG is not set
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=m
+CONFIG_BONDING=m
+CONFIG_EQUALIZER=m
+CONFIG_TUN=m
+CONFIG_ETHERTAP=m
+CONFIG_NET_SB1000=m
+
+#
+# ARCnet devices
+#
+# CONFIG_ARCNET is not set
+
+#
+# Ethernet (10 or 100Mbit)
+#
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=m
+CONFIG_HAPPYMEAL=m
+CONFIG_SUNGEM=m
+CONFIG_NET_VENDOR_3COM=y
+CONFIG_EL1=m
+CONFIG_EL2=m
+CONFIG_ELPLUS=m
+CONFIG_EL16=m
+CONFIG_EL3=m
+CONFIG_3C515=m
+CONFIG_VORTEX=m
+CONFIG_TYPHOON=m
+CONFIG_LANCE=m
+CONFIG_NET_VENDOR_SMC=y
+CONFIG_WD80x3=m
+CONFIG_ULTRA=m
+CONFIG_SMC9194=m
+CONFIG_NET_VENDOR_RACAL=y
+# CONFIG_NI5010 is not set
+CONFIG_NI52=m
+CONFIG_NI65=m
+
+#
+# Tulip family network device support
+#
+CONFIG_NET_TULIP=y
+CONFIG_DE2104X=m
+CONFIG_TULIP=m
+# CONFIG_TULIP_MWI is not set
+CONFIG_TULIP_MMIO=y
+# CONFIG_TULIP_NAPI is not set
+CONFIG_DE4X5=m
+CONFIG_WINBOND_840=m
+CONFIG_DM9102=m
+CONFIG_PCMCIA_XIRCOM=m
+# CONFIG_PCMCIA_XIRTULIP is not set
+# CONFIG_AT1700 is not set
+CONFIG_DEPCA=m
+CONFIG_HP100=m
+# CONFIG_NET_ISA is not set
+# CONFIG_NE2000 is not set
+CONFIG_NET_PCI=y
+CONFIG_PCNET32=m
+CONFIG_AMD8111_ETH=m
+CONFIG_AMD8111E_NAPI=y
+CONFIG_ADAPTEC_STARFIRE=m
+CONFIG_ADAPTEC_STARFIRE_NAPI=y
+CONFIG_AC3200=m
+CONFIG_APRICOT=m
+CONFIG_B44=m
+CONFIG_FORCEDETH=m
+CONFIG_CS89x0=m
+CONFIG_DGRS=m
+CONFIG_EEPRO100=m
+# CONFIG_EEPRO100_PIO is not set
+CONFIG_E100=m
+CONFIG_E100_NAPI=y
+CONFIG_FEALNX=m
+CONFIG_NATSEMI=m
+CONFIG_NE2K_PCI=m
+CONFIG_8139CP=m
+CONFIG_8139TOO=m
+CONFIG_8139TOO_PIO=y
+# CONFIG_8139TOO_TUNE_TWISTER is not set
+CONFIG_8139TOO_8129=y
+# CONFIG_8139_OLD_RX_RESET is not set
+CONFIG_SIS900=m
+CONFIG_EPIC100=m
+CONFIG_SUNDANCE=m
+# CONFIG_SUNDANCE_MMIO is not set
+CONFIG_TLAN=m
+CONFIG_VIA_RHINE=m
+CONFIG_VIA_RHINE_MMIO=y
+CONFIG_NET_POCKET=y
+CONFIG_ATP=m
+CONFIG_DE600=m
+CONFIG_DE620=m
+
+#
+# Ethernet (1000 Mbit)
+#
+CONFIG_ACENIC=m
+# CONFIG_ACENIC_OMIT_TIGON_I is not set
+CONFIG_DL2K=m
+CONFIG_E1000=m
+CONFIG_E1000_NAPI=y
+CONFIG_NS83820=m
+CONFIG_HAMACHI=m
+CONFIG_YELLOWFIN=m
+CONFIG_R8169=m
+CONFIG_SK98LIN=m
+CONFIG_TIGON3=m
+
+#
+# Ethernet (10000 Mbit)
+#
+CONFIG_IXGB=m
+CONFIG_IXGB_NAPI=y
+CONFIG_S2IO=m
+CONFIG_S2IO_NAPI=y
+
+#
+# Token Ring devices
+#
+# CONFIG_TR is not set
+
+#
+# Wireless LAN (non-hamradio)
+#
+CONFIG_NET_RADIO=y
+
+#
+# Obsolete Wireless cards support (pre-802.11)
+#
+# CONFIG_STRIP is not set
+# CONFIG_ARLAN is not set
+CONFIG_WAVELAN=m
+CONFIG_PCMCIA_WAVELAN=m
+CONFIG_PCMCIA_NETWAVE=m
+
+#
+# Wireless 802.11 Frequency Hopping cards support
+#
+# CONFIG_PCMCIA_RAYCS is not set
+
+#
+# Wireless 802.11b ISA/PCI cards support
+#
+CONFIG_AIRO=m
+CONFIG_HERMES=m
+CONFIG_PLX_HERMES=m
+CONFIG_TMD_HERMES=m
+CONFIG_PCI_HERMES=m
+CONFIG_ATMEL=m
+CONFIG_PCI_ATMEL=m
+
+#
+# Wireless 802.11b Pcmcia/Cardbus cards support
+#
+CONFIG_PCMCIA_HERMES=m
+CONFIG_AIRO_CS=m
+CONFIG_PCMCIA_ATMEL=m
+CONFIG_PCMCIA_WL3501=m
+
+#
+# Prism GT/Duette 802.11(a/b/g) PCI/Cardbus support
+#
+CONFIG_PRISM54=m
+CONFIG_NET_WIRELESS=y
+
+#
+# PCMCIA network device support
+#
+CONFIG_NET_PCMCIA=y
+CONFIG_PCMCIA_3C589=m
+CONFIG_PCMCIA_3C574=m
+CONFIG_PCMCIA_FMVJ18X=m
+CONFIG_PCMCIA_PCNET=m
+CONFIG_PCMCIA_NMCLAN=m
+CONFIG_PCMCIA_SMC91C92=m
+CONFIG_PCMCIA_XIRC2PS=m
+CONFIG_PCMCIA_AXNET=m
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+CONFIG_FDDI=y
+# CONFIG_DEFXX is not set
+CONFIG_SKFP=m
+# CONFIG_HIPPI is not set
+CONFIG_PLIP=m
+CONFIG_PPP=m
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_PPP_DEFLATE=m
+# CONFIG_PPP_BSDCOMP is not set
+CONFIG_PPPOE=m
+# CONFIG_SLIP is not set
+CONFIG_NET_FC=y
+# CONFIG_SHAPER is not set
+CONFIG_NETCONSOLE=m
+
+#
+# ISDN subsystem
+#
+CONFIG_ISDN=m
+
+#
+# Old ISDN4Linux
+#
+CONFIG_ISDN_I4L=m
+CONFIG_ISDN_PPP=y
+CONFIG_ISDN_PPP_VJ=y
+CONFIG_ISDN_MPP=y
+CONFIG_IPPP_FILTER=y
+# CONFIG_ISDN_PPP_BSDCOMP is not set
+CONFIG_ISDN_AUDIO=y
+CONFIG_ISDN_TTY_FAX=y
+
+#
+# ISDN feature submodules
+#
+CONFIG_ISDN_DRV_LOOP=m
+
+#
+# ISDN4Linux hardware drivers
+#
+
+#
+# Passive cards
+#
+CONFIG_ISDN_DRV_HISAX=m
+
+#
+# D-channel protocol features
+#
+CONFIG_HISAX_EURO=y
+CONFIG_DE_AOC=y
+CONFIG_HISAX_NO_SENDCOMPLETE=y
+CONFIG_HISAX_NO_LLC=y
+CONFIG_HISAX_NO_KEYPAD=y
+CONFIG_HISAX_1TR6=y
+CONFIG_HISAX_NI1=y
+CONFIG_HISAX_MAX_CARDS=8
+
+#
+# HiSax supported cards
+#
+CONFIG_HISAX_16_0=y
+CONFIG_HISAX_16_3=y
+CONFIG_HISAX_TELESPCI=y
+CONFIG_HISAX_S0BOX=y
+CONFIG_HISAX_AVM_A1=y
+CONFIG_HISAX_FRITZPCI=y
+CONFIG_HISAX_AVM_A1_PCMCIA=y
+CONFIG_HISAX_ELSA=y
+CONFIG_HISAX_IX1MICROR2=y
+CONFIG_HISAX_DIEHLDIVA=y
+CONFIG_HISAX_ASUSCOM=y
+CONFIG_HISAX_TELEINT=y
+CONFIG_HISAX_HFCS=y
+CONFIG_HISAX_SEDLBAUER=y
+CONFIG_HISAX_SPORTSTER=y
+CONFIG_HISAX_MIC=y
+CONFIG_HISAX_NETJET=y
+CONFIG_HISAX_NETJET_U=y
+CONFIG_HISAX_NICCY=y
+CONFIG_HISAX_ISURF=y
+CONFIG_HISAX_HSTSAPHIR=y
+CONFIG_HISAX_BKM_A4T=y
+CONFIG_HISAX_SCT_QUADRO=y
+CONFIG_HISAX_GAZEL=y
+CONFIG_HISAX_HFC_PCI=y
+CONFIG_HISAX_W6692=y
+CONFIG_HISAX_HFC_SX=y
+CONFIG_HISAX_ENTERNOW_PCI=y
+# CONFIG_HISAX_DEBUG is not set
+
+#
+# HiSax PCMCIA card service modules
+#
+CONFIG_HISAX_SEDLBAUER_CS=m
+CONFIG_HISAX_ELSA_CS=m
+CONFIG_HISAX_AVM_A1_CS=m
+CONFIG_HISAX_TELES_CS=m
+
+#
+# HiSax sub driver modules
+#
+CONFIG_HISAX_ST5481=m
+CONFIG_HISAX_HFCUSB=m
+CONFIG_HISAX_FRITZ_PCIPNP=m
+CONFIG_HISAX_HDLC=y
+
+#
+# Active cards
+#
+CONFIG_ISDN_DRV_ICN=m
+CONFIG_ISDN_DRV_PCBIT=m
+CONFIG_ISDN_DRV_SC=m
+CONFIG_ISDN_DRV_ACT2000=m
+CONFIG_ISDN_DRV_TPAM=m
+CONFIG_HYSDN=m
+CONFIG_HYSDN_CAPI=y
+
+#
+# CAPI subsystem
+#
+CONFIG_ISDN_CAPI=m
+CONFIG_ISDN_DRV_AVMB1_VERBOSE_REASON=y
+CONFIG_ISDN_CAPI_MIDDLEWARE=y
+CONFIG_ISDN_CAPI_CAPI20=m
+CONFIG_ISDN_CAPI_CAPIFS_BOOL=y
+CONFIG_ISDN_CAPI_CAPIFS=m
+CONFIG_ISDN_CAPI_CAPIDRV=m
+
+#
+# CAPI hardware drivers
+#
+
+#
+# Active AVM cards
+#
+CONFIG_CAPI_AVM=y
+CONFIG_ISDN_DRV_AVMB1_B1ISA=m
+CONFIG_ISDN_DRV_AVMB1_B1PCI=m
+CONFIG_ISDN_DRV_AVMB1_B1PCIV4=y
+CONFIG_ISDN_DRV_AVMB1_T1ISA=m
+CONFIG_ISDN_DRV_AVMB1_B1PCMCIA=m
+CONFIG_ISDN_DRV_AVMB1_AVM_CS=m
+CONFIG_ISDN_DRV_AVMB1_T1PCI=m
+CONFIG_ISDN_DRV_AVMB1_C4=m
+
+#
+# Active Eicon DIVA Server cards
+#
+CONFIG_CAPI_EICON=y
+CONFIG_ISDN_DIVAS=m
+CONFIG_ISDN_DIVAS_BRIPCI=y
+CONFIG_ISDN_DIVAS_PRIPCI=y
+CONFIG_ISDN_DIVAS_DIVACAPI=m
+CONFIG_ISDN_DIVAS_USERIDI=m
+CONFIG_ISDN_DIVAS_MAINT=m
+
+#
+# Telephony Support
+#
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+CONFIG_INPUT_JOYDEV=m
+# CONFIG_INPUT_TSDEV is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input I/O drivers
+#
+CONFIG_GAMEPORT=m
+CONFIG_SOUND_GAMEPORT=m
+CONFIG_GAMEPORT_NS558=m
+CONFIG_GAMEPORT_L4=m
+CONFIG_GAMEPORT_EMU10K1=m
+CONFIG_GAMEPORT_VORTEX=m
+CONFIG_GAMEPORT_FM801=m
+CONFIG_GAMEPORT_CS461x=m
+CONFIG_SERIO=y
+CONFIG_SERIO_I8042=y
+CONFIG_SERIO_SERPORT=y
+# CONFIG_SERIO_CT82C710 is not set
+# CONFIG_SERIO_PARKBD is not set
+# CONFIG_SERIO_PCIPS2 is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+CONFIG_KEYBOARD_ATKBD=y
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+CONFIG_INPUT_MOUSE=y
+CONFIG_MOUSE_PS2=y
+CONFIG_MOUSE_SERIAL=m
+CONFIG_MOUSE_INPORT=m
+CONFIG_MOUSE_ATIXL=y
+CONFIG_MOUSE_LOGIBM=m
+CONFIG_MOUSE_PC110PAD=m
+CONFIG_MOUSE_VSXXXAA=m
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_JOYSTICK_ANALOG=m
+CONFIG_JOYSTICK_A3D=m
+CONFIG_JOYSTICK_ADI=m
+CONFIG_JOYSTICK_COBRA=m
+CONFIG_JOYSTICK_GF2K=m
+CONFIG_JOYSTICK_GRIP=m
+CONFIG_JOYSTICK_GRIP_MP=m
+CONFIG_JOYSTICK_GUILLEMOT=m
+CONFIG_JOYSTICK_INTERACT=m
+CONFIG_JOYSTICK_SIDEWINDER=m
+CONFIG_JOYSTICK_TMDC=m
+CONFIG_JOYSTICK_IFORCE=m
+CONFIG_JOYSTICK_IFORCE_USB=y
+CONFIG_JOYSTICK_IFORCE_232=y
+CONFIG_JOYSTICK_WARRIOR=m
+CONFIG_JOYSTICK_MAGELLAN=m
+CONFIG_JOYSTICK_SPACEORB=m
+CONFIG_JOYSTICK_SPACEBALL=m
+CONFIG_JOYSTICK_STINGER=m
+CONFIG_JOYSTICK_TWIDDLER=m
+CONFIG_JOYSTICK_DB9=m
+CONFIG_JOYSTICK_GAMECON=m
+CONFIG_JOYSTICK_TURBOGRAFX=m
+# CONFIG_INPUT_JOYDUMP is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_GUNZE=m
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_PCSPKR=m
+# CONFIG_INPUT_UINPUT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+CONFIG_SERIAL_NONSTANDARD=y
+# CONFIG_COMPUTONE is not set
+CONFIG_ROCKETPORT=m
+# CONFIG_CYCLADES is not set
+# CONFIG_DIGIEPCA is not set
+# CONFIG_DIGI is not set
+# CONFIG_ESPSERIAL is not set
+# CONFIG_MOXA_INTELLIO is not set
+# CONFIG_MOXA_SMARTIO is not set
+# CONFIG_ISI is not set
+CONFIG_SYNCLINK=m
+CONFIG_SYNCLINKMP=m
+CONFIG_N_HDLC=m
+# CONFIG_RISCOM8 is not set
+# CONFIG_SPECIALIX is not set
+# CONFIG_SX is not set
+# CONFIG_RIO is not set
+CONFIG_STALDRV=y
+# CONFIG_STALLION is not set
+# CONFIG_ISTALLION is not set
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_CS=m
+# CONFIG_SERIAL_8250_ACPI is not set
+CONFIG_SERIAL_8250_NR_UARTS=4
+CONFIG_SERIAL_8250_EXTENDED=y
+# CONFIG_SERIAL_8250_MANY_PORTS is not set
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_8250_DETECT_IRQ=y
+CONFIG_SERIAL_8250_MULTIPORT=y
+CONFIG_SERIAL_8250_RSA=y
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_CRASH=m
+CONFIG_PRINTER=m
+CONFIG_LP_CONSOLE=y
+CONFIG_PPDEV=m
+CONFIG_TIPAR=m
+# CONFIG_QIC02_TAPE is not set
+
+#
+# IPMI
+#
+CONFIG_IPMI_HANDLER=m
+# CONFIG_IPMI_PANIC_EVENT is not set
+CONFIG_IPMI_DEVICE_INTERFACE=m
+CONFIG_IPMI_SI=m
+CONFIG_IPMI_WATCHDOG=m
+
+#
+# Watchdog Cards
+#
+CONFIG_WATCHDOG=y
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+
+#
+# Watchdog Device Drivers
+#
+CONFIG_SOFT_WATCHDOG=m
+CONFIG_ACQUIRE_WDT=m
+CONFIG_ADVANTECH_WDT=m
+CONFIG_ALIM1535_WDT=m
+CONFIG_ALIM7101_WDT=m
+CONFIG_SC520_WDT=m
+CONFIG_EUROTECH_WDT=m
+CONFIG_IB700_WDT=m
+CONFIG_WAFER_WDT=m
+CONFIG_I8XX_TCO=m
+CONFIG_SC1200_WDT=m
+# CONFIG_SCx200_WDT is not set
+# CONFIG_60XX_WDT is not set
+CONFIG_CPU5_WDT=m
+CONFIG_W83627HF_WDT=m
+CONFIG_W83877F_WDT=m
+CONFIG_MACHZ_WDT=m
+
+#
+# ISA-based Watchdog Cards
+#
+CONFIG_PCWATCHDOG=m
+# CONFIG_MIXCOMWD is not set
+CONFIG_WDT=m
+# CONFIG_WDT_501 is not set
+
+#
+# PCI-based Watchdog Cards
+#
+CONFIG_PCIPCWATCHDOG=m
+CONFIG_WDTPCI=m
+CONFIG_WDT_501_PCI=y
+
+#
+# USB-based Watchdog Cards
+#
+CONFIG_USBPCWATCHDOG=m
+CONFIG_HW_RANDOM=m
+CONFIG_NVRAM=m
+CONFIG_RTC=y
+CONFIG_DTLK=m
+CONFIG_R3964=m
+# CONFIG_APPLICOM is not set
+CONFIG_SONYPI=m
+
+#
+# Ftape, the floppy tape device driver
+#
+# CONFIG_FTAPE is not set
+CONFIG_AGP=y
+CONFIG_AGP_ALI=y
+CONFIG_AGP_ATI=y
+CONFIG_AGP_AMD=y
+CONFIG_AGP_AMD64=y
+CONFIG_AGP_INTEL=y
+CONFIG_AGP_INTEL_MCH=y
+CONFIG_AGP_NVIDIA=y
+CONFIG_AGP_SIS=y
+CONFIG_AGP_SWORKS=y
+CONFIG_AGP_VIA=y
+CONFIG_AGP_EFFICEON=y
+CONFIG_DRM=y
+CONFIG_DRM_TDFX=m
+CONFIG_DRM_GAMMA=m
+CONFIG_DRM_R128=m
+CONFIG_DRM_RADEON=m
+CONFIG_DRM_I810=m
+CONFIG_DRM_I830=m
+CONFIG_DRM_MGA=m
+CONFIG_DRM_SIS=m
+
+#
+# PCMCIA character devices
+#
+CONFIG_SYNCLINK_CS=m
+CONFIG_MWAVE=m
+# CONFIG_RAW_DRIVER is not set
+CONFIG_HANGCHECK_TIMER=m
+
+#
+# I2C support
+#
+CONFIG_I2C=m
+CONFIG_I2C_CHARDEV=m
+
+#
+# I2C Algorithms
+#
+CONFIG_I2C_ALGOBIT=m
+CONFIG_I2C_ALGOPCF=m
+
+#
+# I2C Hardware Bus support
+#
+CONFIG_I2C_ALI1535=m
+CONFIG_I2C_ALI1563=m
+CONFIG_I2C_ALI15X3=m
+CONFIG_I2C_AMD756=m
+CONFIG_I2C_AMD8111=m
+# CONFIG_I2C_ELEKTOR is not set
+CONFIG_I2C_I801=m
+CONFIG_I2C_I810=m
+CONFIG_I2C_ISA=m
+CONFIG_I2C_NFORCE2=m
+# CONFIG_I2C_PARPORT is not set
+# CONFIG_I2C_PARPORT_LIGHT is not set
+CONFIG_I2C_PIIX4=m
+CONFIG_I2C_PROSAVAGE=m
+CONFIG_I2C_SAVAGE4=m
+# CONFIG_SCx200_ACB is not set
+CONFIG_I2C_SIS5595=m
+CONFIG_I2C_SIS630=m
+CONFIG_I2C_SIS96X=m
+CONFIG_I2C_VIA=m
+CONFIG_I2C_VIAPRO=m
+CONFIG_I2C_VOODOO3=m
+
+#
+# Hardware Sensors Chip support
+#
+CONFIG_I2C_SENSOR=m
+CONFIG_SENSORS_ADM1021=m
+CONFIG_SENSORS_ASB100=m
+CONFIG_SENSORS_DS1621=m
+CONFIG_SENSORS_FSCHER=m
+CONFIG_SENSORS_GL518SM=m
+CONFIG_SENSORS_IT87=m
+CONFIG_SENSORS_LM75=m
+CONFIG_SENSORS_LM78=m
+CONFIG_SENSORS_LM80=m
+CONFIG_SENSORS_LM83=m
+CONFIG_SENSORS_LM85=m
+CONFIG_SENSORS_LM90=m
+# CONFIG_SENSORS_MAX1619 is not set
+CONFIG_SENSORS_VIA686A=m
+CONFIG_SENSORS_W83781D=m
+CONFIG_SENSORS_W83L785TS=m
+CONFIG_SENSORS_W83627HF=m
+
+#
+# Other I2C Chip support
+#
+CONFIG_SENSORS_EEPROM=m
+CONFIG_SENSORS_PCF8574=m
+CONFIG_SENSORS_PCF8591=m
+# CONFIG_SENSORS_RTC8564 is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+
+#
+# Misc devices
+#
+CONFIG_IBM_ASM=m
+
+#
+# Multimedia devices
+#
+CONFIG_VIDEO_DEV=m
+
+#
+# Video For Linux
+#
+
+#
+# Video Adapters
+#
+CONFIG_VIDEO_BT848=m
+CONFIG_VIDEO_PMS=m
+CONFIG_VIDEO_BWQCAM=m
+CONFIG_VIDEO_CQCAM=m
+CONFIG_VIDEO_W9966=m
+CONFIG_VIDEO_CPIA=m
+CONFIG_VIDEO_CPIA_PP=m
+CONFIG_VIDEO_CPIA_USB=m
+CONFIG_VIDEO_SAA5246A=m
+CONFIG_VIDEO_SAA5249=m
+CONFIG_TUNER_3036=m
+CONFIG_VIDEO_STRADIS=m
+CONFIG_VIDEO_ZORAN=m
+CONFIG_VIDEO_ZORAN_BUZ=m
+CONFIG_VIDEO_ZORAN_DC10=m
+CONFIG_VIDEO_ZORAN_DC30=m
+CONFIG_VIDEO_ZORAN_LML33=m
+CONFIG_VIDEO_ZORAN_LML33R10=m
+CONFIG_VIDEO_MEYE=m
+CONFIG_VIDEO_SAA7134=m
+CONFIG_VIDEO_MXB=m
+CONFIG_VIDEO_DPC=m
+CONFIG_VIDEO_HEXIUM_ORION=m
+CONFIG_VIDEO_HEXIUM_GEMINI=m
+CONFIG_VIDEO_CX88=m
+
+#
+# Radio Adapters
+#
+CONFIG_RADIO_CADET=m
+CONFIG_RADIO_RTRACK=m
+CONFIG_RADIO_RTRACK2=m
+CONFIG_RADIO_AZTECH=m
+CONFIG_RADIO_GEMTEK=m
+CONFIG_RADIO_GEMTEK_PCI=m
+CONFIG_RADIO_MAXIRADIO=m
+CONFIG_RADIO_MAESTRO=m
+CONFIG_RADIO_SF16FMI=m
+CONFIG_RADIO_SF16FMR2=m
+CONFIG_RADIO_TERRATEC=m
+CONFIG_RADIO_TRUST=m
+CONFIG_RADIO_TYPHOON=m
+CONFIG_RADIO_TYPHOON_PROC_FS=y
+CONFIG_RADIO_ZOLTRIX=m
+
+#
+# Digital Video Broadcasting Devices
+#
+CONFIG_DVB=y
+CONFIG_DVB_CORE=m
+
+#
+# Supported Frontend Modules
+#
+CONFIG_DVB_TWINHAN_DST=m
+CONFIG_DVB_STV0299=m
+# CONFIG_DVB_SP887X is not set
+# CONFIG_DVB_ALPS_TDLB7 is not set
+CONFIG_DVB_ALPS_TDMB7=m
+CONFIG_DVB_ATMEL_AT76C651=m
+CONFIG_DVB_CX24110=m
+CONFIG_DVB_GRUNDIG_29504_491=m
+CONFIG_DVB_GRUNDIG_29504_401=m
+CONFIG_DVB_MT312=m
+CONFIG_DVB_VES1820=m
+CONFIG_DVB_VES1X93=m
+# CONFIG_DVB_TDA1004X is not set
+CONFIG_DVB_NXT6000=m
+
+#
+# Supported SAA7146 based PCI Adapters
+#
+CONFIG_DVB_AV7110=m
+CONFIG_DVB_AV7110_OSD=y
+CONFIG_DVB_BUDGET=m
+CONFIG_DVB_BUDGET_CI=m
+CONFIG_DVB_BUDGET_AV=m
+CONFIG_DVB_BUDGET_PATCH=m
+
+#
+# Supported USB Adapters
+#
+CONFIG_DVB_TTUSB_BUDGET=m
+CONFIG_DVB_TTUSB_DEC=m
+
+#
+# Supported FlexCopII (B2C2) Adapters
+#
+CONFIG_DVB_B2C2_SKYSTAR=m
+
+#
+# Supported BT878 Adapters
+#
+CONFIG_DVB_BT8XX=m
+CONFIG_VIDEO_SAA7146=m
+CONFIG_VIDEO_SAA7146_VV=m
+CONFIG_VIDEO_VIDEOBUF=m
+CONFIG_VIDEO_TUNER=m
+CONFIG_VIDEO_BUF=m
+CONFIG_VIDEO_BTCX=m
+CONFIG_VIDEO_IR=m
+
+#
+# Graphics support
+#
+CONFIG_FB=y
+# CONFIG_FB_PM2 is not set
+# CONFIG_FB_CYBER2000 is not set
+# CONFIG_FB_ASILIANT is not set
+# CONFIG_FB_IMSTT is not set
+CONFIG_FB_VGA16=m
+CONFIG_FB_VESA=y
+CONFIG_VIDEO_SELECT=y
+CONFIG_FB_HGA=m
+# CONFIG_FB_HGA_ACCEL is not set
+CONFIG_FB_RIVA=m
+CONFIG_FB_I810=m
+CONFIG_FB_I810_GTF=y
+CONFIG_FB_MATROX=m
+CONFIG_FB_MATROX_MILLENIUM=y
+CONFIG_FB_MATROX_MYSTIQUE=y
+CONFIG_FB_MATROX_G450=y
+CONFIG_FB_MATROX_G100=y
+CONFIG_FB_MATROX_I2C=m
+CONFIG_FB_MATROX_MAVEN=m
+CONFIG_FB_MATROX_MULTIHEAD=y
+# CONFIG_FB_RADEON_OLD is not set
+CONFIG_FB_RADEON=m
+CONFIG_FB_RADEON_I2C=y
+# CONFIG_FB_RADEON_DEBUG is not set
+CONFIG_FB_ATY128=m
+CONFIG_FB_ATY=m
+CONFIG_FB_ATY_CT=y
+CONFIG_FB_ATY_GX=y
+# CONFIG_FB_ATY_XL_INIT is not set
+# CONFIG_FB_SIS is not set
+CONFIG_FB_NEOMAGIC=m
+CONFIG_FB_KYRO=m
+CONFIG_FB_3DFX=m
+# CONFIG_FB_3DFX_ACCEL is not set
+CONFIG_FB_VOODOO1=m
+CONFIG_FB_TRIDENT=m
+# CONFIG_FB_TRIDENT_ACCEL is not set
+# CONFIG_FB_VIRTUAL is not set
+
+#
+# Console display driver support
+#
+CONFIG_VGA_CONSOLE=y
+CONFIG_MDA_CONSOLE=m
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_PCI_CONSOLE=y
+# CONFIG_FONTS is not set
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+
+#
+# Logo configuration
+#
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_LOGO_LINUX_CLUT224=y
+
+#
+# Sound
+#
+CONFIG_SOUND=m
+
+#
+# Advanced Linux Sound Architecture
+#
+CONFIG_SND=m
+CONFIG_SND_TIMER=m
+CONFIG_SND_PCM=m
+CONFIG_SND_HWDEP=m
+CONFIG_SND_RAWMIDI=m
+CONFIG_SND_SEQUENCER=m
+CONFIG_SND_SEQ_DUMMY=m
+CONFIG_SND_OSSEMUL=y
+CONFIG_SND_MIXER_OSS=m
+CONFIG_SND_PCM_OSS=m
+CONFIG_SND_SEQUENCER_OSS=y
+CONFIG_SND_RTCTIMER=m
+# CONFIG_SND_VERBOSE_PRINTK is not set
+# CONFIG_SND_DEBUG is not set
+
+#
+# Generic devices
+#
+CONFIG_SND_MPU401_UART=m
+CONFIG_SND_OPL3_LIB=m
+CONFIG_SND_OPL4_LIB=m
+CONFIG_SND_VX_LIB=m
+CONFIG_SND_DUMMY=m
+CONFIG_SND_VIRMIDI=m
+CONFIG_SND_MTPAV=m
+# CONFIG_SND_SERIAL_U16550 is not set
+CONFIG_SND_MPU401=m
+
+#
+# ISA devices
+#
+CONFIG_SND_AD1816A=m
+CONFIG_SND_AD1848=m
+CONFIG_SND_CS4231=m
+CONFIG_SND_CS4232=m
+CONFIG_SND_CS4236=m
+CONFIG_SND_ES968=m
+CONFIG_SND_ES1688=m
+CONFIG_SND_ES18XX=m
+CONFIG_SND_GUSCLASSIC=m
+CONFIG_SND_GUSEXTREME=m
+CONFIG_SND_GUSMAX=m
+CONFIG_SND_INTERWAVE=m
+CONFIG_SND_INTERWAVE_STB=m
+CONFIG_SND_OPTI92X_AD1848=m
+CONFIG_SND_OPTI92X_CS4231=m
+CONFIG_SND_OPTI93X=m
+CONFIG_SND_SB8=m
+CONFIG_SND_SB16=m
+CONFIG_SND_SBAWE=m
+CONFIG_SND_SB16_CSP=y
+# CONFIG_SND_WAVEFRONT is not set
+CONFIG_SND_ALS100=m
+CONFIG_SND_AZT2320=m
+CONFIG_SND_CMI8330=m
+CONFIG_SND_DT019X=m
+CONFIG_SND_OPL3SA2=m
+CONFIG_SND_SGALAXY=m
+CONFIG_SND_SSCAPE=m
+
+#
+# PCI devices
+#
+CONFIG_SND_AC97_CODEC=m
+CONFIG_SND_ALI5451=m
+CONFIG_SND_ATIIXP=m
+CONFIG_SND_AU8810=m
+CONFIG_SND_AU8820=m
+CONFIG_SND_AU8830=m
+CONFIG_SND_AZT3328=m
+CONFIG_SND_BT87X=m
+CONFIG_SND_CS46XX=m
+CONFIG_SND_CS46XX_NEW_DSP=y
+CONFIG_SND_CS4281=m
+CONFIG_SND_EMU10K1=m
+CONFIG_SND_KORG1212=m
+CONFIG_SND_MIXART=m
+CONFIG_SND_NM256=m
+CONFIG_SND_RME32=m
+CONFIG_SND_RME96=m
+CONFIG_SND_RME9652=m
+CONFIG_SND_HDSP=m
+CONFIG_SND_TRIDENT=m
+CONFIG_SND_YMFPCI=m
+CONFIG_SND_ALS4000=m
+CONFIG_SND_CMIPCI=m
+CONFIG_SND_ENS1370=m
+CONFIG_SND_ENS1371=m
+CONFIG_SND_ES1938=m
+CONFIG_SND_ES1968=m
+CONFIG_SND_MAESTRO3=m
+CONFIG_SND_FM801=m
+CONFIG_SND_FM801_TEA575X=m
+CONFIG_SND_ICE1712=m
+CONFIG_SND_ICE1724=m
+CONFIG_SND_INTEL8X0=m
+CONFIG_SND_INTEL8X0M=m
+CONFIG_SND_SONICVIBES=m
+CONFIG_SND_VIA82XX=m
+CONFIG_SND_VX222=m
+
+#
+# ALSA USB devices
+#
+CONFIG_SND_USB_AUDIO=m
+
+#
+# PCMCIA devices
+#
+# CONFIG_SND_VXPOCKET is not set
+# CONFIG_SND_VXP440 is not set
+CONFIG_SND_PDAUDIOCF=m
+
+#
+# Open Sound System
+#
+# CONFIG_SOUND_PRIME is not set
+
+#
+# USB support
+#
+CONFIG_USB=y
+# CONFIG_USB_DEBUG is not set
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEVICEFS=y
+# CONFIG_USB_BANDWIDTH is not set
+# CONFIG_USB_DYNAMIC_MINORS is not set
+
+#
+# USB Host Controller Drivers
+#
+CONFIG_USB_EHCI_HCD=m
+CONFIG_USB_EHCI_SPLIT_ISO=y
+CONFIG_USB_EHCI_ROOT_HUB_TT=y
+CONFIG_USB_OHCI_HCD=m
+CONFIG_USB_UHCI_HCD=m
+
+#
+# USB Device Class drivers
+#
+CONFIG_USB_AUDIO=m
+
+#
+# USB Bluetooth TTY can only be used with disabled Bluetooth subsystem
+#
+CONFIG_USB_MIDI=m
+CONFIG_USB_ACM=m
+CONFIG_USB_PRINTER=m
+CONFIG_USB_STORAGE=m
+# CONFIG_USB_STORAGE_DEBUG is not set
+CONFIG_USB_STORAGE_DATAFAB=y
+CONFIG_USB_STORAGE_FREECOM=y
+CONFIG_USB_STORAGE_ISD200=y
+CONFIG_USB_STORAGE_DPCM=y
+CONFIG_USB_STORAGE_HP8200e=y
+CONFIG_USB_STORAGE_SDDR09=y
+CONFIG_USB_STORAGE_SDDR55=y
+CONFIG_USB_STORAGE_JUMPSHOT=y
+
+#
+# USB Human Interface Devices (HID)
+#
+CONFIG_USB_HID=y
+CONFIG_USB_HIDINPUT=y
+CONFIG_HID_FF=y
+CONFIG_HID_PID=y
+CONFIG_LOGITECH_FF=y
+CONFIG_THRUSTMASTER_FF=y
+CONFIG_USB_HIDDEV=y
+CONFIG_USB_AIPTEK=m
+CONFIG_USB_WACOM=m
+CONFIG_USB_KBTAB=m
+CONFIG_USB_POWERMATE=m
+CONFIG_USB_MTOUCH=m
+# CONFIG_USB_EGALAX is not set
+CONFIG_USB_XPAD=m
+CONFIG_USB_ATI_REMOTE=m
+
+#
+# USB Imaging devices
+#
+CONFIG_USB_MDC800=m
+CONFIG_USB_MICROTEK=m
+CONFIG_USB_HPUSBSCSI=m
+
+#
+# USB Multimedia devices
+#
+CONFIG_USB_DABUSB=m
+CONFIG_USB_VICAM=m
+CONFIG_USB_DSBR=m
+CONFIG_USB_IBMCAM=m
+CONFIG_USB_KONICAWC=m
+CONFIG_USB_OV511=m
+CONFIG_USB_SE401=m
+CONFIG_USB_STV680=m
+CONFIG_USB_W9968CF=m
+
+#
+# USB Network adaptors
+#
+CONFIG_USB_CATC=m
+CONFIG_USB_KAWETH=m
+CONFIG_USB_PEGASUS=m
+CONFIG_USB_RTL8150=m
+CONFIG_USB_USBNET=m
+
+#
+# USB Host-to-Host Cables
+#
+CONFIG_USB_ALI_M5632=y
+CONFIG_USB_AN2720=y
+CONFIG_USB_BELKIN=y
+CONFIG_USB_GENESYS=y
+CONFIG_USB_NET1080=y
+CONFIG_USB_PL2301=y
+
+#
+# Intelligent USB Devices/Gadgets
+#
+CONFIG_USB_ARMLINUX=y
+CONFIG_USB_EPSON2888=y
+CONFIG_USB_ZAURUS=y
+CONFIG_USB_CDCETHER=y
+
+#
+# USB Network Adapters
+#
+CONFIG_USB_AX8817X=y
+
+#
+# USB port drivers
+#
+CONFIG_USB_USS720=m
+
+#
+# USB Serial Converter support
+#
+CONFIG_USB_SERIAL=m
+CONFIG_USB_SERIAL_GENERIC=y
+CONFIG_USB_SERIAL_BELKIN=m
+CONFIG_USB_SERIAL_WHITEHEAT=m
+CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
+CONFIG_USB_SERIAL_EMPEG=m
+CONFIG_USB_SERIAL_FTDI_SIO=m
+CONFIG_USB_SERIAL_VISOR=m
+CONFIG_USB_SERIAL_IPAQ=m
+CONFIG_USB_SERIAL_IR=m
+CONFIG_USB_SERIAL_EDGEPORT=m
+CONFIG_USB_SERIAL_EDGEPORT_TI=m
+CONFIG_USB_SERIAL_KEYSPAN_PDA=m
+CONFIG_USB_SERIAL_KEYSPAN=m
+CONFIG_USB_SERIAL_KEYSPAN_MPR=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28X=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28XA=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28XB=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19=y
+CONFIG_USB_SERIAL_KEYSPAN_USA18X=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19W=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19QW=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19QI=y
+CONFIG_USB_SERIAL_KEYSPAN_USA49W=y
+CONFIG_USB_SERIAL_KEYSPAN_USA49WLC=y
+CONFIG_USB_SERIAL_KLSI=m
+CONFIG_USB_SERIAL_KOBIL_SCT=m
+CONFIG_USB_SERIAL_MCT_U232=m
+CONFIG_USB_SERIAL_PL2303=m
+CONFIG_USB_SERIAL_SAFE=m
+CONFIG_USB_SERIAL_SAFE_PADDED=y
+CONFIG_USB_SERIAL_CYBERJACK=m
+CONFIG_USB_SERIAL_XIRCOM=m
+CONFIG_USB_SERIAL_OMNINET=m
+CONFIG_USB_EZUSB=y
+
+#
+# USB Miscellaneous drivers
+#
+CONFIG_USB_EMI62=m
+# CONFIG_USB_EMI26 is not set
+CONFIG_USB_TIGL=m
+CONFIG_USB_AUERSWALD=m
+CONFIG_USB_RIO500=m
+CONFIG_USB_LEGOTOWER=m
+CONFIG_USB_LCD=m
+CONFIG_USB_LED=m
+CONFIG_USB_CYTHERM=m
+# CONFIG_USB_PHIDGETSERVO is not set
+CONFIG_USB_TEST=m
+
+#
+# USB Gadget Support
+#
+# CONFIG_USB_GADGET is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_FS=m
+CONFIG_EXT3_FS_XATTR=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_JBD=m
+# CONFIG_JBD_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+CONFIG_REISERFS_FS=m
+# CONFIG_REISERFS_CHECK is not set
+CONFIG_REISERFS_PROC_INFO=y
+# CONFIG_REISERFS_FS_XATTR is not set
+CONFIG_JFS_FS=m
+CONFIG_JFS_POSIX_ACL=y
+# CONFIG_JFS_DEBUG is not set
+# CONFIG_JFS_STATISTICS is not set
+CONFIG_FS_POSIX_ACL=y
+CONFIG_XFS_FS=m
+# CONFIG_XFS_RT is not set
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_SECURITY=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_MINIX_FS=m
+CONFIG_ROMFS_FS=m
+CONFIG_QUOTA=y
+# CONFIG_QFMT_V1 is not set
+CONFIG_QFMT_V2=y
+CONFIG_QUOTACTL=y
+CONFIG_AUTOFS_FS=m
+CONFIG_AUTOFS4_FS=m
+
+#
+# CD-ROM/DVD Filesystems
+#
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_ZISOFS_FS=y
+CONFIG_UDF_FS=m
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_NTFS_FS=y
+# CONFIG_NTFS_DEBUG is not set
+# CONFIG_NTFS_RW is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_SYSFS=y
+# CONFIG_DEVFS_FS is not set
+CONFIG_DEVPTS_FS_XATTR=y
+CONFIG_DEVPTS_FS_SECURITY=y
+CONFIG_TMPFS=y
+CONFIG_HUGETLBFS=y
+CONFIG_HUGETLB_PAGE=y
+CONFIG_RAMFS=y
+CONFIG_RELAYFS_FS=y
+CONFIG_KLOG_CHANNEL=y
+CONFIG_KLOG_CHANNEL_AUTOENABLE=y
+CONFIG_KLOG_CHANNEL_SHIFT=21
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+CONFIG_AFFS_FS=m
+CONFIG_HFS_FS=m
+CONFIG_HFSPLUS_FS=m
+CONFIG_BEFS_FS=m
+# CONFIG_BEFS_DEBUG is not set
+CONFIG_BFS_FS=m
+CONFIG_EFS_FS=m
+# CONFIG_JFFS_FS is not set
+CONFIG_JFFS2_FS=m
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_NAND=y
+CONFIG_CRAMFS=m
+CONFIG_VXFS_FS=m
+# CONFIG_HPFS_FS is not set
+CONFIG_QNX4FS_FS=m
+# CONFIG_QNX4FS_RW is not set
+CONFIG_SYSV_FS=m
+CONFIG_UFS_FS=m
+# CONFIG_UFS_FS_WRITE is not set
+
+#
+# Network File Systems
+#
+CONFIG_NFS_FS=m
+CONFIG_NFS_V3=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_DIRECTIO=y
+CONFIG_NFSD=m
+CONFIG_NFSD_V3=y
+CONFIG_NFSD_V4=y
+CONFIG_NFSD_TCP=y
+CONFIG_LOCKD=m
+CONFIG_LOCKD_V4=y
+CONFIG_EXPORTFS=m
+CONFIG_SUNRPC=m
+CONFIG_SUNRPC_GSS=m
+CONFIG_RPCSEC_GSS_KRB5=m
+CONFIG_SMB_FS=m
+# CONFIG_SMB_NLS_DEFAULT is not set
+CONFIG_CIFS=m
+# CONFIG_CIFS_STATS is not set
+CONFIG_NCP_FS=m
+CONFIG_NCPFS_PACKET_SIGNING=y
+CONFIG_NCPFS_IOCTL_LOCKING=y
+CONFIG_NCPFS_STRONG=y
+CONFIG_NCPFS_NFS_NS=y
+CONFIG_NCPFS_OS2_NS=y
+CONFIG_NCPFS_SMALLDOS=y
+CONFIG_NCPFS_NLS=y
+CONFIG_NCPFS_EXTRAS=y
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+CONFIG_OSF_PARTITION=y
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+CONFIG_MAC_PARTITION=y
+CONFIG_MSDOS_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+# CONFIG_LDM_PARTITION is not set
+# CONFIG_NEC98_PARTITION is not set
+CONFIG_SGI_PARTITION=y
+# CONFIG_ULTRIX_PARTITION is not set
+CONFIG_SUN_PARTITION=y
+CONFIG_EFI_PARTITION=y
+
+#
+# Native Language Support
+#
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_UTF8=m
+
+#
+# Profiling support
+#
+# CONFIG_PROFILING is not set
+
+#
+# Kernel hacking
+#
+CONFIG_DEBUG_KERNEL=y
+CONFIG_EARLY_PRINTK=y
+CONFIG_DEBUG_STACKOVERFLOW=y
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_DEBUG_SLAB is not set
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_PAGEALLOC is not set
+CONFIG_DEBUG_HIGHMEM=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_SPINLOCK_SLEEP=y
+# CONFIG_FRAME_POINTER is not set
+
+#
+# Linux VServer
+#
+CONFIG_VSERVER_LEGACY=y
+CONFIG_PROC_SECURE=y
+# CONFIG_VSERVER_HARDCPU is not set
+# CONFIG_INOXID_NONE is not set
+# CONFIG_INOXID_GID16 is not set
+CONFIG_INOXID_GID24=y
+# CONFIG_INOXID_GID32 is not set
+# CONFIG_INOXID_MAGIC is not set
+
+#
+# Security options
+#
+CONFIG_SECURITY=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_CAPABILITIES=y
+# CONFIG_SECURITY_ROOTPLUG is not set
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM=y
+CONFIG_SECURITY_SELINUX_DISABLE=y
+CONFIG_SECURITY_SELINUX_DEVELOP=y
+# CONFIG_SECURITY_SELINUX_MLS is not set
+
+#
+# Cryptographic options
+#
+CONFIG_CRYPTO=y
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_SHA1=m
+CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_DES=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_AES=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_DEFLATE=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_CRC32C=m
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Library routines
+#
+CONFIG_CRC32=y
+CONFIG_LIBCRC32C=m
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=m
+CONFIG_X86_BIOS_REBOOT=y
+CONFIG_X86_STD_RESOURCES=y
+CONFIG_PC=y
#
CONFIG_EXPERIMENTAL=y
CONFIG_CLEAN_COMPILE=y
+CONFIG_STANDALONE=y
CONFIG_BROKEN_ON_SMP=y
#
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_BSD_PROCESS_ACCT=y
-# CONFIG_BSD_PROCESS_ACCT_V3 is not set
#
# Class Based Kernel Resource Management
#
-CONFIG_CKRM=y
-CONFIG_RCFS_FS=y
-CONFIG_CKRM_TYPE_TASKCLASS=y
-CONFIG_CKRM_RES_NUMTASKS=y
-CONFIG_CKRM_CPU_SCHEDULE=y
-CONFIG_CKRM_RES_BLKIO=y
-# CONFIG_CKRM_RES_MEM is not set
-# CONFIG_CKRM_TYPE_SOCKETCLASS is not set
-CONFIG_CKRM_RBCE=y
+# CONFIG_CKRM is not set
CONFIG_SYSCTL=y
# CONFIG_AUDIT is not set
CONFIG_LOG_BUF_SHIFT=17
# CONFIG_HOTPLUG is not set
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_OOM_PANIC=y
+# CONFIG_IKCONFIG is not set
# CONFIG_EMBEDDED is not set
# CONFIG_DELAY_ACCT is not set
CONFIG_KALLSYMS=y
# CONFIG_KALLSYMS_ALL is not set
-CONFIG_KALLSYMS_EXTRA_PASS=y
CONFIG_FUTEX=y
CONFIG_EPOLL=y
CONFIG_IOSCHED_NOOP=y
CONFIG_IOSCHED_AS=y
CONFIG_IOSCHED_DEADLINE=y
CONFIG_IOSCHED_CFQ=y
-CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
#
# Loadable module support
# CONFIG_MODULE_FORCE_UNLOAD is not set
CONFIG_OBSOLETE_MODPARM=y
# CONFIG_MODVERSIONS is not set
-# CONFIG_MODULE_SIG is not set
CONFIG_KMOD=y
#
CONFIG_X86_UACCESS_INDIRECT=y
CONFIG_X86_HIGH_ENTRY=y
CONFIG_HPET_TIMER=y
+# CONFIG_HPET_EMULATE_RTC is not set
# CONFIG_SMP is not set
# CONFIG_PREEMPT is not set
-CONFIG_PREEMPT_VOLUNTARY=y
# CONFIG_X86_UP_APIC is not set
CONFIG_X86_TSC=y
CONFIG_X86_MCE=y
#
CONFIG_BINFMT_ELF=y
# CONFIG_BINFMT_AOUT is not set
-CONFIG_BINFMT_MISC=y
+CONFIG_BINFMT_MISC=m
#
# Device Drivers
#
# Generic Driver Options
#
-CONFIG_STANDALONE=y
-CONFIG_PREVENT_FIRMWARE_BUILD=y
# CONFIG_DEBUG_DRIVER is not set
#
CONFIG_BLK_DEV_DAC960=m
CONFIG_BLK_DEV_UMEM=m
CONFIG_BLK_DEV_LOOP=m
-# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_NBD=m
-CONFIG_BLK_DEV_SX8=m
+CONFIG_BLK_DEV_CARMEL=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=16384
CONFIG_BLK_DEV_INITRD=y
#
# Please see Documentation/ide.txt for help/info on IDE drives
#
-# CONFIG_BLK_DEV_IDE_SATA is not set
# CONFIG_BLK_DEV_HD_IDE is not set
CONFIG_BLK_DEV_IDEDISK=y
CONFIG_IDEDISK_MULTI_MODE=y
+# CONFIG_IDEDISK_STROKE is not set
CONFIG_BLK_DEV_IDECD=y
CONFIG_BLK_DEV_IDETAPE=m
CONFIG_BLK_DEV_IDEFLOPPY=y
# SCSI low-level drivers
#
CONFIG_BLK_DEV_3W_XXXX_RAID=m
-CONFIG_SCSI_3W_9XXX=m
# CONFIG_SCSI_7000FASST is not set
CONFIG_SCSI_ACARD=m
CONFIG_SCSI_AHA152X=m
CONFIG_AIC79XX_DEBUG_MASK=0
# CONFIG_AIC79XX_REG_PRETTY_PRINT is not set
# CONFIG_SCSI_DPT_I2O is not set
+CONFIG_SCSI_ADVANSYS=m
CONFIG_SCSI_IN2000=m
CONFIG_SCSI_MEGARAID=m
CONFIG_SCSI_SATA=y
CONFIG_SCSI_SATA_SVW=m
CONFIG_SCSI_ATA_PIIX=m
-CONFIG_SCSI_SATA_NV=m
CONFIG_SCSI_SATA_PROMISE=m
CONFIG_SCSI_SATA_SX4=m
CONFIG_SCSI_SATA_SIL=m
CONFIG_SCSI_SATA_VITESSE=m
CONFIG_SCSI_BUSLOGIC=m
# CONFIG_SCSI_OMIT_FLASHPOINT is not set
+# CONFIG_SCSI_CPQFCTS is not set
# CONFIG_SCSI_DMX3191D is not set
# CONFIG_SCSI_DTC3280 is not set
# CONFIG_SCSI_EATA is not set
# CONFIG_MD_MULTIPATH is not set
CONFIG_BLK_DEV_DM=y
# CONFIG_DM_CRYPT is not set
-# CONFIG_DM_SNAPSHOT is not set
-# CONFIG_DM_MIRROR is not set
-# CONFIG_DM_ZERO is not set
#
# Fusion MPT device support
CONFIG_UNIX=y
# CONFIG_NET_KEY is not set
CONFIG_INET=y
-# CONFIG_IP_MULTICAST is not set
+CONFIG_IP_MULTICAST=y
# CONFIG_IP_ADVANCED_ROUTER is not set
# CONFIG_IP_PNP is not set
# CONFIG_NET_IPIP is not set
# CONFIG_NET_IPGRE is not set
+# CONFIG_IP_MROUTE is not set
# CONFIG_ARPD is not set
-# CONFIG_SYN_COOKIES is not set
+CONFIG_SYN_COOKIES=y
# CONFIG_INET_AH is not set
# CONFIG_INET_ESP is not set
# CONFIG_INET_IPCOMP is not set
# IP: Virtual Server Configuration
#
# CONFIG_IP_VS is not set
-CONFIG_ICMP_IPOD=y
# CONFIG_IPV6 is not set
CONFIG_NETFILTER=y
# CONFIG_NETFILTER_DEBUG is not set
# CONFIG_IP_NF_COMPAT_IPFWADM is not set
CONFIG_IP_NF_TARGET_NOTRACK=m
CONFIG_IP_NF_RAW=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
-CONFIG_IP_NF_MATCH_REALM=m
#
# SCTP Configuration (EXPERIMENTAL)
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# QoS and/or fair queueing
#
CONFIG_NET_SCHED=y
-CONFIG_NET_SCH_CLK_JIFFIES=y
-# CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set
-# CONFIG_NET_SCH_CLK_CPU is not set
-# CONFIG_NET_SCH_CBQ is not set
+CONFIG_NET_SCH_CBQ=m
CONFIG_NET_SCH_HTB=m
-# CONFIG_NET_SCH_HFSC is not set
-# CONFIG_NET_SCH_PRIO is not set
-# CONFIG_NET_SCH_RED is not set
-# CONFIG_NET_SCH_SFQ is not set
-# CONFIG_NET_SCH_TEQL is not set
-# CONFIG_NET_SCH_TBF is not set
-# CONFIG_NET_SCH_GRED is not set
-# CONFIG_NET_SCH_DSMARK is not set
-# CONFIG_NET_SCH_NETEM is not set
-# CONFIG_NET_SCH_INGRESS is not set
-# CONFIG_NET_QOS is not set
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_CSZ=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_DELAY=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_QOS=y
+CONFIG_NET_ESTIMATOR=y
CONFIG_NET_CLS=y
-# CONFIG_NET_CLS_TCINDEX is not set
-# CONFIG_NET_CLS_ROUTE4 is not set
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
CONFIG_NET_CLS_ROUTE=y
CONFIG_NET_CLS_FW=m
-# CONFIG_NET_CLS_U32 is not set
-# CONFIG_NET_CLS_IND is not set
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_POLICE=y
#
# Network testing
CONFIG_TLAN=m
CONFIG_VIA_RHINE=m
CONFIG_VIA_RHINE_MMIO=y
-CONFIG_VIA_VELOCITY=m
CONFIG_NET_POCKET=y
CONFIG_ATP=m
CONFIG_DE600=m
#
# Watchdog Cards
#
-# CONFIG_WATCHDOG is not set
+CONFIG_WATCHDOG=y
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+
+#
+# Watchdog Device Drivers
+#
+CONFIG_SOFT_WATCHDOG=y
+# CONFIG_ACQUIRE_WDT is not set
+# CONFIG_ADVANTECH_WDT is not set
+# CONFIG_ALIM1535_WDT is not set
+# CONFIG_ALIM7101_WDT is not set
+# CONFIG_SC520_WDT is not set
+# CONFIG_EUROTECH_WDT is not set
+# CONFIG_IB700_WDT is not set
+# CONFIG_WAFER_WDT is not set
+# CONFIG_I8XX_TCO is not set
+# CONFIG_SC1200_WDT is not set
+# CONFIG_SCx200_WDT is not set
+# CONFIG_60XX_WDT is not set
+# CONFIG_CPU5_WDT is not set
+# CONFIG_W83627HF_WDT is not set
+# CONFIG_W83877F_WDT is not set
+# CONFIG_MACHZ_WDT is not set
+
+#
+# ISA-based Watchdog Cards
+#
+# CONFIG_PCWATCHDOG is not set
+# CONFIG_MIXCOMWD is not set
+# CONFIG_WDT is not set
+
+#
+# PCI-based Watchdog Cards
+#
+# CONFIG_PCIPCWATCHDOG is not set
+# CONFIG_WDTPCI is not set
# CONFIG_HW_RANDOM is not set
# CONFIG_NVRAM is not set
# CONFIG_RTC is not set
# CONFIG_DRM is not set
# CONFIG_MWAVE is not set
# CONFIG_RAW_DRIVER is not set
-CONFIG_HANGCHECK_TIMER=y
+# CONFIG_HANGCHECK_TIMER is not set
#
# I2C support
#
# CONFIG_I2C is not set
-#
-# Dallas's 1-wire bus
-#
-# CONFIG_W1 is not set
-
#
# Misc devices
#
CONFIG_ZISOFS=y
CONFIG_ZISOFS_FS=y
CONFIG_UDF_FS=m
-CONFIG_UDF_NLS=y
#
# DOS/FAT/NT Filesystems
CONFIG_FAT_FS=m
CONFIG_MSDOS_FS=m
CONFIG_VFAT_FS=m
-CONFIG_FAT_DEFAULT_CODEPAGE=437
-CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
# CONFIG_NTFS_FS is not set
#
CONFIG_NLS=y
CONFIG_NLS_DEFAULT="utf8"
CONFIG_NLS_CODEPAGE_437=m
-# CONFIG_NLS_CODEPAGE_737 is not set
-# CONFIG_NLS_CODEPAGE_775 is not set
-# CONFIG_NLS_CODEPAGE_850 is not set
-# CONFIG_NLS_CODEPAGE_852 is not set
-# CONFIG_NLS_CODEPAGE_855 is not set
-# CONFIG_NLS_CODEPAGE_857 is not set
-# CONFIG_NLS_CODEPAGE_860 is not set
-# CONFIG_NLS_CODEPAGE_861 is not set
-# CONFIG_NLS_CODEPAGE_862 is not set
-# CONFIG_NLS_CODEPAGE_863 is not set
-# CONFIG_NLS_CODEPAGE_864 is not set
-# CONFIG_NLS_CODEPAGE_865 is not set
-# CONFIG_NLS_CODEPAGE_866 is not set
-# CONFIG_NLS_CODEPAGE_869 is not set
-# CONFIG_NLS_CODEPAGE_936 is not set
-# CONFIG_NLS_CODEPAGE_950 is not set
-# CONFIG_NLS_CODEPAGE_932 is not set
-# CONFIG_NLS_CODEPAGE_949 is not set
-# CONFIG_NLS_CODEPAGE_874 is not set
-# CONFIG_NLS_ISO8859_8 is not set
-# CONFIG_NLS_CODEPAGE_1250 is not set
-# CONFIG_NLS_CODEPAGE_1251 is not set
-# CONFIG_NLS_ASCII is not set
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
CONFIG_NLS_ISO8859_1=m
-# CONFIG_NLS_ISO8859_2 is not set
-# CONFIG_NLS_ISO8859_3 is not set
-# CONFIG_NLS_ISO8859_4 is not set
-# CONFIG_NLS_ISO8859_5 is not set
-# CONFIG_NLS_ISO8859_6 is not set
-# CONFIG_NLS_ISO8859_7 is not set
-# CONFIG_NLS_ISO8859_9 is not set
-# CONFIG_NLS_ISO8859_13 is not set
-# CONFIG_NLS_ISO8859_14 is not set
-# CONFIG_NLS_ISO8859_15 is not set
-# CONFIG_NLS_KOI8_R is not set
-# CONFIG_NLS_KOI8_U is not set
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
CONFIG_NLS_UTF8=m
#
#
# Kernel hacking
#
-# CONFIG_CRASH_DUMP is not set
CONFIG_DEBUG_KERNEL=y
CONFIG_EARLY_PRINTK=y
CONFIG_DEBUG_STACKOVERFLOW=y
# CONFIG_DEBUG_STACK_USAGE is not set
-CONFIG_DEBUG_SLAB=y
+# CONFIG_DEBUG_SLAB is not set
CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_SPINLOCK=y
+# CONFIG_DEBUG_SPINLOCK is not set
# CONFIG_DEBUG_PAGEALLOC is not set
-CONFIG_DEBUG_HIGHMEM=y
+# CONFIG_DEBUG_HIGHMEM is not set
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_SPINLOCK_SLEEP=y
# CONFIG_FRAME_POINTER is not set
# Linux VServer
#
CONFIG_VSERVER_LEGACY=y
-# CONFIG_VSERVER_PROC_SECURE is not set
-# CONFIG_VSERVER_HARDCPU is not set
+# CONFIG_PROC_SECURE is not set
+CONFIG_VSERVER_HARDCPU=y
# CONFIG_INOXID_NONE is not set
-# CONFIG_INOXID_UID16 is not set
# CONFIG_INOXID_GID16 is not set
-CONFIG_INOXID_UGID24=y
-# CONFIG_INOXID_INTERN is not set
-# CONFIG_INOXID_RUNTIME is not set
-# CONFIG_VSERVER_DEBUG is not set
+CONFIG_INOXID_GID24=y
+# CONFIG_INOXID_GID32 is not set
+# CONFIG_INOXID_MAGIC is not set
#
# Security options
#
# Cryptographic options
#
-# CONFIG_CRYPTO is not set
+CONFIG_CRYPTO=y
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_MD4 is not set
+# CONFIG_CRYPTO_MD5 is not set
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_DES is not set
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_TEST is not set
#
# Library routines
#
-# CONFIG_CRC_CCITT is not set
CONFIG_CRC32=y
# CONFIG_LIBCRC32C is not set
CONFIG_ZLIB_INFLATE=y
CONFIG_X86_BIOS_REBOOT=y
+CONFIG_X86_STD_RESOURCES=y
CONFIG_PC=y
--- /dev/null
+#
+# Automatically generated make config: don't edit
+#
+CONFIG_X86=y
+CONFIG_MMU=y
+CONFIG_UID16=y
+CONFIG_GENERIC_ISA_DMA=y
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_CLEAN_COMPILE=y
+CONFIG_STANDALONE=y
+CONFIG_BROKEN_ON_SMP=y
+
+#
+# General setup
+#
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_BSD_PROCESS_ACCT=y
+
+#
+# Class Based Kernel Resource Management
+#
+CONFIG_CKRM=y
+CONFIG_RCFS_FS=y
+CONFIG_CKRM_TYPE_TASKCLASS=y
+CONFIG_CKRM_RES_NUMTASKS=y
+CONFIG_CKRM_CPU_SCHEDULE=y
+CONFIG_CKRM_CPU_MONITOR=y
+CONFIG_CKRM_TYPE_SOCKETCLASS=y
+CONFIG_CKRM_RES_LISTENAQ=m
+CONFIG_CKRM_RBCE=m
+CONFIG_CKRM_CRBCE=m
+# CONFIG_BSD_PROCESS_ACCT_V3 is not set
+CONFIG_SYSCTL=y
+CONFIG_AUDIT=y
+CONFIG_AUDITSYSCALL=y
+CONFIG_LOG_BUF_SHIFT=17
+CONFIG_HOTPLUG=y
+# CONFIG_IKCONFIG is not set
+# CONFIG_EMBEDDED is not set
+CONFIG_DELAY_ACCT=y
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+CONFIG_OBSOLETE_MODPARM=y
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SIG is not set
+CONFIG_KMOD=y
+
+#
+# Processor type and features
+#
+CONFIG_X86_PC=y
+# CONFIG_X86_ELAN is not set
+# CONFIG_X86_VOYAGER is not set
+# CONFIG_X86_NUMAQ is not set
+# CONFIG_X86_SUMMIT is not set
+# CONFIG_X86_BIGSMP is not set
+# CONFIG_X86_VISWS is not set
+# CONFIG_X86_GENERICARCH is not set
+# CONFIG_X86_ES7000 is not set
+# CONFIG_M386 is not set
+# CONFIG_M486 is not set
+# CONFIG_M586 is not set
+# CONFIG_M586TSC is not set
+# CONFIG_M586MMX is not set
+CONFIG_M686=y
+# CONFIG_MPENTIUMII is not set
+# CONFIG_MPENTIUMIII is not set
+# CONFIG_MPENTIUMM is not set
+# CONFIG_MPENTIUM4 is not set
+# CONFIG_MK6 is not set
+# CONFIG_MK7 is not set
+# CONFIG_MK8 is not set
+# CONFIG_MCRUSOE is not set
+# CONFIG_MWINCHIPC6 is not set
+# CONFIG_MWINCHIP2 is not set
+# CONFIG_MWINCHIP3D is not set
+# CONFIG_MCYRIXIII is not set
+# CONFIG_MVIAC3_2 is not set
+CONFIG_X86_GENERIC=y
+CONFIG_X86_CMPXCHG=y
+CONFIG_X86_XADD=y
+CONFIG_X86_L1_CACHE_SHIFT=7
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_X86_PPRO_FENCE=y
+CONFIG_X86_WP_WORKS_OK=y
+CONFIG_X86_INVLPG=y
+CONFIG_X86_BSWAP=y
+CONFIG_X86_POPAD_OK=y
+CONFIG_X86_GOOD_APIC=y
+CONFIG_X86_INTEL_USERCOPY=y
+CONFIG_X86_USE_PPRO_CHECKSUM=y
+CONFIG_X86_4G=y
+CONFIG_X86_SWITCH_PAGETABLES=y
+CONFIG_X86_4G_VM_LAYOUT=y
+CONFIG_X86_UACCESS_INDIRECT=y
+CONFIG_X86_HIGH_ENTRY=y
+CONFIG_HPET_TIMER=y
+CONFIG_HPET_EMULATE_RTC=y
+# CONFIG_SMP is not set
+# CONFIG_PREEMPT is not set
+CONFIG_PREEMPT_VOLUNTARY=y
+# CONFIG_X86_UP_APIC is not set
+CONFIG_X86_TSC=y
+CONFIG_X86_MCE=y
+# CONFIG_X86_MCE_NONFATAL is not set
+CONFIG_TOSHIBA=m
+CONFIG_I8K=m
+CONFIG_MICROCODE=m
+CONFIG_X86_MSR=m
+CONFIG_X86_CPUID=m
+
+#
+# Firmware Drivers
+#
+CONFIG_EDD=m
+# CONFIG_NOHIGHMEM is not set
+CONFIG_HIGHMEM4G=y
+# CONFIG_HIGHMEM64G is not set
+CONFIG_HIGHMEM=y
+CONFIG_HIGHPTE=y
+# CONFIG_MATH_EMULATION is not set
+CONFIG_MTRR=y
+# CONFIG_EFI is not set
+CONFIG_REGPARM=y
+
+#
+# Power management options (ACPI, APM)
+#
+CONFIG_PM=y
+# CONFIG_SOFTWARE_SUSPEND is not set
+# CONFIG_PM_DISK is not set
+
+#
+# ACPI (Advanced Configuration and Power Interface) Support
+#
+CONFIG_ACPI=y
+CONFIG_ACPI_BOOT=y
+CONFIG_ACPI_INTERPRETER=y
+CONFIG_ACPI_SLEEP=y
+CONFIG_ACPI_SLEEP_PROC_FS=y
+CONFIG_ACPI_AC=m
+CONFIG_ACPI_BATTERY=m
+CONFIG_ACPI_BUTTON=m
+CONFIG_ACPI_FAN=y
+CONFIG_ACPI_PROCESSOR=y
+CONFIG_ACPI_THERMAL=y
+CONFIG_ACPI_ASUS=m
+CONFIG_ACPI_TOSHIBA=m
+# CONFIG_ACPI_DEBUG is not set
+CONFIG_ACPI_BUS=y
+CONFIG_ACPI_EC=y
+CONFIG_ACPI_POWER=y
+CONFIG_ACPI_PCI=y
+CONFIG_ACPI_SYSTEM=y
+CONFIG_X86_PM_TIMER=y
+
+#
+# APM (Advanced Power Management) BIOS Support
+#
+CONFIG_APM=y
+# CONFIG_APM_IGNORE_USER_SUSPEND is not set
+# CONFIG_APM_DO_ENABLE is not set
+CONFIG_APM_CPU_IDLE=y
+# CONFIG_APM_DISPLAY_BLANK is not set
+CONFIG_APM_RTC_IS_GMT=y
+# CONFIG_APM_ALLOW_INTS is not set
+# CONFIG_APM_REAL_MODE_POWER_OFF is not set
+
+#
+# CPU Frequency scaling
+#
+CONFIG_CPU_FREQ=y
+# CONFIG_CPU_FREQ_PROC_INTF is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set
+CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=m
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+# CONFIG_CPU_FREQ_24_API is not set
+CONFIG_CPU_FREQ_TABLE=y
+
+#
+# CPUFreq processor drivers
+#
+CONFIG_X86_ACPI_CPUFREQ=m
+# CONFIG_X86_ACPI_CPUFREQ_PROC_INTF is not set
+CONFIG_X86_POWERNOW_K6=m
+CONFIG_X86_POWERNOW_K7=y
+# CONFIG_X86_POWERNOW_K8 is not set
+# CONFIG_X86_GX_SUSPMOD is not set
+CONFIG_X86_SPEEDSTEP_CENTRINO=y
+CONFIG_X86_SPEEDSTEP_CENTRINO_TABLE=y
+CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI=y
+CONFIG_X86_SPEEDSTEP_ICH=y
+CONFIG_X86_SPEEDSTEP_SMI=m
+CONFIG_X86_P4_CLOCKMOD=m
+CONFIG_X86_SPEEDSTEP_LIB=y
+# CONFIG_X86_SPEEDSTEP_RELAXED_CAP_CHECK is not set
+CONFIG_X86_LONGRUN=y
+CONFIG_X86_LONGHAUL=y
+
+#
+# Bus options (PCI, PCMCIA, EISA, MCA, ISA)
+#
+CONFIG_PCI=y
+# CONFIG_PCI_GOBIOS is not set
+# CONFIG_PCI_GOMMCONFIG is not set
+# CONFIG_PCI_GODIRECT is not set
+CONFIG_PCI_GOANY=y
+CONFIG_PCI_BIOS=y
+CONFIG_PCI_DIRECT=y
+CONFIG_PCI_MMCONFIG=y
+CONFIG_PCI_LEGACY_PROC=y
+# CONFIG_PCI_NAMES is not set
+CONFIG_ISA=y
+# CONFIG_EISA is not set
+# CONFIG_MCA is not set
+# CONFIG_SCx200 is not set
+
+#
+# PCMCIA/CardBus support
+#
+CONFIG_PCMCIA=m
+# CONFIG_PCMCIA_DEBUG is not set
+CONFIG_YENTA=m
+CONFIG_CARDBUS=y
+# CONFIG_PD6729 is not set
+CONFIG_I82092=m
+CONFIG_I82365=m
+CONFIG_TCIC=m
+CONFIG_PCMCIA_PROBE=y
+
+#
+# PCI Hotplug Support
+#
+CONFIG_HOTPLUG_PCI=y
+# CONFIG_HOTPLUG_PCI_FAKE is not set
+CONFIG_HOTPLUG_PCI_COMPAQ=m
+# CONFIG_HOTPLUG_PCI_COMPAQ_NVRAM is not set
+# CONFIG_HOTPLUG_PCI_ACPI is not set
+# CONFIG_HOTPLUG_PCI_CPCI is not set
+CONFIG_HOTPLUG_PCI_PCIE=m
+CONFIG_HOTPLUG_PCI_PCIE_POLL_EVENT_MODE=y
+CONFIG_HOTPLUG_PCI_SHPC=m
+CONFIG_HOTPLUG_PCI_SHPC_POLL_EVENT_MODE=y
+
+#
+# Executable file formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_AOUT is not set
+CONFIG_BINFMT_MISC=m
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+# CONFIG_DEBUG_DRIVER is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+CONFIG_MTD=m
+# CONFIG_MTD_DEBUG is not set
+CONFIG_MTD_PARTITIONS=m
+CONFIG_MTD_CONCAT=m
+CONFIG_MTD_REDBOOT_PARTS=m
+CONFIG_MTD_CMDLINE_PARTS=m
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=m
+CONFIG_MTD_BLOCK=m
+CONFIG_MTD_BLOCK_RO=m
+CONFIG_FTL=m
+CONFIG_NFTL=m
+CONFIG_NFTL_RW=y
+CONFIG_INFTL=m
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=m
+CONFIG_MTD_JEDECPROBE=m
+CONFIG_MTD_GEN_PROBE=m
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_CFI_INTELEXT=m
+CONFIG_MTD_CFI_AMDSTD=m
+CONFIG_MTD_CFI_STAA=m
+CONFIG_MTD_RAM=m
+CONFIG_MTD_ROM=m
+CONFIG_MTD_ABSENT=m
+# CONFIG_MTD_OBSOLETE_CHIPS is not set
+
+#
+# Mapping drivers for chip access
+#
+CONFIG_MTD_COMPLEX_MAPPINGS=y
+# CONFIG_MTD_PHYSMAP is not set
+# CONFIG_MTD_PNC2000 is not set
+CONFIG_MTD_SC520CDP=m
+CONFIG_MTD_NETSC520=m
+CONFIG_MTD_SBC_GXX=m
+CONFIG_MTD_ELAN_104NC=m
+CONFIG_MTD_SCx200_DOCFLASH=m
+CONFIG_MTD_AMD76XROM=m
+CONFIG_MTD_ICH2ROM=m
+CONFIG_MTD_SCB2_FLASH=m
+# CONFIG_MTD_NETtel is not set
+# CONFIG_MTD_DILNETPC is not set
+CONFIG_MTD_L440GX=m
+CONFIG_MTD_PCI=m
+
+#
+# Self-contained MTD device drivers
+#
+CONFIG_MTD_PMC551=m
+# CONFIG_MTD_PMC551_BUGFIX is not set
+# CONFIG_MTD_PMC551_DEBUG is not set
+# CONFIG_MTD_SLRAM is not set
+CONFIG_MTD_MTDRAM=m
+CONFIG_MTDRAM_TOTAL_SIZE=4096
+CONFIG_MTDRAM_ERASE_SIZE=128
+# CONFIG_MTD_BLKMTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+CONFIG_MTD_DOC2000=m
+# CONFIG_MTD_DOC2001 is not set
+CONFIG_MTD_DOC2001PLUS=m
+CONFIG_MTD_DOCPROBE=m
+# CONFIG_MTD_DOCPROBE_ADVANCED is not set
+CONFIG_MTD_DOCPROBE_ADDRESS=0
+
+#
+# NAND Flash Device Drivers
+#
+CONFIG_MTD_NAND=m
+# CONFIG_MTD_NAND_VERIFY_WRITE is not set
+CONFIG_MTD_NAND_IDS=m
+
+#
+# Parallel port support
+#
+CONFIG_PARPORT=m
+CONFIG_PARPORT_PC=m
+CONFIG_PARPORT_PC_CML1=m
+CONFIG_PARPORT_SERIAL=m
+# CONFIG_PARPORT_PC_FIFO is not set
+# CONFIG_PARPORT_PC_SUPERIO is not set
+CONFIG_PARPORT_PC_PCMCIA=m
+# CONFIG_PARPORT_OTHER is not set
+CONFIG_PARPORT_1284=y
+
+#
+# Plug and Play support
+#
+CONFIG_PNP=y
+# CONFIG_PNP_DEBUG is not set
+
+#
+# Protocols
+#
+CONFIG_ISAPNP=y
+# CONFIG_PNPBIOS is not set
+
+#
+# Block devices
+#
+CONFIG_BLK_DEV_FD=m
+# CONFIG_BLK_DEV_XD is not set
+# CONFIG_PARIDE is not set
+CONFIG_BLK_CPQ_DA=m
+CONFIG_BLK_CPQ_CISS_DA=m
+CONFIG_CISS_SCSI_TAPE=y
+CONFIG_BLK_DEV_DAC960=m
+CONFIG_BLK_DEV_UMEM=m
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_NBD=m
+# CONFIG_BLK_DEV_SX8 is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=16384
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_LBD=y
+
+#
+# ATA/ATAPI/MFM/RLL support
+#
+CONFIG_IDE=y
+CONFIG_BLK_DEV_IDE=y
+
+#
+# Please see Documentation/ide.txt for help/info on IDE drives
+#
+# CONFIG_BLK_DEV_IDE_SATA is not set
+# CONFIG_BLK_DEV_HD_IDE is not set
+CONFIG_BLK_DEV_IDEDISK=y
+CONFIG_IDEDISK_MULTI_MODE=y
+CONFIG_BLK_DEV_IDECS=m
+CONFIG_BLK_DEV_IDECD=y
+CONFIG_BLK_DEV_IDETAPE=m
+CONFIG_BLK_DEV_IDEFLOPPY=y
+# CONFIG_BLK_DEV_IDESCSI is not set
+# CONFIG_IDE_TASK_IOCTL is not set
+# CONFIG_IDE_TASKFILE_IO is not set
+
+#
+# IDE chipset support/bugfixes
+#
+CONFIG_IDE_GENERIC=y
+# CONFIG_BLK_DEV_CMD640 is not set
+CONFIG_BLK_DEV_IDEPNP=y
+CONFIG_BLK_DEV_IDEPCI=y
+CONFIG_IDEPCI_SHARE_IRQ=y
+# CONFIG_BLK_DEV_OFFBOARD is not set
+CONFIG_BLK_DEV_GENERIC=y
+# CONFIG_BLK_DEV_OPTI621 is not set
+CONFIG_BLK_DEV_RZ1000=y
+CONFIG_BLK_DEV_IDEDMA_PCI=y
+# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
+CONFIG_IDEDMA_PCI_AUTO=y
+# CONFIG_IDEDMA_ONLYDISK is not set
+CONFIG_BLK_DEV_ADMA=y
+CONFIG_BLK_DEV_AEC62XX=y
+CONFIG_BLK_DEV_ALI15X3=y
+# CONFIG_WDC_ALI15X3 is not set
+CONFIG_BLK_DEV_AMD74XX=y
+CONFIG_BLK_DEV_ATIIXP=y
+CONFIG_BLK_DEV_CMD64X=y
+CONFIG_BLK_DEV_TRIFLEX=y
+CONFIG_BLK_DEV_CY82C693=y
+CONFIG_BLK_DEV_CS5520=y
+CONFIG_BLK_DEV_CS5530=y
+CONFIG_BLK_DEV_HPT34X=y
+# CONFIG_HPT34X_AUTODMA is not set
+CONFIG_BLK_DEV_HPT366=y
+# CONFIG_BLK_DEV_SC1200 is not set
+CONFIG_BLK_DEV_PIIX=y
+# CONFIG_BLK_DEV_NS87415 is not set
+CONFIG_BLK_DEV_PDC202XX_OLD=y
+# CONFIG_PDC202XX_BURST is not set
+CONFIG_BLK_DEV_PDC202XX_NEW=y
+CONFIG_PDC202XX_FORCE=y
+CONFIG_BLK_DEV_SVWKS=y
+CONFIG_BLK_DEV_SIIMAGE=y
+CONFIG_BLK_DEV_SIS5513=y
+CONFIG_BLK_DEV_SLC90E66=y
+# CONFIG_BLK_DEV_TRM290 is not set
+CONFIG_BLK_DEV_VIA82CXXX=y
+# CONFIG_IDE_ARM is not set
+# CONFIG_IDE_CHIPSETS is not set
+CONFIG_BLK_DEV_IDEDMA=y
+# CONFIG_IDEDMA_IVB is not set
+CONFIG_IDEDMA_AUTO=y
+# CONFIG_BLK_DEV_HD is not set
+
+#
+# SCSI device support
+#
+CONFIG_SCSI=m
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=m
+CONFIG_CHR_DEV_ST=m
+CONFIG_CHR_DEV_OSST=m
+CONFIG_BLK_DEV_SR=m
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=m
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+# CONFIG_SCSI_MULTI_LUN is not set
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+
+#
+# SCSI Transport Attributes
+#
+CONFIG_SCSI_SPI_ATTRS=m
+CONFIG_SCSI_FC_ATTRS=m
+
+#
+# SCSI low-level drivers
+#
+CONFIG_BLK_DEV_3W_XXXX_RAID=m
+# CONFIG_SCSI_3W_9XXX is not set
+# CONFIG_SCSI_7000FASST is not set
+# CONFIG_SCSI_ACARD is not set
+CONFIG_SCSI_AHA152X=m
+CONFIG_SCSI_AHA1542=m
+CONFIG_SCSI_AACRAID=m
+CONFIG_SCSI_AIC7XXX=m
+CONFIG_AIC7XXX_CMDS_PER_DEVICE=4
+CONFIG_AIC7XXX_RESET_DELAY_MS=15000
+# CONFIG_AIC7XXX_BUILD_FIRMWARE is not set
+# CONFIG_AIC7XXX_DEBUG_ENABLE is not set
+CONFIG_AIC7XXX_DEBUG_MASK=0
+# CONFIG_AIC7XXX_REG_PRETTY_PRINT is not set
+CONFIG_SCSI_AIC7XXX_OLD=m
+CONFIG_SCSI_AIC79XX=m
+CONFIG_AIC79XX_CMDS_PER_DEVICE=4
+CONFIG_AIC79XX_RESET_DELAY_MS=15000
+# CONFIG_AIC79XX_BUILD_FIRMWARE is not set
+# CONFIG_AIC79XX_ENABLE_RD_STRM is not set
+# CONFIG_AIC79XX_DEBUG_ENABLE is not set
+CONFIG_AIC79XX_DEBUG_MASK=0
+# CONFIG_AIC79XX_REG_PRETTY_PRINT is not set
+# CONFIG_SCSI_DPT_I2O is not set
+CONFIG_SCSI_IN2000=m
+CONFIG_SCSI_MEGARAID=m
+CONFIG_SCSI_SATA=y
+CONFIG_SCSI_SATA_SVW=m
+CONFIG_SCSI_ATA_PIIX=m
+# CONFIG_SCSI_SATA_NV is not set
+CONFIG_SCSI_SATA_PROMISE=m
+# CONFIG_SCSI_SATA_SX4 is not set
+CONFIG_SCSI_SATA_SIL=m
+CONFIG_SCSI_SATA_SIS=m
+CONFIG_SCSI_SATA_VIA=m
+CONFIG_SCSI_SATA_VITESSE=m
+CONFIG_SCSI_BUSLOGIC=m
+# CONFIG_SCSI_OMIT_FLASHPOINT is not set
+# CONFIG_SCSI_DMX3191D is not set
+# CONFIG_SCSI_DTC3280 is not set
+# CONFIG_SCSI_EATA is not set
+# CONFIG_SCSI_EATA_PIO is not set
+CONFIG_SCSI_FUTURE_DOMAIN=m
+CONFIG_SCSI_GDTH=m
+# CONFIG_SCSI_GENERIC_NCR5380 is not set
+# CONFIG_SCSI_GENERIC_NCR5380_MMIO is not set
+CONFIG_SCSI_IPS=m
+CONFIG_SCSI_INIA100=m
+CONFIG_SCSI_PPA=m
+CONFIG_SCSI_IMM=m
+# CONFIG_SCSI_IZIP_EPP16 is not set
+# CONFIG_SCSI_IZIP_SLOW_CTR is not set
+# CONFIG_SCSI_NCR53C406A is not set
+CONFIG_SCSI_SYM53C8XX_2=m
+CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1
+CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
+CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
+# CONFIG_SCSI_SYM53C8XX_IOMAPPED is not set
+# CONFIG_SCSI_IPR is not set
+# CONFIG_SCSI_PAS16 is not set
+# CONFIG_SCSI_PSI240I is not set
+CONFIG_SCSI_QLOGIC_FAS=m
+CONFIG_SCSI_QLOGIC_ISP=m
+# CONFIG_SCSI_QLOGIC_FC is not set
+CONFIG_SCSI_QLOGIC_1280=m
+CONFIG_SCSI_QLA2XXX=m
+CONFIG_SCSI_QLA21XX=m
+CONFIG_SCSI_QLA22XX=m
+CONFIG_SCSI_QLA2300=m
+CONFIG_SCSI_QLA2322=m
+CONFIG_SCSI_QLA6312=m
+CONFIG_SCSI_QLA6322=m
+# CONFIG_SCSI_SYM53C416 is not set
+# CONFIG_SCSI_DC395x is not set
+# CONFIG_SCSI_DC390T is not set
+# CONFIG_SCSI_T128 is not set
+# CONFIG_SCSI_U14_34F is not set
+# CONFIG_SCSI_ULTRASTOR is not set
+# CONFIG_SCSI_NSP32 is not set
+# CONFIG_SCSI_DEBUG is not set
+
+#
+# PCMCIA SCSI adapter support
+#
+CONFIG_PCMCIA_AHA152X=m
+CONFIG_PCMCIA_FDOMAIN=m
+CONFIG_PCMCIA_NINJA_SCSI=m
+CONFIG_PCMCIA_QLOGIC=m
+# CONFIG_PCMCIA_SYM53C500 is not set
+
+#
+# Old CD-ROM drivers (not SCSI, not IDE)
+#
+# CONFIG_CD_NO_IDESCSI is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+CONFIG_MD_RAID5=m
+CONFIG_MD_RAID6=m
+CONFIG_MD_MULTIPATH=m
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_CRYPT=m
+# CONFIG_DM_SNAPSHOT is not set
+# CONFIG_DM_MIRROR is not set
+# CONFIG_DM_ZERO is not set
+
+#
+# Fusion MPT device support
+#
+CONFIG_FUSION=m
+CONFIG_FUSION_MAX_SGE=40
+# CONFIG_FUSION_ISENSE is not set
+CONFIG_FUSION_CTL=m
+CONFIG_FUSION_LAN=m
+
+#
+# IEEE 1394 (FireWire) support
+#
+# CONFIG_IEEE1394 is not set
+
+#
+# I2O device support
+#
+CONFIG_I2O=m
+CONFIG_I2O_CONFIG=m
+CONFIG_I2O_BLOCK=m
+CONFIG_I2O_SCSI=m
+CONFIG_I2O_PROC=m
+
+#
+# Networking support
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+CONFIG_PACKET_MMAP=y
+CONFIG_NETLINK_DEV=y
+CONFIG_UNIX=y
+CONFIG_NET_KEY=m
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_FWMARK=y
+CONFIG_IP_ROUTE_NAT=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_TOS=y
+CONFIG_IP_ROUTE_VERBOSE=y
+# CONFIG_IP_PNP is not set
+CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE=m
+CONFIG_NET_IPGRE_BROADCAST=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+# CONFIG_ARPD is not set
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_ACCEPT_QUEUES=y
+
+#
+# IP: Virtual Server Configuration
+#
+CONFIG_IP_VS=m
+# CONFIG_IP_VS_DEBUG is not set
+CONFIG_IP_VS_TAB_BITS=12
+
+#
+# IPVS transport protocol load balancing support
+#
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_PROTO_ESP=y
+CONFIG_IP_VS_PROTO_AH=y
+
+#
+# IPVS scheduler
+#
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_WRR=m
+CONFIG_IP_VS_LC=m
+CONFIG_IP_VS_WLC=m
+CONFIG_IP_VS_LBLC=m
+CONFIG_IP_VS_LBLCR=m
+CONFIG_IP_VS_DH=m
+CONFIG_IP_VS_SH=m
+CONFIG_IP_VS_SED=m
+CONFIG_IP_VS_NQ=m
+
+#
+# IPVS application helper
+#
+CONFIG_IP_VS_FTP=m
+CONFIG_IPV6=m
+CONFIG_IPV6_PRIVACY=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+CONFIG_BRIDGE_NETFILTER=y
+
+#
+# IP: Netfilter Configuration
+#
+CONFIG_IP_NF_CONNTRACK=m
+CONFIG_IP_NF_FTP=m
+CONFIG_IP_NF_IRC=m
+CONFIG_IP_NF_TFTP=m
+CONFIG_IP_NF_AMANDA=m
+CONFIG_IP_NF_QUEUE=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_LIMIT=m
+CONFIG_IP_NF_MATCH_IPRANGE=m
+CONFIG_IP_NF_MATCH_MAC=m
+CONFIG_IP_NF_MATCH_PKTTYPE=m
+CONFIG_IP_NF_MATCH_MARK=m
+CONFIG_IP_NF_MATCH_MULTIPORT=m
+CONFIG_IP_NF_MATCH_TOS=m
+CONFIG_IP_NF_MATCH_RECENT=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_DSCP=m
+CONFIG_IP_NF_MATCH_AH_ESP=m
+CONFIG_IP_NF_MATCH_LENGTH=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_MATCH_TCPMSS=m
+CONFIG_IP_NF_MATCH_HELPER=m
+CONFIG_IP_NF_MATCH_STATE=m
+CONFIG_IP_NF_MATCH_CONNTRACK=m
+CONFIG_IP_NF_MATCH_OWNER=m
+CONFIG_IP_NF_MATCH_PHYSDEV=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_NAT_NEEDED=y
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
+CONFIG_IP_NF_TARGET_NETMAP=m
+CONFIG_IP_NF_TARGET_SAME=m
+CONFIG_IP_NF_NAT_LOCAL=y
+CONFIG_IP_NF_NAT_SNMP_BASIC=m
+CONFIG_IP_NF_NAT_IRC=m
+CONFIG_IP_NF_NAT_FTP=m
+CONFIG_IP_NF_NAT_TFTP=m
+CONFIG_IP_NF_NAT_AMANDA=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_TOS=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_DSCP=m
+CONFIG_IP_NF_TARGET_MARK=m
+CONFIG_IP_NF_TARGET_CLASSIFY=m
+CONFIG_IP_NF_TARGET_LOG=m
+CONFIG_IP_NF_TARGET_ULOG=m
+CONFIG_IP_NF_TARGET_TCPMSS=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+# CONFIG_IP_NF_COMPAT_IPCHAINS is not set
+# CONFIG_IP_NF_COMPAT_IPFWADM is not set
+CONFIG_IP_NF_TARGET_NOTRACK=m
+CONFIG_IP_NF_RAW=m
+# CONFIG_IP_NF_MATCH_ADDRTYPE is not set
+# CONFIG_IP_NF_MATCH_REALM is not set
+
+#
+# IPv6: Netfilter Configuration
+#
+# CONFIG_IP6_NF_QUEUE is not set
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_MATCH_LIMIT=m
+CONFIG_IP6_NF_MATCH_MAC=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_MULTIPORT=m
+CONFIG_IP6_NF_MATCH_OWNER=m
+CONFIG_IP6_NF_MATCH_MARK=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_AHESP=m
+CONFIG_IP6_NF_MATCH_LENGTH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_LOG=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_TARGET_MARK=m
+CONFIG_IP6_NF_RAW=m
+
+#
+# Bridge: Netfilter Configuration
+#
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_XFRM=y
+CONFIG_XFRM_USER=y
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+CONFIG_IP_SCTP=m
+# CONFIG_SCTP_DBG_MSG is not set
+# CONFIG_SCTP_DBG_OBJCNT is not set
+# CONFIG_SCTP_HMAC_NONE is not set
+# CONFIG_SCTP_HMAC_SHA1 is not set
+CONFIG_SCTP_HMAC_MD5=y
+# CONFIG_ATM is not set
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+# CONFIG_DECNET is not set
+CONFIG_LLC=m
+# CONFIG_LLC2 is not set
+CONFIG_IPX=m
+# CONFIG_IPX_INTERN is not set
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=y
+CONFIG_LTPC=m
+CONFIG_COPS=m
+CONFIG_COPS_DAYNA=y
+CONFIG_COPS_TANGENT=y
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_IPDDP_DECAP=y
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+CONFIG_NET_DIVERT=y
+# CONFIG_ECONET is not set
+CONFIG_WAN_ROUTER=m
+# CONFIG_NET_FASTROUTE is not set
+# CONFIG_NET_HW_FLOWCONTROL is not set
+
+#
+# QoS and/or fair queueing
+#
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+# CONFIG_NET_SCH_NETEM is not set
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_QOS=y
+CONFIG_NET_ESTIMATOR=y
+CONFIG_NET_CLS=y
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_ROUTE=y
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+# CONFIG_CLS_U32_PERF is not set
+# CONFIG_NET_CLS_IND is not set
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+# CONFIG_NET_CLS_ACT is not set
+CONFIG_NET_CLS_POLICE=y
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+CONFIG_NETPOLL=y
+# CONFIG_NETPOLL_RX is not set
+# CONFIG_NETPOLL_TRAP is not set
+CONFIG_NET_POLL_CONTROLLER=y
+# CONFIG_HAMRADIO is not set
+CONFIG_IRDA=m
+
+#
+# IrDA protocols
+#
+CONFIG_IRLAN=m
+CONFIG_IRNET=m
+CONFIG_IRCOMM=m
+# CONFIG_IRDA_ULTRA is not set
+
+#
+# IrDA options
+#
+CONFIG_IRDA_CACHE_LAST_LSAP=y
+CONFIG_IRDA_FAST_RR=y
+# CONFIG_IRDA_DEBUG is not set
+
+#
+# Infrared-port device drivers
+#
+
+#
+# SIR device drivers
+#
+CONFIG_IRTTY_SIR=m
+
+#
+# Dongle support
+#
+CONFIG_DONGLE=y
+CONFIG_ESI_DONGLE=m
+CONFIG_ACTISYS_DONGLE=m
+CONFIG_TEKRAM_DONGLE=m
+CONFIG_LITELINK_DONGLE=m
+CONFIG_MA600_DONGLE=m
+CONFIG_GIRBIL_DONGLE=m
+CONFIG_MCP2120_DONGLE=m
+CONFIG_OLD_BELKIN_DONGLE=m
+CONFIG_ACT200L_DONGLE=m
+
+#
+# Old SIR device drivers
+#
+CONFIG_IRPORT_SIR=m
+
+#
+# Old Serial dongle support
+#
+# CONFIG_DONGLE_OLD is not set
+
+#
+# FIR device drivers
+#
+CONFIG_USB_IRDA=m
+CONFIG_SIGMATEL_FIR=m
+CONFIG_NSC_FIR=m
+# CONFIG_WINBOND_FIR is not set
+# CONFIG_TOSHIBA_FIR is not set
+# CONFIG_SMC_IRCC_FIR is not set
+# CONFIG_ALI_FIR is not set
+# CONFIG_VLSI_FIR is not set
+# CONFIG_VIA_FIR is not set
+CONFIG_BT=m
+CONFIG_BT_L2CAP=m
+CONFIG_BT_SCO=m
+CONFIG_BT_RFCOMM=m
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=m
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_CMTP=m
+# CONFIG_BT_HIDP is not set
+
+#
+# Bluetooth device drivers
+#
+CONFIG_BT_HCIUSB=m
+CONFIG_BT_HCIUSB_SCO=y
+CONFIG_BT_HCIUART=m
+CONFIG_BT_HCIUART_H4=y
+CONFIG_BT_HCIUART_BCSP=y
+CONFIG_BT_HCIUART_BCSP_TXCRC=y
+CONFIG_BT_HCIBCM203X=m
+CONFIG_BT_HCIBFUSB=m
+CONFIG_BT_HCIDTL1=m
+CONFIG_BT_HCIBT3C=m
+CONFIG_BT_HCIBLUECARD=m
+CONFIG_BT_HCIBTUART=m
+CONFIG_BT_HCIVHCI=m
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=m
+CONFIG_BONDING=m
+CONFIG_EQUALIZER=m
+CONFIG_TUN=m
+CONFIG_ETHERTAP=m
+CONFIG_NET_SB1000=m
+
+#
+# ARCnet devices
+#
+# CONFIG_ARCNET is not set
+
+#
+# Ethernet (10 or 100Mbit)
+#
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=m
+CONFIG_HAPPYMEAL=m
+CONFIG_SUNGEM=m
+CONFIG_NET_VENDOR_3COM=y
+CONFIG_EL1=m
+CONFIG_EL2=m
+CONFIG_ELPLUS=m
+CONFIG_EL16=m
+CONFIG_EL3=m
+CONFIG_3C515=m
+CONFIG_VORTEX=m
+CONFIG_TYPHOON=m
+CONFIG_LANCE=m
+CONFIG_NET_VENDOR_SMC=y
+CONFIG_WD80x3=m
+CONFIG_ULTRA=m
+CONFIG_SMC9194=m
+CONFIG_NET_VENDOR_RACAL=y
+# CONFIG_NI5010 is not set
+CONFIG_NI52=m
+CONFIG_NI65=m
+
+#
+# Tulip family network device support
+#
+CONFIG_NET_TULIP=y
+CONFIG_DE2104X=m
+CONFIG_TULIP=m
+# CONFIG_TULIP_MWI is not set
+CONFIG_TULIP_MMIO=y
+# CONFIG_TULIP_NAPI is not set
+CONFIG_DE4X5=m
+CONFIG_WINBOND_840=m
+CONFIG_DM9102=m
+CONFIG_PCMCIA_XIRCOM=m
+# CONFIG_PCMCIA_XIRTULIP is not set
+# CONFIG_AT1700 is not set
+CONFIG_DEPCA=m
+CONFIG_HP100=m
+# CONFIG_NET_ISA is not set
+# CONFIG_NE2000 is not set
+CONFIG_NET_PCI=y
+CONFIG_PCNET32=m
+CONFIG_AMD8111_ETH=m
+CONFIG_AMD8111E_NAPI=y
+CONFIG_ADAPTEC_STARFIRE=m
+CONFIG_ADAPTEC_STARFIRE_NAPI=y
+CONFIG_AC3200=m
+CONFIG_APRICOT=m
+CONFIG_B44=m
+CONFIG_FORCEDETH=m
+CONFIG_CS89x0=m
+CONFIG_DGRS=m
+CONFIG_EEPRO100=m
+# CONFIG_EEPRO100_PIO is not set
+CONFIG_E100=m
+CONFIG_E100_NAPI=y
+CONFIG_FEALNX=m
+CONFIG_NATSEMI=m
+CONFIG_NE2K_PCI=m
+CONFIG_8139CP=m
+CONFIG_8139TOO=m
+CONFIG_8139TOO_PIO=y
+# CONFIG_8139TOO_TUNE_TWISTER is not set
+CONFIG_8139TOO_8129=y
+# CONFIG_8139_OLD_RX_RESET is not set
+CONFIG_SIS900=m
+CONFIG_EPIC100=m
+CONFIG_SUNDANCE=m
+# CONFIG_SUNDANCE_MMIO is not set
+CONFIG_TLAN=m
+CONFIG_VIA_RHINE=m
+CONFIG_VIA_RHINE_MMIO=y
+# CONFIG_VIA_VELOCITY is not set
+CONFIG_NET_POCKET=y
+CONFIG_ATP=m
+CONFIG_DE600=m
+CONFIG_DE620=m
+
+#
+# Ethernet (1000 Mbit)
+#
+CONFIG_ACENIC=m
+# CONFIG_ACENIC_OMIT_TIGON_I is not set
+CONFIG_DL2K=m
+CONFIG_E1000=m
+CONFIG_E1000_NAPI=y
+CONFIG_NS83820=m
+CONFIG_HAMACHI=m
+CONFIG_YELLOWFIN=m
+CONFIG_R8169=m
+CONFIG_SK98LIN=m
+CONFIG_TIGON3=m
+
+#
+# Ethernet (10000 Mbit)
+#
+CONFIG_IXGB=m
+CONFIG_IXGB_NAPI=y
+CONFIG_S2IO=m
+CONFIG_S2IO_NAPI=y
+
+#
+# Token Ring devices
+#
+# CONFIG_TR is not set
+
+#
+# Wireless LAN (non-hamradio)
+#
+CONFIG_NET_RADIO=y
+
+#
+# Obsolete Wireless cards support (pre-802.11)
+#
+# CONFIG_STRIP is not set
+# CONFIG_ARLAN is not set
+CONFIG_WAVELAN=m
+CONFIG_PCMCIA_WAVELAN=m
+CONFIG_PCMCIA_NETWAVE=m
+
+#
+# Wireless 802.11 Frequency Hopping cards support
+#
+# CONFIG_PCMCIA_RAYCS is not set
+
+#
+# Wireless 802.11b ISA/PCI cards support
+#
+CONFIG_AIRO=m
+CONFIG_HERMES=m
+CONFIG_PLX_HERMES=m
+CONFIG_TMD_HERMES=m
+CONFIG_PCI_HERMES=m
+CONFIG_ATMEL=m
+CONFIG_PCI_ATMEL=m
+
+#
+# Wireless 802.11b Pcmcia/Cardbus cards support
+#
+CONFIG_PCMCIA_HERMES=m
+CONFIG_AIRO_CS=m
+CONFIG_PCMCIA_ATMEL=m
+CONFIG_PCMCIA_WL3501=m
+
+#
+# Prism GT/Duette 802.11(a/b/g) PCI/Cardbus support
+#
+CONFIG_PRISM54=m
+CONFIG_NET_WIRELESS=y
+
+#
+# PCMCIA network device support
+#
+CONFIG_NET_PCMCIA=y
+CONFIG_PCMCIA_3C589=m
+CONFIG_PCMCIA_3C574=m
+CONFIG_PCMCIA_FMVJ18X=m
+CONFIG_PCMCIA_PCNET=m
+CONFIG_PCMCIA_NMCLAN=m
+CONFIG_PCMCIA_SMC91C92=m
+CONFIG_PCMCIA_XIRC2PS=m
+CONFIG_PCMCIA_AXNET=m
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+CONFIG_FDDI=y
+# CONFIG_DEFXX is not set
+CONFIG_SKFP=m
+# CONFIG_HIPPI is not set
+CONFIG_PLIP=m
+CONFIG_PPP=m
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_PPP_DEFLATE=m
+# CONFIG_PPP_BSDCOMP is not set
+CONFIG_PPPOE=m
+# CONFIG_SLIP is not set
+CONFIG_NET_FC=y
+# CONFIG_SHAPER is not set
+CONFIG_NETCONSOLE=m
+
+#
+# ISDN subsystem
+#
+CONFIG_ISDN=m
+
+#
+# Old ISDN4Linux
+#
+CONFIG_ISDN_I4L=m
+CONFIG_ISDN_PPP=y
+CONFIG_ISDN_PPP_VJ=y
+CONFIG_ISDN_MPP=y
+CONFIG_IPPP_FILTER=y
+# CONFIG_ISDN_PPP_BSDCOMP is not set
+CONFIG_ISDN_AUDIO=y
+CONFIG_ISDN_TTY_FAX=y
+
+#
+# ISDN feature submodules
+#
+CONFIG_ISDN_DRV_LOOP=m
+
+#
+# ISDN4Linux hardware drivers
+#
+
+#
+# Passive cards
+#
+CONFIG_ISDN_DRV_HISAX=m
+
+#
+# D-channel protocol features
+#
+CONFIG_HISAX_EURO=y
+CONFIG_DE_AOC=y
+CONFIG_HISAX_NO_SENDCOMPLETE=y
+CONFIG_HISAX_NO_LLC=y
+CONFIG_HISAX_NO_KEYPAD=y
+CONFIG_HISAX_1TR6=y
+CONFIG_HISAX_NI1=y
+CONFIG_HISAX_MAX_CARDS=8
+
+#
+# HiSax supported cards
+#
+CONFIG_HISAX_16_0=y
+CONFIG_HISAX_16_3=y
+CONFIG_HISAX_TELESPCI=y
+CONFIG_HISAX_S0BOX=y
+CONFIG_HISAX_AVM_A1=y
+CONFIG_HISAX_FRITZPCI=y
+CONFIG_HISAX_AVM_A1_PCMCIA=y
+CONFIG_HISAX_ELSA=y
+CONFIG_HISAX_IX1MICROR2=y
+CONFIG_HISAX_DIEHLDIVA=y
+CONFIG_HISAX_ASUSCOM=y
+CONFIG_HISAX_TELEINT=y
+CONFIG_HISAX_HFCS=y
+CONFIG_HISAX_SEDLBAUER=y
+CONFIG_HISAX_SPORTSTER=y
+CONFIG_HISAX_MIC=y
+CONFIG_HISAX_NETJET=y
+CONFIG_HISAX_NETJET_U=y
+CONFIG_HISAX_NICCY=y
+CONFIG_HISAX_ISURF=y
+CONFIG_HISAX_HSTSAPHIR=y
+CONFIG_HISAX_BKM_A4T=y
+CONFIG_HISAX_SCT_QUADRO=y
+CONFIG_HISAX_GAZEL=y
+CONFIG_HISAX_HFC_PCI=y
+CONFIG_HISAX_W6692=y
+CONFIG_HISAX_HFC_SX=y
+CONFIG_HISAX_ENTERNOW_PCI=y
+# CONFIG_HISAX_DEBUG is not set
+
+#
+# HiSax PCMCIA card service modules
+#
+CONFIG_HISAX_SEDLBAUER_CS=m
+CONFIG_HISAX_ELSA_CS=m
+CONFIG_HISAX_AVM_A1_CS=m
+CONFIG_HISAX_TELES_CS=m
+
+#
+# HiSax sub driver modules
+#
+CONFIG_HISAX_ST5481=m
+CONFIG_HISAX_HFCUSB=m
+CONFIG_HISAX_FRITZ_PCIPNP=m
+CONFIG_HISAX_HDLC=y
+
+#
+# Active cards
+#
+CONFIG_ISDN_DRV_ICN=m
+CONFIG_ISDN_DRV_PCBIT=m
+CONFIG_ISDN_DRV_SC=m
+CONFIG_ISDN_DRV_ACT2000=m
+CONFIG_ISDN_DRV_TPAM=m
+CONFIG_HYSDN=m
+CONFIG_HYSDN_CAPI=y
+
+#
+# CAPI subsystem
+#
+CONFIG_ISDN_CAPI=m
+CONFIG_ISDN_DRV_AVMB1_VERBOSE_REASON=y
+CONFIG_ISDN_CAPI_MIDDLEWARE=y
+CONFIG_ISDN_CAPI_CAPI20=m
+CONFIG_ISDN_CAPI_CAPIFS_BOOL=y
+CONFIG_ISDN_CAPI_CAPIFS=m
+CONFIG_ISDN_CAPI_CAPIDRV=m
+
+#
+# CAPI hardware drivers
+#
+
+#
+# Active AVM cards
+#
+CONFIG_CAPI_AVM=y
+CONFIG_ISDN_DRV_AVMB1_B1ISA=m
+CONFIG_ISDN_DRV_AVMB1_B1PCI=m
+CONFIG_ISDN_DRV_AVMB1_B1PCIV4=y
+CONFIG_ISDN_DRV_AVMB1_T1ISA=m
+CONFIG_ISDN_DRV_AVMB1_B1PCMCIA=m
+CONFIG_ISDN_DRV_AVMB1_AVM_CS=m
+CONFIG_ISDN_DRV_AVMB1_T1PCI=m
+CONFIG_ISDN_DRV_AVMB1_C4=m
+
+#
+# Active Eicon DIVA Server cards
+#
+CONFIG_CAPI_EICON=y
+CONFIG_ISDN_DIVAS=m
+CONFIG_ISDN_DIVAS_BRIPCI=y
+CONFIG_ISDN_DIVAS_PRIPCI=y
+CONFIG_ISDN_DIVAS_DIVACAPI=m
+CONFIG_ISDN_DIVAS_USERIDI=m
+CONFIG_ISDN_DIVAS_MAINT=m
+
+#
+# Telephony Support
+#
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+CONFIG_INPUT_JOYDEV=m
+# CONFIG_INPUT_TSDEV is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input I/O drivers
+#
+CONFIG_GAMEPORT=m
+CONFIG_SOUND_GAMEPORT=m
+CONFIG_GAMEPORT_NS558=m
+CONFIG_GAMEPORT_L4=m
+CONFIG_GAMEPORT_EMU10K1=m
+CONFIG_GAMEPORT_VORTEX=m
+CONFIG_GAMEPORT_FM801=m
+CONFIG_GAMEPORT_CS461x=m
+CONFIG_SERIO=y
+CONFIG_SERIO_I8042=y
+CONFIG_SERIO_SERPORT=y
+# CONFIG_SERIO_CT82C710 is not set
+# CONFIG_SERIO_PARKBD is not set
+# CONFIG_SERIO_PCIPS2 is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+CONFIG_KEYBOARD_ATKBD=y
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+CONFIG_INPUT_MOUSE=y
+CONFIG_MOUSE_PS2=y
+CONFIG_MOUSE_SERIAL=m
+CONFIG_MOUSE_INPORT=m
+CONFIG_MOUSE_ATIXL=y
+CONFIG_MOUSE_LOGIBM=m
+CONFIG_MOUSE_PC110PAD=m
+CONFIG_MOUSE_VSXXXAA=m
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_JOYSTICK_ANALOG=m
+CONFIG_JOYSTICK_A3D=m
+CONFIG_JOYSTICK_ADI=m
+CONFIG_JOYSTICK_COBRA=m
+CONFIG_JOYSTICK_GF2K=m
+CONFIG_JOYSTICK_GRIP=m
+CONFIG_JOYSTICK_GRIP_MP=m
+CONFIG_JOYSTICK_GUILLEMOT=m
+CONFIG_JOYSTICK_INTERACT=m
+CONFIG_JOYSTICK_SIDEWINDER=m
+CONFIG_JOYSTICK_TMDC=m
+CONFIG_JOYSTICK_IFORCE=m
+CONFIG_JOYSTICK_IFORCE_USB=y
+CONFIG_JOYSTICK_IFORCE_232=y
+CONFIG_JOYSTICK_WARRIOR=m
+CONFIG_JOYSTICK_MAGELLAN=m
+CONFIG_JOYSTICK_SPACEORB=m
+CONFIG_JOYSTICK_SPACEBALL=m
+CONFIG_JOYSTICK_STINGER=m
+CONFIG_JOYSTICK_TWIDDLER=m
+CONFIG_JOYSTICK_DB9=m
+CONFIG_JOYSTICK_GAMECON=m
+CONFIG_JOYSTICK_TURBOGRAFX=m
+# CONFIG_INPUT_JOYDUMP is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_GUNZE=m
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_PCSPKR=m
+# CONFIG_INPUT_UINPUT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+CONFIG_SERIAL_NONSTANDARD=y
+# CONFIG_COMPUTONE is not set
+CONFIG_ROCKETPORT=m
+# CONFIG_CYCLADES is not set
+# CONFIG_DIGIEPCA is not set
+# CONFIG_DIGI is not set
+# CONFIG_ESPSERIAL is not set
+# CONFIG_MOXA_INTELLIO is not set
+# CONFIG_MOXA_SMARTIO is not set
+# CONFIG_ISI is not set
+CONFIG_SYNCLINK=m
+CONFIG_SYNCLINKMP=m
+CONFIG_N_HDLC=m
+# CONFIG_RISCOM8 is not set
+# CONFIG_SPECIALIX is not set
+# CONFIG_SX is not set
+# CONFIG_RIO is not set
+CONFIG_STALDRV=y
+# CONFIG_STALLION is not set
+# CONFIG_ISTALLION is not set
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_CS=m
+# CONFIG_SERIAL_8250_ACPI is not set
+CONFIG_SERIAL_8250_NR_UARTS=4
+CONFIG_SERIAL_8250_EXTENDED=y
+# CONFIG_SERIAL_8250_MANY_PORTS is not set
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_8250_DETECT_IRQ=y
+CONFIG_SERIAL_8250_MULTIPORT=y
+CONFIG_SERIAL_8250_RSA=y
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_CRASH=m
+CONFIG_PRINTER=m
+CONFIG_LP_CONSOLE=y
+CONFIG_PPDEV=m
+CONFIG_TIPAR=m
+# CONFIG_QIC02_TAPE is not set
+
+#
+# IPMI
+#
+CONFIG_IPMI_HANDLER=m
+# CONFIG_IPMI_PANIC_EVENT is not set
+CONFIG_IPMI_DEVICE_INTERFACE=m
+CONFIG_IPMI_SI=m
+CONFIG_IPMI_WATCHDOG=m
+
+#
+# Watchdog Cards
+#
+CONFIG_WATCHDOG=y
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+
+#
+# Watchdog Device Drivers
+#
+CONFIG_SOFT_WATCHDOG=m
+CONFIG_ACQUIRE_WDT=m
+CONFIG_ADVANTECH_WDT=m
+CONFIG_ALIM1535_WDT=m
+CONFIG_ALIM7101_WDT=m
+CONFIG_SC520_WDT=m
+CONFIG_EUROTECH_WDT=m
+CONFIG_IB700_WDT=m
+CONFIG_WAFER_WDT=m
+CONFIG_I8XX_TCO=m
+CONFIG_SC1200_WDT=m
+# CONFIG_SCx200_WDT is not set
+# CONFIG_60XX_WDT is not set
+CONFIG_CPU5_WDT=m
+CONFIG_W83627HF_WDT=m
+CONFIG_W83877F_WDT=m
+CONFIG_MACHZ_WDT=m
+
+#
+# ISA-based Watchdog Cards
+#
+CONFIG_PCWATCHDOG=m
+# CONFIG_MIXCOMWD is not set
+CONFIG_WDT=m
+# CONFIG_WDT_501 is not set
+
+#
+# PCI-based Watchdog Cards
+#
+CONFIG_PCIPCWATCHDOG=m
+CONFIG_WDTPCI=m
+CONFIG_WDT_501_PCI=y
+
+#
+# USB-based Watchdog Cards
+#
+CONFIG_USBPCWATCHDOG=m
+CONFIG_HW_RANDOM=m
+CONFIG_NVRAM=m
+CONFIG_RTC=y
+CONFIG_DTLK=m
+CONFIG_R3964=m
+# CONFIG_APPLICOM is not set
+CONFIG_SONYPI=m
+
+#
+# Ftape, the floppy tape device driver
+#
+# CONFIG_FTAPE is not set
+CONFIG_AGP=y
+CONFIG_AGP_ALI=y
+CONFIG_AGP_ATI=y
+CONFIG_AGP_AMD=y
+CONFIG_AGP_AMD64=y
+CONFIG_AGP_INTEL=y
+CONFIG_AGP_INTEL_MCH=y
+CONFIG_AGP_NVIDIA=y
+CONFIG_AGP_SIS=y
+CONFIG_AGP_SWORKS=y
+CONFIG_AGP_VIA=y
+CONFIG_AGP_EFFICEON=y
+CONFIG_DRM=y
+CONFIG_DRM_TDFX=m
+CONFIG_DRM_GAMMA=m
+CONFIG_DRM_R128=m
+CONFIG_DRM_RADEON=m
+CONFIG_DRM_I810=m
+CONFIG_DRM_I830=m
+CONFIG_DRM_MGA=m
+CONFIG_DRM_SIS=m
+
+#
+# PCMCIA character devices
+#
+CONFIG_SYNCLINK_CS=m
+CONFIG_MWAVE=m
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_HPET is not set
+CONFIG_HANGCHECK_TIMER=m
+
+#
+# I2C support
+#
+CONFIG_I2C=m
+CONFIG_I2C_CHARDEV=m
+
+#
+# I2C Algorithms
+#
+CONFIG_I2C_ALGOBIT=m
+CONFIG_I2C_ALGOPCF=m
+
+#
+# I2C Hardware Bus support
+#
+CONFIG_I2C_ALI1535=m
+CONFIG_I2C_ALI1563=m
+CONFIG_I2C_ALI15X3=m
+CONFIG_I2C_AMD756=m
+CONFIG_I2C_AMD8111=m
+# CONFIG_I2C_ELEKTOR is not set
+CONFIG_I2C_I801=m
+CONFIG_I2C_I810=m
+CONFIG_I2C_ISA=m
+CONFIG_I2C_NFORCE2=m
+# CONFIG_I2C_PARPORT is not set
+# CONFIG_I2C_PARPORT_LIGHT is not set
+CONFIG_I2C_PIIX4=m
+CONFIG_I2C_PROSAVAGE=m
+CONFIG_I2C_SAVAGE4=m
+# CONFIG_SCx200_ACB is not set
+CONFIG_I2C_SIS5595=m
+CONFIG_I2C_SIS630=m
+CONFIG_I2C_SIS96X=m
+CONFIG_I2C_VIA=m
+CONFIG_I2C_VIAPRO=m
+CONFIG_I2C_VOODOO3=m
+
+#
+# Hardware Sensors Chip support
+#
+CONFIG_I2C_SENSOR=m
+CONFIG_SENSORS_ADM1021=m
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+CONFIG_SENSORS_ASB100=m
+CONFIG_SENSORS_DS1621=m
+CONFIG_SENSORS_FSCHER=m
+CONFIG_SENSORS_GL518SM=m
+CONFIG_SENSORS_IT87=m
+CONFIG_SENSORS_LM75=m
+# CONFIG_SENSORS_LM77 is not set
+CONFIG_SENSORS_LM78=m
+CONFIG_SENSORS_LM80=m
+CONFIG_SENSORS_LM83=m
+CONFIG_SENSORS_LM85=m
+CONFIG_SENSORS_LM90=m
+# CONFIG_SENSORS_MAX1619 is not set
+CONFIG_SENSORS_VIA686A=m
+CONFIG_SENSORS_W83781D=m
+CONFIG_SENSORS_W83L785TS=m
+CONFIG_SENSORS_W83627HF=m
+
+#
+# Other I2C Chip support
+#
+CONFIG_SENSORS_EEPROM=m
+CONFIG_SENSORS_PCF8574=m
+CONFIG_SENSORS_PCF8591=m
+# CONFIG_SENSORS_RTC8564 is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+
+#
+# Dallas's 1-wire bus
+#
+# CONFIG_W1 is not set
+
+#
+# Misc devices
+#
+CONFIG_IBM_ASM=m
+
+#
+# Multimedia devices
+#
+CONFIG_VIDEO_DEV=m
+
+#
+# Video For Linux
+#
+
+#
+# Video Adapters
+#
+CONFIG_VIDEO_BT848=m
+CONFIG_VIDEO_PMS=m
+CONFIG_VIDEO_BWQCAM=m
+CONFIG_VIDEO_CQCAM=m
+CONFIG_VIDEO_W9966=m
+CONFIG_VIDEO_CPIA=m
+CONFIG_VIDEO_CPIA_PP=m
+CONFIG_VIDEO_CPIA_USB=m
+CONFIG_VIDEO_SAA5246A=m
+CONFIG_VIDEO_SAA5249=m
+CONFIG_TUNER_3036=m
+CONFIG_VIDEO_STRADIS=m
+CONFIG_VIDEO_ZORAN=m
+CONFIG_VIDEO_ZORAN_BUZ=m
+CONFIG_VIDEO_ZORAN_DC10=m
+CONFIG_VIDEO_ZORAN_DC30=m
+CONFIG_VIDEO_ZORAN_LML33=m
+CONFIG_VIDEO_ZORAN_LML33R10=m
+CONFIG_VIDEO_MEYE=m
+CONFIG_VIDEO_SAA7134=m
+CONFIG_VIDEO_MXB=m
+CONFIG_VIDEO_DPC=m
+CONFIG_VIDEO_HEXIUM_ORION=m
+CONFIG_VIDEO_HEXIUM_GEMINI=m
+CONFIG_VIDEO_CX88=m
+# CONFIG_VIDEO_OVCAMCHIP is not set
+
+#
+# Radio Adapters
+#
+CONFIG_RADIO_CADET=m
+CONFIG_RADIO_RTRACK=m
+CONFIG_RADIO_RTRACK2=m
+CONFIG_RADIO_AZTECH=m
+CONFIG_RADIO_GEMTEK=m
+CONFIG_RADIO_GEMTEK_PCI=m
+CONFIG_RADIO_MAXIRADIO=m
+CONFIG_RADIO_MAESTRO=m
+CONFIG_RADIO_SF16FMI=m
+CONFIG_RADIO_SF16FMR2=m
+CONFIG_RADIO_TERRATEC=m
+CONFIG_RADIO_TRUST=m
+CONFIG_RADIO_TYPHOON=m
+CONFIG_RADIO_TYPHOON_PROC_FS=y
+CONFIG_RADIO_ZOLTRIX=m
+
+#
+# Digital Video Broadcasting Devices
+#
+CONFIG_DVB=y
+CONFIG_DVB_CORE=m
+
+#
+# Supported Frontend Modules
+#
+CONFIG_DVB_TWINHAN_DST=m
+CONFIG_DVB_STV0299=m
+# CONFIG_DVB_SP887X is not set
+# CONFIG_DVB_ALPS_TDLB7 is not set
+CONFIG_DVB_ALPS_TDMB7=m
+CONFIG_DVB_ATMEL_AT76C651=m
+CONFIG_DVB_CX24110=m
+CONFIG_DVB_GRUNDIG_29504_491=m
+CONFIG_DVB_GRUNDIG_29504_401=m
+CONFIG_DVB_MT312=m
+CONFIG_DVB_VES1820=m
+CONFIG_DVB_VES1X93=m
+# CONFIG_DVB_TDA1004X is not set
+CONFIG_DVB_NXT6000=m
+
+#
+# Supported SAA7146 based PCI Adapters
+#
+CONFIG_DVB_AV7110=m
+CONFIG_DVB_AV7110_OSD=y
+CONFIG_DVB_BUDGET=m
+CONFIG_DVB_BUDGET_CI=m
+CONFIG_DVB_BUDGET_AV=m
+CONFIG_DVB_BUDGET_PATCH=m
+
+#
+# Supported USB Adapters
+#
+CONFIG_DVB_TTUSB_BUDGET=m
+CONFIG_DVB_TTUSB_DEC=m
+
+#
+# Supported FlexCopII (B2C2) Adapters
+#
+CONFIG_DVB_B2C2_SKYSTAR=m
+
+#
+# Supported BT878 Adapters
+#
+CONFIG_DVB_BT8XX=m
+CONFIG_VIDEO_SAA7146=m
+CONFIG_VIDEO_SAA7146_VV=m
+CONFIG_VIDEO_VIDEOBUF=m
+CONFIG_VIDEO_TUNER=m
+CONFIG_VIDEO_BUF=m
+CONFIG_VIDEO_BTCX=m
+CONFIG_VIDEO_IR=m
+
+#
+# Graphics support
+#
+CONFIG_FB=y
+# CONFIG_FB_CIRRUS is not set
+# CONFIG_FB_PM2 is not set
+# CONFIG_FB_CYBER2000 is not set
+# CONFIG_FB_ASILIANT is not set
+# CONFIG_FB_IMSTT is not set
+CONFIG_FB_VGA16=m
+CONFIG_FB_VESA=y
+CONFIG_VIDEO_SELECT=y
+CONFIG_FB_HGA=m
+# CONFIG_FB_HGA_ACCEL is not set
+CONFIG_FB_RIVA=m
+# CONFIG_FB_RIVA_I2C is not set
+CONFIG_FB_I810=m
+CONFIG_FB_I810_GTF=y
+CONFIG_FB_MATROX=m
+CONFIG_FB_MATROX_MILLENIUM=y
+CONFIG_FB_MATROX_MYSTIQUE=y
+CONFIG_FB_MATROX_G450=y
+CONFIG_FB_MATROX_G100=y
+CONFIG_FB_MATROX_I2C=m
+CONFIG_FB_MATROX_MAVEN=m
+CONFIG_FB_MATROX_MULTIHEAD=y
+# CONFIG_FB_RADEON_OLD is not set
+CONFIG_FB_RADEON=m
+CONFIG_FB_RADEON_I2C=y
+# CONFIG_FB_RADEON_DEBUG is not set
+CONFIG_FB_ATY128=m
+CONFIG_FB_ATY=m
+CONFIG_FB_ATY_CT=y
+CONFIG_FB_ATY_GX=y
+# CONFIG_FB_ATY_XL_INIT is not set
+# CONFIG_FB_SIS is not set
+CONFIG_FB_NEOMAGIC=m
+CONFIG_FB_KYRO=m
+CONFIG_FB_3DFX=m
+# CONFIG_FB_3DFX_ACCEL is not set
+CONFIG_FB_VOODOO1=m
+CONFIG_FB_TRIDENT=m
+# CONFIG_FB_TRIDENT_ACCEL is not set
+# CONFIG_FB_VIRTUAL is not set
+
+#
+# Console display driver support
+#
+CONFIG_VGA_CONSOLE=y
+CONFIG_MDA_CONSOLE=m
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+# CONFIG_FONTS is not set
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+
+#
+# Logo configuration
+#
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_LOGO_LINUX_CLUT224=y
+
+#
+# Sound
+#
+CONFIG_SOUND=m
+
+#
+# Advanced Linux Sound Architecture
+#
+CONFIG_SND=m
+CONFIG_SND_TIMER=m
+CONFIG_SND_PCM=m
+CONFIG_SND_HWDEP=m
+CONFIG_SND_RAWMIDI=m
+CONFIG_SND_SEQUENCER=m
+CONFIG_SND_SEQ_DUMMY=m
+CONFIG_SND_OSSEMUL=y
+CONFIG_SND_MIXER_OSS=m
+CONFIG_SND_PCM_OSS=m
+CONFIG_SND_SEQUENCER_OSS=y
+CONFIG_SND_RTCTIMER=m
+# CONFIG_SND_VERBOSE_PRINTK is not set
+# CONFIG_SND_DEBUG is not set
+
+#
+# Generic devices
+#
+CONFIG_SND_MPU401_UART=m
+CONFIG_SND_OPL3_LIB=m
+CONFIG_SND_OPL4_LIB=m
+CONFIG_SND_VX_LIB=m
+CONFIG_SND_DUMMY=m
+CONFIG_SND_VIRMIDI=m
+CONFIG_SND_MTPAV=m
+# CONFIG_SND_SERIAL_U16550 is not set
+CONFIG_SND_MPU401=m
+
+#
+# ISA devices
+#
+CONFIG_SND_AD1816A=m
+CONFIG_SND_AD1848=m
+CONFIG_SND_CS4231=m
+CONFIG_SND_CS4232=m
+CONFIG_SND_CS4236=m
+CONFIG_SND_ES968=m
+CONFIG_SND_ES1688=m
+CONFIG_SND_ES18XX=m
+CONFIG_SND_GUSCLASSIC=m
+CONFIG_SND_GUSEXTREME=m
+CONFIG_SND_GUSMAX=m
+CONFIG_SND_INTERWAVE=m
+CONFIG_SND_INTERWAVE_STB=m
+CONFIG_SND_OPTI92X_AD1848=m
+CONFIG_SND_OPTI92X_CS4231=m
+CONFIG_SND_OPTI93X=m
+CONFIG_SND_SB8=m
+CONFIG_SND_SB16=m
+CONFIG_SND_SBAWE=m
+CONFIG_SND_SB16_CSP=y
+# CONFIG_SND_WAVEFRONT is not set
+CONFIG_SND_ALS100=m
+CONFIG_SND_AZT2320=m
+CONFIG_SND_CMI8330=m
+CONFIG_SND_DT019X=m
+CONFIG_SND_OPL3SA2=m
+CONFIG_SND_SGALAXY=m
+CONFIG_SND_SSCAPE=m
+
+#
+# PCI devices
+#
+CONFIG_SND_AC97_CODEC=m
+CONFIG_SND_ALI5451=m
+CONFIG_SND_ATIIXP=m
+CONFIG_SND_AU8810=m
+CONFIG_SND_AU8820=m
+CONFIG_SND_AU8830=m
+CONFIG_SND_AZT3328=m
+CONFIG_SND_BT87X=m
+CONFIG_SND_CS46XX=m
+CONFIG_SND_CS46XX_NEW_DSP=y
+CONFIG_SND_CS4281=m
+CONFIG_SND_EMU10K1=m
+CONFIG_SND_KORG1212=m
+CONFIG_SND_MIXART=m
+CONFIG_SND_NM256=m
+CONFIG_SND_RME32=m
+CONFIG_SND_RME96=m
+CONFIG_SND_RME9652=m
+CONFIG_SND_HDSP=m
+CONFIG_SND_TRIDENT=m
+CONFIG_SND_YMFPCI=m
+CONFIG_SND_ALS4000=m
+CONFIG_SND_CMIPCI=m
+CONFIG_SND_ENS1370=m
+CONFIG_SND_ENS1371=m
+CONFIG_SND_ES1938=m
+CONFIG_SND_ES1968=m
+CONFIG_SND_MAESTRO3=m
+CONFIG_SND_FM801=m
+CONFIG_SND_FM801_TEA575X=m
+CONFIG_SND_ICE1712=m
+CONFIG_SND_ICE1724=m
+CONFIG_SND_INTEL8X0=m
+CONFIG_SND_INTEL8X0M=m
+CONFIG_SND_SONICVIBES=m
+CONFIG_SND_VIA82XX=m
+CONFIG_SND_VX222=m
+
+#
+# ALSA USB devices
+#
+CONFIG_SND_USB_AUDIO=m
+
+#
+# PCMCIA devices
+#
+# CONFIG_SND_VXPOCKET is not set
+# CONFIG_SND_VXP440 is not set
+CONFIG_SND_PDAUDIOCF=m
+
+#
+# Open Sound System
+#
+# CONFIG_SOUND_PRIME is not set
+
+#
+# USB support
+#
+CONFIG_USB=y
+# CONFIG_USB_DEBUG is not set
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEVICEFS=y
+# CONFIG_USB_BANDWIDTH is not set
+# CONFIG_USB_DYNAMIC_MINORS is not set
+
+#
+# USB Host Controller Drivers
+#
+CONFIG_USB_EHCI_HCD=m
+CONFIG_USB_EHCI_SPLIT_ISO=y
+CONFIG_USB_EHCI_ROOT_HUB_TT=y
+CONFIG_USB_OHCI_HCD=m
+CONFIG_USB_UHCI_HCD=m
+
+#
+# USB Device Class drivers
+#
+CONFIG_USB_AUDIO=m
+
+#
+# USB Bluetooth TTY can only be used with disabled Bluetooth subsystem
+#
+CONFIG_USB_MIDI=m
+CONFIG_USB_ACM=m
+CONFIG_USB_PRINTER=m
+CONFIG_USB_STORAGE=m
+# CONFIG_USB_STORAGE_DEBUG is not set
+# CONFIG_USB_STORAGE_RW_DETECT is not set
+CONFIG_USB_STORAGE_DATAFAB=y
+CONFIG_USB_STORAGE_FREECOM=y
+CONFIG_USB_STORAGE_ISD200=y
+CONFIG_USB_STORAGE_DPCM=y
+CONFIG_USB_STORAGE_HP8200e=y
+CONFIG_USB_STORAGE_SDDR09=y
+CONFIG_USB_STORAGE_SDDR55=y
+CONFIG_USB_STORAGE_JUMPSHOT=y
+
+#
+# USB Human Interface Devices (HID)
+#
+CONFIG_USB_HID=y
+CONFIG_USB_HIDINPUT=y
+CONFIG_HID_FF=y
+CONFIG_HID_PID=y
+CONFIG_LOGITECH_FF=y
+CONFIG_THRUSTMASTER_FF=y
+CONFIG_USB_HIDDEV=y
+CONFIG_USB_AIPTEK=m
+CONFIG_USB_WACOM=m
+CONFIG_USB_KBTAB=m
+CONFIG_USB_POWERMATE=m
+CONFIG_USB_MTOUCH=m
+# CONFIG_USB_EGALAX is not set
+CONFIG_USB_XPAD=m
+CONFIG_USB_ATI_REMOTE=m
+
+#
+# USB Imaging devices
+#
+CONFIG_USB_MDC800=m
+CONFIG_USB_MICROTEK=m
+CONFIG_USB_HPUSBSCSI=m
+
+#
+# USB Multimedia devices
+#
+CONFIG_USB_DABUSB=m
+CONFIG_USB_VICAM=m
+CONFIG_USB_DSBR=m
+CONFIG_USB_IBMCAM=m
+CONFIG_USB_KONICAWC=m
+CONFIG_USB_OV511=m
+# CONFIG_USB_PWC is not set
+CONFIG_USB_SE401=m
+# CONFIG_USB_SN9C102 is not set
+CONFIG_USB_STV680=m
+
+#
+# USB Network adaptors
+#
+CONFIG_USB_CATC=m
+CONFIG_USB_KAWETH=m
+CONFIG_USB_PEGASUS=m
+CONFIG_USB_RTL8150=m
+CONFIG_USB_USBNET=m
+
+#
+# USB Host-to-Host Cables
+#
+CONFIG_USB_ALI_M5632=y
+CONFIG_USB_AN2720=y
+CONFIG_USB_BELKIN=y
+CONFIG_USB_GENESYS=y
+CONFIG_USB_NET1080=y
+CONFIG_USB_PL2301=y
+
+#
+# Intelligent USB Devices/Gadgets
+#
+CONFIG_USB_ARMLINUX=y
+CONFIG_USB_EPSON2888=y
+CONFIG_USB_ZAURUS=y
+CONFIG_USB_CDCETHER=y
+
+#
+# USB Network Adapters
+#
+CONFIG_USB_AX8817X=y
+
+#
+# USB port drivers
+#
+CONFIG_USB_USS720=m
+
+#
+# USB Serial Converter support
+#
+CONFIG_USB_SERIAL=m
+CONFIG_USB_SERIAL_GENERIC=y
+CONFIG_USB_SERIAL_BELKIN=m
+CONFIG_USB_SERIAL_WHITEHEAT=m
+CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
+CONFIG_USB_SERIAL_EMPEG=m
+CONFIG_USB_SERIAL_FTDI_SIO=m
+CONFIG_USB_SERIAL_VISOR=m
+CONFIG_USB_SERIAL_IPAQ=m
+CONFIG_USB_SERIAL_IR=m
+CONFIG_USB_SERIAL_EDGEPORT=m
+CONFIG_USB_SERIAL_EDGEPORT_TI=m
+CONFIG_USB_SERIAL_KEYSPAN_PDA=m
+CONFIG_USB_SERIAL_KEYSPAN=m
+CONFIG_USB_SERIAL_KEYSPAN_MPR=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28X=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28XA=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28XB=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19=y
+CONFIG_USB_SERIAL_KEYSPAN_USA18X=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19W=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19QW=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19QI=y
+CONFIG_USB_SERIAL_KEYSPAN_USA49W=y
+CONFIG_USB_SERIAL_KEYSPAN_USA49WLC=y
+CONFIG_USB_SERIAL_KLSI=m
+CONFIG_USB_SERIAL_KOBIL_SCT=m
+CONFIG_USB_SERIAL_MCT_U232=m
+CONFIG_USB_SERIAL_PL2303=m
+CONFIG_USB_SERIAL_SAFE=m
+CONFIG_USB_SERIAL_SAFE_PADDED=y
+CONFIG_USB_SERIAL_CYBERJACK=m
+CONFIG_USB_SERIAL_XIRCOM=m
+CONFIG_USB_SERIAL_OMNINET=m
+CONFIG_USB_EZUSB=y
+
+#
+# USB Miscellaneous drivers
+#
+CONFIG_USB_EMI62=m
+# CONFIG_USB_EMI26 is not set
+CONFIG_USB_TIGL=m
+CONFIG_USB_AUERSWALD=m
+CONFIG_USB_RIO500=m
+CONFIG_USB_LEGOTOWER=m
+CONFIG_USB_LCD=m
+CONFIG_USB_LED=m
+CONFIG_USB_CYTHERM=m
+# CONFIG_USB_PHIDGETSERVO is not set
+CONFIG_USB_TEST=m
+
+#
+# USB Gadget Support
+#
+# CONFIG_USB_GADGET is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_FS=m
+CONFIG_EXT3_FS_XATTR=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_JBD=m
+# CONFIG_JBD_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+CONFIG_REISERFS_FS=m
+# CONFIG_REISERFS_CHECK is not set
+CONFIG_REISERFS_PROC_INFO=y
+# CONFIG_REISERFS_FS_XATTR is not set
+CONFIG_JFS_FS=m
+CONFIG_JFS_POSIX_ACL=y
+# CONFIG_JFS_DEBUG is not set
+# CONFIG_JFS_STATISTICS is not set
+CONFIG_FS_POSIX_ACL=y
+CONFIG_XFS_FS=m
+# CONFIG_XFS_RT is not set
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_SECURITY=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_MINIX_FS=m
+CONFIG_ROMFS_FS=m
+CONFIG_QUOTA=y
+# CONFIG_QFMT_V1 is not set
+CONFIG_QFMT_V2=y
+CONFIG_QUOTACTL=y
+CONFIG_AUTOFS_FS=m
+CONFIG_AUTOFS4_FS=m
+
+#
+# CD-ROM/DVD Filesystems
+#
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_ZISOFS_FS=y
+CONFIG_UDF_FS=m
+CONFIG_UDF_NLS=y
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+CONFIG_NTFS_FS=y
+# CONFIG_NTFS_DEBUG is not set
+# CONFIG_NTFS_RW is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_SYSFS=y
+# CONFIG_DEVFS_FS is not set
+CONFIG_DEVPTS_FS_XATTR=y
+CONFIG_DEVPTS_FS_SECURITY=y
+CONFIG_TMPFS=y
+CONFIG_HUGETLBFS=y
+CONFIG_HUGETLB_PAGE=y
+CONFIG_RAMFS=y
+CONFIG_RELAYFS_FS=y
+CONFIG_KLOG_CHANNEL=y
+CONFIG_KLOG_CHANNEL_AUTOENABLE=y
+CONFIG_KLOG_CHANNEL_SHIFT=21
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+CONFIG_AFFS_FS=m
+CONFIG_HFS_FS=m
+CONFIG_HFSPLUS_FS=m
+CONFIG_BEFS_FS=m
+# CONFIG_BEFS_DEBUG is not set
+CONFIG_BFS_FS=m
+CONFIG_EFS_FS=m
+# CONFIG_JFFS_FS is not set
+CONFIG_JFFS2_FS=m
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_NAND=y
+CONFIG_CRAMFS=m
+CONFIG_VXFS_FS=m
+# CONFIG_HPFS_FS is not set
+CONFIG_QNX4FS_FS=m
+# CONFIG_QNX4FS_RW is not set
+CONFIG_SYSV_FS=m
+CONFIG_UFS_FS=m
+# CONFIG_UFS_FS_WRITE is not set
+
+#
+# Network File Systems
+#
+CONFIG_NFS_FS=m
+CONFIG_NFS_V3=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_DIRECTIO=y
+CONFIG_NFSD=m
+CONFIG_NFSD_V3=y
+CONFIG_NFSD_V4=y
+CONFIG_NFSD_TCP=y
+CONFIG_LOCKD=m
+CONFIG_LOCKD_V4=y
+CONFIG_EXPORTFS=m
+CONFIG_SUNRPC=m
+CONFIG_SUNRPC_GSS=m
+CONFIG_RPCSEC_GSS_KRB5=m
+CONFIG_SMB_FS=m
+# CONFIG_SMB_NLS_DEFAULT is not set
+CONFIG_CIFS=m
+# CONFIG_CIFS_STATS is not set
+# CONFIG_CIFS_POSIX is not set
+CONFIG_NCP_FS=m
+CONFIG_NCPFS_PACKET_SIGNING=y
+CONFIG_NCPFS_IOCTL_LOCKING=y
+CONFIG_NCPFS_STRONG=y
+CONFIG_NCPFS_NFS_NS=y
+CONFIG_NCPFS_OS2_NS=y
+CONFIG_NCPFS_SMALLDOS=y
+CONFIG_NCPFS_NLS=y
+CONFIG_NCPFS_EXTRAS=y
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+CONFIG_OSF_PARTITION=y
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+CONFIG_MAC_PARTITION=y
+CONFIG_MSDOS_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+# CONFIG_LDM_PARTITION is not set
+CONFIG_SGI_PARTITION=y
+# CONFIG_ULTRIX_PARTITION is not set
+CONFIG_SUN_PARTITION=y
+CONFIG_EFI_PARTITION=y
+
+#
+# Native Language Support
+#
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+# CONFIG_NLS_ASCII is not set
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_UTF8=m
+
+#
+# Profiling support
+#
+# CONFIG_PROFILING is not set
+
+#
+# Kernel hacking
+#
+CONFIG_DEBUG_KERNEL=y
+CONFIG_EARLY_PRINTK=y
+CONFIG_DEBUG_STACKOVERFLOW=y
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_DEBUG_SLAB is not set
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_PAGEALLOC is not set
+CONFIG_DEBUG_HIGHMEM=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_SPINLOCK_SLEEP=y
+# CONFIG_FRAME_POINTER is not set
+
+#
+# Linux VServer
+#
+CONFIG_VSERVER_LEGACY=y
+CONFIG_PROC_SECURE=y
+# CONFIG_VSERVER_HARDCPU is not set
+# CONFIG_INOXID_NONE is not set
+# CONFIG_INOXID_GID16 is not set
+CONFIG_INOXID_GID24=y
+# CONFIG_INOXID_GID32 is not set
+# CONFIG_INOXID_MAGIC is not set
+
+#
+# Security options
+#
+CONFIG_SECURITY=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_CAPABILITIES=y
+# CONFIG_SECURITY_ROOTPLUG is not set
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM=y
+CONFIG_SECURITY_SELINUX_DISABLE=y
+CONFIG_SECURITY_SELINUX_DEVELOP=y
+# CONFIG_SECURITY_SELINUX_MLS is not set
+
+#
+# Cryptographic options
+#
+CONFIG_CRYPTO=y
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_SHA1=m
+CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_DES=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_AES=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+# CONFIG_CRYPTO_TEA is not set
+CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_DEFLATE=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_CRC32C=m
+# CONFIG_CRYPTO_TEST is not set
+# CONFIG_CRYPTO_SIGNATURE is not set
+# CONFIG_CRYPTO_MPILIB is not set
+
+#
+# Library routines
+#
+CONFIG_CRC_CCITT=m
+CONFIG_CRC32=y
+CONFIG_LIBCRC32C=m
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=m
+CONFIG_X86_BIOS_REBOOT=y
+CONFIG_PC=y
#
CONFIG_EXPERIMENTAL=y
CONFIG_CLEAN_COMPILE=y
+CONFIG_STANDALONE=y
#
# General setup
CONFIG_PCI_BIOS=y
CONFIG_PCI_DIRECT=y
CONFIG_PCI_MMCONFIG=y
-CONFIG_PCI_MSI=y
+# CONFIG_PCI_USE_VECTOR is not set
CONFIG_PCI_LEGACY_PROC=y
# CONFIG_PCI_NAMES is not set
CONFIG_ISA=y
#
# Generic Driver Options
#
-CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
CONFIG_FW_LOADER=y
# CONFIG_DEBUG_DRIVER is not set
#
CONFIG_MTD=m
# CONFIG_MTD_DEBUG is not set
-CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_PARTITIONS=m
CONFIG_MTD_CONCAT=m
CONFIG_MTD_REDBOOT_PARTS=m
-# CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED is not set
-# CONFIG_MTD_REDBOOT_PARTS_READONLY is not set
-CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_CMDLINE_PARTS=m
#
# User Modules And Translation Layers
CONFIG_MTD_JEDECPROBE=m
CONFIG_MTD_GEN_PROBE=m
# CONFIG_MTD_CFI_ADV_OPTIONS is not set
-CONFIG_MTD_MAP_BANK_WIDTH_1=y
-CONFIG_MTD_MAP_BANK_WIDTH_2=y
-CONFIG_MTD_MAP_BANK_WIDTH_4=y
-# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
-# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
-# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
-CONFIG_MTD_CFI_I1=y
-CONFIG_MTD_CFI_I2=y
-# CONFIG_MTD_CFI_I4 is not set
-# CONFIG_MTD_CFI_I8 is not set
CONFIG_MTD_CFI_INTELEXT=m
CONFIG_MTD_CFI_AMDSTD=m
-CONFIG_MTD_CFI_AMDSTD_RETRY=3
CONFIG_MTD_CFI_STAA=m
-CONFIG_MTD_CFI_UTIL=m
CONFIG_MTD_RAM=m
CONFIG_MTD_ROM=m
CONFIG_MTD_ABSENT=m
+# CONFIG_MTD_OBSOLETE_CHIPS is not set
#
# Mapping drivers for chip access
CONFIG_MTD_ELAN_104NC=m
CONFIG_MTD_SCx200_DOCFLASH=m
CONFIG_MTD_AMD76XROM=m
-# CONFIG_MTD_ICHXROM is not set
+CONFIG_MTD_ICH2ROM=m
CONFIG_MTD_SCB2_FLASH=m
# CONFIG_MTD_NETtel is not set
# CONFIG_MTD_DILNETPC is not set
# CONFIG_MTD_PMC551_BUGFIX is not set
# CONFIG_MTD_PMC551_DEBUG is not set
# CONFIG_MTD_SLRAM is not set
-# CONFIG_MTD_PHRAM is not set
CONFIG_MTD_MTDRAM=m
CONFIG_MTDRAM_TOTAL_SIZE=4096
CONFIG_MTDRAM_ERASE_SIZE=128
# CONFIG_MTD_DOC2001 is not set
CONFIG_MTD_DOC2001PLUS=m
CONFIG_MTD_DOCPROBE=m
-CONFIG_MTD_DOCECC=m
# CONFIG_MTD_DOCPROBE_ADVANCED is not set
CONFIG_MTD_DOCPROBE_ADDRESS=0
CONFIG_MTD_NAND=m
# CONFIG_MTD_NAND_VERIFY_WRITE is not set
CONFIG_MTD_NAND_IDS=m
-# CONFIG_MTD_NAND_DISKONCHIP is not set
#
# Parallel port support
CONFIG_BLK_DEV_IDECD=y
CONFIG_BLK_DEV_IDETAPE=m
CONFIG_BLK_DEV_IDEFLOPPY=y
-CONFIG_BLK_DEV_IDESCSI=m
+# CONFIG_BLK_DEV_IDESCSI is not set
# CONFIG_IDE_TASK_IOCTL is not set
# CONFIG_IDE_TASKFILE_IO is not set
CONFIG_NET_DIVERT=y
# CONFIG_ECONET is not set
CONFIG_WAN_ROUTER=m
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# QoS and/or fair queueing
#
CONFIG_NET_SCHED=y
-CONFIG_NET_SCH_CLK_JIFFIES=y
-# CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set
-# CONFIG_NET_SCH_CLK_CPU is not set
CONFIG_NET_SCH_CBQ=m
CONFIG_NET_SCH_HTB=m
CONFIG_NET_SCH_HFSC=m
CONFIG_NET_CLS_IND=y
CONFIG_NET_CLS_RSVP=m
CONFIG_NET_CLS_RSVP6=m
-# CONFIG_NET_CLS_ACT is not set
-CONFIG_NET_CLS_POLICE=y
+CONFIG_NET_CLS_ACT=y
+# CONFIG_NET_ACT_POLICE is not set
+# CONFIG_NET_CLS_POLICE is not set
#
# Network testing
CONFIG_BT_HCIBLUECARD=m
CONFIG_BT_HCIBTUART=m
CONFIG_BT_HCIVHCI=m
-# CONFIG_TUX is not set
CONFIG_NETDEVICES=y
CONFIG_DUMMY=m
CONFIG_BONDING=m
CONFIG_FB_HGA_ACCEL=y
CONFIG_FB_RIVA=m
# CONFIG_FB_RIVA_I2C is not set
-# CONFIG_FB_RIVA_DEBUG is not set
CONFIG_FB_I810=m
CONFIG_FB_I810_GTF=y
CONFIG_FB_MATROX=m
CONFIG_JFFS2_FS=m
CONFIG_JFFS2_FS_DEBUG=0
CONFIG_JFFS2_FS_NAND=y
-# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
-CONFIG_JFFS2_ZLIB=y
-CONFIG_JFFS2_RTIME=y
-# CONFIG_JFFS2_RUBIN is not set
CONFIG_CRAMFS=m
CONFIG_VXFS_FS=m
# CONFIG_HPFS_FS is not set
# CONFIG_SMB_NLS_DEFAULT is not set
CONFIG_CIFS=m
# CONFIG_CIFS_STATS is not set
-CONFIG_CIFS_XATTR=y
CONFIG_CIFS_POSIX=y
CONFIG_NCP_FS=m
CONFIG_NCPFS_PACKET_SIGNING=y
CONFIG_DEBUG_KERNEL=y
CONFIG_EARLY_PRINTK=y
CONFIG_DEBUG_STACKOVERFLOW=y
-CONFIG_DEBUG_STACK_USAGE=y
-# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+CONFIG_DEBUG_SLAB=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_SPINLOCK=y
# CONFIG_DEBUG_PAGEALLOC is not set
-# CONFIG_DEBUG_HIGHMEM is not set
+CONFIG_DEBUG_HIGHMEM=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_SPINLOCK_SLEEP=y
# CONFIG_FRAME_POINTER is not set
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_MD4=m
-CONFIG_CRYPTO_MD5=m
+CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_SHA1=y
CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_AES_586=m
+CONFIG_CRYPTO_AES=m
CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_ARC4=m
-CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_DEFLATE=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_CRC32C=m
#
CONFIG_EXPERIMENTAL=y
CONFIG_CLEAN_COMPILE=y
+CONFIG_STANDALONE=y
CONFIG_BROKEN_ON_SMP=y
#
#
# Generic Driver Options
#
-CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
CONFIG_FW_LOADER=y
# CONFIG_DEBUG_DRIVER is not set
#
CONFIG_MTD=m
# CONFIG_MTD_DEBUG is not set
-CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_PARTITIONS=m
CONFIG_MTD_CONCAT=m
CONFIG_MTD_REDBOOT_PARTS=m
-# CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED is not set
-# CONFIG_MTD_REDBOOT_PARTS_READONLY is not set
-CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_CMDLINE_PARTS=m
#
# User Modules And Translation Layers
CONFIG_MTD_JEDECPROBE=m
CONFIG_MTD_GEN_PROBE=m
# CONFIG_MTD_CFI_ADV_OPTIONS is not set
-CONFIG_MTD_MAP_BANK_WIDTH_1=y
-CONFIG_MTD_MAP_BANK_WIDTH_2=y
-CONFIG_MTD_MAP_BANK_WIDTH_4=y
-# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
-# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
-# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
-CONFIG_MTD_CFI_I1=y
-CONFIG_MTD_CFI_I2=y
-# CONFIG_MTD_CFI_I4 is not set
-# CONFIG_MTD_CFI_I8 is not set
CONFIG_MTD_CFI_INTELEXT=m
CONFIG_MTD_CFI_AMDSTD=m
-CONFIG_MTD_CFI_AMDSTD_RETRY=3
CONFIG_MTD_CFI_STAA=m
-CONFIG_MTD_CFI_UTIL=m
CONFIG_MTD_RAM=m
CONFIG_MTD_ROM=m
CONFIG_MTD_ABSENT=m
+# CONFIG_MTD_OBSOLETE_CHIPS is not set
#
# Mapping drivers for chip access
CONFIG_MTD_ELAN_104NC=m
CONFIG_MTD_SCx200_DOCFLASH=m
CONFIG_MTD_AMD76XROM=m
-# CONFIG_MTD_ICHXROM is not set
+CONFIG_MTD_ICH2ROM=m
CONFIG_MTD_SCB2_FLASH=m
# CONFIG_MTD_NETtel is not set
# CONFIG_MTD_DILNETPC is not set
# CONFIG_MTD_PMC551_BUGFIX is not set
# CONFIG_MTD_PMC551_DEBUG is not set
# CONFIG_MTD_SLRAM is not set
-# CONFIG_MTD_PHRAM is not set
CONFIG_MTD_MTDRAM=m
CONFIG_MTDRAM_TOTAL_SIZE=4096
CONFIG_MTDRAM_ERASE_SIZE=128
# CONFIG_MTD_DOC2001 is not set
CONFIG_MTD_DOC2001PLUS=m
CONFIG_MTD_DOCPROBE=m
-CONFIG_MTD_DOCECC=m
# CONFIG_MTD_DOCPROBE_ADVANCED is not set
CONFIG_MTD_DOCPROBE_ADDRESS=0
CONFIG_MTD_NAND=m
# CONFIG_MTD_NAND_VERIFY_WRITE is not set
CONFIG_MTD_NAND_IDS=m
-# CONFIG_MTD_NAND_DISKONCHIP is not set
#
# Parallel port support
CONFIG_BLK_DEV_IDECD=y
CONFIG_BLK_DEV_IDETAPE=m
CONFIG_BLK_DEV_IDEFLOPPY=y
-CONFIG_BLK_DEV_IDESCSI=m
+# CONFIG_BLK_DEV_IDESCSI is not set
# CONFIG_IDE_TASK_IOCTL is not set
# CONFIG_IDE_TASKFILE_IO is not set
CONFIG_NET_DIVERT=y
# CONFIG_ECONET is not set
CONFIG_WAN_ROUTER=m
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# QoS and/or fair queueing
#
CONFIG_NET_SCHED=y
-CONFIG_NET_SCH_CLK_JIFFIES=y
-# CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set
-# CONFIG_NET_SCH_CLK_CPU is not set
CONFIG_NET_SCH_CBQ=m
CONFIG_NET_SCH_HTB=m
CONFIG_NET_SCH_HFSC=m
CONFIG_NET_CLS_IND=y
CONFIG_NET_CLS_RSVP=m
CONFIG_NET_CLS_RSVP6=m
-# CONFIG_NET_CLS_ACT is not set
-CONFIG_NET_CLS_POLICE=y
+CONFIG_NET_CLS_ACT=y
+# CONFIG_NET_ACT_POLICE is not set
+# CONFIG_NET_CLS_POLICE is not set
#
# Network testing
CONFIG_BT_HCIBLUECARD=m
CONFIG_BT_HCIBTUART=m
CONFIG_BT_HCIVHCI=m
-# CONFIG_TUX is not set
CONFIG_NETDEVICES=y
CONFIG_DUMMY=m
CONFIG_BONDING=m
CONFIG_FB_HGA_ACCEL=y
CONFIG_FB_RIVA=m
# CONFIG_FB_RIVA_I2C is not set
-# CONFIG_FB_RIVA_DEBUG is not set
CONFIG_FB_I810=m
CONFIG_FB_I810_GTF=y
CONFIG_FB_MATROX=m
CONFIG_JFFS2_FS=m
CONFIG_JFFS2_FS_DEBUG=0
CONFIG_JFFS2_FS_NAND=y
-# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
-CONFIG_JFFS2_ZLIB=y
-CONFIG_JFFS2_RTIME=y
-# CONFIG_JFFS2_RUBIN is not set
CONFIG_CRAMFS=m
CONFIG_VXFS_FS=m
# CONFIG_HPFS_FS is not set
# CONFIG_SMB_NLS_DEFAULT is not set
CONFIG_CIFS=m
# CONFIG_CIFS_STATS is not set
-CONFIG_CIFS_XATTR=y
CONFIG_CIFS_POSIX=y
CONFIG_NCP_FS=m
CONFIG_NCPFS_PACKET_SIGNING=y
CONFIG_DEBUG_KERNEL=y
CONFIG_EARLY_PRINTK=y
CONFIG_DEBUG_STACKOVERFLOW=y
-CONFIG_DEBUG_STACK_USAGE=y
-# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+CONFIG_DEBUG_SLAB=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_SPINLOCK=y
# CONFIG_DEBUG_PAGEALLOC is not set
-# CONFIG_DEBUG_HIGHMEM is not set
+CONFIG_DEBUG_HIGHMEM=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_SPINLOCK_SLEEP=y
# CONFIG_FRAME_POINTER is not set
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_MD4=m
-CONFIG_CRYPTO_MD5=m
+CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_SHA1=y
CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_AES_586=m
+CONFIG_CRYPTO_AES=m
CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_ARC4=m
-CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_DEFLATE=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_CRC32C=m
#
CONFIG_EXPERIMENTAL=y
CONFIG_CLEAN_COMPILE=y
+CONFIG_STANDALONE=y
#
# General setup
CONFIG_PCI_BIOS=y
CONFIG_PCI_DIRECT=y
CONFIG_PCI_MMCONFIG=y
-CONFIG_PCI_MSI=y
+# CONFIG_PCI_USE_VECTOR is not set
CONFIG_PCI_LEGACY_PROC=y
# CONFIG_PCI_NAMES is not set
CONFIG_ISA=y
#
# Generic Driver Options
#
-CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
CONFIG_FW_LOADER=y
# CONFIG_DEBUG_DRIVER is not set
#
CONFIG_MTD=m
# CONFIG_MTD_DEBUG is not set
-CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_PARTITIONS=m
CONFIG_MTD_CONCAT=m
CONFIG_MTD_REDBOOT_PARTS=m
-# CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED is not set
-# CONFIG_MTD_REDBOOT_PARTS_READONLY is not set
-CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_CMDLINE_PARTS=m
#
# User Modules And Translation Layers
CONFIG_MTD_JEDECPROBE=m
CONFIG_MTD_GEN_PROBE=m
# CONFIG_MTD_CFI_ADV_OPTIONS is not set
-CONFIG_MTD_MAP_BANK_WIDTH_1=y
-CONFIG_MTD_MAP_BANK_WIDTH_2=y
-CONFIG_MTD_MAP_BANK_WIDTH_4=y
-# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
-# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
-# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
-CONFIG_MTD_CFI_I1=y
-CONFIG_MTD_CFI_I2=y
-# CONFIG_MTD_CFI_I4 is not set
-# CONFIG_MTD_CFI_I8 is not set
CONFIG_MTD_CFI_INTELEXT=m
CONFIG_MTD_CFI_AMDSTD=m
-CONFIG_MTD_CFI_AMDSTD_RETRY=3
CONFIG_MTD_CFI_STAA=m
-CONFIG_MTD_CFI_UTIL=m
CONFIG_MTD_RAM=m
CONFIG_MTD_ROM=m
CONFIG_MTD_ABSENT=m
+# CONFIG_MTD_OBSOLETE_CHIPS is not set
#
# Mapping drivers for chip access
CONFIG_MTD_ELAN_104NC=m
CONFIG_MTD_SCx200_DOCFLASH=m
CONFIG_MTD_AMD76XROM=m
-# CONFIG_MTD_ICHXROM is not set
+CONFIG_MTD_ICH2ROM=m
CONFIG_MTD_SCB2_FLASH=m
# CONFIG_MTD_NETtel is not set
# CONFIG_MTD_DILNETPC is not set
# CONFIG_MTD_PMC551_BUGFIX is not set
# CONFIG_MTD_PMC551_DEBUG is not set
# CONFIG_MTD_SLRAM is not set
-# CONFIG_MTD_PHRAM is not set
CONFIG_MTD_MTDRAM=m
CONFIG_MTDRAM_TOTAL_SIZE=4096
CONFIG_MTDRAM_ERASE_SIZE=128
# CONFIG_MTD_DOC2001 is not set
CONFIG_MTD_DOC2001PLUS=m
CONFIG_MTD_DOCPROBE=m
-CONFIG_MTD_DOCECC=m
# CONFIG_MTD_DOCPROBE_ADVANCED is not set
CONFIG_MTD_DOCPROBE_ADDRESS=0
CONFIG_MTD_NAND=m
# CONFIG_MTD_NAND_VERIFY_WRITE is not set
CONFIG_MTD_NAND_IDS=m
-# CONFIG_MTD_NAND_DISKONCHIP is not set
#
# Parallel port support
CONFIG_BLK_DEV_IDECD=y
CONFIG_BLK_DEV_IDETAPE=m
CONFIG_BLK_DEV_IDEFLOPPY=y
-CONFIG_BLK_DEV_IDESCSI=m
+# CONFIG_BLK_DEV_IDESCSI is not set
# CONFIG_IDE_TASK_IOCTL is not set
# CONFIG_IDE_TASKFILE_IO is not set
CONFIG_NET_DIVERT=y
# CONFIG_ECONET is not set
CONFIG_WAN_ROUTER=m
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# QoS and/or fair queueing
#
CONFIG_NET_SCHED=y
-CONFIG_NET_SCH_CLK_JIFFIES=y
-# CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set
-# CONFIG_NET_SCH_CLK_CPU is not set
CONFIG_NET_SCH_CBQ=m
CONFIG_NET_SCH_HTB=m
CONFIG_NET_SCH_HFSC=m
CONFIG_NET_CLS_IND=y
CONFIG_NET_CLS_RSVP=m
CONFIG_NET_CLS_RSVP6=m
-# CONFIG_NET_CLS_ACT is not set
-CONFIG_NET_CLS_POLICE=y
+CONFIG_NET_CLS_ACT=y
+# CONFIG_NET_ACT_POLICE is not set
+# CONFIG_NET_CLS_POLICE is not set
#
# Network testing
CONFIG_BT_HCIBLUECARD=m
CONFIG_BT_HCIBTUART=m
CONFIG_BT_HCIVHCI=m
-# CONFIG_TUX is not set
CONFIG_NETDEVICES=y
CONFIG_DUMMY=m
CONFIG_BONDING=m
CONFIG_FB_HGA_ACCEL=y
CONFIG_FB_RIVA=m
# CONFIG_FB_RIVA_I2C is not set
-# CONFIG_FB_RIVA_DEBUG is not set
CONFIG_FB_I810=m
CONFIG_FB_I810_GTF=y
CONFIG_FB_MATROX=m
CONFIG_JFFS2_FS=m
CONFIG_JFFS2_FS_DEBUG=0
CONFIG_JFFS2_FS_NAND=y
-# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
-CONFIG_JFFS2_ZLIB=y
-CONFIG_JFFS2_RTIME=y
-# CONFIG_JFFS2_RUBIN is not set
CONFIG_CRAMFS=m
CONFIG_VXFS_FS=m
# CONFIG_HPFS_FS is not set
# CONFIG_SMB_NLS_DEFAULT is not set
CONFIG_CIFS=m
# CONFIG_CIFS_STATS is not set
-CONFIG_CIFS_XATTR=y
CONFIG_CIFS_POSIX=y
CONFIG_NCP_FS=m
CONFIG_NCPFS_PACKET_SIGNING=y
CONFIG_DEBUG_KERNEL=y
CONFIG_EARLY_PRINTK=y
CONFIG_DEBUG_STACKOVERFLOW=y
-CONFIG_DEBUG_STACK_USAGE=y
-# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+CONFIG_DEBUG_SLAB=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_SPINLOCK=y
# CONFIG_DEBUG_PAGEALLOC is not set
-# CONFIG_DEBUG_HIGHMEM is not set
+CONFIG_DEBUG_HIGHMEM=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_SPINLOCK_SLEEP=y
# CONFIG_FRAME_POINTER is not set
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_MD4=m
-CONFIG_CRYPTO_MD5=m
+CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_SHA1=y
CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_AES_586=m
+CONFIG_CRYPTO_AES=m
CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_ARC4=m
-CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_DEFLATE=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_CRC32C=m
#
CONFIG_EXPERIMENTAL=y
CONFIG_CLEAN_COMPILE=y
+CONFIG_STANDALONE=y
CONFIG_BROKEN_ON_SMP=y
#
#
# Generic Driver Options
#
-CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
CONFIG_FW_LOADER=y
# CONFIG_DEBUG_DRIVER is not set
#
CONFIG_MTD=m
# CONFIG_MTD_DEBUG is not set
-CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_PARTITIONS=m
CONFIG_MTD_CONCAT=m
CONFIG_MTD_REDBOOT_PARTS=m
-# CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED is not set
-# CONFIG_MTD_REDBOOT_PARTS_READONLY is not set
-CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_CMDLINE_PARTS=m
#
# User Modules And Translation Layers
CONFIG_MTD_JEDECPROBE=m
CONFIG_MTD_GEN_PROBE=m
# CONFIG_MTD_CFI_ADV_OPTIONS is not set
-CONFIG_MTD_MAP_BANK_WIDTH_1=y
-CONFIG_MTD_MAP_BANK_WIDTH_2=y
-CONFIG_MTD_MAP_BANK_WIDTH_4=y
-# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
-# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
-# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
-CONFIG_MTD_CFI_I1=y
-CONFIG_MTD_CFI_I2=y
-# CONFIG_MTD_CFI_I4 is not set
-# CONFIG_MTD_CFI_I8 is not set
CONFIG_MTD_CFI_INTELEXT=m
CONFIG_MTD_CFI_AMDSTD=m
-CONFIG_MTD_CFI_AMDSTD_RETRY=3
CONFIG_MTD_CFI_STAA=m
-CONFIG_MTD_CFI_UTIL=m
CONFIG_MTD_RAM=m
CONFIG_MTD_ROM=m
CONFIG_MTD_ABSENT=m
+# CONFIG_MTD_OBSOLETE_CHIPS is not set
#
# Mapping drivers for chip access
CONFIG_MTD_ELAN_104NC=m
CONFIG_MTD_SCx200_DOCFLASH=m
CONFIG_MTD_AMD76XROM=m
-# CONFIG_MTD_ICHXROM is not set
+CONFIG_MTD_ICH2ROM=m
CONFIG_MTD_SCB2_FLASH=m
# CONFIG_MTD_NETtel is not set
# CONFIG_MTD_DILNETPC is not set
# CONFIG_MTD_PMC551_BUGFIX is not set
# CONFIG_MTD_PMC551_DEBUG is not set
# CONFIG_MTD_SLRAM is not set
-# CONFIG_MTD_PHRAM is not set
CONFIG_MTD_MTDRAM=m
CONFIG_MTDRAM_TOTAL_SIZE=4096
CONFIG_MTDRAM_ERASE_SIZE=128
# CONFIG_MTD_DOC2001 is not set
CONFIG_MTD_DOC2001PLUS=m
CONFIG_MTD_DOCPROBE=m
-CONFIG_MTD_DOCECC=m
# CONFIG_MTD_DOCPROBE_ADVANCED is not set
CONFIG_MTD_DOCPROBE_ADDRESS=0
CONFIG_MTD_NAND=m
# CONFIG_MTD_NAND_VERIFY_WRITE is not set
CONFIG_MTD_NAND_IDS=m
-# CONFIG_MTD_NAND_DISKONCHIP is not set
#
# Parallel port support
CONFIG_BLK_DEV_IDECD=y
CONFIG_BLK_DEV_IDETAPE=m
CONFIG_BLK_DEV_IDEFLOPPY=y
-CONFIG_BLK_DEV_IDESCSI=m
+# CONFIG_BLK_DEV_IDESCSI is not set
# CONFIG_IDE_TASK_IOCTL is not set
# CONFIG_IDE_TASKFILE_IO is not set
CONFIG_NET_DIVERT=y
# CONFIG_ECONET is not set
CONFIG_WAN_ROUTER=m
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# QoS and/or fair queueing
#
CONFIG_NET_SCHED=y
-CONFIG_NET_SCH_CLK_JIFFIES=y
-# CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set
-# CONFIG_NET_SCH_CLK_CPU is not set
CONFIG_NET_SCH_CBQ=m
CONFIG_NET_SCH_HTB=m
CONFIG_NET_SCH_HFSC=m
CONFIG_NET_CLS_IND=y
CONFIG_NET_CLS_RSVP=m
CONFIG_NET_CLS_RSVP6=m
-# CONFIG_NET_CLS_ACT is not set
-CONFIG_NET_CLS_POLICE=y
+CONFIG_NET_CLS_ACT=y
+# CONFIG_NET_ACT_POLICE is not set
+# CONFIG_NET_CLS_POLICE is not set
#
# Network testing
CONFIG_BT_HCIBLUECARD=m
CONFIG_BT_HCIBTUART=m
CONFIG_BT_HCIVHCI=m
-# CONFIG_TUX is not set
CONFIG_NETDEVICES=y
CONFIG_DUMMY=m
CONFIG_BONDING=m
CONFIG_FB_HGA_ACCEL=y
CONFIG_FB_RIVA=m
# CONFIG_FB_RIVA_I2C is not set
-# CONFIG_FB_RIVA_DEBUG is not set
CONFIG_FB_I810=m
CONFIG_FB_I810_GTF=y
CONFIG_FB_MATROX=m
CONFIG_JFFS2_FS=m
CONFIG_JFFS2_FS_DEBUG=0
CONFIG_JFFS2_FS_NAND=y
-# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
-CONFIG_JFFS2_ZLIB=y
-CONFIG_JFFS2_RTIME=y
-# CONFIG_JFFS2_RUBIN is not set
CONFIG_CRAMFS=m
CONFIG_VXFS_FS=m
# CONFIG_HPFS_FS is not set
# CONFIG_SMB_NLS_DEFAULT is not set
CONFIG_CIFS=m
# CONFIG_CIFS_STATS is not set
-CONFIG_CIFS_XATTR=y
CONFIG_CIFS_POSIX=y
CONFIG_NCP_FS=m
CONFIG_NCPFS_PACKET_SIGNING=y
CONFIG_DEBUG_KERNEL=y
CONFIG_EARLY_PRINTK=y
CONFIG_DEBUG_STACKOVERFLOW=y
-CONFIG_DEBUG_STACK_USAGE=y
-# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+CONFIG_DEBUG_SLAB=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_SPINLOCK=y
# CONFIG_DEBUG_PAGEALLOC is not set
-# CONFIG_DEBUG_HIGHMEM is not set
+CONFIG_DEBUG_HIGHMEM=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_SPINLOCK_SLEEP=y
# CONFIG_FRAME_POINTER is not set
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_MD4=m
-CONFIG_CRYPTO_MD5=m
+CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_SHA1=y
CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_AES_586=m
+CONFIG_CRYPTO_AES=m
CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_ARC4=m
-CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_DEFLATE=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_CRC32C=m
+++ /dev/null
-#
-# Automatically generated make config: don't edit
-#
-CONFIG_X86=y
-CONFIG_MMU=y
-CONFIG_UID16=y
-CONFIG_GENERIC_ISA_DMA=y
-
-#
-# Code maturity level options
-#
-CONFIG_EXPERIMENTAL=y
-CONFIG_CLEAN_COMPILE=y
-CONFIG_BROKEN_ON_SMP=y
-
-#
-# General setup
-#
-CONFIG_SWAP=y
-CONFIG_SYSVIPC=y
-CONFIG_POSIX_MQUEUE=y
-CONFIG_BSD_PROCESS_ACCT=y
-# CONFIG_BSD_PROCESS_ACCT_V3 is not set
-
-#
-# Class Based Kernel Resource Management
-#
-CONFIG_CKRM=y
-CONFIG_RCFS_FS=y
-CONFIG_CKRM_TYPE_TASKCLASS=y
-CONFIG_CKRM_RES_NUMTASKS=y
-CONFIG_CKRM_CPU_SCHEDULE=y
-CONFIG_CKRM_RES_BLKIO=y
-CONFIG_CKRM_RES_MEM=y
-# CONFIG_CKRM_MEM_LRUORDER_CHANGE is not set
-# CONFIG_CKRM_TYPE_SOCKETCLASS is not set
-CONFIG_CKRM_RBCE=y
-CONFIG_SYSCTL=y
-# CONFIG_AUDIT is not set
-CONFIG_LOG_BUF_SHIFT=17
-# CONFIG_HOTPLUG is not set
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_OOM_PANIC=y
-# CONFIG_EMBEDDED is not set
-# CONFIG_DELAY_ACCT is not set
-CONFIG_KALLSYMS=y
-# CONFIG_KALLSYMS_ALL is not set
-CONFIG_KALLSYMS_EXTRA_PASS=y
-CONFIG_FUTEX=y
-CONFIG_EPOLL=y
-CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_AS=y
-CONFIG_IOSCHED_DEADLINE=y
-CONFIG_IOSCHED_CFQ=y
-CONFIG_CC_OPTIMIZE_FOR_SIZE=y
-
-#
-# Loadable module support
-#
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_MODULE_FORCE_UNLOAD is not set
-CONFIG_OBSOLETE_MODPARM=y
-# CONFIG_MODVERSIONS is not set
-# CONFIG_MODULE_SIG is not set
-CONFIG_KMOD=y
-
-#
-# Processor type and features
-#
-CONFIG_X86_PC=y
-# CONFIG_X86_ELAN is not set
-# CONFIG_X86_VOYAGER is not set
-# CONFIG_X86_NUMAQ is not set
-# CONFIG_X86_SUMMIT is not set
-# CONFIG_X86_BIGSMP is not set
-# CONFIG_X86_VISWS is not set
-# CONFIG_X86_GENERICARCH is not set
-# CONFIG_X86_ES7000 is not set
-# CONFIG_M386 is not set
-# CONFIG_M486 is not set
-# CONFIG_M586 is not set
-# CONFIG_M586TSC is not set
-# CONFIG_M586MMX is not set
-# CONFIG_M686 is not set
-# CONFIG_MPENTIUMII is not set
-CONFIG_MPENTIUMIII=y
-# CONFIG_MPENTIUMM is not set
-# CONFIG_MPENTIUM4 is not set
-# CONFIG_MK6 is not set
-# CONFIG_MK7 is not set
-# CONFIG_MK8 is not set
-# CONFIG_MCRUSOE is not set
-# CONFIG_MWINCHIPC6 is not set
-# CONFIG_MWINCHIP2 is not set
-# CONFIG_MWINCHIP3D is not set
-# CONFIG_MCYRIXIII is not set
-# CONFIG_MVIAC3_2 is not set
-CONFIG_X86_GENERIC=y
-CONFIG_X86_CMPXCHG=y
-CONFIG_X86_XADD=y
-CONFIG_X86_L1_CACHE_SHIFT=7
-CONFIG_RWSEM_XCHGADD_ALGORITHM=y
-CONFIG_X86_WP_WORKS_OK=y
-CONFIG_X86_INVLPG=y
-CONFIG_X86_BSWAP=y
-CONFIG_X86_POPAD_OK=y
-CONFIG_X86_GOOD_APIC=y
-CONFIG_X86_INTEL_USERCOPY=y
-CONFIG_X86_USE_PPRO_CHECKSUM=y
-CONFIG_X86_4G=y
-CONFIG_X86_SWITCH_PAGETABLES=y
-CONFIG_X86_4G_VM_LAYOUT=y
-CONFIG_X86_UACCESS_INDIRECT=y
-CONFIG_X86_HIGH_ENTRY=y
-CONFIG_HPET_TIMER=y
-CONFIG_HPET_EMULATE_RTC=y
-# CONFIG_SMP is not set
-# CONFIG_PREEMPT is not set
-# CONFIG_PREEMPT_VOLUNTARY is not set
-# CONFIG_X86_UP_APIC is not set
-CONFIG_X86_TSC=y
-CONFIG_X86_MCE=y
-# CONFIG_X86_MCE_NONFATAL is not set
-CONFIG_TOSHIBA=m
-CONFIG_I8K=m
-CONFIG_MICROCODE=m
-CONFIG_X86_MSR=m
-CONFIG_X86_CPUID=m
-
-#
-# Firmware Drivers
-#
-CONFIG_EDD=m
-# CONFIG_NOHIGHMEM is not set
-CONFIG_HIGHMEM4G=y
-# CONFIG_HIGHMEM64G is not set
-CONFIG_HIGHMEM=y
-CONFIG_HIGHPTE=y
-# CONFIG_MATH_EMULATION is not set
-CONFIG_MTRR=y
-# CONFIG_EFI is not set
-CONFIG_REGPARM=y
-
-#
-# Power management options (ACPI, APM)
-#
-CONFIG_PM=y
-# CONFIG_SOFTWARE_SUSPEND is not set
-# CONFIG_PM_DISK is not set
-
-#
-# ACPI (Advanced Configuration and Power Interface) Support
-#
-CONFIG_ACPI=y
-CONFIG_ACPI_BOOT=y
-CONFIG_ACPI_INTERPRETER=y
-CONFIG_ACPI_SLEEP=y
-CONFIG_ACPI_SLEEP_PROC_FS=y
-CONFIG_ACPI_AC=m
-CONFIG_ACPI_BATTERY=m
-CONFIG_ACPI_BUTTON=m
-CONFIG_ACPI_FAN=y
-CONFIG_ACPI_PROCESSOR=y
-CONFIG_ACPI_THERMAL=y
-CONFIG_ACPI_ASUS=m
-CONFIG_ACPI_TOSHIBA=m
-# CONFIG_ACPI_DEBUG is not set
-CONFIG_ACPI_BUS=y
-CONFIG_ACPI_EC=y
-CONFIG_ACPI_POWER=y
-CONFIG_ACPI_PCI=y
-CONFIG_ACPI_SYSTEM=y
-CONFIG_X86_PM_TIMER=y
-
-#
-# APM (Advanced Power Management) BIOS Support
-#
-CONFIG_APM=m
-# CONFIG_APM_IGNORE_USER_SUSPEND is not set
-# CONFIG_APM_DO_ENABLE is not set
-CONFIG_APM_CPU_IDLE=y
-# CONFIG_APM_DISPLAY_BLANK is not set
-CONFIG_APM_RTC_IS_GMT=y
-# CONFIG_APM_ALLOW_INTS is not set
-# CONFIG_APM_REAL_MODE_POWER_OFF is not set
-
-#
-# CPU Frequency scaling
-#
-CONFIG_CPU_FREQ=y
-# CONFIG_CPU_FREQ_PROC_INTF is not set
-# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set
-CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y
-CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
-CONFIG_CPU_FREQ_GOV_POWERSAVE=m
-CONFIG_CPU_FREQ_GOV_USERSPACE=y
-# CONFIG_CPU_FREQ_24_API is not set
-CONFIG_CPU_FREQ_TABLE=y
-
-#
-# CPUFreq processor drivers
-#
-CONFIG_X86_ACPI_CPUFREQ=m
-# CONFIG_X86_ACPI_CPUFREQ_PROC_INTF is not set
-CONFIG_X86_POWERNOW_K6=m
-CONFIG_X86_POWERNOW_K7=m
-CONFIG_X86_POWERNOW_K8=m
-# CONFIG_X86_GX_SUSPMOD is not set
-CONFIG_X86_SPEEDSTEP_CENTRINO=m
-CONFIG_X86_SPEEDSTEP_CENTRINO_TABLE=y
-CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI=y
-CONFIG_X86_SPEEDSTEP_ICH=m
-CONFIG_X86_SPEEDSTEP_SMI=m
-CONFIG_X86_P4_CLOCKMOD=m
-CONFIG_X86_SPEEDSTEP_LIB=m
-# CONFIG_X86_SPEEDSTEP_RELAXED_CAP_CHECK is not set
-CONFIG_X86_LONGRUN=m
-CONFIG_X86_LONGHAUL=m
-
-#
-# Bus options (PCI, PCMCIA, EISA, MCA, ISA)
-#
-CONFIG_PCI=y
-# CONFIG_PCI_GOBIOS is not set
-# CONFIG_PCI_GOMMCONFIG is not set
-# CONFIG_PCI_GODIRECT is not set
-CONFIG_PCI_GOANY=y
-CONFIG_PCI_BIOS=y
-CONFIG_PCI_DIRECT=y
-CONFIG_PCI_MMCONFIG=y
-CONFIG_PCI_LEGACY_PROC=y
-# CONFIG_PCI_NAMES is not set
-CONFIG_ISA=y
-# CONFIG_EISA is not set
-# CONFIG_MCA is not set
-# CONFIG_SCx200 is not set
-
-#
-# Executable file formats
-#
-CONFIG_BINFMT_ELF=y
-# CONFIG_BINFMT_AOUT is not set
-CONFIG_BINFMT_MISC=y
-
-#
-# Device Drivers
-#
-
-#
-# Generic Driver Options
-#
-CONFIG_STANDALONE=y
-CONFIG_PREVENT_FIRMWARE_BUILD=y
-# CONFIG_DEBUG_DRIVER is not set
-
-#
-# Memory Technology Devices (MTD)
-#
-CONFIG_MTD=m
-# CONFIG_MTD_DEBUG is not set
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CONCAT=m
-CONFIG_MTD_REDBOOT_PARTS=m
-# CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED is not set
-# CONFIG_MTD_REDBOOT_PARTS_READONLY is not set
-CONFIG_MTD_CMDLINE_PARTS=y
-
-#
-# User Modules And Translation Layers
-#
-CONFIG_MTD_CHAR=m
-CONFIG_MTD_BLOCK=m
-CONFIG_MTD_BLOCK_RO=m
-CONFIG_FTL=m
-CONFIG_NFTL=m
-CONFIG_NFTL_RW=y
-CONFIG_INFTL=m
-
-#
-# RAM/ROM/Flash chip drivers
-#
-CONFIG_MTD_CFI=m
-CONFIG_MTD_JEDECPROBE=m
-CONFIG_MTD_GEN_PROBE=m
-# CONFIG_MTD_CFI_ADV_OPTIONS is not set
-CONFIG_MTD_MAP_BANK_WIDTH_1=y
-CONFIG_MTD_MAP_BANK_WIDTH_2=y
-CONFIG_MTD_MAP_BANK_WIDTH_4=y
-# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
-# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
-# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
-CONFIG_MTD_CFI_I1=y
-CONFIG_MTD_CFI_I2=y
-# CONFIG_MTD_CFI_I4 is not set
-# CONFIG_MTD_CFI_I8 is not set
-CONFIG_MTD_CFI_INTELEXT=m
-CONFIG_MTD_CFI_AMDSTD=m
-CONFIG_MTD_CFI_AMDSTD_RETRY=3
-CONFIG_MTD_CFI_STAA=m
-CONFIG_MTD_CFI_UTIL=m
-CONFIG_MTD_RAM=m
-CONFIG_MTD_ROM=m
-CONFIG_MTD_ABSENT=m
-
-#
-# Mapping drivers for chip access
-#
-CONFIG_MTD_COMPLEX_MAPPINGS=y
-# CONFIG_MTD_PHYSMAP is not set
-# CONFIG_MTD_PNC2000 is not set
-CONFIG_MTD_SC520CDP=m
-CONFIG_MTD_NETSC520=m
-CONFIG_MTD_SBC_GXX=m
-CONFIG_MTD_ELAN_104NC=m
-CONFIG_MTD_SCx200_DOCFLASH=m
-# CONFIG_MTD_AMD76XROM is not set
-# CONFIG_MTD_ICHXROM is not set
-CONFIG_MTD_SCB2_FLASH=m
-# CONFIG_MTD_NETtel is not set
-# CONFIG_MTD_DILNETPC is not set
-# CONFIG_MTD_L440GX is not set
-CONFIG_MTD_PCI=m
-
-#
-# Self-contained MTD device drivers
-#
-CONFIG_MTD_PMC551=m
-# CONFIG_MTD_PMC551_BUGFIX is not set
-# CONFIG_MTD_PMC551_DEBUG is not set
-# CONFIG_MTD_SLRAM is not set
-# CONFIG_MTD_PHRAM is not set
-CONFIG_MTD_MTDRAM=m
-CONFIG_MTDRAM_TOTAL_SIZE=4096
-CONFIG_MTDRAM_ERASE_SIZE=128
-# CONFIG_MTD_BLKMTD is not set
-
-#
-# Disk-On-Chip Device Drivers
-#
-CONFIG_MTD_DOC2000=m
-# CONFIG_MTD_DOC2001 is not set
-CONFIG_MTD_DOC2001PLUS=m
-CONFIG_MTD_DOCPROBE=m
-CONFIG_MTD_DOCECC=m
-# CONFIG_MTD_DOCPROBE_ADVANCED is not set
-CONFIG_MTD_DOCPROBE_ADDRESS=0
-
-#
-# NAND Flash Device Drivers
-#
-CONFIG_MTD_NAND=m
-# CONFIG_MTD_NAND_VERIFY_WRITE is not set
-CONFIG_MTD_NAND_IDS=m
-# CONFIG_MTD_NAND_DISKONCHIP is not set
-
-#
-# Parallel port support
-#
-# CONFIG_PARPORT is not set
-
-#
-# Plug and Play support
-#
-CONFIG_PNP=y
-# CONFIG_PNP_DEBUG is not set
-
-#
-# Protocols
-#
-CONFIG_ISAPNP=y
-# CONFIG_PNPBIOS is not set
-
-#
-# Block devices
-#
-CONFIG_BLK_DEV_FD=m
-# CONFIG_BLK_DEV_XD is not set
-CONFIG_BLK_CPQ_DA=m
-CONFIG_BLK_CPQ_CISS_DA=m
-CONFIG_CISS_SCSI_TAPE=y
-CONFIG_BLK_DEV_DAC960=m
-CONFIG_BLK_DEV_UMEM=m
-CONFIG_BLK_DEV_LOOP=m
-# CONFIG_BLK_DEV_CRYPTOLOOP is not set
-CONFIG_BLK_DEV_NBD=m
-CONFIG_BLK_DEV_SX8=m
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=16384
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_LBD=y
-
-#
-# ATA/ATAPI/MFM/RLL support
-#
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDE=y
-
-#
-# Please see Documentation/ide.txt for help/info on IDE drives
-#
-# CONFIG_BLK_DEV_IDE_SATA is not set
-# CONFIG_BLK_DEV_HD_IDE is not set
-CONFIG_BLK_DEV_IDEDISK=y
-CONFIG_IDEDISK_MULTI_MODE=y
-CONFIG_BLK_DEV_IDECD=y
-# CONFIG_BLK_DEV_IDETAPE is not set
-CONFIG_BLK_DEV_IDEFLOPPY=y
-CONFIG_BLK_DEV_IDESCSI=m
-# CONFIG_IDE_TASK_IOCTL is not set
-# CONFIG_IDE_TASKFILE_IO is not set
-
-#
-# IDE chipset support/bugfixes
-#
-CONFIG_IDE_GENERIC=y
-# CONFIG_BLK_DEV_CMD640 is not set
-CONFIG_BLK_DEV_IDEPNP=y
-CONFIG_BLK_DEV_IDEPCI=y
-CONFIG_IDEPCI_SHARE_IRQ=y
-# CONFIG_BLK_DEV_OFFBOARD is not set
-CONFIG_BLK_DEV_GENERIC=y
-# CONFIG_BLK_DEV_OPTI621 is not set
-CONFIG_BLK_DEV_RZ1000=y
-CONFIG_BLK_DEV_IDEDMA_PCI=y
-# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
-CONFIG_IDEDMA_PCI_AUTO=y
-# CONFIG_IDEDMA_ONLYDISK is not set
-CONFIG_BLK_DEV_ADMA=y
-CONFIG_BLK_DEV_AEC62XX=y
-CONFIG_BLK_DEV_ALI15X3=y
-# CONFIG_WDC_ALI15X3 is not set
-CONFIG_BLK_DEV_AMD74XX=y
-CONFIG_BLK_DEV_ATIIXP=y
-CONFIG_BLK_DEV_CMD64X=y
-CONFIG_BLK_DEV_TRIFLEX=y
-CONFIG_BLK_DEV_CY82C693=y
-CONFIG_BLK_DEV_CS5520=y
-CONFIG_BLK_DEV_CS5530=y
-CONFIG_BLK_DEV_HPT34X=y
-# CONFIG_HPT34X_AUTODMA is not set
-CONFIG_BLK_DEV_HPT366=y
-# CONFIG_BLK_DEV_SC1200 is not set
-CONFIG_BLK_DEV_PIIX=y
-# CONFIG_BLK_DEV_NS87415 is not set
-CONFIG_BLK_DEV_PDC202XX_OLD=y
-# CONFIG_PDC202XX_BURST is not set
-CONFIG_BLK_DEV_PDC202XX_NEW=y
-CONFIG_PDC202XX_FORCE=y
-CONFIG_BLK_DEV_SVWKS=y
-CONFIG_BLK_DEV_SIIMAGE=y
-CONFIG_BLK_DEV_SIS5513=y
-CONFIG_BLK_DEV_SLC90E66=y
-# CONFIG_BLK_DEV_TRM290 is not set
-CONFIG_BLK_DEV_VIA82CXXX=y
-# CONFIG_IDE_ARM is not set
-# CONFIG_IDE_CHIPSETS is not set
-CONFIG_BLK_DEV_IDEDMA=y
-# CONFIG_IDEDMA_IVB is not set
-CONFIG_IDEDMA_AUTO=y
-# CONFIG_BLK_DEV_HD is not set
-
-#
-# SCSI device support
-#
-CONFIG_SCSI=m
-CONFIG_SCSI_PROC_FS=y
-
-#
-# SCSI support type (disk, tape, CD-ROM)
-#
-CONFIG_BLK_DEV_SD=m
-CONFIG_CHR_DEV_ST=m
-CONFIG_CHR_DEV_OSST=m
-CONFIG_BLK_DEV_SR=m
-CONFIG_BLK_DEV_SR_VENDOR=y
-CONFIG_CHR_DEV_SG=m
-
-#
-# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
-#
-# CONFIG_SCSI_MULTI_LUN is not set
-CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_LOGGING=y
-
-#
-# SCSI Transport Attributes
-#
-CONFIG_SCSI_SPI_ATTRS=m
-CONFIG_SCSI_FC_ATTRS=m
-
-#
-# SCSI low-level drivers
-#
-CONFIG_BLK_DEV_3W_XXXX_RAID=m
-CONFIG_SCSI_3W_9XXX=m
-# CONFIG_SCSI_7000FASST is not set
-CONFIG_SCSI_ACARD=m
-CONFIG_SCSI_AHA152X=m
-CONFIG_SCSI_AHA1542=m
-CONFIG_SCSI_AACRAID=m
-CONFIG_SCSI_AIC7XXX=m
-CONFIG_AIC7XXX_CMDS_PER_DEVICE=4
-CONFIG_AIC7XXX_RESET_DELAY_MS=15000
-# CONFIG_AIC7XXX_BUILD_FIRMWARE is not set
-# CONFIG_AIC7XXX_DEBUG_ENABLE is not set
-CONFIG_AIC7XXX_DEBUG_MASK=0
-# CONFIG_AIC7XXX_REG_PRETTY_PRINT is not set
-CONFIG_SCSI_AIC7XXX_OLD=m
-CONFIG_SCSI_AIC79XX=m
-CONFIG_AIC79XX_CMDS_PER_DEVICE=4
-CONFIG_AIC79XX_RESET_DELAY_MS=15000
-# CONFIG_AIC79XX_BUILD_FIRMWARE is not set
-# CONFIG_AIC79XX_ENABLE_RD_STRM is not set
-# CONFIG_AIC79XX_DEBUG_ENABLE is not set
-CONFIG_AIC79XX_DEBUG_MASK=0
-# CONFIG_AIC79XX_REG_PRETTY_PRINT is not set
-# CONFIG_SCSI_DPT_I2O is not set
-CONFIG_SCSI_IN2000=m
-CONFIG_SCSI_MEGARAID=m
-CONFIG_SCSI_SATA=y
-CONFIG_SCSI_SATA_SVW=m
-CONFIG_SCSI_ATA_PIIX=m
-CONFIG_SCSI_SATA_NV=m
-CONFIG_SCSI_SATA_PROMISE=m
-CONFIG_SCSI_SATA_SX4=m
-CONFIG_SCSI_SATA_SIL=m
-CONFIG_SCSI_SATA_SIS=m
-CONFIG_SCSI_SATA_VIA=m
-CONFIG_SCSI_SATA_VITESSE=m
-CONFIG_SCSI_BUSLOGIC=m
-# CONFIG_SCSI_OMIT_FLASHPOINT is not set
-# CONFIG_SCSI_DMX3191D is not set
-# CONFIG_SCSI_DTC3280 is not set
-# CONFIG_SCSI_EATA is not set
-# CONFIG_SCSI_EATA_PIO is not set
-CONFIG_SCSI_FUTURE_DOMAIN=m
-CONFIG_SCSI_GDTH=m
-# CONFIG_SCSI_GENERIC_NCR5380 is not set
-# CONFIG_SCSI_GENERIC_NCR5380_MMIO is not set
-CONFIG_SCSI_IPS=m
-CONFIG_SCSI_INIA100=m
-# CONFIG_SCSI_NCR53C406A is not set
-CONFIG_SCSI_SYM53C8XX_2=m
-CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1
-CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
-CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
-# CONFIG_SCSI_SYM53C8XX_IOMAPPED is not set
-# CONFIG_SCSI_IPR is not set
-# CONFIG_SCSI_PAS16 is not set
-# CONFIG_SCSI_PSI240I is not set
-CONFIG_SCSI_QLOGIC_FAS=m
-CONFIG_SCSI_QLOGIC_ISP=m
-# CONFIG_SCSI_QLOGIC_FC is not set
-CONFIG_SCSI_QLOGIC_1280=m
-CONFIG_SCSI_QLA2XXX=m
-CONFIG_SCSI_QLA21XX=m
-CONFIG_SCSI_QLA22XX=m
-CONFIG_SCSI_QLA2300=m
-CONFIG_SCSI_QLA2322=m
-CONFIG_SCSI_QLA6312=m
-CONFIG_SCSI_QLA6322=m
-# CONFIG_SCSI_SYM53C416 is not set
-# CONFIG_SCSI_DC395x is not set
-CONFIG_SCSI_DC390T=m
-# CONFIG_SCSI_T128 is not set
-# CONFIG_SCSI_U14_34F is not set
-# CONFIG_SCSI_ULTRASTOR is not set
-# CONFIG_SCSI_NSP32 is not set
-# CONFIG_SCSI_DEBUG is not set
-
-#
-# Old CD-ROM drivers (not SCSI, not IDE)
-#
-# CONFIG_CD_NO_IDESCSI is not set
-
-#
-# Multi-device support (RAID and LVM)
-#
-CONFIG_MD=y
-CONFIG_BLK_DEV_MD=y
-CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
-CONFIG_MD_RAID1=m
-CONFIG_MD_RAID5=m
-CONFIG_MD_RAID6=m
-CONFIG_MD_MULTIPATH=m
-CONFIG_BLK_DEV_DM=m
-# CONFIG_DM_CRYPT is not set
-CONFIG_DM_SNAPSHOT=m
-CONFIG_DM_MIRROR=m
-CONFIG_DM_ZERO=m
-
-#
-# Fusion MPT device support
-#
-CONFIG_FUSION=m
-CONFIG_FUSION_MAX_SGE=40
-# CONFIG_FUSION_ISENSE is not set
-CONFIG_FUSION_CTL=m
-
-#
-# IEEE 1394 (FireWire) support
-#
-CONFIG_IEEE1394=m
-
-#
-# Subsystem Options
-#
-# CONFIG_IEEE1394_VERBOSEDEBUG is not set
-CONFIG_IEEE1394_OUI_DB=y
-# CONFIG_IEEE1394_EXTRA_CONFIG_ROMS is not set
-
-#
-# Device Drivers
-#
-# CONFIG_IEEE1394_PCILYNX is not set
-CONFIG_IEEE1394_OHCI1394=m
-
-#
-# Protocol Drivers
-#
-# CONFIG_IEEE1394_VIDEO1394 is not set
-CONFIG_IEEE1394_SBP2=m
-# CONFIG_IEEE1394_SBP2_PHYS_DMA is not set
-# CONFIG_IEEE1394_ETH1394 is not set
-CONFIG_IEEE1394_DV1394=m
-CONFIG_IEEE1394_RAWIO=m
-CONFIG_IEEE1394_CMP=m
-CONFIG_IEEE1394_AMDTP=m
-
-#
-# I2O device support
-#
-CONFIG_I2O=m
-CONFIG_I2O_CONFIG=m
-CONFIG_I2O_BLOCK=m
-CONFIG_I2O_SCSI=m
-CONFIG_I2O_PROC=m
-
-#
-# Networking support
-#
-CONFIG_NET=y
-
-#
-# Networking options
-#
-CONFIG_PACKET=y
-CONFIG_PACKET_MMAP=y
-# CONFIG_NETLINK_DEV is not set
-CONFIG_UNIX=y
-# CONFIG_NET_KEY is not set
-CONFIG_INET=y
-# CONFIG_IP_MULTICAST is not set
-# CONFIG_IP_ADVANCED_ROUTER is not set
-# CONFIG_IP_PNP is not set
-# CONFIG_NET_IPIP is not set
-# CONFIG_NET_IPGRE is not set
-# CONFIG_ARPD is not set
-# CONFIG_SYN_COOKIES is not set
-# CONFIG_INET_AH is not set
-# CONFIG_INET_ESP is not set
-# CONFIG_INET_IPCOMP is not set
-# CONFIG_ACCEPT_QUEUES is not set
-
-#
-# IP: Virtual Server Configuration
-#
-# CONFIG_IP_VS is not set
-CONFIG_ICMP_IPOD=y
-# CONFIG_IPV6 is not set
-CONFIG_NETFILTER=y
-# CONFIG_NETFILTER_DEBUG is not set
-
-#
-# IP: Netfilter Configuration
-#
-CONFIG_IP_NF_CONNTRACK=m
-CONFIG_IP_NF_FTP=m
-CONFIG_IP_NF_IRC=m
-CONFIG_IP_NF_TFTP=m
-CONFIG_IP_NF_AMANDA=m
-CONFIG_IP_NF_QUEUE=m
-CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_LIMIT=m
-CONFIG_IP_NF_MATCH_IPRANGE=m
-CONFIG_IP_NF_MATCH_MAC=m
-CONFIG_IP_NF_MATCH_PKTTYPE=m
-CONFIG_IP_NF_MATCH_MARK=m
-CONFIG_IP_NF_MATCH_MULTIPORT=m
-CONFIG_IP_NF_MATCH_TOS=m
-CONFIG_IP_NF_MATCH_RECENT=m
-CONFIG_IP_NF_MATCH_ECN=m
-CONFIG_IP_NF_MATCH_DSCP=m
-CONFIG_IP_NF_MATCH_AH_ESP=m
-CONFIG_IP_NF_MATCH_LENGTH=m
-CONFIG_IP_NF_MATCH_TTL=m
-CONFIG_IP_NF_MATCH_TCPMSS=m
-CONFIG_IP_NF_MATCH_HELPER=m
-CONFIG_IP_NF_MATCH_STATE=m
-CONFIG_IP_NF_MATCH_CONNTRACK=m
-CONFIG_IP_NF_MATCH_OWNER=m
-CONFIG_IP_NF_FILTER=m
-CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_NAT=m
-CONFIG_IP_NF_NAT_NEEDED=y
-CONFIG_IP_NF_TARGET_MASQUERADE=m
-CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_IP_NF_TARGET_NETMAP=m
-CONFIG_IP_NF_TARGET_SAME=m
-CONFIG_IP_NF_NAT_LOCAL=y
-CONFIG_IP_NF_NAT_SNMP_BASIC=m
-CONFIG_IP_NF_NAT_IRC=m
-CONFIG_IP_NF_NAT_FTP=m
-CONFIG_IP_NF_NAT_TFTP=m
-CONFIG_IP_NF_NAT_AMANDA=m
-CONFIG_IP_NF_MANGLE=m
-CONFIG_IP_NF_TARGET_TOS=m
-CONFIG_IP_NF_TARGET_ECN=m
-CONFIG_IP_NF_TARGET_DSCP=m
-CONFIG_IP_NF_TARGET_MARK=m
-CONFIG_IP_NF_TARGET_CLASSIFY=m
-CONFIG_IP_NF_TARGET_LOG=m
-CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_IP_NF_TARGET_TCPMSS=m
-CONFIG_IP_NF_ARPTABLES=m
-CONFIG_IP_NF_ARPFILTER=m
-CONFIG_IP_NF_ARP_MANGLE=m
-# CONFIG_IP_NF_COMPAT_IPCHAINS is not set
-# CONFIG_IP_NF_COMPAT_IPFWADM is not set
-CONFIG_IP_NF_TARGET_NOTRACK=m
-CONFIG_IP_NF_RAW=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
-CONFIG_IP_NF_MATCH_REALM=m
-
-#
-# SCTP Configuration (EXPERIMENTAL)
-#
-# CONFIG_IP_SCTP is not set
-# CONFIG_ATM is not set
-# CONFIG_BRIDGE is not set
-# CONFIG_VLAN_8021Q is not set
-# CONFIG_DECNET is not set
-# CONFIG_LLC2 is not set
-# CONFIG_IPX is not set
-# CONFIG_ATALK is not set
-# CONFIG_X25 is not set
-# CONFIG_LAPB is not set
-# CONFIG_NET_DIVERT is not set
-# CONFIG_ECONET is not set
-# CONFIG_WAN_ROUTER is not set
-# CONFIG_NET_HW_FLOWCONTROL is not set
-
-#
-# QoS and/or fair queueing
-#
-CONFIG_NET_SCHED=y
-CONFIG_NET_SCH_CLK_JIFFIES=y
-# CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set
-# CONFIG_NET_SCH_CLK_CPU is not set
-# CONFIG_NET_SCH_CBQ is not set
-CONFIG_NET_SCH_HTB=m
-# CONFIG_NET_SCH_HFSC is not set
-# CONFIG_NET_SCH_PRIO is not set
-# CONFIG_NET_SCH_RED is not set
-# CONFIG_NET_SCH_SFQ is not set
-# CONFIG_NET_SCH_TEQL is not set
-# CONFIG_NET_SCH_TBF is not set
-# CONFIG_NET_SCH_GRED is not set
-# CONFIG_NET_SCH_DSMARK is not set
-# CONFIG_NET_SCH_NETEM is not set
-# CONFIG_NET_SCH_INGRESS is not set
-# CONFIG_NET_QOS is not set
-CONFIG_NET_CLS=y
-# CONFIG_NET_CLS_TCINDEX is not set
-# CONFIG_NET_CLS_ROUTE4 is not set
-CONFIG_NET_CLS_ROUTE=y
-CONFIG_NET_CLS_FW=m
-# CONFIG_NET_CLS_U32 is not set
-# CONFIG_NET_CLS_IND is not set
-
-#
-# Network testing
-#
-# CONFIG_NET_PKTGEN is not set
-# CONFIG_NETPOLL is not set
-# CONFIG_NET_POLL_CONTROLLER is not set
-# CONFIG_HAMRADIO is not set
-# CONFIG_IRDA is not set
-# CONFIG_BT is not set
-# CONFIG_TUX is not set
-CONFIG_NETDEVICES=y
-CONFIG_DUMMY=m
-# CONFIG_BONDING is not set
-# CONFIG_EQUALIZER is not set
-CONFIG_TUN=m
-# CONFIG_NET_SB1000 is not set
-
-#
-# ARCnet devices
-#
-# CONFIG_ARCNET is not set
-
-#
-# Ethernet (10 or 100Mbit)
-#
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=m
-CONFIG_HAPPYMEAL=m
-CONFIG_SUNGEM=m
-CONFIG_NET_VENDOR_3COM=y
-CONFIG_EL1=m
-CONFIG_EL2=m
-CONFIG_ELPLUS=m
-CONFIG_EL16=m
-CONFIG_EL3=m
-CONFIG_3C515=m
-CONFIG_VORTEX=m
-CONFIG_TYPHOON=m
-CONFIG_LANCE=m
-CONFIG_NET_VENDOR_SMC=y
-CONFIG_WD80x3=m
-CONFIG_ULTRA=m
-CONFIG_SMC9194=m
-CONFIG_NET_VENDOR_RACAL=y
-# CONFIG_NI5010 is not set
-CONFIG_NI52=m
-CONFIG_NI65=m
-
-#
-# Tulip family network device support
-#
-CONFIG_NET_TULIP=y
-CONFIG_DE2104X=m
-CONFIG_TULIP=m
-# CONFIG_TULIP_MWI is not set
-CONFIG_TULIP_MMIO=y
-# CONFIG_TULIP_NAPI is not set
-CONFIG_DE4X5=m
-CONFIG_WINBOND_840=m
-CONFIG_DM9102=m
-# CONFIG_AT1700 is not set
-CONFIG_DEPCA=m
-CONFIG_HP100=m
-# CONFIG_NET_ISA is not set
-CONFIG_NE2000=m
-CONFIG_NET_PCI=y
-CONFIG_PCNET32=m
-CONFIG_AMD8111_ETH=m
-CONFIG_AMD8111E_NAPI=y
-CONFIG_ADAPTEC_STARFIRE=m
-CONFIG_ADAPTEC_STARFIRE_NAPI=y
-CONFIG_AC3200=m
-CONFIG_APRICOT=m
-CONFIG_B44=m
-CONFIG_FORCEDETH=m
-CONFIG_CS89x0=m
-CONFIG_DGRS=m
-CONFIG_EEPRO100=m
-# CONFIG_EEPRO100_PIO is not set
-CONFIG_E100=m
-CONFIG_E100_NAPI=y
-CONFIG_FEALNX=m
-CONFIG_NATSEMI=m
-CONFIG_NE2K_PCI=m
-CONFIG_8139CP=m
-CONFIG_8139TOO=m
-CONFIG_8139TOO_PIO=y
-# CONFIG_8139TOO_TUNE_TWISTER is not set
-CONFIG_8139TOO_8129=y
-# CONFIG_8139_OLD_RX_RESET is not set
-CONFIG_SIS900=m
-CONFIG_EPIC100=m
-CONFIG_SUNDANCE=m
-# CONFIG_SUNDANCE_MMIO is not set
-CONFIG_TLAN=m
-CONFIG_VIA_RHINE=m
-CONFIG_VIA_RHINE_MMIO=y
-CONFIG_VIA_VELOCITY=m
-CONFIG_NET_POCKET=y
-CONFIG_ATP=m
-CONFIG_DE600=m
-CONFIG_DE620=m
-
-#
-# Ethernet (1000 Mbit)
-#
-CONFIG_ACENIC=m
-# CONFIG_ACENIC_OMIT_TIGON_I is not set
-CONFIG_DL2K=m
-CONFIG_E1000=m
-CONFIG_E1000_NAPI=y
-CONFIG_NS83820=m
-CONFIG_HAMACHI=m
-CONFIG_YELLOWFIN=m
-CONFIG_R8169=m
-CONFIG_SK98LIN=m
-CONFIG_TIGON3=m
-
-#
-# Ethernet (10000 Mbit)
-#
-CONFIG_IXGB=m
-CONFIG_IXGB_NAPI=y
-CONFIG_S2IO=m
-CONFIG_S2IO_NAPI=y
-
-#
-# Token Ring devices
-#
-# CONFIG_TR is not set
-
-#
-# Wireless LAN (non-hamradio)
-#
-# CONFIG_NET_RADIO is not set
-
-#
-# Wan interfaces
-#
-# CONFIG_WAN is not set
-# CONFIG_FDDI is not set
-# CONFIG_HIPPI is not set
-# CONFIG_PPP is not set
-# CONFIG_SLIP is not set
-# CONFIG_NET_FC is not set
-# CONFIG_SHAPER is not set
-# CONFIG_NETCONSOLE is not set
-
-#
-# ISDN subsystem
-#
-# CONFIG_ISDN is not set
-
-#
-# Telephony Support
-#
-# CONFIG_PHONE is not set
-
-#
-# Input device support
-#
-CONFIG_INPUT=y
-
-#
-# Userland interfaces
-#
-CONFIG_INPUT_MOUSEDEV=y
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
-CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
-CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
-# CONFIG_INPUT_JOYDEV is not set
-# CONFIG_INPUT_TSDEV is not set
-# CONFIG_INPUT_EVDEV is not set
-# CONFIG_INPUT_EVBUG is not set
-
-#
-# Input I/O drivers
-#
-# CONFIG_GAMEPORT is not set
-CONFIG_SOUND_GAMEPORT=y
-CONFIG_SERIO=y
-CONFIG_SERIO_I8042=y
-# CONFIG_SERIO_SERPORT is not set
-# CONFIG_SERIO_CT82C710 is not set
-# CONFIG_SERIO_PCIPS2 is not set
-
-#
-# Input Device Drivers
-#
-CONFIG_INPUT_KEYBOARD=y
-CONFIG_KEYBOARD_ATKBD=y
-# CONFIG_KEYBOARD_SUNKBD is not set
-# CONFIG_KEYBOARD_LKKBD is not set
-# CONFIG_KEYBOARD_XTKBD is not set
-# CONFIG_KEYBOARD_NEWTON is not set
-CONFIG_INPUT_MOUSE=y
-CONFIG_MOUSE_PS2=y
-# CONFIG_MOUSE_SERIAL is not set
-# CONFIG_MOUSE_INPORT is not set
-# CONFIG_MOUSE_LOGIBM is not set
-# CONFIG_MOUSE_PC110PAD is not set
-# CONFIG_MOUSE_VSXXXAA is not set
-# CONFIG_INPUT_JOYSTICK is not set
-# CONFIG_INPUT_TOUCHSCREEN is not set
-# CONFIG_INPUT_MISC is not set
-
-#
-# Character devices
-#
-CONFIG_VT=y
-CONFIG_VT_CONSOLE=y
-CONFIG_HW_CONSOLE=y
-# CONFIG_SERIAL_NONSTANDARD is not set
-
-#
-# Serial drivers
-#
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-# CONFIG_SERIAL_8250_ACPI is not set
-CONFIG_SERIAL_8250_NR_UARTS=4
-CONFIG_SERIAL_8250_EXTENDED=y
-# CONFIG_SERIAL_8250_MANY_PORTS is not set
-CONFIG_SERIAL_8250_SHARE_IRQ=y
-CONFIG_SERIAL_8250_DETECT_IRQ=y
-CONFIG_SERIAL_8250_MULTIPORT=y
-CONFIG_SERIAL_8250_RSA=y
-
-#
-# Non-8250 serial port support
-#
-CONFIG_SERIAL_CORE=y
-CONFIG_SERIAL_CORE_CONSOLE=y
-CONFIG_UNIX98_PTYS=y
-CONFIG_LEGACY_PTYS=y
-# CONFIG_CRASH is not set
-CONFIG_LEGACY_PTY_COUNT=256
-# CONFIG_QIC02_TAPE is not set
-
-#
-# IPMI
-#
-CONFIG_IPMI_HANDLER=m
-# CONFIG_IPMI_PANIC_EVENT is not set
-CONFIG_IPMI_DEVICE_INTERFACE=m
-CONFIG_IPMI_SI=m
-CONFIG_IPMI_WATCHDOG=m
-
-#
-# Watchdog Cards
-#
-# CONFIG_WATCHDOG is not set
-CONFIG_HW_RANDOM=m
-CONFIG_NVRAM=m
-CONFIG_RTC=y
-# CONFIG_DTLK is not set
-# CONFIG_R3964 is not set
-# CONFIG_APPLICOM is not set
-# CONFIG_SONYPI is not set
-
-#
-# Ftape, the floppy tape device driver
-#
-# CONFIG_FTAPE is not set
-CONFIG_AGP=m
-CONFIG_AGP_ALI=m
-CONFIG_AGP_ATI=m
-CONFIG_AGP_AMD=m
-CONFIG_AGP_AMD64=m
-CONFIG_AGP_INTEL=m
-CONFIG_AGP_INTEL_MCH=m
-CONFIG_AGP_NVIDIA=m
-CONFIG_AGP_SIS=m
-CONFIG_AGP_SWORKS=m
-CONFIG_AGP_VIA=m
-CONFIG_AGP_EFFICEON=m
-CONFIG_DRM=y
-CONFIG_DRM_TDFX=m
-CONFIG_DRM_GAMMA=m
-CONFIG_DRM_R128=m
-CONFIG_DRM_RADEON=m
-CONFIG_DRM_I810=m
-CONFIG_DRM_I830=m
-CONFIG_DRM_MGA=m
-CONFIG_DRM_SIS=m
-CONFIG_MWAVE=m
-# CONFIG_RAW_DRIVER is not set
-# CONFIG_HPET is not set
-CONFIG_HANGCHECK_TIMER=m
-
-#
-# I2C support
-#
-CONFIG_I2C=m
-CONFIG_I2C_CHARDEV=m
-
-#
-# I2C Algorithms
-#
-CONFIG_I2C_ALGOBIT=m
-CONFIG_I2C_ALGOPCF=m
-
-#
-# I2C Hardware Bus support
-#
-CONFIG_I2C_ALI1535=m
-CONFIG_I2C_ALI1563=m
-CONFIG_I2C_ALI15X3=m
-CONFIG_I2C_AMD756=m
-CONFIG_I2C_AMD8111=m
-# CONFIG_I2C_ELEKTOR is not set
-# CONFIG_I2C_I801 is not set
-CONFIG_I2C_I810=m
-CONFIG_I2C_ISA=m
-CONFIG_I2C_NFORCE2=m
-# CONFIG_I2C_PARPORT_LIGHT is not set
-CONFIG_I2C_PIIX4=m
-CONFIG_I2C_PROSAVAGE=m
-CONFIG_I2C_SAVAGE4=m
-# CONFIG_SCx200_ACB is not set
-CONFIG_I2C_SIS5595=m
-CONFIG_I2C_SIS630=m
-CONFIG_I2C_SIS96X=m
-CONFIG_I2C_VIA=m
-CONFIG_I2C_VIAPRO=m
-CONFIG_I2C_VOODOO3=m
-
-#
-# Hardware Sensors Chip support
-#
-CONFIG_I2C_SENSOR=m
-CONFIG_SENSORS_ADM1021=m
-CONFIG_SENSORS_ADM1025=m
-CONFIG_SENSORS_ADM1031=m
-CONFIG_SENSORS_ASB100=m
-CONFIG_SENSORS_DS1621=m
-CONFIG_SENSORS_FSCHER=m
-CONFIG_SENSORS_GL518SM=m
-CONFIG_SENSORS_IT87=m
-CONFIG_SENSORS_LM75=m
-CONFIG_SENSORS_LM77=m
-CONFIG_SENSORS_LM78=m
-CONFIG_SENSORS_LM80=m
-CONFIG_SENSORS_LM83=m
-CONFIG_SENSORS_LM85=m
-CONFIG_SENSORS_LM90=m
-CONFIG_SENSORS_MAX1619=m
-CONFIG_SENSORS_VIA686A=m
-CONFIG_SENSORS_W83781D=m
-CONFIG_SENSORS_W83L785TS=m
-CONFIG_SENSORS_W83627HF=m
-
-#
-# Other I2C Chip support
-#
-CONFIG_SENSORS_EEPROM=m
-CONFIG_SENSORS_PCF8574=m
-CONFIG_SENSORS_PCF8591=m
-CONFIG_SENSORS_RTC8564=m
-# CONFIG_I2C_DEBUG_CORE is not set
-# CONFIG_I2C_DEBUG_ALGO is not set
-# CONFIG_I2C_DEBUG_BUS is not set
-# CONFIG_I2C_DEBUG_CHIP is not set
-
-#
-# Dallas's 1-wire bus
-#
-# CONFIG_W1 is not set
-
-#
-# Misc devices
-#
-CONFIG_IBM_ASM=m
-
-#
-# Multimedia devices
-#
-CONFIG_VIDEO_DEV=m
-
-#
-# Video For Linux
-#
-
-#
-# Video Adapters
-#
-# CONFIG_VIDEO_BT848 is not set
-CONFIG_VIDEO_PMS=m
-CONFIG_VIDEO_CPIA=m
-# CONFIG_VIDEO_CPIA_USB is not set
-CONFIG_VIDEO_SAA5246A=m
-CONFIG_VIDEO_SAA5249=m
-CONFIG_TUNER_3036=m
-CONFIG_VIDEO_STRADIS=m
-CONFIG_VIDEO_ZORAN=m
-CONFIG_VIDEO_ZORAN_BUZ=m
-CONFIG_VIDEO_ZORAN_DC10=m
-CONFIG_VIDEO_ZORAN_DC30=m
-CONFIG_VIDEO_ZORAN_LML33=m
-CONFIG_VIDEO_ZORAN_LML33R10=m
-CONFIG_VIDEO_SAA7134=m
-CONFIG_VIDEO_MXB=m
-CONFIG_VIDEO_DPC=m
-CONFIG_VIDEO_HEXIUM_ORION=m
-CONFIG_VIDEO_HEXIUM_GEMINI=m
-CONFIG_VIDEO_CX88=m
-CONFIG_VIDEO_OVCAMCHIP=m
-
-#
-# Radio Adapters
-#
-CONFIG_RADIO_CADET=m
-CONFIG_RADIO_RTRACK=m
-CONFIG_RADIO_RTRACK2=m
-CONFIG_RADIO_AZTECH=m
-CONFIG_RADIO_GEMTEK=m
-CONFIG_RADIO_GEMTEK_PCI=m
-CONFIG_RADIO_MAXIRADIO=m
-CONFIG_RADIO_MAESTRO=m
-CONFIG_RADIO_SF16FMI=m
-CONFIG_RADIO_SF16FMR2=m
-CONFIG_RADIO_TERRATEC=m
-CONFIG_RADIO_TRUST=m
-CONFIG_RADIO_TYPHOON=m
-CONFIG_RADIO_TYPHOON_PROC_FS=y
-CONFIG_RADIO_ZOLTRIX=m
-
-#
-# Digital Video Broadcasting Devices
-#
-# CONFIG_DVB is not set
-CONFIG_VIDEO_SAA7146=m
-CONFIG_VIDEO_SAA7146_VV=m
-CONFIG_VIDEO_VIDEOBUF=m
-CONFIG_VIDEO_TUNER=m
-CONFIG_VIDEO_BUF=m
-CONFIG_VIDEO_BTCX=m
-CONFIG_VIDEO_IR=m
-
-#
-# Graphics support
-#
-CONFIG_FB=y
-CONFIG_FB_CIRRUS=m
-# CONFIG_FB_PM2 is not set
-# CONFIG_FB_CYBER2000 is not set
-# CONFIG_FB_ASILIANT is not set
-# CONFIG_FB_IMSTT is not set
-CONFIG_FB_VGA16=m
-CONFIG_FB_VESA=y
-CONFIG_VIDEO_SELECT=y
-CONFIG_FB_HGA=m
-CONFIG_FB_HGA_ACCEL=y
-CONFIG_FB_RIVA=m
-# CONFIG_FB_RIVA_I2C is not set
-# CONFIG_FB_RIVA_DEBUG is not set
-CONFIG_FB_I810=m
-CONFIG_FB_I810_GTF=y
-CONFIG_FB_MATROX=m
-CONFIG_FB_MATROX_MILLENIUM=y
-CONFIG_FB_MATROX_MYSTIQUE=y
-CONFIG_FB_MATROX_G450=y
-CONFIG_FB_MATROX_G100=y
-CONFIG_FB_MATROX_I2C=m
-CONFIG_FB_MATROX_MAVEN=m
-CONFIG_FB_MATROX_MULTIHEAD=y
-# CONFIG_FB_RADEON_OLD is not set
-CONFIG_FB_RADEON=m
-CONFIG_FB_RADEON_I2C=y
-# CONFIG_FB_RADEON_DEBUG is not set
-CONFIG_FB_ATY128=m
-CONFIG_FB_ATY=m
-CONFIG_FB_ATY_CT=y
-CONFIG_FB_ATY_GX=y
-# CONFIG_FB_ATY_XL_INIT is not set
-# CONFIG_FB_SIS is not set
-CONFIG_FB_NEOMAGIC=m
-CONFIG_FB_KYRO=m
-CONFIG_FB_3DFX=m
-CONFIG_FB_3DFX_ACCEL=y
-CONFIG_FB_VOODOO1=m
-CONFIG_FB_TRIDENT=m
-CONFIG_FB_TRIDENT_ACCEL=y
-# CONFIG_FB_VIRTUAL is not set
-
-#
-# Console display driver support
-#
-CONFIG_VGA_CONSOLE=y
-CONFIG_MDA_CONSOLE=m
-CONFIG_DUMMY_CONSOLE=y
-CONFIG_FRAMEBUFFER_CONSOLE=y
-# CONFIG_FONTS is not set
-CONFIG_FONT_8x8=y
-CONFIG_FONT_8x16=y
-
-#
-# Logo configuration
-#
-# CONFIG_LOGO is not set
-
-#
-# Sound
-#
-CONFIG_SOUND=m
-
-#
-# Advanced Linux Sound Architecture
-#
-CONFIG_SND=m
-CONFIG_SND_TIMER=m
-CONFIG_SND_PCM=m
-CONFIG_SND_HWDEP=m
-CONFIG_SND_RAWMIDI=m
-CONFIG_SND_SEQUENCER=m
-CONFIG_SND_SEQ_DUMMY=m
-CONFIG_SND_OSSEMUL=y
-CONFIG_SND_MIXER_OSS=m
-CONFIG_SND_PCM_OSS=m
-CONFIG_SND_SEQUENCER_OSS=y
-CONFIG_SND_RTCTIMER=m
-# CONFIG_SND_VERBOSE_PRINTK is not set
-# CONFIG_SND_DEBUG is not set
-
-#
-# Generic devices
-#
-CONFIG_SND_MPU401_UART=m
-CONFIG_SND_OPL3_LIB=m
-CONFIG_SND_OPL4_LIB=m
-CONFIG_SND_VX_LIB=m
-CONFIG_SND_DUMMY=m
-CONFIG_SND_VIRMIDI=m
-CONFIG_SND_MTPAV=m
-# CONFIG_SND_SERIAL_U16550 is not set
-CONFIG_SND_MPU401=m
-
-#
-# ISA devices
-#
-CONFIG_SND_AD1816A=m
-CONFIG_SND_AD1848=m
-CONFIG_SND_CS4231=m
-CONFIG_SND_CS4232=m
-CONFIG_SND_CS4236=m
-CONFIG_SND_ES968=m
-CONFIG_SND_ES1688=m
-CONFIG_SND_ES18XX=m
-CONFIG_SND_GUSCLASSIC=m
-CONFIG_SND_GUSEXTREME=m
-CONFIG_SND_GUSMAX=m
-CONFIG_SND_INTERWAVE=m
-CONFIG_SND_INTERWAVE_STB=m
-CONFIG_SND_OPTI92X_AD1848=m
-CONFIG_SND_OPTI92X_CS4231=m
-CONFIG_SND_OPTI93X=m
-CONFIG_SND_SB8=m
-CONFIG_SND_SB16=m
-CONFIG_SND_SBAWE=m
-CONFIG_SND_SB16_CSP=y
-# CONFIG_SND_WAVEFRONT is not set
-CONFIG_SND_ALS100=m
-CONFIG_SND_AZT2320=m
-CONFIG_SND_CMI8330=m
-CONFIG_SND_DT019X=m
-CONFIG_SND_OPL3SA2=m
-CONFIG_SND_SGALAXY=m
-CONFIG_SND_SSCAPE=m
-
-#
-# PCI devices
-#
-CONFIG_SND_AC97_CODEC=m
-CONFIG_SND_ALI5451=m
-CONFIG_SND_ATIIXP=m
-CONFIG_SND_AU8810=m
-CONFIG_SND_AU8820=m
-CONFIG_SND_AU8830=m
-CONFIG_SND_AZT3328=m
-CONFIG_SND_BT87X=m
-CONFIG_SND_CS46XX=m
-CONFIG_SND_CS46XX_NEW_DSP=y
-CONFIG_SND_CS4281=m
-CONFIG_SND_EMU10K1=m
-CONFIG_SND_KORG1212=m
-CONFIG_SND_MIXART=m
-CONFIG_SND_NM256=m
-CONFIG_SND_RME32=m
-CONFIG_SND_RME96=m
-CONFIG_SND_RME9652=m
-CONFIG_SND_HDSP=m
-CONFIG_SND_TRIDENT=m
-CONFIG_SND_YMFPCI=m
-CONFIG_SND_ALS4000=m
-CONFIG_SND_CMIPCI=m
-CONFIG_SND_ENS1370=m
-CONFIG_SND_ENS1371=m
-CONFIG_SND_ES1938=m
-CONFIG_SND_ES1968=m
-CONFIG_SND_MAESTRO3=m
-CONFIG_SND_FM801=m
-CONFIG_SND_FM801_TEA575X=m
-CONFIG_SND_ICE1712=m
-CONFIG_SND_ICE1724=m
-CONFIG_SND_INTEL8X0=m
-CONFIG_SND_INTEL8X0M=m
-CONFIG_SND_SONICVIBES=m
-CONFIG_SND_VIA82XX=m
-CONFIG_SND_VX222=m
-
-#
-# ALSA USB devices
-#
-# CONFIG_SND_USB_AUDIO is not set
-
-#
-# Open Sound System
-#
-# CONFIG_SOUND_PRIME is not set
-
-#
-# USB support
-#
-CONFIG_USB=m
-# CONFIG_USB_DEBUG is not set
-
-#
-# Miscellaneous USB options
-#
-CONFIG_USB_DEVICEFS=y
-# CONFIG_USB_BANDWIDTH is not set
-# CONFIG_USB_DYNAMIC_MINORS is not set
-
-#
-# USB Host Controller Drivers
-#
-CONFIG_USB_EHCI_HCD=m
-CONFIG_USB_EHCI_SPLIT_ISO=y
-CONFIG_USB_EHCI_ROOT_HUB_TT=y
-CONFIG_USB_OHCI_HCD=m
-CONFIG_USB_UHCI_HCD=m
-
-#
-# USB Device Class drivers
-#
-# CONFIG_USB_AUDIO is not set
-# CONFIG_USB_BLUETOOTH_TTY is not set
-# CONFIG_USB_MIDI is not set
-# CONFIG_USB_ACM is not set
-# CONFIG_USB_PRINTER is not set
-CONFIG_USB_STORAGE=m
-# CONFIG_USB_STORAGE_DEBUG is not set
-CONFIG_USB_STORAGE_RW_DETECT=y
-CONFIG_USB_STORAGE_DATAFAB=y
-CONFIG_USB_STORAGE_FREECOM=y
-CONFIG_USB_STORAGE_ISD200=y
-CONFIG_USB_STORAGE_DPCM=y
-CONFIG_USB_STORAGE_HP8200e=y
-CONFIG_USB_STORAGE_SDDR09=y
-CONFIG_USB_STORAGE_SDDR55=y
-CONFIG_USB_STORAGE_JUMPSHOT=y
-
-#
-# USB Human Interface Devices (HID)
-#
-# CONFIG_USB_HID is not set
-
-#
-# USB HID Boot Protocol drivers
-#
-# CONFIG_USB_KBD is not set
-# CONFIG_USB_MOUSE is not set
-# CONFIG_USB_AIPTEK is not set
-# CONFIG_USB_WACOM is not set
-# CONFIG_USB_KBTAB is not set
-# CONFIG_USB_POWERMATE is not set
-# CONFIG_USB_MTOUCH is not set
-# CONFIG_USB_EGALAX is not set
-# CONFIG_USB_XPAD is not set
-# CONFIG_USB_ATI_REMOTE is not set
-
-#
-# USB Imaging devices
-#
-# CONFIG_USB_MDC800 is not set
-# CONFIG_USB_MICROTEK is not set
-# CONFIG_USB_HPUSBSCSI is not set
-
-#
-# USB Multimedia devices
-#
-# CONFIG_USB_DABUSB is not set
-# CONFIG_USB_VICAM is not set
-# CONFIG_USB_DSBR is not set
-# CONFIG_USB_IBMCAM is not set
-# CONFIG_USB_KONICAWC is not set
-# CONFIG_USB_OV511 is not set
-# CONFIG_USB_PWC is not set
-# CONFIG_USB_SE401 is not set
-# CONFIG_USB_SN9C102 is not set
-# CONFIG_USB_STV680 is not set
-# CONFIG_USB_W9968CF is not set
-
-#
-# USB Network adaptors
-#
-CONFIG_USB_CATC=m
-CONFIG_USB_KAWETH=m
-CONFIG_USB_PEGASUS=m
-CONFIG_USB_RTL8150=m
-CONFIG_USB_USBNET=m
-
-#
-# USB Host-to-Host Cables
-#
-CONFIG_USB_ALI_M5632=y
-CONFIG_USB_AN2720=y
-CONFIG_USB_BELKIN=y
-CONFIG_USB_GENESYS=y
-CONFIG_USB_NET1080=y
-CONFIG_USB_PL2301=y
-
-#
-# Intelligent USB Devices/Gadgets
-#
-CONFIG_USB_ARMLINUX=y
-CONFIG_USB_EPSON2888=y
-CONFIG_USB_ZAURUS=y
-CONFIG_USB_CDCETHER=y
-
-#
-# USB Network Adapters
-#
-CONFIG_USB_AX8817X=y
-
-#
-# USB port drivers
-#
-
-#
-# USB Serial Converter support
-#
-# CONFIG_USB_SERIAL is not set
-
-#
-# USB Miscellaneous drivers
-#
-# CONFIG_USB_EMI62 is not set
-# CONFIG_USB_EMI26 is not set
-# CONFIG_USB_TIGL is not set
-# CONFIG_USB_AUERSWALD is not set
-# CONFIG_USB_RIO500 is not set
-# CONFIG_USB_LEGOTOWER is not set
-# CONFIG_USB_LCD is not set
-# CONFIG_USB_LED is not set
-# CONFIG_USB_CYTHERM is not set
-# CONFIG_USB_PHIDGETSERVO is not set
-# CONFIG_USB_TEST is not set
-
-#
-# USB Gadget Support
-#
-# CONFIG_USB_GADGET is not set
-
-#
-# File systems
-#
-CONFIG_EXT2_FS=y
-CONFIG_EXT2_FS_XATTR=y
-CONFIG_EXT2_FS_POSIX_ACL=y
-CONFIG_EXT2_FS_SECURITY=y
-CONFIG_EXT3_FS=y
-CONFIG_EXT3_FS_XATTR=y
-CONFIG_EXT3_FS_POSIX_ACL=y
-CONFIG_EXT3_FS_SECURITY=y
-CONFIG_JBD=y
-# CONFIG_JBD_DEBUG is not set
-CONFIG_FS_MBCACHE=y
-# CONFIG_REISERFS_FS is not set
-# CONFIG_JFS_FS is not set
-CONFIG_FS_POSIX_ACL=y
-# CONFIG_XFS_FS is not set
-# CONFIG_MINIX_FS is not set
-# CONFIG_ROMFS_FS is not set
-CONFIG_QUOTA=y
-# CONFIG_QFMT_V1 is not set
-CONFIG_QFMT_V2=y
-CONFIG_QUOTACTL=y
-CONFIG_AUTOFS_FS=m
-CONFIG_AUTOFS4_FS=m
-
-#
-# CD-ROM/DVD Filesystems
-#
-CONFIG_ISO9660_FS=y
-CONFIG_JOLIET=y
-CONFIG_ZISOFS=y
-CONFIG_ZISOFS_FS=y
-CONFIG_UDF_FS=m
-CONFIG_UDF_NLS=y
-
-#
-# DOS/FAT/NT Filesystems
-#
-CONFIG_FAT_FS=m
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=m
-CONFIG_FAT_DEFAULT_CODEPAGE=437
-CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
-# CONFIG_NTFS_FS is not set
-
-#
-# Pseudo filesystems
-#
-CONFIG_PROC_FS=y
-CONFIG_PROC_KCORE=y
-CONFIG_SYSFS=y
-# CONFIG_DEVFS_FS is not set
-CONFIG_DEVPTS_FS_XATTR=y
-CONFIG_DEVPTS_FS_SECURITY=y
-CONFIG_TMPFS=y
-CONFIG_HUGETLBFS=y
-CONFIG_HUGETLB_PAGE=y
-CONFIG_RAMFS=y
-# CONFIG_RELAYFS_FS is not set
-
-#
-# Miscellaneous filesystems
-#
-# CONFIG_ADFS_FS is not set
-# CONFIG_AFFS_FS is not set
-# CONFIG_HFS_FS is not set
-# CONFIG_HFSPLUS_FS is not set
-# CONFIG_BEFS_FS is not set
-# CONFIG_BFS_FS is not set
-# CONFIG_EFS_FS is not set
-# CONFIG_JFFS_FS is not set
-# CONFIG_JFFS2_FS is not set
-# CONFIG_CRAMFS is not set
-# CONFIG_VXFS_FS is not set
-# CONFIG_HPFS_FS is not set
-# CONFIG_QNX4FS_FS is not set
-# CONFIG_SYSV_FS is not set
-# CONFIG_UFS_FS is not set
-
-#
-# Network File Systems
-#
-# CONFIG_NFS_FS is not set
-# CONFIG_NFSD is not set
-# CONFIG_EXPORTFS is not set
-# CONFIG_SMB_FS is not set
-# CONFIG_CIFS is not set
-# CONFIG_NCP_FS is not set
-# CONFIG_CODA_FS is not set
-# CONFIG_AFS_FS is not set
-
-#
-# Partition Types
-#
-# CONFIG_PARTITION_ADVANCED is not set
-CONFIG_MSDOS_PARTITION=y
-
-#
-# Native Language Support
-#
-CONFIG_NLS=y
-CONFIG_NLS_DEFAULT="utf8"
-CONFIG_NLS_CODEPAGE_437=m
-# CONFIG_NLS_CODEPAGE_737 is not set
-# CONFIG_NLS_CODEPAGE_775 is not set
-# CONFIG_NLS_CODEPAGE_850 is not set
-# CONFIG_NLS_CODEPAGE_852 is not set
-# CONFIG_NLS_CODEPAGE_855 is not set
-# CONFIG_NLS_CODEPAGE_857 is not set
-# CONFIG_NLS_CODEPAGE_860 is not set
-# CONFIG_NLS_CODEPAGE_861 is not set
-# CONFIG_NLS_CODEPAGE_862 is not set
-# CONFIG_NLS_CODEPAGE_863 is not set
-# CONFIG_NLS_CODEPAGE_864 is not set
-# CONFIG_NLS_CODEPAGE_865 is not set
-# CONFIG_NLS_CODEPAGE_866 is not set
-# CONFIG_NLS_CODEPAGE_869 is not set
-# CONFIG_NLS_CODEPAGE_936 is not set
-# CONFIG_NLS_CODEPAGE_950 is not set
-# CONFIG_NLS_CODEPAGE_932 is not set
-# CONFIG_NLS_CODEPAGE_949 is not set
-# CONFIG_NLS_CODEPAGE_874 is not set
-# CONFIG_NLS_ISO8859_8 is not set
-# CONFIG_NLS_CODEPAGE_1250 is not set
-# CONFIG_NLS_CODEPAGE_1251 is not set
-# CONFIG_NLS_ASCII is not set
-CONFIG_NLS_ISO8859_1=m
-# CONFIG_NLS_ISO8859_2 is not set
-# CONFIG_NLS_ISO8859_3 is not set
-# CONFIG_NLS_ISO8859_4 is not set
-# CONFIG_NLS_ISO8859_5 is not set
-# CONFIG_NLS_ISO8859_6 is not set
-# CONFIG_NLS_ISO8859_7 is not set
-# CONFIG_NLS_ISO8859_9 is not set
-# CONFIG_NLS_ISO8859_13 is not set
-# CONFIG_NLS_ISO8859_14 is not set
-# CONFIG_NLS_ISO8859_15 is not set
-# CONFIG_NLS_KOI8_R is not set
-# CONFIG_NLS_KOI8_U is not set
-CONFIG_NLS_UTF8=m
-
-#
-# Profiling support
-#
-# CONFIG_PROFILING is not set
-
-#
-# Kernel hacking
-#
-CONFIG_CRASH_DUMP=y
-CONFIG_CRASH_DUMP_BLOCKDEV=y
-# CONFIG_CRASH_DUMP_NETDEV is not set
-# CONFIG_CRASH_DUMP_MEMDEV is not set
-# CONFIG_CRASH_DUMP_COMPRESS_RLE is not set
-# CONFIG_CRASH_DUMP_COMPRESS_GZIP is not set
-CONFIG_DEBUG_KERNEL=y
-CONFIG_EARLY_PRINTK=y
-CONFIG_DEBUG_STACKOVERFLOW=y
-# CONFIG_DEBUG_STACK_USAGE is not set
-CONFIG_DEBUG_SLAB=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_SPINLOCK=y
-# CONFIG_DEBUG_PAGEALLOC is not set
-CONFIG_DEBUG_HIGHMEM=y
-CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_SPINLOCK_SLEEP=y
-# CONFIG_FRAME_POINTER is not set
-
-#
-# Linux VServer
-#
-CONFIG_VSERVER_LEGACY=y
-# CONFIG_VSERVER_PROC_SECURE is not set
-# CONFIG_VSERVER_HARDCPU is not set
-# CONFIG_INOXID_NONE is not set
-# CONFIG_INOXID_UID16 is not set
-# CONFIG_INOXID_GID16 is not set
-CONFIG_INOXID_UGID24=y
-# CONFIG_INOXID_INTERN is not set
-# CONFIG_INOXID_RUNTIME is not set
-CONFIG_VSERVER_DEBUG=y
-
-#
-# Security options
-#
-# CONFIG_SECURITY is not set
-
-#
-# Cryptographic options
-#
-# CONFIG_CRYPTO is not set
-
-#
-# Library routines
-#
-CONFIG_CRC_CCITT=m
-CONFIG_CRC32=y
-CONFIG_LIBCRC32C=m
-CONFIG_ZLIB_INFLATE=y
-CONFIG_X86_BIOS_REBOOT=y
-CONFIG_PC=y
+++ /dev/null
-#
-# Automatically generated make config: don't edit
-#
-CONFIG_USERMODE=y
-CONFIG_MMU=y
-CONFIG_UID16=y
-CONFIG_RWSEM_GENERIC_SPINLOCK=y
-
-#
-# UML-specific options
-#
-CONFIG_MODE_TT=y
-CONFIG_MODE_SKAS=y
-CONFIG_NET=y
-CONFIG_BINFMT_ELF=y
-CONFIG_BINFMT_MISC=m
-CONFIG_EXTERNFS=y
-CONFIG_HOSTFS=y
-# CONFIG_HUMFS is not set
-# CONFIG_HPPFS is not set
-CONFIG_MCONSOLE=y
-CONFIG_MAGIC_SYSRQ=y
-# CONFIG_HOST_2G_2G is not set
-# CONFIG_UML_SMP is not set
-# CONFIG_SMP is not set
-CONFIG_NEST_LEVEL=0
-CONFIG_KERNEL_HALF_GIGS=1
-# CONFIG_HIGHMEM is not set
-CONFIG_PROC_MM=y
-CONFIG_KERNEL_STACK_ORDER=2
-CONFIG_UML_REAL_TIME_CLOCK=y
-
-#
-# Code maturity level options
-#
-CONFIG_EXPERIMENTAL=y
-CONFIG_CLEAN_COMPILE=y
-CONFIG_BROKEN_ON_SMP=y
-
-#
-# General setup
-#
-CONFIG_SWAP=y
-CONFIG_SYSVIPC=y
-CONFIG_POSIX_MQUEUE=y
-CONFIG_BSD_PROCESS_ACCT=y
-
-#
-# Class Based Kernel Resource Management
-#
-# CONFIG_CKRM is not set
-CONFIG_BSD_PROCESS_ACCT_V3=y
-CONFIG_SYSCTL=y
-# CONFIG_AUDIT is not set
-CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_HOTPLUG is not set
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-# CONFIG_EMBEDDED is not set
-# CONFIG_DELAY_ACCT is not set
-CONFIG_KALLSYMS=y
-# CONFIG_KALLSYMS_EXTRA_PASS is not set
-CONFIG_FUTEX=y
-CONFIG_EPOLL=y
-CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_AS=y
-CONFIG_IOSCHED_DEADLINE=y
-CONFIG_IOSCHED_CFQ=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-
-#
-# Loadable module support
-#
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_MODULE_FORCE_UNLOAD is not set
-CONFIG_OBSOLETE_MODPARM=y
-# CONFIG_MODVERSIONS is not set
-# CONFIG_MODULE_SIG is not set
-CONFIG_KMOD=y
-
-#
-# Generic Driver Options
-#
-CONFIG_STANDALONE=y
-CONFIG_PREVENT_FIRMWARE_BUILD=y
-
-#
-# Character Devices
-#
-CONFIG_STDIO_CONSOLE=y
-CONFIG_SSL=y
-CONFIG_FD_CHAN=y
-CONFIG_NULL_CHAN=y
-CONFIG_PORT_CHAN=y
-CONFIG_PTY_CHAN=y
-CONFIG_TTY_CHAN=y
-CONFIG_XTERM_CHAN=y
-CONFIG_CON_ZERO_CHAN="fd:0,fd:1"
-CONFIG_CON_CHAN="xterm"
-CONFIG_SSL_CHAN="pty"
-CONFIG_UNIX98_PTYS=y
-CONFIG_LEGACY_PTYS=y
-CONFIG_LEGACY_PTY_COUNT=256
-CONFIG_WATCHDOG=y
-# CONFIG_WATCHDOG_NOWAYOUT is not set
-CONFIG_SOFT_WATCHDOG=y
-CONFIG_UML_WATCHDOG=y
-CONFIG_UML_SOUND=y
-CONFIG_SOUND=y
-CONFIG_HOSTAUDIO=y
-
-#
-# Block Devices
-#
-CONFIG_BLK_DEV_UBD=y
-# CONFIG_BLK_DEV_UBD_SYNC is not set
-CONFIG_BLK_DEV_COW_COMMON=y
-CONFIG_BLK_DEV_LOOP=m
-CONFIG_BLK_DEV_NBD=m
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=16384
-CONFIG_BLK_DEV_INITRD=y
-# CONFIG_MMAPPER is not set
-CONFIG_NETDEVICES=y
-
-#
-# UML Network Devices
-#
-CONFIG_UML_NET=y
-CONFIG_UML_NET_ETHERTAP=y
-CONFIG_UML_NET_TUNTAP=y
-CONFIG_UML_NET_SLIP=y
-CONFIG_UML_NET_DAEMON=y
-CONFIG_UML_NET_MCAST=y
-# CONFIG_UML_NET_PCAP is not set
-CONFIG_UML_NET_SLIRP=y
-
-#
-# Networking support
-#
-
-#
-# Networking options
-#
-CONFIG_PACKET=y
-CONFIG_PACKET_MMAP=y
-# CONFIG_NETLINK_DEV is not set
-CONFIG_UNIX=y
-# CONFIG_NET_KEY is not set
-CONFIG_INET=y
-# CONFIG_IP_MULTICAST is not set
-# CONFIG_IP_ADVANCED_ROUTER is not set
-# CONFIG_IP_PNP is not set
-# CONFIG_NET_IPIP is not set
-# CONFIG_NET_IPGRE is not set
-# CONFIG_ARPD is not set
-# CONFIG_SYN_COOKIES is not set
-# CONFIG_INET_AH is not set
-# CONFIG_INET_ESP is not set
-# CONFIG_INET_IPCOMP is not set
-# CONFIG_ACCEPT_QUEUES is not set
-
-#
-# IP: Virtual Server Configuration
-#
-# CONFIG_IP_VS is not set
-CONFIG_ICMP_IPOD=y
-# CONFIG_IPV6 is not set
-CONFIG_NETFILTER=y
-# CONFIG_NETFILTER_DEBUG is not set
-
-#
-# IP: Netfilter Configuration
-#
-CONFIG_IP_NF_CONNTRACK=m
-CONFIG_IP_NF_FTP=m
-CONFIG_IP_NF_IRC=m
-CONFIG_IP_NF_TFTP=m
-CONFIG_IP_NF_AMANDA=m
-CONFIG_IP_NF_QUEUE=m
-CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_LIMIT=m
-CONFIG_IP_NF_MATCH_IPRANGE=m
-CONFIG_IP_NF_MATCH_MAC=m
-CONFIG_IP_NF_MATCH_PKTTYPE=m
-CONFIG_IP_NF_MATCH_MARK=m
-CONFIG_IP_NF_MATCH_MULTIPORT=m
-CONFIG_IP_NF_MATCH_TOS=m
-CONFIG_IP_NF_MATCH_RECENT=m
-CONFIG_IP_NF_MATCH_ECN=m
-CONFIG_IP_NF_MATCH_DSCP=m
-CONFIG_IP_NF_MATCH_AH_ESP=m
-CONFIG_IP_NF_MATCH_LENGTH=m
-CONFIG_IP_NF_MATCH_TTL=m
-CONFIG_IP_NF_MATCH_TCPMSS=m
-CONFIG_IP_NF_MATCH_HELPER=m
-CONFIG_IP_NF_MATCH_STATE=m
-CONFIG_IP_NF_MATCH_CONNTRACK=m
-CONFIG_IP_NF_MATCH_OWNER=m
-CONFIG_IP_NF_FILTER=m
-CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_NAT=m
-CONFIG_IP_NF_NAT_NEEDED=y
-CONFIG_IP_NF_TARGET_MASQUERADE=m
-CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_IP_NF_TARGET_NETMAP=m
-CONFIG_IP_NF_TARGET_SAME=m
-CONFIG_IP_NF_NAT_LOCAL=y
-CONFIG_IP_NF_NAT_SNMP_BASIC=m
-CONFIG_IP_NF_NAT_IRC=m
-CONFIG_IP_NF_NAT_FTP=m
-CONFIG_IP_NF_NAT_TFTP=m
-CONFIG_IP_NF_NAT_AMANDA=m
-CONFIG_IP_NF_MANGLE=m
-CONFIG_IP_NF_TARGET_TOS=m
-CONFIG_IP_NF_TARGET_ECN=m
-CONFIG_IP_NF_TARGET_DSCP=m
-CONFIG_IP_NF_TARGET_MARK=m
-CONFIG_IP_NF_TARGET_CLASSIFY=m
-CONFIG_IP_NF_TARGET_LOG=m
-CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_IP_NF_TARGET_TCPMSS=m
-CONFIG_IP_NF_ARPTABLES=m
-CONFIG_IP_NF_ARPFILTER=m
-CONFIG_IP_NF_ARP_MANGLE=m
-# CONFIG_IP_NF_COMPAT_IPCHAINS is not set
-# CONFIG_IP_NF_COMPAT_IPFWADM is not set
-CONFIG_IP_NF_TARGET_NOTRACK=m
-CONFIG_IP_NF_RAW=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
-CONFIG_IP_NF_MATCH_REALM=m
-
-#
-# SCTP Configuration (EXPERIMENTAL)
-#
-# CONFIG_IP_SCTP is not set
-# CONFIG_ATM is not set
-# CONFIG_BRIDGE is not set
-# CONFIG_VLAN_8021Q is not set
-# CONFIG_DECNET is not set
-# CONFIG_LLC2 is not set
-# CONFIG_IPX is not set
-# CONFIG_ATALK is not set
-# CONFIG_X25 is not set
-# CONFIG_LAPB is not set
-# CONFIG_NET_DIVERT is not set
-# CONFIG_ECONET is not set
-# CONFIG_WAN_ROUTER is not set
-# CONFIG_NET_HW_FLOWCONTROL is not set
-
-#
-# QoS and/or fair queueing
-#
-CONFIG_NET_SCHED=y
-CONFIG_NET_SCH_CLK_JIFFIES=y
-# CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set
-# CONFIG_NET_SCH_CLK_CPU is not set
-# CONFIG_NET_SCH_CBQ is not set
-CONFIG_NET_SCH_HTB=m
-# CONFIG_NET_SCH_HFSC is not set
-# CONFIG_NET_SCH_PRIO is not set
-# CONFIG_NET_SCH_RED is not set
-# CONFIG_NET_SCH_SFQ is not set
-# CONFIG_NET_SCH_TEQL is not set
-# CONFIG_NET_SCH_TBF is not set
-# CONFIG_NET_SCH_GRED is not set
-# CONFIG_NET_SCH_DSMARK is not set
-# CONFIG_NET_SCH_NETEM is not set
-# CONFIG_NET_SCH_INGRESS is not set
-# CONFIG_NET_QOS is not set
-# CONFIG_NET_CLS is not set
-CONFIG_NET_CLS_ROUTE=y
-
-#
-# Network testing
-#
-# CONFIG_NET_PKTGEN is not set
-# CONFIG_NETPOLL is not set
-# CONFIG_NET_POLL_CONTROLLER is not set
-# CONFIG_HAMRADIO is not set
-# CONFIG_IRDA is not set
-# CONFIG_BT is not set
-# CONFIG_TUX is not set
-CONFIG_DUMMY=m
-# CONFIG_BONDING is not set
-# CONFIG_EQUALIZER is not set
-CONFIG_TUN=m
-
-#
-# Ethernet (10 or 100Mbit)
-#
-# CONFIG_NET_ETHERNET is not set
-
-#
-# Ethernet (1000 Mbit)
-#
-
-#
-# Ethernet (10000 Mbit)
-#
-
-#
-# Token Ring devices
-#
-
-#
-# Wireless LAN (non-hamradio)
-#
-# CONFIG_NET_RADIO is not set
-
-#
-# Wan interfaces
-#
-# CONFIG_WAN is not set
-# CONFIG_PPP is not set
-# CONFIG_SLIP is not set
-# CONFIG_SHAPER is not set
-# CONFIG_NETCONSOLE is not set
-
-#
-# File systems
-#
-CONFIG_EXT2_FS=y
-CONFIG_EXT2_FS_XATTR=y
-CONFIG_EXT2_FS_POSIX_ACL=y
-CONFIG_EXT2_FS_SECURITY=y
-CONFIG_EXT3_FS=y
-CONFIG_EXT3_FS_XATTR=y
-CONFIG_EXT3_FS_POSIX_ACL=y
-CONFIG_EXT3_FS_SECURITY=y
-CONFIG_JBD=y
-# CONFIG_JBD_DEBUG is not set
-CONFIG_FS_MBCACHE=y
-# CONFIG_REISERFS_FS is not set
-# CONFIG_JFS_FS is not set
-CONFIG_FS_POSIX_ACL=y
-# CONFIG_XFS_FS is not set
-# CONFIG_MINIX_FS is not set
-# CONFIG_ROMFS_FS is not set
-CONFIG_QUOTA=y
-# CONFIG_QFMT_V1 is not set
-CONFIG_QFMT_V2=y
-CONFIG_QUOTACTL=y
-CONFIG_AUTOFS_FS=m
-CONFIG_AUTOFS4_FS=m
-
-#
-# CD-ROM/DVD Filesystems
-#
-CONFIG_ISO9660_FS=y
-CONFIG_JOLIET=y
-CONFIG_ZISOFS=y
-CONFIG_ZISOFS_FS=y
-CONFIG_UDF_FS=m
-CONFIG_UDF_NLS=y
-
-#
-# DOS/FAT/NT Filesystems
-#
-CONFIG_FAT_FS=m
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=m
-CONFIG_FAT_DEFAULT_CODEPAGE=437
-CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
-# CONFIG_NTFS_FS is not set
-
-#
-# Pseudo filesystems
-#
-CONFIG_PROC_FS=y
-CONFIG_PROC_KCORE=y
-CONFIG_SYSFS=y
-CONFIG_DEVFS_FS=y
-CONFIG_DEVFS_MOUNT=y
-# CONFIG_DEVFS_DEBUG is not set
-CONFIG_DEVPTS_FS_XATTR=y
-CONFIG_DEVPTS_FS_SECURITY=y
-CONFIG_TMPFS=y
-# CONFIG_HUGETLB_PAGE is not set
-CONFIG_RAMFS=y
-# CONFIG_RELAYFS_FS is not set
-
-#
-# Miscellaneous filesystems
-#
-# CONFIG_ADFS_FS is not set
-# CONFIG_AFFS_FS is not set
-# CONFIG_HFS_FS is not set
-# CONFIG_HFSPLUS_FS is not set
-# CONFIG_BEFS_FS is not set
-# CONFIG_BFS_FS is not set
-# CONFIG_EFS_FS is not set
-# CONFIG_CRAMFS is not set
-# CONFIG_VXFS_FS is not set
-# CONFIG_HPFS_FS is not set
-# CONFIG_QNX4FS_FS is not set
-# CONFIG_SYSV_FS is not set
-# CONFIG_UFS_FS is not set
-
-#
-# Network File Systems
-#
-# CONFIG_NFS_FS is not set
-# CONFIG_NFSD is not set
-# CONFIG_EXPORTFS is not set
-# CONFIG_SMB_FS is not set
-# CONFIG_CIFS is not set
-# CONFIG_NCP_FS is not set
-# CONFIG_CODA_FS is not set
-# CONFIG_AFS_FS is not set
-
-#
-# Partition Types
-#
-# CONFIG_PARTITION_ADVANCED is not set
-CONFIG_MSDOS_PARTITION=y
-
-#
-# Native Language Support
-#
-CONFIG_NLS=y
-CONFIG_NLS_DEFAULT="utf8"
-# CONFIG_NLS_CODEPAGE_437 is not set
-# CONFIG_NLS_CODEPAGE_737 is not set
-# CONFIG_NLS_CODEPAGE_775 is not set
-# CONFIG_NLS_CODEPAGE_850 is not set
-# CONFIG_NLS_CODEPAGE_852 is not set
-# CONFIG_NLS_CODEPAGE_855 is not set
-# CONFIG_NLS_CODEPAGE_857 is not set
-# CONFIG_NLS_CODEPAGE_860 is not set
-# CONFIG_NLS_CODEPAGE_861 is not set
-# CONFIG_NLS_CODEPAGE_862 is not set
-# CONFIG_NLS_CODEPAGE_863 is not set
-# CONFIG_NLS_CODEPAGE_864 is not set
-# CONFIG_NLS_CODEPAGE_865 is not set
-# CONFIG_NLS_CODEPAGE_866 is not set
-# CONFIG_NLS_CODEPAGE_869 is not set
-# CONFIG_NLS_CODEPAGE_936 is not set
-# CONFIG_NLS_CODEPAGE_950 is not set
-# CONFIG_NLS_CODEPAGE_932 is not set
-# CONFIG_NLS_CODEPAGE_949 is not set
-# CONFIG_NLS_CODEPAGE_874 is not set
-# CONFIG_NLS_ISO8859_8 is not set
-# CONFIG_NLS_CODEPAGE_1250 is not set
-# CONFIG_NLS_CODEPAGE_1251 is not set
-# CONFIG_NLS_ASCII is not set
-# CONFIG_NLS_ISO8859_1 is not set
-# CONFIG_NLS_ISO8859_2 is not set
-# CONFIG_NLS_ISO8859_3 is not set
-# CONFIG_NLS_ISO8859_4 is not set
-# CONFIG_NLS_ISO8859_5 is not set
-# CONFIG_NLS_ISO8859_6 is not set
-# CONFIG_NLS_ISO8859_7 is not set
-# CONFIG_NLS_ISO8859_9 is not set
-# CONFIG_NLS_ISO8859_13 is not set
-# CONFIG_NLS_ISO8859_14 is not set
-# CONFIG_NLS_ISO8859_15 is not set
-# CONFIG_NLS_KOI8_R is not set
-# CONFIG_NLS_KOI8_U is not set
-# CONFIG_NLS_UTF8 is not set
-
-#
-# Linux VServer
-#
-CONFIG_VSERVER_LEGACY=y
-# CONFIG_VSERVER_PROC_SECURE is not set
-CONFIG_VSERVER_HARDCPU=y
-# CONFIG_INOXID_NONE is not set
-# CONFIG_INOXID_UID16 is not set
-# CONFIG_INOXID_GID16 is not set
-CONFIG_INOXID_UGID24=y
-# CONFIG_INOXID_INTERN is not set
-# CONFIG_INOXID_RUNTIME is not set
-CONFIG_VSERVER_DEBUG=y
-
-#
-# Security options
-#
-# CONFIG_SECURITY is not set
-
-#
-# Cryptographic options
-#
-# CONFIG_CRYPTO is not set
-
-#
-# Library routines
-#
-# CONFIG_CRC_CCITT is not set
-CONFIG_CRC32=y
-# CONFIG_LIBCRC32C is not set
-CONFIG_ZLIB_INFLATE=y
-
-#
-# SCSI support
-#
-# CONFIG_SCSI is not set
-
-#
-# Multi-device support (RAID and LVM)
-#
-# CONFIG_MD is not set
-
-#
-# Memory Technology Devices (MTD)
-#
-# CONFIG_MTD is not set
-
-#
-# Kernel hacking
-#
-# CONFIG_DEBUG_SLAB is not set
-# CONFIG_DEBUG_SPINLOCK is not set
-CONFIG_DEBUG_INFO=y
-CONFIG_FRAME_POINTER=y
-# CONFIG_PT_PROXY is not set
-# CONFIG_GPROF is not set
-# CONFIG_GCOV is not set
config CRYPTO_AES
tristate "AES cipher algorithms"
- depends on CRYPTO && !(X86 && !X86_64)
+ depends on CRYPTO
help
AES cipher algorithms (FIPS-197). AES uses the Rijndael
algorithm.
See http://csrc.nist.gov/CryptoToolkit/aes/ for more information.
-config CRYPTO_AES_586
- tristate "AES cipher algorithms (i586)"
- depends on CRYPTO && (X86 && !X86_64)
- help
- AES cipher algorithms (FIPS-197). AES uses the Rijndael
- algorithm.
-
- Rijndael appears to be consistently a very good performer in
- both hardware and software across a wide range of computing
- environments regardless of its use in feedback or non-feedback
- modes. Its key setup time is excellent, and its key agility is
- good. Rijndael's very low memory requirements make it very well
- suited for restricted-space environments, in which it also
- demonstrates excellent performance. Rijndael's operations are
- among the easiest to defend against power and timing attacks.
-
- The AES specifies three key sizes: 128, 192 and 256 bits
-
- See http://csrc.nist.gov/encryption/aes/ for more information.
-
config CRYPTO_CAST5
tristate "CAST5 (CAST-128) cipher algorithm"
depends on CRYPTO
WEP, but it should not be for other purposes because of the
weakness of the algorithm.
-config CRYPTO_KHAZAD
- tristate "Khazad cipher algorithm"
- depends on CRYPTO
- help
- Khazad cipher algorithm.
-
- Khazad was a finalist in the initial NESSIE competition. It is
- an algorithm optimized for 64-bit processors with good performance
- on 32-bit processors. Khazad uses an 128 bit key size.
-
- See also:
- http://planeta.terra.com.br/informatica/paulobarreto/KhazadPage.html
-
config CRYPTO_DEFLATE
tristate "Deflate compression algorithm"
depends on CRYPTO
obj-$(CONFIG_CRYPTO_CAST6) += cast6.o
obj-$(CONFIG_CRYPTO_ARC4) += arc4.o
obj-$(CONFIG_CRYPTO_TEA) += tea.o
-obj-$(CONFIG_CRYPTO_KHAZAD) += khazad.o
obj-$(CONFIG_CRYPTO_DEFLATE) += deflate.o
obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o
obj-$(CONFIG_CRYPTO_CRC32C) += crc32c.o
{
struct scatter_walk walk_in, walk_out;
const unsigned int bsize = crypto_tfm_alg_blocksize(tfm);
- u8 tmp_src[bsize];
- u8 tmp_dst[bsize];
+ u8 tmp_src[nbytes > src->length ? bsize : 0];
+ u8 tmp_dst[nbytes > dst->length ? bsize : 0];
if (!nbytes)
return 0;
+++ /dev/null
-/*
- * Cryptographic API.
- *
- * Khazad Algorithm
- *
- * The Khazad algorithm was developed by Paulo S. L. M. Barreto and
- * Vincent Rijmen. It was a finalist in the NESSIE encryption contest.
- *
- * The original authors have disclaimed all copyright interest in this
- * code and thus put it in the public domain. The subsequent authors
- * have put this under the GNU General Public License.
- *
- * By Aaron Grothe ajgrothe@yahoo.com, August 1, 2004
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <asm/scatterlist.h>
-#include <linux/crypto.h>
-
-#define KHAZAD_KEY_SIZE 16
-#define KHAZAD_BLOCK_SIZE 8
-#define KHAZAD_ROUNDS 8
-
-struct khazad_ctx {
- u64 E[KHAZAD_ROUNDS + 1];
- u64 D[KHAZAD_ROUNDS + 1];
-};
-
-static const u64 T0[256] = {
- 0xbad3d268bbb96a01ULL, 0x54fc4d19e59a66b1ULL, 0x2f71bc93e26514cdULL,
- 0x749ccdb925871b51ULL, 0x53f55102f7a257a4ULL, 0xd3686bb8d0d6be03ULL,
- 0xd26b6fbdd6deb504ULL, 0x4dd72964b35285feULL, 0x50f05d0dfdba4aadULL,
- 0xace98a26cf09e063ULL, 0x8d8a0e83091c9684ULL, 0xbfdcc679a5914d1aULL,
- 0x7090ddad3da7374dULL, 0x52f65507f1aa5ca3ULL, 0x9ab352c87ba417e1ULL,
- 0x4cd42d61b55a8ef9ULL, 0xea238f65460320acULL, 0xd56273a6c4e68411ULL,
- 0x97a466f155cc68c2ULL, 0xd16e63b2dcc6a80dULL, 0x3355ccffaa85d099ULL,
- 0x51f35908fbb241aaULL, 0x5bed712ac7e20f9cULL, 0xa6f7a204f359ae55ULL,
- 0xde7f5f81febec120ULL, 0x48d83d75ad7aa2e5ULL, 0xa8e59a32d729cc7fULL,
- 0x99b65ec771bc0ae8ULL, 0xdb704b90e096e63bULL, 0x3256c8faac8ddb9eULL,
- 0xb7c4e65195d11522ULL, 0xfc19d72b32b3aaceULL, 0xe338ab48704b7393ULL,
- 0x9ebf42dc63843bfdULL, 0x91ae7eef41fc52d0ULL, 0x9bb056cd7dac1ce6ULL,
- 0xe23baf4d76437894ULL, 0xbbd0d66dbdb16106ULL, 0x41c319589b32f1daULL,
- 0x6eb2a5cb7957e517ULL, 0xa5f2ae0bf941b35cULL, 0xcb400bc08016564bULL,
- 0x6bbdb1da677fc20cULL, 0x95a26efb59dc7eccULL, 0xa1febe1fe1619f40ULL,
- 0xf308eb1810cbc3e3ULL, 0xb1cefe4f81e12f30ULL, 0x0206080a0c10160eULL,
- 0xcc4917db922e675eULL, 0xc45137f3a26e3f66ULL, 0x1d2774694ee8cf53ULL,
- 0x143c504478a09c6cULL, 0xc3582be8b0560e73ULL, 0x63a591f2573f9a34ULL,
- 0xda734f95e69eed3cULL, 0x5de76934d3d2358eULL, 0x5fe1613edfc22380ULL,
- 0xdc79578bf2aed72eULL, 0x7d87e99413cf486eULL, 0xcd4a13de94266c59ULL,
- 0x7f81e19e1fdf5e60ULL, 0x5aee752fc1ea049bULL, 0x6cb4adc17547f319ULL,
- 0x5ce46d31d5da3e89ULL, 0xf704fb0c08ebefffULL, 0x266a98bed42d47f2ULL,
- 0xff1cdb2438abb7c7ULL, 0xed2a937e543b11b9ULL, 0xe825876f4a1336a2ULL,
- 0x9dba4ed3699c26f4ULL, 0x6fb1a1ce7f5fee10ULL, 0x8e8f028c03048b8dULL,
- 0x192b647d56c8e34fULL, 0xa0fdba1ae7699447ULL, 0xf00de7171ad3deeaULL,
- 0x89861e97113cba98ULL, 0x0f113c332278692dULL, 0x07091c1b12383115ULL,
- 0xafec8629c511fd6aULL, 0xfb10cb30208b9bdbULL, 0x0818202830405838ULL,
- 0x153f54417ea8976bULL, 0x0d1734392e687f23ULL, 0x040c101418202c1cULL,
- 0x0103040506080b07ULL, 0x64ac8de94507ab21ULL, 0xdf7c5b84f8b6ca27ULL,
- 0x769ac5b329970d5fULL, 0x798bf9800bef6472ULL, 0xdd7a538ef4a6dc29ULL,
- 0x3d47f4c98ef5b2b3ULL, 0x163a584e74b08a62ULL, 0x3f41fcc382e5a4bdULL,
- 0x3759dcebb2a5fc85ULL, 0x6db7a9c4734ff81eULL, 0x3848e0d890dd95a8ULL,
- 0xb9d6de67b1a17708ULL, 0x7395d1a237bf2a44ULL, 0xe926836a4c1b3da5ULL,
- 0x355fd4e1beb5ea8bULL, 0x55ff491ce3926db6ULL, 0x7193d9a83baf3c4aULL,
- 0x7b8df18a07ff727cULL, 0x8c890a860f149d83ULL, 0x7296d5a731b72143ULL,
- 0x88851a921734b19fULL, 0xf607ff090ee3e4f8ULL, 0x2a7ea882fc4d33d6ULL,
- 0x3e42f8c684edafbaULL, 0x5ee2653bd9ca2887ULL, 0x27699cbbd2254cf5ULL,
- 0x46ca0543890ac0cfULL, 0x0c14303c28607424ULL, 0x65af89ec430fa026ULL,
- 0x68b8bdd56d67df05ULL, 0x61a399f85b2f8c3aULL, 0x03050c0f0a181d09ULL,
- 0xc15e23e2bc46187dULL, 0x57f94116ef827bb8ULL, 0xd6677fa9cefe9918ULL,
- 0xd976439aec86f035ULL, 0x58e87d25cdfa1295ULL, 0xd875479fea8efb32ULL,
- 0x66aa85e34917bd2fULL, 0xd7647bacc8f6921fULL, 0x3a4ee8d29ccd83a6ULL,
- 0xc84507cf8a0e4b42ULL, 0x3c44f0cc88fdb9b4ULL, 0xfa13cf35268390dcULL,
- 0x96a762f453c463c5ULL, 0xa7f4a601f551a552ULL, 0x98b55ac277b401efULL,
- 0xec29977b52331abeULL, 0xb8d5da62b7a97c0fULL, 0xc7543bfca876226fULL,
- 0xaeef822cc319f66dULL, 0x69bbb9d06b6fd402ULL, 0x4bdd317aa762bfecULL,
- 0xabe0963ddd31d176ULL, 0xa9e69e37d121c778ULL, 0x67a981e64f1fb628ULL,
- 0x0a1e28223c504e36ULL, 0x47c901468f02cbc8ULL, 0xf20bef1d16c3c8e4ULL,
- 0xb5c2ee5b99c1032cULL, 0x226688aacc0d6beeULL, 0xe532b356647b4981ULL,
- 0xee2f9f715e230cb0ULL, 0xbedfc27ca399461dULL, 0x2b7dac87fa4538d1ULL,
- 0x819e3ebf217ce2a0ULL, 0x1236485a6c90a67eULL, 0x839836b52d6cf4aeULL,
- 0x1b2d6c775ad8f541ULL, 0x0e1238362470622aULL, 0x23658cafca0560e9ULL,
- 0xf502f30604fbf9f1ULL, 0x45cf094c8312ddc6ULL, 0x216384a5c61576e7ULL,
- 0xce4f1fd19e3e7150ULL, 0x49db3970ab72a9e2ULL, 0x2c74b09ce87d09c4ULL,
- 0xf916c33a2c9b8dd5ULL, 0xe637bf596e635488ULL, 0xb6c7e25493d91e25ULL,
- 0x2878a088f05d25d8ULL, 0x17395c4b72b88165ULL, 0x829b32b02b64ffa9ULL,
- 0x1a2e68725cd0fe46ULL, 0x8b80169d1d2cac96ULL, 0xfe1fdf213ea3bcc0ULL,
- 0x8a8312981b24a791ULL, 0x091b242d3648533fULL, 0xc94603ca8c064045ULL,
- 0x879426a1354cd8b2ULL, 0x4ed2256bb94a98f7ULL, 0xe13ea3427c5b659dULL,
- 0x2e72b896e46d1fcaULL, 0xe431b75362734286ULL, 0xe03da7477a536e9aULL,
- 0xeb208b60400b2babULL, 0x90ad7aea47f459d7ULL, 0xa4f1aa0eff49b85bULL,
- 0x1e22786644f0d25aULL, 0x85922eab395ccebcULL, 0x60a09dfd5d27873dULL,
- 0x0000000000000000ULL, 0x256f94b1de355afbULL, 0xf401f70302f3f2f6ULL,
- 0xf10ee3121cdbd5edULL, 0x94a16afe5fd475cbULL, 0x0b1d2c273a584531ULL,
- 0xe734bb5c686b5f8fULL, 0x759fc9bc238f1056ULL, 0xef2c9b74582b07b7ULL,
- 0x345cd0e4b8bde18cULL, 0x3153c4f5a695c697ULL, 0xd46177a3c2ee8f16ULL,
- 0xd06d67b7dacea30aULL, 0x869722a43344d3b5ULL, 0x7e82e59b19d75567ULL,
- 0xadea8e23c901eb64ULL, 0xfd1ad32e34bba1c9ULL, 0x297ba48df6552edfULL,
- 0x3050c0f0a09dcd90ULL, 0x3b4decd79ac588a1ULL, 0x9fbc46d9658c30faULL,
- 0xf815c73f2a9386d2ULL, 0xc6573ff9ae7e2968ULL, 0x13354c5f6a98ad79ULL,
- 0x060a181e14303a12ULL, 0x050f14111e28271bULL, 0xc55233f6a4663461ULL,
- 0x113344556688bb77ULL, 0x7799c1b62f9f0658ULL, 0x7c84ed9115c74369ULL,
- 0x7a8ef58f01f7797bULL, 0x7888fd850de76f75ULL, 0x365ad8eeb4adf782ULL,
- 0x1c24706c48e0c454ULL, 0x394be4dd96d59eafULL, 0x59eb7920cbf21992ULL,
- 0x1828607850c0e848ULL, 0x56fa4513e98a70bfULL, 0xb3c8f6458df1393eULL,
- 0xb0cdfa4a87e92437ULL, 0x246c90b4d83d51fcULL, 0x206080a0c01d7de0ULL,
- 0xb2cbf2408bf93239ULL, 0x92ab72e04be44fd9ULL, 0xa3f8b615ed71894eULL,
- 0xc05d27e7ba4e137aULL, 0x44cc0d49851ad6c1ULL, 0x62a695f751379133ULL,
- 0x103040506080b070ULL, 0xb4c1ea5e9fc9082bULL, 0x84912aae3f54c5bbULL,
- 0x43c511529722e7d4ULL, 0x93a876e54dec44deULL, 0xc25b2fedb65e0574ULL,
- 0x4ade357fa16ab4ebULL, 0xbddace73a9815b14ULL, 0x8f8c0689050c808aULL,
- 0x2d77b499ee7502c3ULL, 0xbcd9ca76af895013ULL, 0x9cb94ad66f942df3ULL,
- 0x6abeb5df6177c90bULL, 0x40c01d5d9d3afaddULL, 0xcf4c1bd498367a57ULL,
- 0xa2fbb210eb798249ULL, 0x809d3aba2774e9a7ULL, 0x4fd1216ebf4293f0ULL,
- 0x1f217c6342f8d95dULL, 0xca430fc5861e5d4cULL, 0xaae39238db39da71ULL,
- 0x42c61557912aecd3ULL
-};
-
-static const u64 T1[256] = {
- 0xd3ba68d2b9bb016aULL, 0xfc54194d9ae5b166ULL, 0x712f93bc65e2cd14ULL,
- 0x9c74b9cd8725511bULL, 0xf5530251a2f7a457ULL, 0x68d3b86bd6d003beULL,
- 0x6bd2bd6fded604b5ULL, 0xd74d642952b3fe85ULL, 0xf0500d5dbafdad4aULL,
- 0xe9ac268a09cf63e0ULL, 0x8a8d830e1c098496ULL, 0xdcbf79c691a51a4dULL,
- 0x9070addda73d4d37ULL, 0xf6520755aaf1a35cULL, 0xb39ac852a47be117ULL,
- 0xd44c612d5ab5f98eULL, 0x23ea658f0346ac20ULL, 0x62d5a673e6c41184ULL,
- 0xa497f166cc55c268ULL, 0x6ed1b263c6dc0da8ULL, 0x5533ffcc85aa99d0ULL,
- 0xf3510859b2fbaa41ULL, 0xed5b2a71e2c79c0fULL, 0xf7a604a259f355aeULL,
- 0x7fde815fbefe20c1ULL, 0xd848753d7aade5a2ULL, 0xe5a8329a29d77fccULL,
- 0xb699c75ebc71e80aULL, 0x70db904b96e03be6ULL, 0x5632fac88dac9edbULL,
- 0xc4b751e6d1952215ULL, 0x19fc2bd7b332ceaaULL, 0x38e348ab4b709373ULL,
- 0xbf9edc428463fd3bULL, 0xae91ef7efc41d052ULL, 0xb09bcd56ac7de61cULL,
- 0x3be24daf43769478ULL, 0xd0bb6dd6b1bd0661ULL, 0xc3415819329bdaf1ULL,
- 0xb26ecba5577917e5ULL, 0xf2a50bae41f95cb3ULL, 0x40cbc00b16804b56ULL,
- 0xbd6bdab17f670cc2ULL, 0xa295fb6edc59cc7eULL, 0xfea11fbe61e1409fULL,
- 0x08f318ebcb10e3c3ULL, 0xceb14ffee181302fULL, 0x06020a08100c0e16ULL,
- 0x49ccdb172e925e67ULL, 0x51c4f3376ea2663fULL, 0x271d6974e84e53cfULL,
- 0x3c144450a0786c9cULL, 0x58c3e82b56b0730eULL, 0xa563f2913f57349aULL,
- 0x73da954f9ee63cedULL, 0xe75d3469d2d38e35ULL, 0xe15f3e61c2df8023ULL,
- 0x79dc8b57aef22ed7ULL, 0x877d94e9cf136e48ULL, 0x4acdde132694596cULL,
- 0x817f9ee1df1f605eULL, 0xee5a2f75eac19b04ULL, 0xb46cc1ad477519f3ULL,
- 0xe45c316ddad5893eULL, 0x04f70cfbeb08ffefULL, 0x6a26be982dd4f247ULL,
- 0x1cff24dbab38c7b7ULL, 0x2aed7e933b54b911ULL, 0x25e86f87134aa236ULL,
- 0xba9dd34e9c69f426ULL, 0xb16fcea15f7f10eeULL, 0x8f8e8c0204038d8bULL,
- 0x2b197d64c8564fe3ULL, 0xfda01aba69e74794ULL, 0x0df017e7d31aeadeULL,
- 0x8689971e3c1198baULL, 0x110f333c78222d69ULL, 0x09071b1c38121531ULL,
- 0xecaf298611c56afdULL, 0x10fb30cb8b20db9bULL, 0x1808282040303858ULL,
- 0x3f154154a87e6b97ULL, 0x170d3934682e237fULL, 0x0c04141020181c2cULL,
- 0x030105040806070bULL, 0xac64e98d074521abULL, 0x7cdf845bb6f827caULL,
- 0x9a76b3c597295f0dULL, 0x8b7980f9ef0b7264ULL, 0x7add8e53a6f429dcULL,
- 0x473dc9f4f58eb3b2ULL, 0x3a164e58b074628aULL, 0x413fc3fce582bda4ULL,
- 0x5937ebdca5b285fcULL, 0xb76dc4a94f731ef8ULL, 0x4838d8e0dd90a895ULL,
- 0xd6b967dea1b10877ULL, 0x9573a2d1bf37442aULL, 0x26e96a831b4ca53dULL,
- 0x5f35e1d4b5be8beaULL, 0xff551c4992e3b66dULL, 0x9371a8d9af3b4a3cULL,
- 0x8d7b8af1ff077c72ULL, 0x898c860a140f839dULL, 0x9672a7d5b7314321ULL,
- 0x8588921a34179fb1ULL, 0x07f609ffe30ef8e4ULL, 0x7e2a82a84dfcd633ULL,
- 0x423ec6f8ed84baafULL, 0xe25e3b65cad98728ULL, 0x6927bb9c25d2f54cULL,
- 0xca4643050a89cfc0ULL, 0x140c3c3060282474ULL, 0xaf65ec890f4326a0ULL,
- 0xb868d5bd676d05dfULL, 0xa361f8992f5b3a8cULL, 0x05030f0c180a091dULL,
- 0x5ec1e22346bc7d18ULL, 0xf957164182efb87bULL, 0x67d6a97ffece1899ULL,
- 0x76d99a4386ec35f0ULL, 0xe858257dfacd9512ULL, 0x75d89f478eea32fbULL,
- 0xaa66e38517492fbdULL, 0x64d7ac7bf6c81f92ULL, 0x4e3ad2e8cd9ca683ULL,
- 0x45c8cf070e8a424bULL, 0x443cccf0fd88b4b9ULL, 0x13fa35cf8326dc90ULL,
- 0xa796f462c453c563ULL, 0xf4a701a651f552a5ULL, 0xb598c25ab477ef01ULL,
- 0x29ec7b973352be1aULL, 0xd5b862daa9b70f7cULL, 0x54c7fc3b76a86f22ULL,
- 0xefae2c8219c36df6ULL, 0xbb69d0b96f6b02d4ULL, 0xdd4b7a3162a7ecbfULL,
- 0xe0ab3d9631dd76d1ULL, 0xe6a9379e21d178c7ULL, 0xa967e6811f4f28b6ULL,
- 0x1e0a2228503c364eULL, 0xc9474601028fc8cbULL, 0x0bf21defc316e4c8ULL,
- 0xc2b55beec1992c03ULL, 0x6622aa880dccee6bULL, 0x32e556b37b648149ULL,
- 0x2fee719f235eb00cULL, 0xdfbe7cc299a31d46ULL, 0x7d2b87ac45fad138ULL,
- 0x9e81bf3e7c21a0e2ULL, 0x36125a48906c7ea6ULL, 0x9883b5366c2daef4ULL,
- 0x2d1b776cd85a41f5ULL, 0x120e363870242a62ULL, 0x6523af8c05cae960ULL,
- 0x02f506f3fb04f1f9ULL, 0xcf454c091283c6ddULL, 0x6321a58415c6e776ULL,
- 0x4fced11f3e9e5071ULL, 0xdb49703972abe2a9ULL, 0x742c9cb07de8c409ULL,
- 0x16f93ac39b2cd58dULL, 0x37e659bf636e8854ULL, 0xc7b654e2d993251eULL,
- 0x782888a05df0d825ULL, 0x39174b5cb8726581ULL, 0x9b82b032642ba9ffULL,
- 0x2e1a7268d05c46feULL, 0x808b9d162c1d96acULL, 0x1ffe21dfa33ec0bcULL,
- 0x838a9812241b91a7ULL, 0x1b092d2448363f53ULL, 0x46c9ca03068c4540ULL,
- 0x9487a1264c35b2d8ULL, 0xd24e6b254ab9f798ULL, 0x3ee142a35b7c9d65ULL,
- 0x722e96b86de4ca1fULL, 0x31e453b773628642ULL, 0x3de047a7537a9a6eULL,
- 0x20eb608b0b40ab2bULL, 0xad90ea7af447d759ULL, 0xf1a40eaa49ff5bb8ULL,
- 0x221e6678f0445ad2ULL, 0x9285ab2e5c39bcceULL, 0xa060fd9d275d3d87ULL,
- 0x0000000000000000ULL, 0x6f25b19435defb5aULL, 0x01f403f7f302f6f2ULL,
- 0x0ef112e3db1cedd5ULL, 0xa194fe6ad45fcb75ULL, 0x1d0b272c583a3145ULL,
- 0x34e75cbb6b688f5fULL, 0x9f75bcc98f235610ULL, 0x2cef749b2b58b707ULL,
- 0x5c34e4d0bdb88ce1ULL, 0x5331f5c495a697c6ULL, 0x61d4a377eec2168fULL,
- 0x6dd0b767ceda0aa3ULL, 0x9786a4224433b5d3ULL, 0x827e9be5d7196755ULL,
- 0xeaad238e01c964ebULL, 0x1afd2ed3bb34c9a1ULL, 0x7b298da455f6df2eULL,
- 0x5030f0c09da090cdULL, 0x4d3bd7ecc59aa188ULL, 0xbc9fd9468c65fa30ULL,
- 0x15f83fc7932ad286ULL, 0x57c6f93f7eae6829ULL, 0x35135f4c986a79adULL,
- 0x0a061e183014123aULL, 0x0f051114281e1b27ULL, 0x52c5f63366a46134ULL,
- 0x33115544886677bbULL, 0x9977b6c19f2f5806ULL, 0x847c91edc7156943ULL,
- 0x8e7a8ff5f7017b79ULL, 0x887885fde70d756fULL, 0x5a36eed8adb482f7ULL,
- 0x241c6c70e04854c4ULL, 0x4b39dde4d596af9eULL, 0xeb592079f2cb9219ULL,
- 0x28187860c05048e8ULL, 0xfa5613458ae9bf70ULL, 0xc8b345f6f18d3e39ULL,
- 0xcdb04afae9873724ULL, 0x6c24b4903dd8fc51ULL, 0x6020a0801dc0e07dULL,
- 0xcbb240f2f98b3932ULL, 0xab92e072e44bd94fULL, 0xf8a315b671ed4e89ULL,
- 0x5dc0e7274eba7a13ULL, 0xcc44490d1a85c1d6ULL, 0xa662f79537513391ULL,
- 0x30105040806070b0ULL, 0xc1b45eeac99f2b08ULL, 0x9184ae2a543fbbc5ULL,
- 0xc54352112297d4e7ULL, 0xa893e576ec4dde44ULL, 0x5bc2ed2f5eb67405ULL,
- 0xde4a7f356aa1ebb4ULL, 0xdabd73ce81a9145bULL, 0x8c8f89060c058a80ULL,
- 0x772d99b475eec302ULL, 0xd9bc76ca89af1350ULL, 0xb99cd64a946ff32dULL,
- 0xbe6adfb577610bc9ULL, 0xc0405d1d3a9dddfaULL, 0x4ccfd41b3698577aULL,
- 0xfba210b279eb4982ULL, 0x9d80ba3a7427a7e9ULL, 0xd14f6e2142bff093ULL,
- 0x211f637cf8425dd9ULL, 0x43cac50f1e864c5dULL, 0xe3aa389239db71daULL,
- 0xc64257152a91d3ecULL
-};
-
-static const u64 T2[256] = {
- 0xd268bad36a01bbb9ULL, 0x4d1954fc66b1e59aULL, 0xbc932f7114cde265ULL,
- 0xcdb9749c1b512587ULL, 0x510253f557a4f7a2ULL, 0x6bb8d368be03d0d6ULL,
- 0x6fbdd26bb504d6deULL, 0x29644dd785feb352ULL, 0x5d0d50f04aadfdbaULL,
- 0x8a26ace9e063cf09ULL, 0x0e838d8a9684091cULL, 0xc679bfdc4d1aa591ULL,
- 0xddad7090374d3da7ULL, 0x550752f65ca3f1aaULL, 0x52c89ab317e17ba4ULL,
- 0x2d614cd48ef9b55aULL, 0x8f65ea2320ac4603ULL, 0x73a6d5628411c4e6ULL,
- 0x66f197a468c255ccULL, 0x63b2d16ea80ddcc6ULL, 0xccff3355d099aa85ULL,
- 0x590851f341aafbb2ULL, 0x712a5bed0f9cc7e2ULL, 0xa204a6f7ae55f359ULL,
- 0x5f81de7fc120febeULL, 0x3d7548d8a2e5ad7aULL, 0x9a32a8e5cc7fd729ULL,
- 0x5ec799b60ae871bcULL, 0x4b90db70e63be096ULL, 0xc8fa3256db9eac8dULL,
- 0xe651b7c4152295d1ULL, 0xd72bfc19aace32b3ULL, 0xab48e3387393704bULL,
- 0x42dc9ebf3bfd6384ULL, 0x7eef91ae52d041fcULL, 0x56cd9bb01ce67dacULL,
- 0xaf4de23b78947643ULL, 0xd66dbbd06106bdb1ULL, 0x195841c3f1da9b32ULL,
- 0xa5cb6eb2e5177957ULL, 0xae0ba5f2b35cf941ULL, 0x0bc0cb40564b8016ULL,
- 0xb1da6bbdc20c677fULL, 0x6efb95a27ecc59dcULL, 0xbe1fa1fe9f40e161ULL,
- 0xeb18f308c3e310cbULL, 0xfe4fb1ce2f3081e1ULL, 0x080a0206160e0c10ULL,
- 0x17dbcc49675e922eULL, 0x37f3c4513f66a26eULL, 0x74691d27cf534ee8ULL,
- 0x5044143c9c6c78a0ULL, 0x2be8c3580e73b056ULL, 0x91f263a59a34573fULL,
- 0x4f95da73ed3ce69eULL, 0x69345de7358ed3d2ULL, 0x613e5fe12380dfc2ULL,
- 0x578bdc79d72ef2aeULL, 0xe9947d87486e13cfULL, 0x13decd4a6c599426ULL,
- 0xe19e7f815e601fdfULL, 0x752f5aee049bc1eaULL, 0xadc16cb4f3197547ULL,
- 0x6d315ce43e89d5daULL, 0xfb0cf704efff08ebULL, 0x98be266a47f2d42dULL,
- 0xdb24ff1cb7c738abULL, 0x937eed2a11b9543bULL, 0x876fe82536a24a13ULL,
- 0x4ed39dba26f4699cULL, 0xa1ce6fb1ee107f5fULL, 0x028c8e8f8b8d0304ULL,
- 0x647d192be34f56c8ULL, 0xba1aa0fd9447e769ULL, 0xe717f00ddeea1ad3ULL,
- 0x1e978986ba98113cULL, 0x3c330f11692d2278ULL, 0x1c1b070931151238ULL,
- 0x8629afecfd6ac511ULL, 0xcb30fb109bdb208bULL, 0x2028081858383040ULL,
- 0x5441153f976b7ea8ULL, 0x34390d177f232e68ULL, 0x1014040c2c1c1820ULL,
- 0x040501030b070608ULL, 0x8de964acab214507ULL, 0x5b84df7cca27f8b6ULL,
- 0xc5b3769a0d5f2997ULL, 0xf980798b64720befULL, 0x538edd7adc29f4a6ULL,
- 0xf4c93d47b2b38ef5ULL, 0x584e163a8a6274b0ULL, 0xfcc33f41a4bd82e5ULL,
- 0xdceb3759fc85b2a5ULL, 0xa9c46db7f81e734fULL, 0xe0d8384895a890ddULL,
- 0xde67b9d67708b1a1ULL, 0xd1a273952a4437bfULL, 0x836ae9263da54c1bULL,
- 0xd4e1355fea8bbeb5ULL, 0x491c55ff6db6e392ULL, 0xd9a871933c4a3bafULL,
- 0xf18a7b8d727c07ffULL, 0x0a868c899d830f14ULL, 0xd5a77296214331b7ULL,
- 0x1a928885b19f1734ULL, 0xff09f607e4f80ee3ULL, 0xa8822a7e33d6fc4dULL,
- 0xf8c63e42afba84edULL, 0x653b5ee22887d9caULL, 0x9cbb27694cf5d225ULL,
- 0x054346cac0cf890aULL, 0x303c0c1474242860ULL, 0x89ec65afa026430fULL,
- 0xbdd568b8df056d67ULL, 0x99f861a38c3a5b2fULL, 0x0c0f03051d090a18ULL,
- 0x23e2c15e187dbc46ULL, 0x411657f97bb8ef82ULL, 0x7fa9d6679918cefeULL,
- 0x439ad976f035ec86ULL, 0x7d2558e81295cdfaULL, 0x479fd875fb32ea8eULL,
- 0x85e366aabd2f4917ULL, 0x7bacd764921fc8f6ULL, 0xe8d23a4e83a69ccdULL,
- 0x07cfc8454b428a0eULL, 0xf0cc3c44b9b488fdULL, 0xcf35fa1390dc2683ULL,
- 0x62f496a763c553c4ULL, 0xa601a7f4a552f551ULL, 0x5ac298b501ef77b4ULL,
- 0x977bec291abe5233ULL, 0xda62b8d57c0fb7a9ULL, 0x3bfcc754226fa876ULL,
- 0x822caeeff66dc319ULL, 0xb9d069bbd4026b6fULL, 0x317a4bddbfeca762ULL,
- 0x963dabe0d176dd31ULL, 0x9e37a9e6c778d121ULL, 0x81e667a9b6284f1fULL,
- 0x28220a1e4e363c50ULL, 0x014647c9cbc88f02ULL, 0xef1df20bc8e416c3ULL,
- 0xee5bb5c2032c99c1ULL, 0x88aa22666beecc0dULL, 0xb356e5324981647bULL,
- 0x9f71ee2f0cb05e23ULL, 0xc27cbedf461da399ULL, 0xac872b7d38d1fa45ULL,
- 0x3ebf819ee2a0217cULL, 0x485a1236a67e6c90ULL, 0x36b58398f4ae2d6cULL,
- 0x6c771b2df5415ad8ULL, 0x38360e12622a2470ULL, 0x8caf236560e9ca05ULL,
- 0xf306f502f9f104fbULL, 0x094c45cfddc68312ULL, 0x84a5216376e7c615ULL,
- 0x1fd1ce4f71509e3eULL, 0x397049dba9e2ab72ULL, 0xb09c2c7409c4e87dULL,
- 0xc33af9168dd52c9bULL, 0xbf59e63754886e63ULL, 0xe254b6c71e2593d9ULL,
- 0xa088287825d8f05dULL, 0x5c4b1739816572b8ULL, 0x32b0829bffa92b64ULL,
- 0x68721a2efe465cd0ULL, 0x169d8b80ac961d2cULL, 0xdf21fe1fbcc03ea3ULL,
- 0x12988a83a7911b24ULL, 0x242d091b533f3648ULL, 0x03cac94640458c06ULL,
- 0x26a18794d8b2354cULL, 0x256b4ed298f7b94aULL, 0xa342e13e659d7c5bULL,
- 0xb8962e721fcae46dULL, 0xb753e43142866273ULL, 0xa747e03d6e9a7a53ULL,
- 0x8b60eb202bab400bULL, 0x7aea90ad59d747f4ULL, 0xaa0ea4f1b85bff49ULL,
- 0x78661e22d25a44f0ULL, 0x2eab8592cebc395cULL, 0x9dfd60a0873d5d27ULL,
- 0x0000000000000000ULL, 0x94b1256f5afbde35ULL, 0xf703f401f2f602f3ULL,
- 0xe312f10ed5ed1cdbULL, 0x6afe94a175cb5fd4ULL, 0x2c270b1d45313a58ULL,
- 0xbb5ce7345f8f686bULL, 0xc9bc759f1056238fULL, 0x9b74ef2c07b7582bULL,
- 0xd0e4345ce18cb8bdULL, 0xc4f53153c697a695ULL, 0x77a3d4618f16c2eeULL,
- 0x67b7d06da30adaceULL, 0x22a48697d3b53344ULL, 0xe59b7e82556719d7ULL,
- 0x8e23adeaeb64c901ULL, 0xd32efd1aa1c934bbULL, 0xa48d297b2edff655ULL,
- 0xc0f03050cd90a09dULL, 0xecd73b4d88a19ac5ULL, 0x46d99fbc30fa658cULL,
- 0xc73ff81586d22a93ULL, 0x3ff9c6572968ae7eULL, 0x4c5f1335ad796a98ULL,
- 0x181e060a3a121430ULL, 0x1411050f271b1e28ULL, 0x33f6c5523461a466ULL,
- 0x44551133bb776688ULL, 0xc1b6779906582f9fULL, 0xed917c84436915c7ULL,
- 0xf58f7a8e797b01f7ULL, 0xfd8578886f750de7ULL, 0xd8ee365af782b4adULL,
- 0x706c1c24c45448e0ULL, 0xe4dd394b9eaf96d5ULL, 0x792059eb1992cbf2ULL,
- 0x60781828e84850c0ULL, 0x451356fa70bfe98aULL, 0xf645b3c8393e8df1ULL,
- 0xfa4ab0cd243787e9ULL, 0x90b4246c51fcd83dULL, 0x80a020607de0c01dULL,
- 0xf240b2cb32398bf9ULL, 0x72e092ab4fd94be4ULL, 0xb615a3f8894eed71ULL,
- 0x27e7c05d137aba4eULL, 0x0d4944ccd6c1851aULL, 0x95f762a691335137ULL,
- 0x40501030b0706080ULL, 0xea5eb4c1082b9fc9ULL, 0x2aae8491c5bb3f54ULL,
- 0x115243c5e7d49722ULL, 0x76e593a844de4decULL, 0x2fedc25b0574b65eULL,
- 0x357f4adeb4eba16aULL, 0xce73bdda5b14a981ULL, 0x06898f8c808a050cULL,
- 0xb4992d7702c3ee75ULL, 0xca76bcd95013af89ULL, 0x4ad69cb92df36f94ULL,
- 0xb5df6abec90b6177ULL, 0x1d5d40c0fadd9d3aULL, 0x1bd4cf4c7a579836ULL,
- 0xb210a2fb8249eb79ULL, 0x3aba809de9a72774ULL, 0x216e4fd193f0bf42ULL,
- 0x7c631f21d95d42f8ULL, 0x0fc5ca435d4c861eULL, 0x9238aae3da71db39ULL,
- 0x155742c6ecd3912aULL
-};
-
-static const u64 T3[256] = {
- 0x68d2d3ba016ab9bbULL, 0x194dfc54b1669ae5ULL, 0x93bc712fcd1465e2ULL,
- 0xb9cd9c74511b8725ULL, 0x0251f553a457a2f7ULL, 0xb86b68d303bed6d0ULL,
- 0xbd6f6bd204b5ded6ULL, 0x6429d74dfe8552b3ULL, 0x0d5df050ad4abafdULL,
- 0x268ae9ac63e009cfULL, 0x830e8a8d84961c09ULL, 0x79c6dcbf1a4d91a5ULL,
- 0xaddd90704d37a73dULL, 0x0755f652a35caaf1ULL, 0xc852b39ae117a47bULL,
- 0x612dd44cf98e5ab5ULL, 0x658f23eaac200346ULL, 0xa67362d51184e6c4ULL,
- 0xf166a497c268cc55ULL, 0xb2636ed10da8c6dcULL, 0xffcc553399d085aaULL,
- 0x0859f351aa41b2fbULL, 0x2a71ed5b9c0fe2c7ULL, 0x04a2f7a655ae59f3ULL,
- 0x815f7fde20c1befeULL, 0x753dd848e5a27aadULL, 0x329ae5a87fcc29d7ULL,
- 0xc75eb699e80abc71ULL, 0x904b70db3be696e0ULL, 0xfac856329edb8dacULL,
- 0x51e6c4b72215d195ULL, 0x2bd719fcceaab332ULL, 0x48ab38e393734b70ULL,
- 0xdc42bf9efd3b8463ULL, 0xef7eae91d052fc41ULL, 0xcd56b09be61cac7dULL,
- 0x4daf3be294784376ULL, 0x6dd6d0bb0661b1bdULL, 0x5819c341daf1329bULL,
- 0xcba5b26e17e55779ULL, 0x0baef2a55cb341f9ULL, 0xc00b40cb4b561680ULL,
- 0xdab1bd6b0cc27f67ULL, 0xfb6ea295cc7edc59ULL, 0x1fbefea1409f61e1ULL,
- 0x18eb08f3e3c3cb10ULL, 0x4ffeceb1302fe181ULL, 0x0a0806020e16100cULL,
- 0xdb1749cc5e672e92ULL, 0xf33751c4663f6ea2ULL, 0x6974271d53cfe84eULL,
- 0x44503c146c9ca078ULL, 0xe82b58c3730e56b0ULL, 0xf291a563349a3f57ULL,
- 0x954f73da3ced9ee6ULL, 0x3469e75d8e35d2d3ULL, 0x3e61e15f8023c2dfULL,
- 0x8b5779dc2ed7aef2ULL, 0x94e9877d6e48cf13ULL, 0xde134acd596c2694ULL,
- 0x9ee1817f605edf1fULL, 0x2f75ee5a9b04eac1ULL, 0xc1adb46c19f34775ULL,
- 0x316de45c893edad5ULL, 0x0cfb04f7ffefeb08ULL, 0xbe986a26f2472dd4ULL,
- 0x24db1cffc7b7ab38ULL, 0x7e932aedb9113b54ULL, 0x6f8725e8a236134aULL,
- 0xd34eba9df4269c69ULL, 0xcea1b16f10ee5f7fULL, 0x8c028f8e8d8b0403ULL,
- 0x7d642b194fe3c856ULL, 0x1abafda0479469e7ULL, 0x17e70df0eaded31aULL,
- 0x971e868998ba3c11ULL, 0x333c110f2d697822ULL, 0x1b1c090715313812ULL,
- 0x2986ecaf6afd11c5ULL, 0x30cb10fbdb9b8b20ULL, 0x2820180838584030ULL,
- 0x41543f156b97a87eULL, 0x3934170d237f682eULL, 0x14100c041c2c2018ULL,
- 0x05040301070b0806ULL, 0xe98dac6421ab0745ULL, 0x845b7cdf27cab6f8ULL,
- 0xb3c59a765f0d9729ULL, 0x80f98b797264ef0bULL, 0x8e537add29dca6f4ULL,
- 0xc9f4473db3b2f58eULL, 0x4e583a16628ab074ULL, 0xc3fc413fbda4e582ULL,
- 0xebdc593785fca5b2ULL, 0xc4a9b76d1ef84f73ULL, 0xd8e04838a895dd90ULL,
- 0x67ded6b90877a1b1ULL, 0xa2d19573442abf37ULL, 0x6a8326e9a53d1b4cULL,
- 0xe1d45f358beab5beULL, 0x1c49ff55b66d92e3ULL, 0xa8d993714a3caf3bULL,
- 0x8af18d7b7c72ff07ULL, 0x860a898c839d140fULL, 0xa7d596724321b731ULL,
- 0x921a85889fb13417ULL, 0x09ff07f6f8e4e30eULL, 0x82a87e2ad6334dfcULL,
- 0xc6f8423ebaafed84ULL, 0x3b65e25e8728cad9ULL, 0xbb9c6927f54c25d2ULL,
- 0x4305ca46cfc00a89ULL, 0x3c30140c24746028ULL, 0xec89af6526a00f43ULL,
- 0xd5bdb86805df676dULL, 0xf899a3613a8c2f5bULL, 0x0f0c0503091d180aULL,
- 0xe2235ec17d1846bcULL, 0x1641f957b87b82efULL, 0xa97f67d61899feceULL,
- 0x9a4376d935f086ecULL, 0x257de8589512facdULL, 0x9f4775d832fb8eeaULL,
- 0xe385aa662fbd1749ULL, 0xac7b64d71f92f6c8ULL, 0xd2e84e3aa683cd9cULL,
- 0xcf0745c8424b0e8aULL, 0xccf0443cb4b9fd88ULL, 0x35cf13fadc908326ULL,
- 0xf462a796c563c453ULL, 0x01a6f4a752a551f5ULL, 0xc25ab598ef01b477ULL,
- 0x7b9729ecbe1a3352ULL, 0x62dad5b80f7ca9b7ULL, 0xfc3b54c76f2276a8ULL,
- 0x2c82efae6df619c3ULL, 0xd0b9bb6902d46f6bULL, 0x7a31dd4becbf62a7ULL,
- 0x3d96e0ab76d131ddULL, 0x379ee6a978c721d1ULL, 0xe681a96728b61f4fULL,
- 0x22281e0a364e503cULL, 0x4601c947c8cb028fULL, 0x1def0bf2e4c8c316ULL,
- 0x5beec2b52c03c199ULL, 0xaa886622ee6b0dccULL, 0x56b332e581497b64ULL,
- 0x719f2feeb00c235eULL, 0x7cc2dfbe1d4699a3ULL, 0x87ac7d2bd13845faULL,
- 0xbf3e9e81a0e27c21ULL, 0x5a4836127ea6906cULL, 0xb5369883aef46c2dULL,
- 0x776c2d1b41f5d85aULL, 0x3638120e2a627024ULL, 0xaf8c6523e96005caULL,
- 0x06f302f5f1f9fb04ULL, 0x4c09cf45c6dd1283ULL, 0xa5846321e77615c6ULL,
- 0xd11f4fce50713e9eULL, 0x7039db49e2a972abULL, 0x9cb0742cc4097de8ULL,
- 0x3ac316f9d58d9b2cULL, 0x59bf37e68854636eULL, 0x54e2c7b6251ed993ULL,
- 0x88a07828d8255df0ULL, 0x4b5c39176581b872ULL, 0xb0329b82a9ff642bULL,
- 0x72682e1a46fed05cULL, 0x9d16808b96ac2c1dULL, 0x21df1ffec0bca33eULL,
- 0x9812838a91a7241bULL, 0x2d241b093f534836ULL, 0xca0346c94540068cULL,
- 0xa1269487b2d84c35ULL, 0x6b25d24ef7984ab9ULL, 0x42a33ee19d655b7cULL,
- 0x96b8722eca1f6de4ULL, 0x53b731e486427362ULL, 0x47a73de09a6e537aULL,
- 0x608b20ebab2b0b40ULL, 0xea7aad90d759f447ULL, 0x0eaaf1a45bb849ffULL,
- 0x6678221e5ad2f044ULL, 0xab2e9285bcce5c39ULL, 0xfd9da0603d87275dULL,
- 0x0000000000000000ULL, 0xb1946f25fb5a35deULL, 0x03f701f4f6f2f302ULL,
- 0x12e30ef1edd5db1cULL, 0xfe6aa194cb75d45fULL, 0x272c1d0b3145583aULL,
- 0x5cbb34e78f5f6b68ULL, 0xbcc99f7556108f23ULL, 0x749b2cefb7072b58ULL,
- 0xe4d05c348ce1bdb8ULL, 0xf5c4533197c695a6ULL, 0xa37761d4168feec2ULL,
- 0xb7676dd00aa3cedaULL, 0xa4229786b5d34433ULL, 0x9be5827e6755d719ULL,
- 0x238eeaad64eb01c9ULL, 0x2ed31afdc9a1bb34ULL, 0x8da47b29df2e55f6ULL,
- 0xf0c0503090cd9da0ULL, 0xd7ec4d3ba188c59aULL, 0xd946bc9ffa308c65ULL,
- 0x3fc715f8d286932aULL, 0xf93f57c668297eaeULL, 0x5f4c351379ad986aULL,
- 0x1e180a06123a3014ULL, 0x11140f051b27281eULL, 0xf63352c5613466a4ULL,
- 0x5544331177bb8866ULL, 0xb6c1997758069f2fULL, 0x91ed847c6943c715ULL,
- 0x8ff58e7a7b79f701ULL, 0x85fd8878756fe70dULL, 0xeed85a3682f7adb4ULL,
- 0x6c70241c54c4e048ULL, 0xdde44b39af9ed596ULL, 0x2079eb599219f2cbULL,
- 0x7860281848e8c050ULL, 0x1345fa56bf708ae9ULL, 0x45f6c8b33e39f18dULL,
- 0x4afacdb03724e987ULL, 0xb4906c24fc513dd8ULL, 0xa0806020e07d1dc0ULL,
- 0x40f2cbb23932f98bULL, 0xe072ab92d94fe44bULL, 0x15b6f8a34e8971edULL,
- 0xe7275dc07a134ebaULL, 0x490dcc44c1d61a85ULL, 0xf795a66233913751ULL,
- 0x5040301070b08060ULL, 0x5eeac1b42b08c99fULL, 0xae2a9184bbc5543fULL,
- 0x5211c543d4e72297ULL, 0xe576a893de44ec4dULL, 0xed2f5bc274055eb6ULL,
- 0x7f35de4aebb46aa1ULL, 0x73cedabd145b81a9ULL, 0x89068c8f8a800c05ULL,
- 0x99b4772dc30275eeULL, 0x76cad9bc135089afULL, 0xd64ab99cf32d946fULL,
- 0xdfb5be6a0bc97761ULL, 0x5d1dc040ddfa3a9dULL, 0xd41b4ccf577a3698ULL,
- 0x10b2fba2498279ebULL, 0xba3a9d80a7e97427ULL, 0x6e21d14ff09342bfULL,
- 0x637c211f5dd9f842ULL, 0xc50f43ca4c5d1e86ULL, 0x3892e3aa71da39dbULL,
- 0x5715c642d3ec2a91ULL
-};
-
-static const u64 T4[256] = {
- 0xbbb96a01bad3d268ULL, 0xe59a66b154fc4d19ULL, 0xe26514cd2f71bc93ULL,
- 0x25871b51749ccdb9ULL, 0xf7a257a453f55102ULL, 0xd0d6be03d3686bb8ULL,
- 0xd6deb504d26b6fbdULL, 0xb35285fe4dd72964ULL, 0xfdba4aad50f05d0dULL,
- 0xcf09e063ace98a26ULL, 0x091c96848d8a0e83ULL, 0xa5914d1abfdcc679ULL,
- 0x3da7374d7090ddadULL, 0xf1aa5ca352f65507ULL, 0x7ba417e19ab352c8ULL,
- 0xb55a8ef94cd42d61ULL, 0x460320acea238f65ULL, 0xc4e68411d56273a6ULL,
- 0x55cc68c297a466f1ULL, 0xdcc6a80dd16e63b2ULL, 0xaa85d0993355ccffULL,
- 0xfbb241aa51f35908ULL, 0xc7e20f9c5bed712aULL, 0xf359ae55a6f7a204ULL,
- 0xfebec120de7f5f81ULL, 0xad7aa2e548d83d75ULL, 0xd729cc7fa8e59a32ULL,
- 0x71bc0ae899b65ec7ULL, 0xe096e63bdb704b90ULL, 0xac8ddb9e3256c8faULL,
- 0x95d11522b7c4e651ULL, 0x32b3aacefc19d72bULL, 0x704b7393e338ab48ULL,
- 0x63843bfd9ebf42dcULL, 0x41fc52d091ae7eefULL, 0x7dac1ce69bb056cdULL,
- 0x76437894e23baf4dULL, 0xbdb16106bbd0d66dULL, 0x9b32f1da41c31958ULL,
- 0x7957e5176eb2a5cbULL, 0xf941b35ca5f2ae0bULL, 0x8016564bcb400bc0ULL,
- 0x677fc20c6bbdb1daULL, 0x59dc7ecc95a26efbULL, 0xe1619f40a1febe1fULL,
- 0x10cbc3e3f308eb18ULL, 0x81e12f30b1cefe4fULL, 0x0c10160e0206080aULL,
- 0x922e675ecc4917dbULL, 0xa26e3f66c45137f3ULL, 0x4ee8cf531d277469ULL,
- 0x78a09c6c143c5044ULL, 0xb0560e73c3582be8ULL, 0x573f9a3463a591f2ULL,
- 0xe69eed3cda734f95ULL, 0xd3d2358e5de76934ULL, 0xdfc223805fe1613eULL,
- 0xf2aed72edc79578bULL, 0x13cf486e7d87e994ULL, 0x94266c59cd4a13deULL,
- 0x1fdf5e607f81e19eULL, 0xc1ea049b5aee752fULL, 0x7547f3196cb4adc1ULL,
- 0xd5da3e895ce46d31ULL, 0x08ebeffff704fb0cULL, 0xd42d47f2266a98beULL,
- 0x38abb7c7ff1cdb24ULL, 0x543b11b9ed2a937eULL, 0x4a1336a2e825876fULL,
- 0x699c26f49dba4ed3ULL, 0x7f5fee106fb1a1ceULL, 0x03048b8d8e8f028cULL,
- 0x56c8e34f192b647dULL, 0xe7699447a0fdba1aULL, 0x1ad3deeaf00de717ULL,
- 0x113cba9889861e97ULL, 0x2278692d0f113c33ULL, 0x1238311507091c1bULL,
- 0xc511fd6aafec8629ULL, 0x208b9bdbfb10cb30ULL, 0x3040583808182028ULL,
- 0x7ea8976b153f5441ULL, 0x2e687f230d173439ULL, 0x18202c1c040c1014ULL,
- 0x06080b0701030405ULL, 0x4507ab2164ac8de9ULL, 0xf8b6ca27df7c5b84ULL,
- 0x29970d5f769ac5b3ULL, 0x0bef6472798bf980ULL, 0xf4a6dc29dd7a538eULL,
- 0x8ef5b2b33d47f4c9ULL, 0x74b08a62163a584eULL, 0x82e5a4bd3f41fcc3ULL,
- 0xb2a5fc853759dcebULL, 0x734ff81e6db7a9c4ULL, 0x90dd95a83848e0d8ULL,
- 0xb1a17708b9d6de67ULL, 0x37bf2a447395d1a2ULL, 0x4c1b3da5e926836aULL,
- 0xbeb5ea8b355fd4e1ULL, 0xe3926db655ff491cULL, 0x3baf3c4a7193d9a8ULL,
- 0x07ff727c7b8df18aULL, 0x0f149d838c890a86ULL, 0x31b721437296d5a7ULL,
- 0x1734b19f88851a92ULL, 0x0ee3e4f8f607ff09ULL, 0xfc4d33d62a7ea882ULL,
- 0x84edafba3e42f8c6ULL, 0xd9ca28875ee2653bULL, 0xd2254cf527699cbbULL,
- 0x890ac0cf46ca0543ULL, 0x286074240c14303cULL, 0x430fa02665af89ecULL,
- 0x6d67df0568b8bdd5ULL, 0x5b2f8c3a61a399f8ULL, 0x0a181d0903050c0fULL,
- 0xbc46187dc15e23e2ULL, 0xef827bb857f94116ULL, 0xcefe9918d6677fa9ULL,
- 0xec86f035d976439aULL, 0xcdfa129558e87d25ULL, 0xea8efb32d875479fULL,
- 0x4917bd2f66aa85e3ULL, 0xc8f6921fd7647bacULL, 0x9ccd83a63a4ee8d2ULL,
- 0x8a0e4b42c84507cfULL, 0x88fdb9b43c44f0ccULL, 0x268390dcfa13cf35ULL,
- 0x53c463c596a762f4ULL, 0xf551a552a7f4a601ULL, 0x77b401ef98b55ac2ULL,
- 0x52331abeec29977bULL, 0xb7a97c0fb8d5da62ULL, 0xa876226fc7543bfcULL,
- 0xc319f66daeef822cULL, 0x6b6fd40269bbb9d0ULL, 0xa762bfec4bdd317aULL,
- 0xdd31d176abe0963dULL, 0xd121c778a9e69e37ULL, 0x4f1fb62867a981e6ULL,
- 0x3c504e360a1e2822ULL, 0x8f02cbc847c90146ULL, 0x16c3c8e4f20bef1dULL,
- 0x99c1032cb5c2ee5bULL, 0xcc0d6bee226688aaULL, 0x647b4981e532b356ULL,
- 0x5e230cb0ee2f9f71ULL, 0xa399461dbedfc27cULL, 0xfa4538d12b7dac87ULL,
- 0x217ce2a0819e3ebfULL, 0x6c90a67e1236485aULL, 0x2d6cf4ae839836b5ULL,
- 0x5ad8f5411b2d6c77ULL, 0x2470622a0e123836ULL, 0xca0560e923658cafULL,
- 0x04fbf9f1f502f306ULL, 0x8312ddc645cf094cULL, 0xc61576e7216384a5ULL,
- 0x9e3e7150ce4f1fd1ULL, 0xab72a9e249db3970ULL, 0xe87d09c42c74b09cULL,
- 0x2c9b8dd5f916c33aULL, 0x6e635488e637bf59ULL, 0x93d91e25b6c7e254ULL,
- 0xf05d25d82878a088ULL, 0x72b8816517395c4bULL, 0x2b64ffa9829b32b0ULL,
- 0x5cd0fe461a2e6872ULL, 0x1d2cac968b80169dULL, 0x3ea3bcc0fe1fdf21ULL,
- 0x1b24a7918a831298ULL, 0x3648533f091b242dULL, 0x8c064045c94603caULL,
- 0x354cd8b2879426a1ULL, 0xb94a98f74ed2256bULL, 0x7c5b659de13ea342ULL,
- 0xe46d1fca2e72b896ULL, 0x62734286e431b753ULL, 0x7a536e9ae03da747ULL,
- 0x400b2babeb208b60ULL, 0x47f459d790ad7aeaULL, 0xff49b85ba4f1aa0eULL,
- 0x44f0d25a1e227866ULL, 0x395ccebc85922eabULL, 0x5d27873d60a09dfdULL,
- 0x0000000000000000ULL, 0xde355afb256f94b1ULL, 0x02f3f2f6f401f703ULL,
- 0x1cdbd5edf10ee312ULL, 0x5fd475cb94a16afeULL, 0x3a5845310b1d2c27ULL,
- 0x686b5f8fe734bb5cULL, 0x238f1056759fc9bcULL, 0x582b07b7ef2c9b74ULL,
- 0xb8bde18c345cd0e4ULL, 0xa695c6973153c4f5ULL, 0xc2ee8f16d46177a3ULL,
- 0xdacea30ad06d67b7ULL, 0x3344d3b5869722a4ULL, 0x19d755677e82e59bULL,
- 0xc901eb64adea8e23ULL, 0x34bba1c9fd1ad32eULL, 0xf6552edf297ba48dULL,
- 0xa09dcd903050c0f0ULL, 0x9ac588a13b4decd7ULL, 0x658c30fa9fbc46d9ULL,
- 0x2a9386d2f815c73fULL, 0xae7e2968c6573ff9ULL, 0x6a98ad7913354c5fULL,
- 0x14303a12060a181eULL, 0x1e28271b050f1411ULL, 0xa4663461c55233f6ULL,
- 0x6688bb7711334455ULL, 0x2f9f06587799c1b6ULL, 0x15c743697c84ed91ULL,
- 0x01f7797b7a8ef58fULL, 0x0de76f757888fd85ULL, 0xb4adf782365ad8eeULL,
- 0x48e0c4541c24706cULL, 0x96d59eaf394be4ddULL, 0xcbf2199259eb7920ULL,
- 0x50c0e84818286078ULL, 0xe98a70bf56fa4513ULL, 0x8df1393eb3c8f645ULL,
- 0x87e92437b0cdfa4aULL, 0xd83d51fc246c90b4ULL, 0xc01d7de0206080a0ULL,
- 0x8bf93239b2cbf240ULL, 0x4be44fd992ab72e0ULL, 0xed71894ea3f8b615ULL,
- 0xba4e137ac05d27e7ULL, 0x851ad6c144cc0d49ULL, 0x5137913362a695f7ULL,
- 0x6080b07010304050ULL, 0x9fc9082bb4c1ea5eULL, 0x3f54c5bb84912aaeULL,
- 0x9722e7d443c51152ULL, 0x4dec44de93a876e5ULL, 0xb65e0574c25b2fedULL,
- 0xa16ab4eb4ade357fULL, 0xa9815b14bddace73ULL, 0x050c808a8f8c0689ULL,
- 0xee7502c32d77b499ULL, 0xaf895013bcd9ca76ULL, 0x6f942df39cb94ad6ULL,
- 0x6177c90b6abeb5dfULL, 0x9d3afadd40c01d5dULL, 0x98367a57cf4c1bd4ULL,
- 0xeb798249a2fbb210ULL, 0x2774e9a7809d3abaULL, 0xbf4293f04fd1216eULL,
- 0x42f8d95d1f217c63ULL, 0x861e5d4cca430fc5ULL, 0xdb39da71aae39238ULL,
- 0x912aecd342c61557ULL
-};
-
-static const u64 T5[256] = {
- 0xb9bb016ad3ba68d2ULL, 0x9ae5b166fc54194dULL, 0x65e2cd14712f93bcULL,
- 0x8725511b9c74b9cdULL, 0xa2f7a457f5530251ULL, 0xd6d003be68d3b86bULL,
- 0xded604b56bd2bd6fULL, 0x52b3fe85d74d6429ULL, 0xbafdad4af0500d5dULL,
- 0x09cf63e0e9ac268aULL, 0x1c0984968a8d830eULL, 0x91a51a4ddcbf79c6ULL,
- 0xa73d4d379070adddULL, 0xaaf1a35cf6520755ULL, 0xa47be117b39ac852ULL,
- 0x5ab5f98ed44c612dULL, 0x0346ac2023ea658fULL, 0xe6c4118462d5a673ULL,
- 0xcc55c268a497f166ULL, 0xc6dc0da86ed1b263ULL, 0x85aa99d05533ffccULL,
- 0xb2fbaa41f3510859ULL, 0xe2c79c0fed5b2a71ULL, 0x59f355aef7a604a2ULL,
- 0xbefe20c17fde815fULL, 0x7aade5a2d848753dULL, 0x29d77fcce5a8329aULL,
- 0xbc71e80ab699c75eULL, 0x96e03be670db904bULL, 0x8dac9edb5632fac8ULL,
- 0xd1952215c4b751e6ULL, 0xb332ceaa19fc2bd7ULL, 0x4b70937338e348abULL,
- 0x8463fd3bbf9edc42ULL, 0xfc41d052ae91ef7eULL, 0xac7de61cb09bcd56ULL,
- 0x437694783be24dafULL, 0xb1bd0661d0bb6dd6ULL, 0x329bdaf1c3415819ULL,
- 0x577917e5b26ecba5ULL, 0x41f95cb3f2a50baeULL, 0x16804b5640cbc00bULL,
- 0x7f670cc2bd6bdab1ULL, 0xdc59cc7ea295fb6eULL, 0x61e1409ffea11fbeULL,
- 0xcb10e3c308f318ebULL, 0xe181302fceb14ffeULL, 0x100c0e1606020a08ULL,
- 0x2e925e6749ccdb17ULL, 0x6ea2663f51c4f337ULL, 0xe84e53cf271d6974ULL,
- 0xa0786c9c3c144450ULL, 0x56b0730e58c3e82bULL, 0x3f57349aa563f291ULL,
- 0x9ee63ced73da954fULL, 0xd2d38e35e75d3469ULL, 0xc2df8023e15f3e61ULL,
- 0xaef22ed779dc8b57ULL, 0xcf136e48877d94e9ULL, 0x2694596c4acdde13ULL,
- 0xdf1f605e817f9ee1ULL, 0xeac19b04ee5a2f75ULL, 0x477519f3b46cc1adULL,
- 0xdad5893ee45c316dULL, 0xeb08ffef04f70cfbULL, 0x2dd4f2476a26be98ULL,
- 0xab38c7b71cff24dbULL, 0x3b54b9112aed7e93ULL, 0x134aa23625e86f87ULL,
- 0x9c69f426ba9dd34eULL, 0x5f7f10eeb16fcea1ULL, 0x04038d8b8f8e8c02ULL,
- 0xc8564fe32b197d64ULL, 0x69e74794fda01abaULL, 0xd31aeade0df017e7ULL,
- 0x3c1198ba8689971eULL, 0x78222d69110f333cULL, 0x3812153109071b1cULL,
- 0x11c56afdecaf2986ULL, 0x8b20db9b10fb30cbULL, 0x4030385818082820ULL,
- 0xa87e6b973f154154ULL, 0x682e237f170d3934ULL, 0x20181c2c0c041410ULL,
- 0x0806070b03010504ULL, 0x074521abac64e98dULL, 0xb6f827ca7cdf845bULL,
- 0x97295f0d9a76b3c5ULL, 0xef0b72648b7980f9ULL, 0xa6f429dc7add8e53ULL,
- 0xf58eb3b2473dc9f4ULL, 0xb074628a3a164e58ULL, 0xe582bda4413fc3fcULL,
- 0xa5b285fc5937ebdcULL, 0x4f731ef8b76dc4a9ULL, 0xdd90a8954838d8e0ULL,
- 0xa1b10877d6b967deULL, 0xbf37442a9573a2d1ULL, 0x1b4ca53d26e96a83ULL,
- 0xb5be8bea5f35e1d4ULL, 0x92e3b66dff551c49ULL, 0xaf3b4a3c9371a8d9ULL,
- 0xff077c728d7b8af1ULL, 0x140f839d898c860aULL, 0xb73143219672a7d5ULL,
- 0x34179fb18588921aULL, 0xe30ef8e407f609ffULL, 0x4dfcd6337e2a82a8ULL,
- 0xed84baaf423ec6f8ULL, 0xcad98728e25e3b65ULL, 0x25d2f54c6927bb9cULL,
- 0x0a89cfc0ca464305ULL, 0x60282474140c3c30ULL, 0x0f4326a0af65ec89ULL,
- 0x676d05dfb868d5bdULL, 0x2f5b3a8ca361f899ULL, 0x180a091d05030f0cULL,
- 0x46bc7d185ec1e223ULL, 0x82efb87bf9571641ULL, 0xfece189967d6a97fULL,
- 0x86ec35f076d99a43ULL, 0xfacd9512e858257dULL, 0x8eea32fb75d89f47ULL,
- 0x17492fbdaa66e385ULL, 0xf6c81f9264d7ac7bULL, 0xcd9ca6834e3ad2e8ULL,
- 0x0e8a424b45c8cf07ULL, 0xfd88b4b9443cccf0ULL, 0x8326dc9013fa35cfULL,
- 0xc453c563a796f462ULL, 0x51f552a5f4a701a6ULL, 0xb477ef01b598c25aULL,
- 0x3352be1a29ec7b97ULL, 0xa9b70f7cd5b862daULL, 0x76a86f2254c7fc3bULL,
- 0x19c36df6efae2c82ULL, 0x6f6b02d4bb69d0b9ULL, 0x62a7ecbfdd4b7a31ULL,
- 0x31dd76d1e0ab3d96ULL, 0x21d178c7e6a9379eULL, 0x1f4f28b6a967e681ULL,
- 0x503c364e1e0a2228ULL, 0x028fc8cbc9474601ULL, 0xc316e4c80bf21defULL,
- 0xc1992c03c2b55beeULL, 0x0dccee6b6622aa88ULL, 0x7b64814932e556b3ULL,
- 0x235eb00c2fee719fULL, 0x99a31d46dfbe7cc2ULL, 0x45fad1387d2b87acULL,
- 0x7c21a0e29e81bf3eULL, 0x906c7ea636125a48ULL, 0x6c2daef49883b536ULL,
- 0xd85a41f52d1b776cULL, 0x70242a62120e3638ULL, 0x05cae9606523af8cULL,
- 0xfb04f1f902f506f3ULL, 0x1283c6ddcf454c09ULL, 0x15c6e7766321a584ULL,
- 0x3e9e50714fced11fULL, 0x72abe2a9db497039ULL, 0x7de8c409742c9cb0ULL,
- 0x9b2cd58d16f93ac3ULL, 0x636e885437e659bfULL, 0xd993251ec7b654e2ULL,
- 0x5df0d825782888a0ULL, 0xb872658139174b5cULL, 0x642ba9ff9b82b032ULL,
- 0xd05c46fe2e1a7268ULL, 0x2c1d96ac808b9d16ULL, 0xa33ec0bc1ffe21dfULL,
- 0x241b91a7838a9812ULL, 0x48363f531b092d24ULL, 0x068c454046c9ca03ULL,
- 0x4c35b2d89487a126ULL, 0x4ab9f798d24e6b25ULL, 0x5b7c9d653ee142a3ULL,
- 0x6de4ca1f722e96b8ULL, 0x7362864231e453b7ULL, 0x537a9a6e3de047a7ULL,
- 0x0b40ab2b20eb608bULL, 0xf447d759ad90ea7aULL, 0x49ff5bb8f1a40eaaULL,
- 0xf0445ad2221e6678ULL, 0x5c39bcce9285ab2eULL, 0x275d3d87a060fd9dULL,
- 0x0000000000000000ULL, 0x35defb5a6f25b194ULL, 0xf302f6f201f403f7ULL,
- 0xdb1cedd50ef112e3ULL, 0xd45fcb75a194fe6aULL, 0x583a31451d0b272cULL,
- 0x6b688f5f34e75cbbULL, 0x8f2356109f75bcc9ULL, 0x2b58b7072cef749bULL,
- 0xbdb88ce15c34e4d0ULL, 0x95a697c65331f5c4ULL, 0xeec2168f61d4a377ULL,
- 0xceda0aa36dd0b767ULL, 0x4433b5d39786a422ULL, 0xd7196755827e9be5ULL,
- 0x01c964ebeaad238eULL, 0xbb34c9a11afd2ed3ULL, 0x55f6df2e7b298da4ULL,
- 0x9da090cd5030f0c0ULL, 0xc59aa1884d3bd7ecULL, 0x8c65fa30bc9fd946ULL,
- 0x932ad28615f83fc7ULL, 0x7eae682957c6f93fULL, 0x986a79ad35135f4cULL,
- 0x3014123a0a061e18ULL, 0x281e1b270f051114ULL, 0x66a4613452c5f633ULL,
- 0x886677bb33115544ULL, 0x9f2f58069977b6c1ULL, 0xc7156943847c91edULL,
- 0xf7017b798e7a8ff5ULL, 0xe70d756f887885fdULL, 0xadb482f75a36eed8ULL,
- 0xe04854c4241c6c70ULL, 0xd596af9e4b39dde4ULL, 0xf2cb9219eb592079ULL,
- 0xc05048e828187860ULL, 0x8ae9bf70fa561345ULL, 0xf18d3e39c8b345f6ULL,
- 0xe9873724cdb04afaULL, 0x3dd8fc516c24b490ULL, 0x1dc0e07d6020a080ULL,
- 0xf98b3932cbb240f2ULL, 0xe44bd94fab92e072ULL, 0x71ed4e89f8a315b6ULL,
- 0x4eba7a135dc0e727ULL, 0x1a85c1d6cc44490dULL, 0x37513391a662f795ULL,
- 0x806070b030105040ULL, 0xc99f2b08c1b45eeaULL, 0x543fbbc59184ae2aULL,
- 0x2297d4e7c5435211ULL, 0xec4dde44a893e576ULL, 0x5eb674055bc2ed2fULL,
- 0x6aa1ebb4de4a7f35ULL, 0x81a9145bdabd73ceULL, 0x0c058a808c8f8906ULL,
- 0x75eec302772d99b4ULL, 0x89af1350d9bc76caULL, 0x946ff32db99cd64aULL,
- 0x77610bc9be6adfb5ULL, 0x3a9dddfac0405d1dULL, 0x3698577a4ccfd41bULL,
- 0x79eb4982fba210b2ULL, 0x7427a7e99d80ba3aULL, 0x42bff093d14f6e21ULL,
- 0xf8425dd9211f637cULL, 0x1e864c5d43cac50fULL, 0x39db71dae3aa3892ULL,
- 0x2a91d3ecc6425715ULL
-};
-
-static const u64 T6[256] = {
- 0x6a01bbb9d268bad3ULL, 0x66b1e59a4d1954fcULL, 0x14cde265bc932f71ULL,
- 0x1b512587cdb9749cULL, 0x57a4f7a2510253f5ULL, 0xbe03d0d66bb8d368ULL,
- 0xb504d6de6fbdd26bULL, 0x85feb35229644dd7ULL, 0x4aadfdba5d0d50f0ULL,
- 0xe063cf098a26ace9ULL, 0x9684091c0e838d8aULL, 0x4d1aa591c679bfdcULL,
- 0x374d3da7ddad7090ULL, 0x5ca3f1aa550752f6ULL, 0x17e17ba452c89ab3ULL,
- 0x8ef9b55a2d614cd4ULL, 0x20ac46038f65ea23ULL, 0x8411c4e673a6d562ULL,
- 0x68c255cc66f197a4ULL, 0xa80ddcc663b2d16eULL, 0xd099aa85ccff3355ULL,
- 0x41aafbb2590851f3ULL, 0x0f9cc7e2712a5bedULL, 0xae55f359a204a6f7ULL,
- 0xc120febe5f81de7fULL, 0xa2e5ad7a3d7548d8ULL, 0xcc7fd7299a32a8e5ULL,
- 0x0ae871bc5ec799b6ULL, 0xe63be0964b90db70ULL, 0xdb9eac8dc8fa3256ULL,
- 0x152295d1e651b7c4ULL, 0xaace32b3d72bfc19ULL, 0x7393704bab48e338ULL,
- 0x3bfd638442dc9ebfULL, 0x52d041fc7eef91aeULL, 0x1ce67dac56cd9bb0ULL,
- 0x78947643af4de23bULL, 0x6106bdb1d66dbbd0ULL, 0xf1da9b32195841c3ULL,
- 0xe5177957a5cb6eb2ULL, 0xb35cf941ae0ba5f2ULL, 0x564b80160bc0cb40ULL,
- 0xc20c677fb1da6bbdULL, 0x7ecc59dc6efb95a2ULL, 0x9f40e161be1fa1feULL,
- 0xc3e310cbeb18f308ULL, 0x2f3081e1fe4fb1ceULL, 0x160e0c10080a0206ULL,
- 0x675e922e17dbcc49ULL, 0x3f66a26e37f3c451ULL, 0xcf534ee874691d27ULL,
- 0x9c6c78a05044143cULL, 0x0e73b0562be8c358ULL, 0x9a34573f91f263a5ULL,
- 0xed3ce69e4f95da73ULL, 0x358ed3d269345de7ULL, 0x2380dfc2613e5fe1ULL,
- 0xd72ef2ae578bdc79ULL, 0x486e13cfe9947d87ULL, 0x6c59942613decd4aULL,
- 0x5e601fdfe19e7f81ULL, 0x049bc1ea752f5aeeULL, 0xf3197547adc16cb4ULL,
- 0x3e89d5da6d315ce4ULL, 0xefff08ebfb0cf704ULL, 0x47f2d42d98be266aULL,
- 0xb7c738abdb24ff1cULL, 0x11b9543b937eed2aULL, 0x36a24a13876fe825ULL,
- 0x26f4699c4ed39dbaULL, 0xee107f5fa1ce6fb1ULL, 0x8b8d0304028c8e8fULL,
- 0xe34f56c8647d192bULL, 0x9447e769ba1aa0fdULL, 0xdeea1ad3e717f00dULL,
- 0xba98113c1e978986ULL, 0x692d22783c330f11ULL, 0x311512381c1b0709ULL,
- 0xfd6ac5118629afecULL, 0x9bdb208bcb30fb10ULL, 0x5838304020280818ULL,
- 0x976b7ea85441153fULL, 0x7f232e6834390d17ULL, 0x2c1c18201014040cULL,
- 0x0b07060804050103ULL, 0xab2145078de964acULL, 0xca27f8b65b84df7cULL,
- 0x0d5f2997c5b3769aULL, 0x64720beff980798bULL, 0xdc29f4a6538edd7aULL,
- 0xb2b38ef5f4c93d47ULL, 0x8a6274b0584e163aULL, 0xa4bd82e5fcc33f41ULL,
- 0xfc85b2a5dceb3759ULL, 0xf81e734fa9c46db7ULL, 0x95a890dde0d83848ULL,
- 0x7708b1a1de67b9d6ULL, 0x2a4437bfd1a27395ULL, 0x3da54c1b836ae926ULL,
- 0xea8bbeb5d4e1355fULL, 0x6db6e392491c55ffULL, 0x3c4a3bafd9a87193ULL,
- 0x727c07fff18a7b8dULL, 0x9d830f140a868c89ULL, 0x214331b7d5a77296ULL,
- 0xb19f17341a928885ULL, 0xe4f80ee3ff09f607ULL, 0x33d6fc4da8822a7eULL,
- 0xafba84edf8c63e42ULL, 0x2887d9ca653b5ee2ULL, 0x4cf5d2259cbb2769ULL,
- 0xc0cf890a054346caULL, 0x74242860303c0c14ULL, 0xa026430f89ec65afULL,
- 0xdf056d67bdd568b8ULL, 0x8c3a5b2f99f861a3ULL, 0x1d090a180c0f0305ULL,
- 0x187dbc4623e2c15eULL, 0x7bb8ef82411657f9ULL, 0x9918cefe7fa9d667ULL,
- 0xf035ec86439ad976ULL, 0x1295cdfa7d2558e8ULL, 0xfb32ea8e479fd875ULL,
- 0xbd2f491785e366aaULL, 0x921fc8f67bacd764ULL, 0x83a69ccde8d23a4eULL,
- 0x4b428a0e07cfc845ULL, 0xb9b488fdf0cc3c44ULL, 0x90dc2683cf35fa13ULL,
- 0x63c553c462f496a7ULL, 0xa552f551a601a7f4ULL, 0x01ef77b45ac298b5ULL,
- 0x1abe5233977bec29ULL, 0x7c0fb7a9da62b8d5ULL, 0x226fa8763bfcc754ULL,
- 0xf66dc319822caeefULL, 0xd4026b6fb9d069bbULL, 0xbfeca762317a4bddULL,
- 0xd176dd31963dabe0ULL, 0xc778d1219e37a9e6ULL, 0xb6284f1f81e667a9ULL,
- 0x4e363c5028220a1eULL, 0xcbc88f02014647c9ULL, 0xc8e416c3ef1df20bULL,
- 0x032c99c1ee5bb5c2ULL, 0x6beecc0d88aa2266ULL, 0x4981647bb356e532ULL,
- 0x0cb05e239f71ee2fULL, 0x461da399c27cbedfULL, 0x38d1fa45ac872b7dULL,
- 0xe2a0217c3ebf819eULL, 0xa67e6c90485a1236ULL, 0xf4ae2d6c36b58398ULL,
- 0xf5415ad86c771b2dULL, 0x622a247038360e12ULL, 0x60e9ca058caf2365ULL,
- 0xf9f104fbf306f502ULL, 0xddc68312094c45cfULL, 0x76e7c61584a52163ULL,
- 0x71509e3e1fd1ce4fULL, 0xa9e2ab72397049dbULL, 0x09c4e87db09c2c74ULL,
- 0x8dd52c9bc33af916ULL, 0x54886e63bf59e637ULL, 0x1e2593d9e254b6c7ULL,
- 0x25d8f05da0882878ULL, 0x816572b85c4b1739ULL, 0xffa92b6432b0829bULL,
- 0xfe465cd068721a2eULL, 0xac961d2c169d8b80ULL, 0xbcc03ea3df21fe1fULL,
- 0xa7911b2412988a83ULL, 0x533f3648242d091bULL, 0x40458c0603cac946ULL,
- 0xd8b2354c26a18794ULL, 0x98f7b94a256b4ed2ULL, 0x659d7c5ba342e13eULL,
- 0x1fcae46db8962e72ULL, 0x42866273b753e431ULL, 0x6e9a7a53a747e03dULL,
- 0x2bab400b8b60eb20ULL, 0x59d747f47aea90adULL, 0xb85bff49aa0ea4f1ULL,
- 0xd25a44f078661e22ULL, 0xcebc395c2eab8592ULL, 0x873d5d279dfd60a0ULL,
- 0x0000000000000000ULL, 0x5afbde3594b1256fULL, 0xf2f602f3f703f401ULL,
- 0xd5ed1cdbe312f10eULL, 0x75cb5fd46afe94a1ULL, 0x45313a582c270b1dULL,
- 0x5f8f686bbb5ce734ULL, 0x1056238fc9bc759fULL, 0x07b7582b9b74ef2cULL,
- 0xe18cb8bdd0e4345cULL, 0xc697a695c4f53153ULL, 0x8f16c2ee77a3d461ULL,
- 0xa30adace67b7d06dULL, 0xd3b5334422a48697ULL, 0x556719d7e59b7e82ULL,
- 0xeb64c9018e23adeaULL, 0xa1c934bbd32efd1aULL, 0x2edff655a48d297bULL,
- 0xcd90a09dc0f03050ULL, 0x88a19ac5ecd73b4dULL, 0x30fa658c46d99fbcULL,
- 0x86d22a93c73ff815ULL, 0x2968ae7e3ff9c657ULL, 0xad796a984c5f1335ULL,
- 0x3a121430181e060aULL, 0x271b1e281411050fULL, 0x3461a46633f6c552ULL,
- 0xbb77668844551133ULL, 0x06582f9fc1b67799ULL, 0x436915c7ed917c84ULL,
- 0x797b01f7f58f7a8eULL, 0x6f750de7fd857888ULL, 0xf782b4add8ee365aULL,
- 0xc45448e0706c1c24ULL, 0x9eaf96d5e4dd394bULL, 0x1992cbf2792059ebULL,
- 0xe84850c060781828ULL, 0x70bfe98a451356faULL, 0x393e8df1f645b3c8ULL,
- 0x243787e9fa4ab0cdULL, 0x51fcd83d90b4246cULL, 0x7de0c01d80a02060ULL,
- 0x32398bf9f240b2cbULL, 0x4fd94be472e092abULL, 0x894eed71b615a3f8ULL,
- 0x137aba4e27e7c05dULL, 0xd6c1851a0d4944ccULL, 0x9133513795f762a6ULL,
- 0xb070608040501030ULL, 0x082b9fc9ea5eb4c1ULL, 0xc5bb3f542aae8491ULL,
- 0xe7d49722115243c5ULL, 0x44de4dec76e593a8ULL, 0x0574b65e2fedc25bULL,
- 0xb4eba16a357f4adeULL, 0x5b14a981ce73bddaULL, 0x808a050c06898f8cULL,
- 0x02c3ee75b4992d77ULL, 0x5013af89ca76bcd9ULL, 0x2df36f944ad69cb9ULL,
- 0xc90b6177b5df6abeULL, 0xfadd9d3a1d5d40c0ULL, 0x7a5798361bd4cf4cULL,
- 0x8249eb79b210a2fbULL, 0xe9a727743aba809dULL, 0x93f0bf42216e4fd1ULL,
- 0xd95d42f87c631f21ULL, 0x5d4c861e0fc5ca43ULL, 0xda71db399238aae3ULL,
- 0xecd3912a155742c6ULL
-};
-
-static const u64 T7[256] = {
- 0x016ab9bb68d2d3baULL, 0xb1669ae5194dfc54ULL, 0xcd1465e293bc712fULL,
- 0x511b8725b9cd9c74ULL, 0xa457a2f70251f553ULL, 0x03bed6d0b86b68d3ULL,
- 0x04b5ded6bd6f6bd2ULL, 0xfe8552b36429d74dULL, 0xad4abafd0d5df050ULL,
- 0x63e009cf268ae9acULL, 0x84961c09830e8a8dULL, 0x1a4d91a579c6dcbfULL,
- 0x4d37a73daddd9070ULL, 0xa35caaf10755f652ULL, 0xe117a47bc852b39aULL,
- 0xf98e5ab5612dd44cULL, 0xac200346658f23eaULL, 0x1184e6c4a67362d5ULL,
- 0xc268cc55f166a497ULL, 0x0da8c6dcb2636ed1ULL, 0x99d085aaffcc5533ULL,
- 0xaa41b2fb0859f351ULL, 0x9c0fe2c72a71ed5bULL, 0x55ae59f304a2f7a6ULL,
- 0x20c1befe815f7fdeULL, 0xe5a27aad753dd848ULL, 0x7fcc29d7329ae5a8ULL,
- 0xe80abc71c75eb699ULL, 0x3be696e0904b70dbULL, 0x9edb8dacfac85632ULL,
- 0x2215d19551e6c4b7ULL, 0xceaab3322bd719fcULL, 0x93734b7048ab38e3ULL,
- 0xfd3b8463dc42bf9eULL, 0xd052fc41ef7eae91ULL, 0xe61cac7dcd56b09bULL,
- 0x947843764daf3be2ULL, 0x0661b1bd6dd6d0bbULL, 0xdaf1329b5819c341ULL,
- 0x17e55779cba5b26eULL, 0x5cb341f90baef2a5ULL, 0x4b561680c00b40cbULL,
- 0x0cc27f67dab1bd6bULL, 0xcc7edc59fb6ea295ULL, 0x409f61e11fbefea1ULL,
- 0xe3c3cb1018eb08f3ULL, 0x302fe1814ffeceb1ULL, 0x0e16100c0a080602ULL,
- 0x5e672e92db1749ccULL, 0x663f6ea2f33751c4ULL, 0x53cfe84e6974271dULL,
- 0x6c9ca07844503c14ULL, 0x730e56b0e82b58c3ULL, 0x349a3f57f291a563ULL,
- 0x3ced9ee6954f73daULL, 0x8e35d2d33469e75dULL, 0x8023c2df3e61e15fULL,
- 0x2ed7aef28b5779dcULL, 0x6e48cf1394e9877dULL, 0x596c2694de134acdULL,
- 0x605edf1f9ee1817fULL, 0x9b04eac12f75ee5aULL, 0x19f34775c1adb46cULL,
- 0x893edad5316de45cULL, 0xffefeb080cfb04f7ULL, 0xf2472dd4be986a26ULL,
- 0xc7b7ab3824db1cffULL, 0xb9113b547e932aedULL, 0xa236134a6f8725e8ULL,
- 0xf4269c69d34eba9dULL, 0x10ee5f7fcea1b16fULL, 0x8d8b04038c028f8eULL,
- 0x4fe3c8567d642b19ULL, 0x479469e71abafda0ULL, 0xeaded31a17e70df0ULL,
- 0x98ba3c11971e8689ULL, 0x2d697822333c110fULL, 0x153138121b1c0907ULL,
- 0x6afd11c52986ecafULL, 0xdb9b8b2030cb10fbULL, 0x3858403028201808ULL,
- 0x6b97a87e41543f15ULL, 0x237f682e3934170dULL, 0x1c2c201814100c04ULL,
- 0x070b080605040301ULL, 0x21ab0745e98dac64ULL, 0x27cab6f8845b7cdfULL,
- 0x5f0d9729b3c59a76ULL, 0x7264ef0b80f98b79ULL, 0x29dca6f48e537addULL,
- 0xb3b2f58ec9f4473dULL, 0x628ab0744e583a16ULL, 0xbda4e582c3fc413fULL,
- 0x85fca5b2ebdc5937ULL, 0x1ef84f73c4a9b76dULL, 0xa895dd90d8e04838ULL,
- 0x0877a1b167ded6b9ULL, 0x442abf37a2d19573ULL, 0xa53d1b4c6a8326e9ULL,
- 0x8beab5bee1d45f35ULL, 0xb66d92e31c49ff55ULL, 0x4a3caf3ba8d99371ULL,
- 0x7c72ff078af18d7bULL, 0x839d140f860a898cULL, 0x4321b731a7d59672ULL,
- 0x9fb13417921a8588ULL, 0xf8e4e30e09ff07f6ULL, 0xd6334dfc82a87e2aULL,
- 0xbaafed84c6f8423eULL, 0x8728cad93b65e25eULL, 0xf54c25d2bb9c6927ULL,
- 0xcfc00a894305ca46ULL, 0x247460283c30140cULL, 0x26a00f43ec89af65ULL,
- 0x05df676dd5bdb868ULL, 0x3a8c2f5bf899a361ULL, 0x091d180a0f0c0503ULL,
- 0x7d1846bce2235ec1ULL, 0xb87b82ef1641f957ULL, 0x1899fecea97f67d6ULL,
- 0x35f086ec9a4376d9ULL, 0x9512facd257de858ULL, 0x32fb8eea9f4775d8ULL,
- 0x2fbd1749e385aa66ULL, 0x1f92f6c8ac7b64d7ULL, 0xa683cd9cd2e84e3aULL,
- 0x424b0e8acf0745c8ULL, 0xb4b9fd88ccf0443cULL, 0xdc90832635cf13faULL,
- 0xc563c453f462a796ULL, 0x52a551f501a6f4a7ULL, 0xef01b477c25ab598ULL,
- 0xbe1a33527b9729ecULL, 0x0f7ca9b762dad5b8ULL, 0x6f2276a8fc3b54c7ULL,
- 0x6df619c32c82efaeULL, 0x02d46f6bd0b9bb69ULL, 0xecbf62a77a31dd4bULL,
- 0x76d131dd3d96e0abULL, 0x78c721d1379ee6a9ULL, 0x28b61f4fe681a967ULL,
- 0x364e503c22281e0aULL, 0xc8cb028f4601c947ULL, 0xe4c8c3161def0bf2ULL,
- 0x2c03c1995beec2b5ULL, 0xee6b0dccaa886622ULL, 0x81497b6456b332e5ULL,
- 0xb00c235e719f2feeULL, 0x1d4699a37cc2dfbeULL, 0xd13845fa87ac7d2bULL,
- 0xa0e27c21bf3e9e81ULL, 0x7ea6906c5a483612ULL, 0xaef46c2db5369883ULL,
- 0x41f5d85a776c2d1bULL, 0x2a6270243638120eULL, 0xe96005caaf8c6523ULL,
- 0xf1f9fb0406f302f5ULL, 0xc6dd12834c09cf45ULL, 0xe77615c6a5846321ULL,
- 0x50713e9ed11f4fceULL, 0xe2a972ab7039db49ULL, 0xc4097de89cb0742cULL,
- 0xd58d9b2c3ac316f9ULL, 0x8854636e59bf37e6ULL, 0x251ed99354e2c7b6ULL,
- 0xd8255df088a07828ULL, 0x6581b8724b5c3917ULL, 0xa9ff642bb0329b82ULL,
- 0x46fed05c72682e1aULL, 0x96ac2c1d9d16808bULL, 0xc0bca33e21df1ffeULL,
- 0x91a7241b9812838aULL, 0x3f5348362d241b09ULL, 0x4540068cca0346c9ULL,
- 0xb2d84c35a1269487ULL, 0xf7984ab96b25d24eULL, 0x9d655b7c42a33ee1ULL,
- 0xca1f6de496b8722eULL, 0x8642736253b731e4ULL, 0x9a6e537a47a73de0ULL,
- 0xab2b0b40608b20ebULL, 0xd759f447ea7aad90ULL, 0x5bb849ff0eaaf1a4ULL,
- 0x5ad2f0446678221eULL, 0xbcce5c39ab2e9285ULL, 0x3d87275dfd9da060ULL,
- 0x0000000000000000ULL, 0xfb5a35deb1946f25ULL, 0xf6f2f30203f701f4ULL,
- 0xedd5db1c12e30ef1ULL, 0xcb75d45ffe6aa194ULL, 0x3145583a272c1d0bULL,
- 0x8f5f6b685cbb34e7ULL, 0x56108f23bcc99f75ULL, 0xb7072b58749b2cefULL,
- 0x8ce1bdb8e4d05c34ULL, 0x97c695a6f5c45331ULL, 0x168feec2a37761d4ULL,
- 0x0aa3cedab7676dd0ULL, 0xb5d34433a4229786ULL, 0x6755d7199be5827eULL,
- 0x64eb01c9238eeaadULL, 0xc9a1bb342ed31afdULL, 0xdf2e55f68da47b29ULL,
- 0x90cd9da0f0c05030ULL, 0xa188c59ad7ec4d3bULL, 0xfa308c65d946bc9fULL,
- 0xd286932a3fc715f8ULL, 0x68297eaef93f57c6ULL, 0x79ad986a5f4c3513ULL,
- 0x123a30141e180a06ULL, 0x1b27281e11140f05ULL, 0x613466a4f63352c5ULL,
- 0x77bb886655443311ULL, 0x58069f2fb6c19977ULL, 0x6943c71591ed847cULL,
- 0x7b79f7018ff58e7aULL, 0x756fe70d85fd8878ULL, 0x82f7adb4eed85a36ULL,
- 0x54c4e0486c70241cULL, 0xaf9ed596dde44b39ULL, 0x9219f2cb2079eb59ULL,
- 0x48e8c05078602818ULL, 0xbf708ae91345fa56ULL, 0x3e39f18d45f6c8b3ULL,
- 0x3724e9874afacdb0ULL, 0xfc513dd8b4906c24ULL, 0xe07d1dc0a0806020ULL,
- 0x3932f98b40f2cbb2ULL, 0xd94fe44be072ab92ULL, 0x4e8971ed15b6f8a3ULL,
- 0x7a134ebae7275dc0ULL, 0xc1d61a85490dcc44ULL, 0x33913751f795a662ULL,
- 0x70b0806050403010ULL, 0x2b08c99f5eeac1b4ULL, 0xbbc5543fae2a9184ULL,
- 0xd4e722975211c543ULL, 0xde44ec4de576a893ULL, 0x74055eb6ed2f5bc2ULL,
- 0xebb46aa17f35de4aULL, 0x145b81a973cedabdULL, 0x8a800c0589068c8fULL,
- 0xc30275ee99b4772dULL, 0x135089af76cad9bcULL, 0xf32d946fd64ab99cULL,
- 0x0bc97761dfb5be6aULL, 0xddfa3a9d5d1dc040ULL, 0x577a3698d41b4ccfULL,
- 0x498279eb10b2fba2ULL, 0xa7e97427ba3a9d80ULL, 0xf09342bf6e21d14fULL,
- 0x5dd9f842637c211fULL, 0x4c5d1e86c50f43caULL, 0x71da39db3892e3aaULL,
- 0xd3ec2a915715c642ULL
-};
-
-static const u64 c[KHAZAD_ROUNDS + 1] = {
- 0xba542f7453d3d24dULL, 0x50ac8dbf70529a4cULL, 0xead597d133515ba6ULL,
- 0xde48a899db32b7fcULL, 0xe39e919be2bb416eULL, 0xa5cb6b95a1f3b102ULL,
- 0xccc41d14c363da5dULL, 0x5fdc7dcd7f5a6c5cULL, 0xf726ffede89d6f8eULL
-};
-
-static int khazad_setkey(void *ctx_arg, const u8 *in_key,
- unsigned int key_len, u32 *flags)
-{
-
- struct khazad_ctx *ctx = ctx_arg;
- int r;
- const u64 *S = T7;
- u64 K2, K1;
-
- if (key_len != 16)
- {
- *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
- return -EINVAL;
- }
-
- K2 = ((u64)in_key[ 0] << 56) ^
- ((u64)in_key[ 1] << 48) ^
- ((u64)in_key[ 2] << 40) ^
- ((u64)in_key[ 3] << 32) ^
- ((u64)in_key[ 4] << 24) ^
- ((u64)in_key[ 5] << 16) ^
- ((u64)in_key[ 6] << 8) ^
- ((u64)in_key[ 7] );
- K1 = ((u64)in_key[ 8] << 56) ^
- ((u64)in_key[ 9] << 48) ^
- ((u64)in_key[10] << 40) ^
- ((u64)in_key[11] << 32) ^
- ((u64)in_key[12] << 24) ^
- ((u64)in_key[13] << 16) ^
- ((u64)in_key[14] << 8) ^
- ((u64)in_key[15] );
-
- /* setup the encrypt key */
- for (r = 0; r <= KHAZAD_ROUNDS; r++) {
- ctx->E[r] = T0[(int)(K1 >> 56) ] ^
- T1[(int)(K1 >> 48) & 0xff] ^
- T2[(int)(K1 >> 40) & 0xff] ^
- T3[(int)(K1 >> 32) & 0xff] ^
- T4[(int)(K1 >> 24) & 0xff] ^
- T5[(int)(K1 >> 16) & 0xff] ^
- T6[(int)(K1 >> 8) & 0xff] ^
- T7[(int)(K1 ) & 0xff] ^
- c[r] ^ K2;
- K2 = K1;
- K1 = ctx->E[r];
- }
- /* Setup the decrypt key */
- ctx->D[0] = ctx->E[KHAZAD_ROUNDS];
- for (r = 1; r < KHAZAD_ROUNDS; r++) {
- K1 = ctx->E[KHAZAD_ROUNDS - r];
- ctx->D[r] = T0[(int)S[(int)(K1 >> 56) ] & 0xff] ^
- T1[(int)S[(int)(K1 >> 48) & 0xff] & 0xff] ^
- T2[(int)S[(int)(K1 >> 40) & 0xff] & 0xff] ^
- T3[(int)S[(int)(K1 >> 32) & 0xff] & 0xff] ^
- T4[(int)S[(int)(K1 >> 24) & 0xff] & 0xff] ^
- T5[(int)S[(int)(K1 >> 16) & 0xff] & 0xff] ^
- T6[(int)S[(int)(K1 >> 8) & 0xff] & 0xff] ^
- T7[(int)S[(int)(K1 ) & 0xff] & 0xff];
- }
- ctx->D[KHAZAD_ROUNDS] = ctx->E[0];
-
- return 0;
-
-}
-
-static void khazad_crypt(const u64 roundKey[KHAZAD_ROUNDS + 1],
- u8 *ciphertext, const u8 *plaintext)
-{
-
- int r;
- u64 state;
-
- state = ((u64)plaintext[0] << 56) ^
- ((u64)plaintext[1] << 48) ^
- ((u64)plaintext[2] << 40) ^
- ((u64)plaintext[3] << 32) ^
- ((u64)plaintext[4] << 24) ^
- ((u64)plaintext[5] << 16) ^
- ((u64)plaintext[6] << 8) ^
- ((u64)plaintext[7] ) ^
- roundKey[0];
-
- for (r = 1; r < KHAZAD_ROUNDS; r++) {
- state = T0[(int)(state >> 56) ] ^
- T1[(int)(state >> 48) & 0xff] ^
- T2[(int)(state >> 40) & 0xff] ^
- T3[(int)(state >> 32) & 0xff] ^
- T4[(int)(state >> 24) & 0xff] ^
- T5[(int)(state >> 16) & 0xff] ^
- T6[(int)(state >> 8) & 0xff] ^
- T7[(int)(state ) & 0xff] ^
- roundKey[r];
- }
-
- state = (T0[(int)(state >> 56) ] & 0xff00000000000000ULL) ^
- (T1[(int)(state >> 48) & 0xff] & 0x00ff000000000000ULL) ^
- (T2[(int)(state >> 40) & 0xff] & 0x0000ff0000000000ULL) ^
- (T3[(int)(state >> 32) & 0xff] & 0x000000ff00000000ULL) ^
- (T4[(int)(state >> 24) & 0xff] & 0x00000000ff000000ULL) ^
- (T5[(int)(state >> 16) & 0xff] & 0x0000000000ff0000ULL) ^
- (T6[(int)(state >> 8) & 0xff] & 0x000000000000ff00ULL) ^
- (T7[(int)(state ) & 0xff] & 0x00000000000000ffULL) ^
- roundKey[KHAZAD_ROUNDS];
-
- ciphertext[0] = (u8)(state >> 56);
- ciphertext[1] = (u8)(state >> 48);
- ciphertext[2] = (u8)(state >> 40);
- ciphertext[3] = (u8)(state >> 32);
- ciphertext[4] = (u8)(state >> 24);
- ciphertext[5] = (u8)(state >> 16);
- ciphertext[6] = (u8)(state >> 8);
- ciphertext[7] = (u8)(state );
-
-}
-
-static void khazad_encrypt(void *ctx_arg, u8 *dst, const u8 *src)
-{
- struct khazad_ctx *ctx = ctx_arg;
- khazad_crypt(ctx->E, dst, src);
-}
-
-static void khazad_decrypt(void *ctx_arg, u8 *dst, const u8 *src)
-{
- struct khazad_ctx *ctx = ctx_arg;
- khazad_crypt(ctx->D, dst, src);
-}
-
-static struct crypto_alg khazad_alg = {
- .cra_name = "khazad",
- .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
- .cra_blocksize = KHAZAD_BLOCK_SIZE,
- .cra_ctxsize = sizeof (struct khazad_ctx),
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(khazad_alg.cra_list),
- .cra_u = { .cipher = {
- .cia_min_keysize = KHAZAD_KEY_SIZE,
- .cia_max_keysize = KHAZAD_KEY_SIZE,
- .cia_setkey = khazad_setkey,
- .cia_encrypt = khazad_encrypt,
- .cia_decrypt = khazad_decrypt } }
-};
-
-static int __init init(void)
-{
- int ret = 0;
-
- ret = crypto_register_alg(&khazad_alg);
- return ret;
-}
-
-static void __exit fini(void)
-{
- crypto_unregister_alg(&khazad_alg);
-}
-
-
-module_init(init);
-module_exit(fini);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Khazad Cryptographic Algorithm");
{
printk("- Added public key %X%X\n", pk->keyid[0], pk->keyid[1]);
-// if (pk->expiredate && pk->expiredate < xtime.tv_sec)
-// printk(" - public key has expired\n");
+ if (pk->expiredate && pk->expiredate < xtime.tv_sec)
+ printk(" - public key has expired\n");
if (pk->timestamp > xtime.tv_sec )
printk(" - key was been created %lu seconds in future\n",
struct ksign_public_key *pk;
uint8_t sha1[SHA1_DIGEST_SIZE];
MPI result = NULL;
- int rc = 0;
+ int rc = 0, i;
pk = ksign_get_public_key(sig->keyid);
if (!pk) {
test_cipher ("xtea", MODE_ECB, ENCRYPT, xtea_enc_tv_template, XTEA_ENC_TEST_VECTORS);
test_cipher ("xtea", MODE_ECB, DECRYPT, xtea_dec_tv_template, XTEA_DEC_TEST_VECTORS);
- //KHAZAD
- test_cipher ("khazad", MODE_ECB, ENCRYPT, khazad_enc_tv_template, KHAZAD_ENC_TEST_VECTORS);
- test_cipher ("khazad", MODE_ECB, DECRYPT, khazad_dec_tv_template, KHAZAD_DEC_TEST_VECTORS);
-
test_hash("sha384", sha384_tv_template, SHA384_TEST_VECTORS);
test_hash("sha512", sha512_tv_template, SHA512_TEST_VECTORS);
test_deflate();
test_cipher ("xtea", MODE_ECB, DECRYPT, xtea_dec_tv_template, XTEA_DEC_TEST_VECTORS);
break;
- case 21:
- test_cipher ("khazad", MODE_ECB, ENCRYPT, khazad_enc_tv_template, KHAZAD_ENC_TEST_VECTORS);
- test_cipher ("khazad", MODE_ECB, DECRYPT, khazad_dec_tv_template, KHAZAD_DEC_TEST_VECTORS);
- break;
-
#ifdef CONFIG_CRYPTO_HMAC
case 100:
test_hmac("md5", hmac_md5_tv_template, HMAC_MD5_TEST_VECTORS);
}
};
-/*
- * KHAZAD test vectors.
- */
-#define KHAZAD_ENC_TEST_VECTORS 5
-#define KHAZAD_DEC_TEST_VECTORS 5
-struct cipher_testvec khazad_enc_tv_template[] = {
- {
- .key = { 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
- .klen = 16,
- .input = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
- .ilen = 8,
- .result = { 0x49, 0xa4, 0xce, 0x32, 0xac, 0x19, 0x0e, 0x3f },
- .rlen = 8,
- }, {
- .key = { 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38,
- 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38 },
- .klen = 16,
- .input = { 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38 },
- .ilen = 8,
- .result = { 0x7e, 0x82, 0x12, 0xa1, 0Xd9, 0X5b, 0Xe4, 0Xf9 },
- .rlen = 8,
- }, {
- .key = { 0Xa2, 0Xa2, 0Xa2, 0Xa2, 0Xa2, 0Xa2, 0Xa2, 0Xa2,
- 0Xa2, 0Xa2, 0Xa2, 0Xa2, 0Xa2, 0Xa2, 0Xa2, 0Xa2 },
- .klen = 16,
- .input = { 0Xa2, 0Xa2, 0Xa2, 0Xa2, 0Xa2, 0Xa2, 0Xa2, 0Xa2 },
- .ilen = 8,
- .result = { 0Xaa, 0Xbe, 0Xc1, 0X95, 0Xc5, 0X94, 0X1a, 0X9c },
- .rlen = 8,
- }, {
- .key = { 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f,
- 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f },
- .klen = 16,
- .input = { 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f },
- .ilen = 8,
- .result = { 0X04, 0X74, 0Xf5, 0X70, 0X50, 0X16, 0Xd3, 0Xb8 },
- .rlen = 8,
- }, {
- .key = { 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f,
- 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f },
- .klen = 16,
- .input = { 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f ,
- 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f },
- .ilen = 16,
- .result = { 0X04, 0X74, 0Xf5, 0X70, 0X50, 0X16, 0Xd3, 0Xb8 ,
- 0X04, 0X74, 0Xf5, 0X70, 0X50, 0X16, 0Xd3, 0Xb8 },
- .rlen = 16,
- },
-};
-
-struct cipher_testvec khazad_dec_tv_template[] = {
- {
- .key = { 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
- .klen = 16,
- .input = { 0X49, 0Xa4, 0Xce, 0X32, 0Xac, 0X19, 0X0e, 0X3f },
- .ilen = 8,
- .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
- .rlen = 8,
- }, {
- .key = { 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38,
- 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38 },
- .klen = 16,
- .input = { 0X7e, 0X82, 0X12, 0Xa1, 0Xd9, 0X5b, 0Xe4, 0Xf9 },
- .ilen = 8,
- .result = { 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38 },
- .rlen = 8,
- }, {
- .key = { 0Xa2, 0Xa2, 0Xa2, 0Xa2, 0Xa2, 0Xa2, 0Xa2, 0Xa2,
- 0Xa2, 0Xa2, 0Xa2, 0Xa2, 0Xa2, 0Xa2, 0Xa2, 0Xa2 },
- .klen = 16,
- .input = { 0Xaa, 0Xbe, 0Xc1, 0X95, 0Xc5, 0X94, 0X1a, 0X9c },
- .ilen = 8,
- .result = { 0Xa2, 0Xa2, 0Xa2, 0Xa2, 0Xa2, 0Xa2, 0Xa2, 0Xa2 },
- .rlen = 8,
- }, {
- .key = { 0x2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f,
- 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f },
- .klen = 16,
- .input = { 0X04, 0X74, 0Xf5, 0X70, 0X50, 0X16, 0Xd3, 0Xb8 },
- .ilen = 8,
- .result = { 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f },
- .rlen = 8,
- }, {
- .key = { 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f,
- 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f },
- .klen = 16,
- .input = { 0X04, 0X74, 0Xf5, 0X70, 0X50, 0X16, 0Xd3, 0Xb8 ,
- 0X04, 0X74, 0Xf5, 0X70, 0X50, 0X16, 0Xd3, 0Xb8 },
- .ilen = 16,
- .result = { 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f ,
- 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f },
- .rlen = 16,
- },
-};
/*
* Compression stuff.
obj-$(CONFIG_MCA) += mca/
obj-$(CONFIG_EISA) += eisa/
obj-$(CONFIG_CPU_FREQ) += cpufreq/
-obj-$(CONFIG_CRASH_DUMP) += dump/
obj-y += firmware/
acpi_set_register(ACPI_BITREG_RT_CLOCK_ENABLE, 1, ACPI_MTX_LOCK);
- *ppos += count;
+ file->f_pos += count;
result = 0;
end:
{
acpi_status status = AE_OK;
struct acpi_buffer dsdt = {ACPI_ALLOCATE_BUFFER, NULL};
- ssize_t res;
+ void *data = NULL;
+ size_t size = 0;
ACPI_FUNCTION_TRACE("acpi_system_read_dsdt");
if (ACPI_FAILURE(status))
return_VALUE(-ENODEV);
- res = simple_read_from_buffer(buffer, count, ppos,
- dsdt.pointer, dsdt.length);
+ if (*ppos < dsdt.length) {
+ data = dsdt.pointer + file->f_pos;
+ size = dsdt.length - file->f_pos;
+ if (size > count)
+ size = count;
+ if (copy_to_user(buffer, data, size)) {
+ acpi_os_free(dsdt.pointer);
+ return_VALUE(-EFAULT);
+ }
+ }
+
acpi_os_free(dsdt.pointer);
- return_VALUE(res);
+ *ppos += size;
+
+ return_VALUE(size);
}
{
acpi_status status = AE_OK;
struct acpi_buffer fadt = {ACPI_ALLOCATE_BUFFER, NULL};
- ssize_t res;
+ void *data = NULL;
+ size_t size = 0;
ACPI_FUNCTION_TRACE("acpi_system_read_fadt");
if (ACPI_FAILURE(status))
return_VALUE(-ENODEV);
- res = simple_read_from_buffer(buffer, count, ppos,
- fadt.pointer, fadt.length);
+ if (*ppos < fadt.length) {
+ data = fadt.pointer + file->f_pos;
+ size = fadt.length - file->f_pos;
+ if (size > count)
+ size = count;
+ if (copy_to_user(buffer, data, size)) {
+ acpi_os_free(fadt.pointer);
+ return_VALUE(-EFAULT);
+ }
+ }
+
acpi_os_free(fadt.pointer);
- return_VALUE(res);
+ *ppos += size;
+
+ return_VALUE(size);
}
if (alignment <= 0x10) {
t = kmalloc (size, flags);
- if ((unsigned long)t & (alignment-1)) {
+ if ((unsigned int)t & (alignment-1)) {
printk ("Kmalloc doesn't align things correctly! %p\n", t);
kfree (t);
return aligned_kmalloc (size, flags, alignment * 4);
#endif
-static const struct atmdev_ops fore200e_ops;
-static const struct fore200e_bus fore200e_bus[];
+extern const struct atmdev_ops fore200e_ops;
+extern const struct fore200e_bus fore200e_bus[];
static struct fore200e* fore200e_boards = NULL;
#define IF_IADBG_SUNI_STAT 0x02000000 // suni statistics
#define IF_IADBG_RESET 0x04000000
+extern unsigned int IADebugFlag;
+
#define IF_IADBG(f) if (IADebugFlag & (f))
#ifdef CONFIG_ATM_IA_DEBUG /* Debug build */
menu "Generic Driver Options"
-config STANDALONE
- bool "Select only drivers that don't need compile-time external firmware" if EXPERIMENTAL
- default y
- help
- Select this option if you don't have magic firmware for drivers that
- need it.
-
- If unsure, say Y.
-
config PREVENT_FIRMWARE_BUILD
bool "Prevent firmware from being built"
default y
Say Y here to support the SWIM (Super Woz Integrated Machine) IOP
floppy controller on the Macintosh IIfx and Quadra 900/950.
-config MAC_FLOPPY
- tristate "Support for PowerMac floppy"
- depends on PPC_PMAC && !PPC_PMAC64
- help
- If you have a SWIM-3 (Super Woz Integrated Machine 3; from Apple)
- floppy controller, say Y here. Most commonly found in PowerMacs.
-
config BLK_DEV_PS2
tristate "PS/2 ESDI hard disk support"
depends on MCA && MCA_LEGACY
# kblockd threads
#
-obj-y := elevator.o ll_rw_blk.o ioctl.o genhd.o scsi_ioctl.o ckrm-iostub.o
+obj-y := elevator.o ll_rw_blk.o ioctl.o genhd.o scsi_ioctl.o
obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o
obj-$(CONFIG_IOSCHED_AS) += as-iosched.o
obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o
obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o
-obj-$(CONFIG_CKRM_RES_BLKIO) += ckrm-io.o
obj-$(CONFIG_MAC_FLOPPY) += swim3.o
obj-$(CONFIG_BLK_DEV_FD) += floppy.o
obj-$(CONFIG_BLK_DEV_FD98) += floppy98.o
static void fd_deselect( void );
static void fd_motor_off_timer( unsigned long dummy );
static void check_change( unsigned long dummy );
+static __inline__ void set_head_settle_flag( void );
+static __inline__ int get_head_settle_flag( void );
static irqreturn_t floppy_irq (int irq, void *dummy, struct pt_regs *fp);
static void fd_error( void );
static int do_format(int drive, int type, struct atari_format_descr *desc);
static void fd_times_out( unsigned long dummy );
static void finish_fdc( void );
static void finish_fdc_done( int dummy );
+static __inline__ void copy_buffer( void *from, void *to);
static void setup_req_params( int drive );
static void redo_fd_request( void);
static int fd_ioctl( struct inode *inode, struct file *filp, unsigned int
static struct timer_list fd_timer =
TIMER_INITIALIZER(check_change, 0, 0);
-static inline void start_motor_off_timer(void)
+static inline void
+start_motor_off_timer(void)
{
mod_timer(&motor_off_timer, jiffies + FD_MOTOR_OFF_DELAY);
MotorOffTrys = 0;
}
-static inline void start_check_change_timer( void )
+static inline void
+start_check_change_timer( void )
{
mod_timer(&fd_timer, jiffies + CHECK_CHANGE_DELAY);
}
-static inline void start_timeout(void)
+static inline void
+start_timeout(void)
{
mod_timer(&timeout_timer, jiffies + FLOPPY_TIMEOUT);
}
-static inline void stop_timeout(void)
+static inline void
+stop_timeout(void)
{
del_timer(&timeout_timer);
}
* seek operation, because we don't use seeks with verify.
*/
-static inline void set_head_settle_flag(void)
+static __inline__ void set_head_settle_flag( void )
{
HeadSettleFlag = FDCCMDADD_E;
}
-static inline int get_head_settle_flag(void)
+static __inline__ int get_head_settle_flag( void )
{
int tmp = HeadSettleFlag;
HeadSettleFlag = 0;
return( tmp );
}
-static inline void copy_buffer(void *from, void *to)
-{
- ulong *p1 = (ulong *)from, *p2 = (ulong *)to;
- int cnt;
-
- for (cnt = 512/4; cnt; cnt--)
- *p2++ = *p1++;
-}
-
return 0;
}
+static __inline__ void copy_buffer(void *from, void *to)
+{
+ ulong *p1 = (ulong *)from, *p2 = (ulong *)to;
+ int cnt;
+
+ for( cnt = 512/4; cnt; cnt-- )
+ *p2++ = *p1++;
+}
+
/* This sets up the global variables describing the current request. */
#include <linux/init.h>
#include <linux/hdreg.h>
#include <linux/spinlock.h>
-#include <linux/compat.h>
#include <asm/uaccess.h>
#include <asm/io.h>
int cciss_ioctl32_passthru(unsigned int fd, unsigned cmd, unsigned long arg,
struct file *file)
{
- IOCTL32_Command_struct __user *arg32 =
- (IOCTL32_Command_struct __user *) arg;
+ IOCTL32_Command_struct *arg32 =
+ (IOCTL32_Command_struct *) arg;
IOCTL_Command_struct arg64;
- IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
+ mm_segment_t old_fs;
int err;
- u32 cp;
+ unsigned long cp;
err = 0;
err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, sizeof(arg64.LUN_info));
err |= copy_from_user(&arg64.error_info, &arg32->error_info, sizeof(arg64.error_info));
err |= get_user(arg64.buf_size, &arg32->buf_size);
err |= get_user(cp, &arg32->buf);
- arg64.buf = compat_ptr(cp);
- err |= copy_to_user(p, &arg64, sizeof(arg64));
+ arg64.buf = (BYTE *)cp;
if (err)
return -EFAULT;
- err = sys_ioctl(fd, CCISS_PASSTHRU, (unsigned long) p);
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ err = sys_ioctl(fd, CCISS_PASSTHRU, (unsigned long) &arg64);
+ set_fs(old_fs);
if (err)
return err;
- err |= copy_in_user(&arg32->error_info, &p->error_info, sizeof(&arg32->error_info));
+ err |= copy_to_user(&arg32->error_info, &arg64.error_info, sizeof(&arg32->error_info));
if (err)
return -EFAULT;
return err;
}
-
int cciss_ioctl32_big_passthru(unsigned int fd, unsigned cmd, unsigned long arg,
struct file *file)
{
- BIG_IOCTL32_Command_struct __user *arg32 =
- (BIG_IOCTL32_Command_struct __user *) arg;
+ BIG_IOCTL32_Command_struct *arg32 =
+ (BIG_IOCTL32_Command_struct *) arg;
BIG_IOCTL_Command_struct arg64;
- BIG_IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
+ mm_segment_t old_fs;
int err;
- u32 cp;
+ unsigned long cp;
err = 0;
err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, sizeof(arg64.LUN_info));
err |= get_user(arg64.buf_size, &arg32->buf_size);
err |= get_user(arg64.malloc_size, &arg32->malloc_size);
err |= get_user(cp, &arg32->buf);
- arg64.buf = compat_ptr(cp);
- err |= copy_to_user(p, &arg64, sizeof(arg64));
+ arg64.buf = (BYTE *)cp;
if (err)
return -EFAULT;
- err = sys_ioctl(fd, CCISS_BIG_PASSTHRU, (unsigned long) p);
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ err = sys_ioctl(fd, CCISS_BIG_PASSTHRU, (unsigned long) &arg64);
+ set_fs(old_fs);
if (err)
return err;
- err |= copy_in_user(&arg32->error_info, &p->error_info, sizeof(&arg32->error_info));
+ err |= copy_to_user(&arg32->error_info, &arg64.error_info, sizeof(&arg32->error_info));
if (err)
return -EFAULT;
return err;
+++ /dev/null
-/*
- * linux/drivers/block/cfq-iosched.c
- *
- * CFQ, or complete fairness queueing, disk scheduler.
- *
- * Based on ideas from a previously unfinished io
- * scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
- *
- * Copyright (C) 2003 Jens Axboe <axboe@suse.de>
- */
-#include <linux/kernel.h>
-#include <linux/fs.h>
-#include <linux/blkdev.h>
-#include <linux/elevator.h>
-#include <linux/bio.h>
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/compiler.h>
-#include <linux/hash.h>
-#include <linux/rbtree.h>
-#include <linux/mempool.h>
-
-/*
- * tunables
- */
-static int cfq_quantum = 4;
-static int cfq_queued = 8;
-
-#define CFQ_QHASH_SHIFT 6
-#define CFQ_QHASH_ENTRIES (1 << CFQ_QHASH_SHIFT)
-#define list_entry_qhash(entry) list_entry((entry), struct cfq_queue, cfq_hash)
-
-#define CFQ_MHASH_SHIFT 8
-#define CFQ_MHASH_BLOCK(sec) ((sec) >> 3)
-#define CFQ_MHASH_ENTRIES (1 << CFQ_MHASH_SHIFT)
-#define CFQ_MHASH_FN(sec) (hash_long(CFQ_MHASH_BLOCK((sec)),CFQ_MHASH_SHIFT))
-#define ON_MHASH(crq) !list_empty(&(crq)->hash)
-#define rq_hash_key(rq) ((rq)->sector + (rq)->nr_sectors)
-#define list_entry_hash(ptr) list_entry((ptr), struct cfq_rq, hash)
-
-#define list_entry_cfqq(ptr) list_entry((ptr), struct cfq_queue, cfq_list)
-
-#define RQ_DATA(rq) ((struct cfq_rq *) (rq)->elevator_private)
-
-static kmem_cache_t *crq_pool;
-static kmem_cache_t *cfq_pool;
-static mempool_t *cfq_mpool;
-
-struct cfq_data {
- struct list_head rr_list;
- struct list_head *dispatch;
- struct list_head *cfq_hash;
-
- struct list_head *crq_hash;
-
- unsigned int busy_queues;
- unsigned int max_queued;
-
- mempool_t *crq_pool;
-};
-
-struct cfq_queue {
- struct list_head cfq_hash;
- struct list_head cfq_list;
- struct rb_root sort_list;
- int pid;
- int queued[2];
-#if 0
- /*
- * with a simple addition like this, we can do io priorities. almost.
- * does need a split request free list, too.
- */
- int io_prio
-#endif
-};
-
-struct cfq_rq {
- struct rb_node rb_node;
- sector_t rb_key;
-
- struct request *request;
-
- struct cfq_queue *cfq_queue;
-
- struct list_head hash;
-};
-
-static void cfq_put_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq);
-static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *cfqd, int pid);
-static void cfq_dispatch_sort(struct list_head *head, struct cfq_rq *crq);
-
-/*
- * lots of deadline iosched dupes, can be abstracted later...
- */
-static inline void __cfq_del_crq_hash(struct cfq_rq *crq)
-{
- list_del_init(&crq->hash);
-}
-
-static inline void cfq_del_crq_hash(struct cfq_rq *crq)
-{
- if (ON_MHASH(crq))
- __cfq_del_crq_hash(crq);
-}
-
-static void cfq_remove_merge_hints(request_queue_t *q, struct cfq_rq *crq)
-{
- cfq_del_crq_hash(crq);
-
- if (q->last_merge == crq->request)
- q->last_merge = NULL;
-}
-
-static inline void cfq_add_crq_hash(struct cfq_data *cfqd, struct cfq_rq *crq)
-{
- struct request *rq = crq->request;
-
- BUG_ON(ON_MHASH(crq));
-
- list_add(&crq->hash, &cfqd->crq_hash[CFQ_MHASH_FN(rq_hash_key(rq))]);
-}
-
-static struct request *cfq_find_rq_hash(struct cfq_data *cfqd, sector_t offset)
-{
- struct list_head *hash_list = &cfqd->crq_hash[CFQ_MHASH_FN(offset)];
- struct list_head *entry, *next = hash_list->next;
-
- while ((entry = next) != hash_list) {
- struct cfq_rq *crq = list_entry_hash(entry);
- struct request *__rq = crq->request;
-
- next = entry->next;
-
- BUG_ON(!ON_MHASH(crq));
-
- if (!rq_mergeable(__rq)) {
- __cfq_del_crq_hash(crq);
- continue;
- }
-
- if (rq_hash_key(__rq) == offset)
- return __rq;
- }
-
- return NULL;
-}
-
-/*
- * rb tree support functions
- */
-#define RB_NONE (2)
-#define RB_EMPTY(node) ((node)->rb_node == NULL)
-#define RB_CLEAR(node) ((node)->rb_color = RB_NONE)
-#define RB_CLEAR_ROOT(root) ((root)->rb_node = NULL)
-#define ON_RB(node) ((node)->rb_color != RB_NONE)
-#define rb_entry_crq(node) rb_entry((node), struct cfq_rq, rb_node)
-#define rq_rb_key(rq) (rq)->sector
-
-static inline void cfq_del_crq_rb(struct cfq_queue *cfqq, struct cfq_rq *crq)
-{
- if (ON_RB(&crq->rb_node)) {
- cfqq->queued[rq_data_dir(crq->request)]--;
- rb_erase(&crq->rb_node, &cfqq->sort_list);
- crq->cfq_queue = NULL;
- }
-}
-
-static struct cfq_rq *
-__cfq_add_crq_rb(struct cfq_queue *cfqq, struct cfq_rq *crq)
-{
- struct rb_node **p = &cfqq->sort_list.rb_node;
- struct rb_node *parent = NULL;
- struct cfq_rq *__crq;
-
- while (*p) {
- parent = *p;
- __crq = rb_entry_crq(parent);
-
- if (crq->rb_key < __crq->rb_key)
- p = &(*p)->rb_left;
- else if (crq->rb_key > __crq->rb_key)
- p = &(*p)->rb_right;
- else
- return __crq;
- }
-
- rb_link_node(&crq->rb_node, parent, p);
- return 0;
-}
-
-static void
-cfq_add_crq_rb(struct cfq_data *cfqd, struct cfq_queue *cfqq,struct cfq_rq *crq)
-{
- struct request *rq = crq->request;
- struct cfq_rq *__alias;
-
- crq->rb_key = rq_rb_key(rq);
- cfqq->queued[rq_data_dir(rq)]++;
-retry:
- __alias = __cfq_add_crq_rb(cfqq, crq);
- if (!__alias) {
- rb_insert_color(&crq->rb_node, &cfqq->sort_list);
- crq->cfq_queue = cfqq;
- return;
- }
-
- cfq_del_crq_rb(cfqq, __alias);
- cfq_dispatch_sort(cfqd->dispatch, __alias);
- goto retry;
-}
-
-static struct request *
-cfq_find_rq_rb(struct cfq_data *cfqd, sector_t sector)
-{
- struct cfq_queue *cfqq = cfq_find_cfq_hash(cfqd, current->tgid);
- struct rb_node *n;
-
- if (!cfqq)
- goto out;
-
- n = cfqq->sort_list.rb_node;
- while (n) {
- struct cfq_rq *crq = rb_entry_crq(n);
-
- if (sector < crq->rb_key)
- n = n->rb_left;
- else if (sector > crq->rb_key)
- n = n->rb_right;
- else
- return crq->request;
- }
-
-out:
- return NULL;
-}
-
-static void cfq_remove_request(request_queue_t *q, struct request *rq)
-{
- struct cfq_data *cfqd = q->elevator.elevator_data;
- struct cfq_rq *crq = RQ_DATA(rq);
-
- if (crq) {
- struct cfq_queue *cfqq = crq->cfq_queue;
-
- cfq_remove_merge_hints(q, crq);
- list_del_init(&rq->queuelist);
-
- if (cfqq) {
- cfq_del_crq_rb(cfqq, crq);
-
- if (RB_EMPTY(&cfqq->sort_list))
- cfq_put_queue(cfqd, cfqq);
- }
- }
-}
-
-static int
-cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
-{
- struct cfq_data *cfqd = q->elevator.elevator_data;
- struct request *__rq;
- int ret;
-
- ret = elv_try_last_merge(q, bio);
- if (ret != ELEVATOR_NO_MERGE) {
- __rq = q->last_merge;
- goto out_insert;
- }
-
- __rq = cfq_find_rq_hash(cfqd, bio->bi_sector);
- if (__rq) {
- BUG_ON(__rq->sector + __rq->nr_sectors != bio->bi_sector);
-
- if (elv_rq_merge_ok(__rq, bio)) {
- ret = ELEVATOR_BACK_MERGE;
- goto out;
- }
- }
-
- __rq = cfq_find_rq_rb(cfqd, bio->bi_sector + bio_sectors(bio));
- if (__rq) {
- if (elv_rq_merge_ok(__rq, bio)) {
- ret = ELEVATOR_FRONT_MERGE;
- goto out;
- }
- }
-
- return ELEVATOR_NO_MERGE;
-out:
- q->last_merge = __rq;
-out_insert:
- *req = __rq;
- return ret;
-}
-
-static void cfq_merged_request(request_queue_t *q, struct request *req)
-{
- struct cfq_data *cfqd = q->elevator.elevator_data;
- struct cfq_rq *crq = RQ_DATA(req);
-
- cfq_del_crq_hash(crq);
- cfq_add_crq_hash(cfqd, crq);
-
- if (ON_RB(&crq->rb_node) && (rq_rb_key(req) != crq->rb_key)) {
- struct cfq_queue *cfqq = crq->cfq_queue;
-
- cfq_del_crq_rb(cfqq, crq);
- cfq_add_crq_rb(cfqd, cfqq, crq);
- }
-
- q->last_merge = req;
-}
-
-static void
-cfq_merged_requests(request_queue_t *q, struct request *req,
- struct request *next)
-{
- cfq_merged_request(q, req);
- cfq_remove_request(q, next);
-}
-
-static void cfq_dispatch_sort(struct list_head *head, struct cfq_rq *crq)
-{
- struct list_head *entry = head;
- struct request *__rq;
-
- if (!list_empty(head)) {
- __rq = list_entry_rq(head->next);
-
- if (crq->request->sector < __rq->sector) {
- entry = head->prev;
- goto link;
- }
- }
-
- while ((entry = entry->prev) != head) {
- __rq = list_entry_rq(entry);
-
- if (crq->request->sector <= __rq->sector)
- break;
- }
-
-link:
- list_add_tail(&crq->request->queuelist, entry);
-}
-
-static inline void
-__cfq_dispatch_requests(request_queue_t *q, struct cfq_data *cfqd,
- struct cfq_queue *cfqq)
-{
- struct cfq_rq *crq = rb_entry_crq(rb_first(&cfqq->sort_list));
-
- cfq_del_crq_rb(cfqq, crq);
- cfq_remove_merge_hints(q, crq);
- cfq_dispatch_sort(cfqd->dispatch, crq);
-}
-
-static int cfq_dispatch_requests(request_queue_t *q, struct cfq_data *cfqd)
-{
- struct cfq_queue *cfqq;
- struct list_head *entry, *tmp;
- int ret, queued, good_queues;
-
- if (list_empty(&cfqd->rr_list))
- return 0;
-
- queued = ret = 0;
-restart:
- good_queues = 0;
- list_for_each_safe(entry, tmp, &cfqd->rr_list) {
- cfqq = list_entry_cfqq(cfqd->rr_list.next);
-
- BUG_ON(RB_EMPTY(&cfqq->sort_list));
-
- __cfq_dispatch_requests(q, cfqd, cfqq);
-
- if (RB_EMPTY(&cfqq->sort_list))
- cfq_put_queue(cfqd, cfqq);
- else
- good_queues++;
-
- queued++;
- ret = 1;
- }
-
- if ((queued < cfq_quantum) && good_queues)
- goto restart;
-
- return ret;
-}
-
-static struct request *cfq_next_request(request_queue_t *q)
-{
- struct cfq_data *cfqd = q->elevator.elevator_data;
- struct request *rq;
-
- if (!list_empty(cfqd->dispatch)) {
- struct cfq_rq *crq;
-dispatch:
- rq = list_entry_rq(cfqd->dispatch->next);
-
- crq = RQ_DATA(rq);
- if (crq)
- cfq_remove_merge_hints(q, crq);
-
- return rq;
- }
-
- if (cfq_dispatch_requests(q, cfqd))
- goto dispatch;
-
- return NULL;
-}
-
-static inline struct cfq_queue *
-__cfq_find_cfq_hash(struct cfq_data *cfqd, int pid, const int hashval)
-{
- struct list_head *hash_list = &cfqd->cfq_hash[hashval];
- struct list_head *entry;
-
- list_for_each(entry, hash_list) {
- struct cfq_queue *__cfqq = list_entry_qhash(entry);
-
- if (__cfqq->pid == pid)
- return __cfqq;
- }
-
- return NULL;
-}
-
-static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *cfqd, int pid)
-{
- const int hashval = hash_long(current->tgid, CFQ_QHASH_SHIFT);
-
- return __cfq_find_cfq_hash(cfqd, pid, hashval);
-}
-
-static void cfq_put_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
-{
- cfqd->busy_queues--;
- list_del(&cfqq->cfq_list);
- list_del(&cfqq->cfq_hash);
- mempool_free(cfqq, cfq_mpool);
-}
-
-static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, int pid)
-{
- const int hashval = hash_long(current->tgid, CFQ_QHASH_SHIFT);
- struct cfq_queue *cfqq = __cfq_find_cfq_hash(cfqd, pid, hashval);
-
- if (!cfqq) {
- cfqq = mempool_alloc(cfq_mpool, GFP_NOIO);
-
- INIT_LIST_HEAD(&cfqq->cfq_hash);
- INIT_LIST_HEAD(&cfqq->cfq_list);
- RB_CLEAR_ROOT(&cfqq->sort_list);
-
- cfqq->pid = pid;
- cfqq->queued[0] = cfqq->queued[1] = 0;
- list_add(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
- }
-
- return cfqq;
-}
-
-static void cfq_enqueue(struct cfq_data *cfqd, struct cfq_rq *crq)
-{
- struct cfq_queue *cfqq;
-
- cfqq = cfq_get_queue(cfqd, current->tgid);
-
- cfq_add_crq_rb(cfqd, cfqq, crq);
-
- if (list_empty(&cfqq->cfq_list)) {
- list_add(&cfqq->cfq_list, &cfqd->rr_list);
- cfqd->busy_queues++;
- }
-}
-
-static void
-cfq_insert_request(request_queue_t *q, struct request *rq, int where)
-{
- struct cfq_data *cfqd = q->elevator.elevator_data;
- struct cfq_rq *crq = RQ_DATA(rq);
-
- switch (where) {
- case ELEVATOR_INSERT_BACK:
- while (cfq_dispatch_requests(q, cfqd))
- ;
- list_add_tail(&rq->queuelist, cfqd->dispatch);
- break;
- case ELEVATOR_INSERT_FRONT:
- list_add(&rq->queuelist, cfqd->dispatch);
- break;
- case ELEVATOR_INSERT_SORT:
- BUG_ON(!blk_fs_request(rq));
- cfq_enqueue(cfqd, crq);
- break;
- default:
- printk("%s: bad insert point %d\n", __FUNCTION__,where);
- return;
- }
-
- if (rq_mergeable(rq)) {
- cfq_add_crq_hash(cfqd, crq);
-
- if (!q->last_merge)
- q->last_merge = rq;
- }
-}
-
-static int cfq_queue_empty(request_queue_t *q)
-{
- struct cfq_data *cfqd = q->elevator.elevator_data;
-
- if (list_empty(cfqd->dispatch) && list_empty(&cfqd->rr_list))
- return 1;
-
- return 0;
-}
-
-static struct request *
-cfq_former_request(request_queue_t *q, struct request *rq)
-{
- struct cfq_rq *crq = RQ_DATA(rq);
- struct rb_node *rbprev = rb_prev(&crq->rb_node);
-
- if (rbprev)
- return rb_entry_crq(rbprev)->request;
-
- return NULL;
-}
-
-static struct request *
-cfq_latter_request(request_queue_t *q, struct request *rq)
-{
- struct cfq_rq *crq = RQ_DATA(rq);
- struct rb_node *rbnext = rb_next(&crq->rb_node);
-
- if (rbnext)
- return rb_entry_crq(rbnext)->request;
-
- return NULL;
-}
-
-static int cfq_may_queue(request_queue_t *q, int rw)
-{
- struct cfq_data *cfqd = q->elevator.elevator_data;
- struct cfq_queue *cfqq;
- int ret = 1;
-
- if (!cfqd->busy_queues)
- goto out;
-
- cfqq = cfq_find_cfq_hash(cfqd, current->tgid);
- if (cfqq) {
- int limit = (q->nr_requests - cfq_queued) / cfqd->busy_queues;
-
- if (limit < 3)
- limit = 3;
- else if (limit > cfqd->max_queued)
- limit = cfqd->max_queued;
-
- if (cfqq->queued[rw] > limit)
- ret = 0;
- }
-out:
- return ret;
-}
-
-static void cfq_put_request(request_queue_t *q, struct request *rq)
-{
- struct cfq_data *cfqd = q->elevator.elevator_data;
- struct cfq_rq *crq = RQ_DATA(rq);
-
- if (crq) {
- BUG_ON(q->last_merge == rq);
- BUG_ON(ON_MHASH(crq));
-
- mempool_free(crq, cfqd->crq_pool);
- rq->elevator_private = NULL;
- }
-}
-
-static int cfq_set_request(request_queue_t *q, struct request *rq, int gfp_mask)
-{
- struct cfq_data *cfqd = q->elevator.elevator_data;
- struct cfq_rq *crq = mempool_alloc(cfqd->crq_pool, gfp_mask);
-
- if (crq) {
- RB_CLEAR(&crq->rb_node);
- crq->request = rq;
- crq->cfq_queue = NULL;
- INIT_LIST_HEAD(&crq->hash);
- rq->elevator_private = crq;
- return 0;
- }
-
- return 1;
-}
-
-static void cfq_exit(request_queue_t *q, elevator_t *e)
-{
- struct cfq_data *cfqd = e->elevator_data;
-
- e->elevator_data = NULL;
- mempool_destroy(cfqd->crq_pool);
- kfree(cfqd->crq_hash);
- kfree(cfqd->cfq_hash);
- kfree(cfqd);
-}
-
-static int cfq_init(request_queue_t *q, elevator_t *e)
-{
- struct cfq_data *cfqd;
- int i;
-
- cfqd = kmalloc(sizeof(*cfqd), GFP_KERNEL);
- if (!cfqd)
- return -ENOMEM;
-
- memset(cfqd, 0, sizeof(*cfqd));
- INIT_LIST_HEAD(&cfqd->rr_list);
-
- cfqd->crq_hash = kmalloc(sizeof(struct list_head) * CFQ_MHASH_ENTRIES, GFP_KERNEL);
- if (!cfqd->crq_hash)
- goto out_crqhash;
-
- cfqd->cfq_hash = kmalloc(sizeof(struct list_head) * CFQ_QHASH_ENTRIES, GFP_KERNEL);
- if (!cfqd->cfq_hash)
- goto out_cfqhash;
-
- cfqd->crq_pool = mempool_create(BLKDEV_MIN_RQ, mempool_alloc_slab, mempool_free_slab, crq_pool);
- if (!cfqd->crq_pool)
- goto out_crqpool;
-
- for (i = 0; i < CFQ_MHASH_ENTRIES; i++)
- INIT_LIST_HEAD(&cfqd->crq_hash[i]);
- for (i = 0; i < CFQ_QHASH_ENTRIES; i++)
- INIT_LIST_HEAD(&cfqd->cfq_hash[i]);
-
- cfqd->dispatch = &q->queue_head;
- e->elevator_data = cfqd;
-
- /*
- * just set it to some high value, we want anyone to be able to queue
- * some requests. fairness is handled differently
- */
- cfqd->max_queued = q->nr_requests;
- q->nr_requests = 8192;
-
- return 0;
-out_crqpool:
- kfree(cfqd->cfq_hash);
-out_cfqhash:
- kfree(cfqd->crq_hash);
-out_crqhash:
- kfree(cfqd);
- return -ENOMEM;
-}
-
-static int __init cfq_slab_setup(void)
-{
- crq_pool = kmem_cache_create("crq_pool", sizeof(struct cfq_rq), 0, 0,
- NULL, NULL);
-
- if (!crq_pool)
- panic("cfq_iosched: can't init crq pool\n");
-
- cfq_pool = kmem_cache_create("cfq_pool", sizeof(struct cfq_queue), 0, 0,
- NULL, NULL);
-
- if (!cfq_pool)
- panic("cfq_iosched: can't init cfq pool\n");
-
- cfq_mpool = mempool_create(64, mempool_alloc_slab, mempool_free_slab, cfq_pool);
-
- if (!cfq_mpool)
- panic("cfq_iosched: can't init cfq mpool\n");
-
- return 0;
-}
-
-subsys_initcall(cfq_slab_setup);
-
-elevator_t iosched_cfq = {
- .elevator_name = "cfq",
- .elevator_merge_fn = cfq_merge,
- .elevator_merged_fn = cfq_merged_request,
- .elevator_merge_req_fn = cfq_merged_requests,
- .elevator_next_req_fn = cfq_next_request,
- .elevator_add_req_fn = cfq_insert_request,
- .elevator_remove_req_fn = cfq_remove_request,
- .elevator_queue_empty_fn = cfq_queue_empty,
- .elevator_former_req_fn = cfq_former_request,
- .elevator_latter_req_fn = cfq_latter_request,
- .elevator_set_req_fn = cfq_set_request,
- .elevator_put_req_fn = cfq_put_request,
- .elevator_may_queue_fn = cfq_may_queue,
- .elevator_init_fn = cfq_init,
- .elevator_exit_fn = cfq_exit,
-};
-
-EXPORT_SYMBOL(iosched_cfq);
* Based on ideas from a previously unfinished io
* scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
*
- * IO priorities are supported, from 0% to 100% in 5% increments. Both of
- * those values have special meaning - 0% class is allowed to do io if
- * noone else wants to use the disk. 100% is considered real-time io, and
- * always get priority. Default process io rate is 95%. In absence of other
- * io, a class may consume 100% disk bandwidth regardless. Withing a class,
- * bandwidth is distributed equally among the citizens.
- *
- * TODO:
- * - cfq_select_requests() needs some work for 5-95% io
- * - barriers not supported
- * - export grace periods in ms, not jiffies
- *
* Copyright (C) 2003 Jens Axboe <axboe@suse.de>
*/
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/rbtree.h>
#include <linux/mempool.h>
-#include <asm/div64.h>
-
-#if IOPRIO_NR > BITS_PER_LONG
-#error Cannot support this many io priority levels
-#endif
-
-#define LIMIT_DEBUG 1
/*
* tunables
*/
-static int cfq_quantum = 6;
-static int cfq_quantum_io = 256;
-static int cfq_idle_quantum = 1;
-static int cfq_idle_quantum_io = 64;
-static int cfq_queued = 4;
-static int cfq_grace_rt = HZ / 100 ?: 1;
-static int cfq_grace_idle = HZ / 10;
+static int cfq_quantum = 4;
+static int cfq_queued = 8;
#define CFQ_QHASH_SHIFT 6
#define CFQ_QHASH_ENTRIES (1 << CFQ_QHASH_SHIFT)
-#define list_entry_qhash(entry) hlist_entry((entry), struct cfq_queue, cfq_hash)
+#define list_entry_qhash(entry) list_entry((entry), struct cfq_queue, cfq_hash)
#define CFQ_MHASH_SHIFT 8
#define CFQ_MHASH_BLOCK(sec) ((sec) >> 3)
#define CFQ_MHASH_ENTRIES (1 << CFQ_MHASH_SHIFT)
#define CFQ_MHASH_FN(sec) (hash_long(CFQ_MHASH_BLOCK((sec)),CFQ_MHASH_SHIFT))
+#define ON_MHASH(crq) !list_empty(&(crq)->hash)
#define rq_hash_key(rq) ((rq)->sector + (rq)->nr_sectors)
-#define list_entry_hash(ptr) hlist_entry((ptr), struct cfq_rq, hash)
+#define list_entry_hash(ptr) list_entry((ptr), struct cfq_rq, hash)
#define list_entry_cfqq(ptr) list_entry((ptr), struct cfq_queue, cfq_list)
-#define list_entry_prio(ptr) list_entry((ptr), struct cfq_rq, prio_list)
-
-#define cfq_account_io(crq) \
- ((crq)->ioprio != IOPRIO_IDLE && (crq)->ioprio != IOPRIO_RT)
-
-/* define to be 50 ms for now; make tunable later */
-#define CFQ_EPOCH 50000
-/* Needs to be made tunable right away, in MiB/s */
-#define CFQ_DISKBW 10
-/* Temporary global limit, as percent of available b/w, for each "class" */
-#define CFQ_TEMPLIM 10
-
-/*
- * defines how we distribute bandwidth (can be tgid, uid, etc)
- */
-
-/* FIXME: change hash_key to be sizeof(void *) rather than sizeof(int)
- * otherwise the cast of cki_tsk_icls will not work reliably on 64-bit arches.
- * OR, change cki_tsk_icls to return ints (will need another id space to be
- * managed)
- */
-
-#if defined(CONFIG_CKRM_RES_BLKIO) || defined(CONFIG_CKRM_RES_BLKIO_MODULE)
-extern inline void *cki_hash_key(struct task_struct *tsk);
-extern inline int cki_ioprio(struct task_struct *tsk);
-#define cfq_hash_key(current) ((int)cki_hash_key((current)))
-#define cfq_ioprio(current) (cki_ioprio((current)))
-
-#else
-#define cfq_hash_key(current) ((current)->tgid)
-
-/*
- * move to io_context
- */
-#define cfq_ioprio(current) ((current)->ioprio)
-#endif
-#define CFQ_WAIT_RT 0
-#define CFQ_WAIT_NORM 1
+#define RQ_DATA(rq) ((struct cfq_rq *) (rq)->elevator_private)
static kmem_cache_t *crq_pool;
static kmem_cache_t *cfq_pool;
static mempool_t *cfq_mpool;
-/*
- * defines an io priority level
- */
-struct io_prio_data {
- struct list_head rr_list;
- int busy_queues;
- int busy_rq;
- unsigned long busy_sectors;
-
- /* requests, sectors and queues
- * added(in),dispatched/deleted(out)
- * at this priority level.
- */
- atomic_t cum_rq_in,cum_rq_out;
- atomic_t cum_sectors_in,cum_sectors_out;
- atomic_t cum_queues_in,cum_queues_out;
-
-#ifdef LIMIT_DEBUG
- int nskip;
- unsigned long navsec;
- unsigned long csectorate;
- unsigned long lsectorate;
-#endif
-
- struct list_head prio_list;
- int last_rq;
- int last_sectors;
-};
-
-/*
- * per-request queue structure
- */
struct cfq_data {
struct list_head rr_list;
struct list_head *dispatch;
- struct hlist_head *cfq_hash;
- struct hlist_head *crq_hash;
- mempool_t *crq_pool;
+ struct list_head *cfq_hash;
- struct io_prio_data cid[IOPRIO_NR];
+ struct list_head *crq_hash;
- /*
- * total number of busy queues and requests
- */
- int busy_rq;
- int busy_queues;
- unsigned long busy_sectors;
+ unsigned int busy_queues;
+ unsigned int max_queued;
+ mempool_t *crq_pool;
request_queue_t *queue;
- unsigned long rq_starved_mask;
-
- /*
- * grace period handling
- */
- struct timer_list timer;
- unsigned long wait_end;
- unsigned long flags;
- struct work_struct work;
/*
* tunables
*/
unsigned int cfq_quantum;
- unsigned int cfq_quantum_io;
- unsigned int cfq_idle_quantum;
- unsigned int cfq_idle_quantum_io;
unsigned int cfq_queued;
- unsigned int cfq_grace_rt;
- unsigned int cfq_grace_idle;
-
- unsigned long cfq_epoch; /* duration for limit enforcement */
- unsigned long cfq_epochsectors; /* max sectors dispatchable/epoch */
};
-/*
- * per-class structure
- */
struct cfq_queue {
+ struct list_head cfq_hash;
struct list_head cfq_list;
- struct hlist_node cfq_hash;
- int hash_key;
struct rb_root sort_list;
+ int pid;
int queued[2];
- int ioprio;
-
- unsigned long avsec; /* avg sectors dispatched/epoch */
- unsigned long long lastime; /* timestamp of last request served */
- unsigned long sectorate; /* limit for sectors served/epoch */
- int skipped; /* queue skipped at last dispatch ? */
+#if 0
+ /*
+ * with a simple addition like this, we can do io priorities. almost.
+ * does need a split request free list, too.
+ */
+ int io_prio
+#endif
};
-/*
- * per-request structure
- */
struct cfq_rq {
- struct cfq_queue *cfq_queue;
struct rb_node rb_node;
- struct hlist_node hash;
sector_t rb_key;
struct request *request;
- struct list_head prio_list;
- unsigned long nr_sectors;
- int ioprio;
+
+ struct cfq_queue *cfq_queue;
+
+ struct list_head hash;
};
static void cfq_put_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq);
/*
* lots of deadline iosched dupes, can be abstracted later...
*/
+static inline void __cfq_del_crq_hash(struct cfq_rq *crq)
+{
+ list_del_init(&crq->hash);
+}
+
static inline void cfq_del_crq_hash(struct cfq_rq *crq)
{
- hlist_del_init(&crq->hash);
+ if (ON_MHASH(crq))
+ __cfq_del_crq_hash(crq);
}
-static inline void
-cfq_remove_merge_hints(request_queue_t *q, struct cfq_rq *crq)
+static void cfq_remove_merge_hints(request_queue_t *q, struct cfq_rq *crq)
{
cfq_del_crq_hash(crq);
static inline void cfq_add_crq_hash(struct cfq_data *cfqd, struct cfq_rq *crq)
{
struct request *rq = crq->request;
- const int hash_idx = CFQ_MHASH_FN(rq_hash_key(rq));
- BUG_ON(!hlist_unhashed(&crq->hash));
-
- hlist_add_head(&crq->hash, &cfqd->crq_hash[hash_idx]);
+ BUG_ON(ON_MHASH(crq));
+
+ list_add(&crq->hash, &cfqd->crq_hash[CFQ_MHASH_FN(rq_hash_key(rq))]);
}
static struct request *cfq_find_rq_hash(struct cfq_data *cfqd, sector_t offset)
{
- struct hlist_head *hash_list = &cfqd->crq_hash[CFQ_MHASH_FN(offset)];
- struct hlist_node *entry, *next;
+ struct list_head *hash_list = &cfqd->crq_hash[CFQ_MHASH_FN(offset)];
+ struct list_head *entry, *next = hash_list->next;
- hlist_for_each_safe(entry, next, hash_list) {
+ while ((entry = next) != hash_list) {
struct cfq_rq *crq = list_entry_hash(entry);
struct request *__rq = crq->request;
- BUG_ON(hlist_unhashed(&crq->hash));
+ next = entry->next;
+
+ BUG_ON(!ON_MHASH(crq));
if (!rq_mergeable(__rq)) {
- cfq_del_crq_hash(crq);
+ __cfq_del_crq_hash(crq);
continue;
}
/*
* rb tree support functions
*/
-#define RB_EMPTY(node) ((node)->rb_node == NULL)
+#define RB_NONE (2)
+#define RB_EMPTY(node) ((node)->rb_node == NULL)
+#define RB_CLEAR(node) ((node)->rb_color = RB_NONE)
+#define RB_CLEAR_ROOT(root) ((root)->rb_node = NULL)
+#define ON_RB(node) ((node)->rb_color != RB_NONE)
#define rb_entry_crq(node) rb_entry((node), struct cfq_rq, rb_node)
#define rq_rb_key(rq) (rq)->sector
-static void
-cfq_del_crq_rb(struct cfq_data *cfqd, struct cfq_queue *cfqq,struct cfq_rq *crq)
+static inline void cfq_del_crq_rb(struct cfq_queue *cfqq, struct cfq_rq *crq)
{
- if (crq->cfq_queue) {
- crq->cfq_queue = NULL;
-
- if (cfq_account_io(crq)) {
- cfqd->busy_rq--;
- cfqd->busy_sectors -= crq->nr_sectors;
- cfqd->cid[crq->ioprio].busy_rq--;
- cfqd->cid[crq->ioprio].busy_sectors -= crq->nr_sectors;
- }
- atomic_inc(&(cfqd->cid[crq->ioprio].cum_rq_out));
- atomic_add(crq->nr_sectors,
- &(cfqd->cid[crq->ioprio].cum_sectors_out));
+ if (ON_RB(&crq->rb_node)) {
cfqq->queued[rq_data_dir(crq->request)]--;
rb_erase(&crq->rb_node, &cfqq->sort_list);
+ crq->cfq_queue = NULL;
}
}
struct request *rq = crq->request;
struct cfq_rq *__alias;
-
+ crq->rb_key = rq_rb_key(rq);
cfqq->queued[rq_data_dir(rq)]++;
- if (cfq_account_io(crq)) {
- cfqd->busy_rq++;
- cfqd->busy_sectors += crq->nr_sectors;
- cfqd->cid[crq->ioprio].busy_rq++;
- cfqd->cid[crq->ioprio].busy_sectors += crq->nr_sectors;
- }
- atomic_inc(&(cfqd->cid[crq->ioprio].cum_rq_in));
- atomic_add(crq->nr_sectors,
- &(cfqd->cid[crq->ioprio].cum_sectors_in));
retry:
__alias = __cfq_add_crq_rb(cfqq, crq);
if (!__alias) {
rb_insert_color(&crq->rb_node, &cfqq->sort_list);
- crq->rb_key = rq_rb_key(rq);
crq->cfq_queue = cfqq;
return;
}
static struct request *
cfq_find_rq_rb(struct cfq_data *cfqd, sector_t sector)
{
- struct cfq_queue *cfqq = cfq_find_cfq_hash(cfqd, cfq_hash_key(current));
+ struct cfq_queue *cfqq = cfq_find_cfq_hash(cfqd, current->tgid);
struct rb_node *n;
if (!cfqq)
static void cfq_remove_request(request_queue_t *q, struct request *rq)
{
struct cfq_data *cfqd = q->elevator.elevator_data;
- struct cfq_rq *crq = RQ_ELV_DATA(rq);
+ struct cfq_rq *crq = RQ_DATA(rq);
if (crq) {
+ struct cfq_queue *cfqq = crq->cfq_queue;
cfq_remove_merge_hints(q, crq);
- list_del_init(&crq->prio_list);
list_del_init(&rq->queuelist);
- /*
- * set a grace period timer to allow realtime io to make real
- * progress, if we release an rt request. for normal request,
- * set timer so idle io doesn't interfere with other io
- */
- if (crq->ioprio == IOPRIO_RT) {
- set_bit(CFQ_WAIT_RT, &cfqd->flags);
- cfqd->wait_end = jiffies + cfqd->cfq_grace_rt;
- } else if (crq->ioprio != IOPRIO_IDLE) {
- set_bit(CFQ_WAIT_NORM, &cfqd->flags);
- cfqd->wait_end = jiffies + cfqd->cfq_grace_idle;
- }
-
- if (crq->cfq_queue) {
- struct cfq_queue *cfqq = crq->cfq_queue;
-
- cfq_del_crq_rb(cfqd, cfqq, crq);
+ if (cfqq) {
+ cfq_del_crq_rb(cfqq, crq);
if (RB_EMPTY(&cfqq->sort_list))
cfq_put_queue(cfqd, cfqq);
static void cfq_merged_request(request_queue_t *q, struct request *req)
{
struct cfq_data *cfqd = q->elevator.elevator_data;
- struct cfq_rq *crq = RQ_ELV_DATA(req);
- int tmp;
+ struct cfq_rq *crq = RQ_DATA(req);
cfq_del_crq_hash(crq);
cfq_add_crq_hash(cfqd, crq);
- if (crq->cfq_queue && (rq_rb_key(req) != crq->rb_key)) {
+ if (ON_RB(&crq->rb_node) && (rq_rb_key(req) != crq->rb_key)) {
struct cfq_queue *cfqq = crq->cfq_queue;
- cfq_del_crq_rb(cfqd, cfqq, crq);
+ cfq_del_crq_rb(cfqq, crq);
cfq_add_crq_rb(cfqd, cfqq, crq);
}
- tmp = req->hard_nr_sectors - crq->nr_sectors;
- cfqd->busy_sectors += tmp;
- cfqd->cid[crq->ioprio].busy_sectors += tmp;
- atomic_add(tmp,&(cfqd->cid[crq->ioprio].cum_sectors_in));
-
- crq->nr_sectors = req->hard_nr_sectors;
-
q->last_merge = req;
}
cfq_remove_request(q, next);
}
-/*
- * sort into dispatch list, in optimal ascending order
- */
static void
cfq_dispatch_sort(struct cfq_data *cfqd, struct cfq_queue *cfqq,
struct cfq_rq *crq)
struct list_head *head = cfqd->dispatch, *entry = head;
struct request *__rq;
- cfq_del_crq_rb(cfqd, cfqq, crq);
+ cfq_del_crq_rb(cfqq, crq);
cfq_remove_merge_hints(cfqd->queue, crq);
if (!list_empty(head)) {
list_add_tail(&crq->request->queuelist, entry);
}
-/*
- * remove from io scheduler core and put on dispatch list for service
- */
-static inline int
+static inline void
__cfq_dispatch_requests(request_queue_t *q, struct cfq_data *cfqd,
struct cfq_queue *cfqq)
{
- struct cfq_rq *crq;
- unsigned long long ts, gap;
- unsigned long newavsec;
-
- crq = rb_entry_crq(rb_first(&cfqq->sort_list));
-
-#if 1
- /* Determine if queue should be skipped for being overshare */
- ts = sched_clock();
- gap = ts - cfqq->lastime;
-#ifdef LIMIT_DEBUG
- cfqq->sectorate = (cfqd->cfq_epochsectors
- * CFQ_TEMPLIM)/100;
-
-#endif
- if ((gap >= cfqd->cfq_epoch) || (gap < 0)) {
- cfqq->avsec = crq->nr_sectors ;
- cfqq->lastime = ts;
- } else {
- u64 tmp;
- /* Age old average and accumalate request to be served */
-
-// tmp = (u64) (cfqq->avsec * gap) ;
-// do_div(tmp, cfqd->cfq_epoch);
- newavsec = (unsigned long)(cfqq->avsec >> 1) + crq->nr_sectors;
-// if (crq->ioprio >= 0 && crq->ioprio <= 20)
-// cfqd->cid[crq->ioprio].lsectorate = newavsec;
-// atomic_set(&(cfqd->cid[crq->ioprio].lsectorate),
-// newavsec);
-
- if ((newavsec < cfqq->sectorate) || cfqq->skipped) {
- cfqq->avsec = newavsec ;
- cfqq->lastime = ts;
- cfqq->skipped = 0;
- } else {
- /* queue over share ; skip once */
- cfqq->skipped = 1;
-#ifdef LIMIT_DEBUG
-// atomic_inc(&(cfqd->cid[crq->ioprio].nskip));
-// if (crq->ioprio >= 0 && crq->ioprio <= 20)
-// cfqd->cid[crq->ioprio].nskip++;
-#endif
- return 0;
- }
- }
-#endif
-
-#ifdef LIMIT_DEBUG
-// if (crq->ioprio >= 0 && crq->ioprio <= 20) {
-// cfqd->cid[crq->ioprio].navsec = cfqq->avsec;
-// cfqd->cid[crq->ioprio].csectorate = cfqq->sectorate;
-// }
+ struct cfq_rq *crq = rb_entry_crq(rb_first(&cfqq->sort_list));
-// atomic_set(&(cfqd->cid[crq->ioprio].navsec),cfqq->avsec);
-// atomic_set(&(cfqd->cid[crq->ioprio].csectorate),cfqq->sectorate);
-#endif
cfq_dispatch_sort(cfqd, cfqq, crq);
-
- /*
- * technically, for IOPRIO_RT we don't need to add it to the list.
- */
- list_add_tail(&crq->prio_list, &cfqd->cid[cfqq->ioprio].prio_list);
- return crq->nr_sectors;
}
-static int
-cfq_dispatch_requests(request_queue_t *q, int prio, int max_rq, int max_sectors)
+static int cfq_dispatch_requests(request_queue_t *q, struct cfq_data *cfqd)
{
- struct cfq_data *cfqd = q->elevator.elevator_data;
- struct list_head *plist = &cfqd->cid[prio].rr_list;
- struct list_head *entry, *nxt;
- int q_rq, q_io;
- int ret ;
+ struct cfq_queue *cfqq;
+ struct list_head *entry, *tmp;
+ int ret, queued, good_queues;
- /*
- * for each queue at this prio level, dispatch a request
- */
- q_rq = q_io = 0;
- list_for_each_safe(entry, nxt, plist) {
- struct cfq_queue *cfqq = list_entry_cfqq(entry);
+ if (list_empty(&cfqd->rr_list))
+ return 0;
+
+ queued = ret = 0;
+restart:
+ good_queues = 0;
+ list_for_each_safe(entry, tmp, &cfqd->rr_list) {
+ cfqq = list_entry_cfqq(cfqd->rr_list.next);
BUG_ON(RB_EMPTY(&cfqq->sort_list));
- ret = __cfq_dispatch_requests(q, cfqd, cfqq);
- if (ret <= 0) {
- continue; /* skip queue */
- /* can optimize more by moving q to end of plist ? */
- }
- q_io += ret ;
- q_rq++ ;
+ __cfq_dispatch_requests(q, cfqd, cfqq);
if (RB_EMPTY(&cfqq->sort_list))
cfq_put_queue(cfqd, cfqq);
- /*
- * if we hit the queue limit, put the string of serviced
- * queues at the back of the pending list
- */
- if (q_io >= max_sectors || q_rq >= max_rq) {
- struct list_head *prv = nxt->prev;
-
- if (prv != plist) {
- list_del(plist);
- list_add(plist, prv);
- }
- break;
- }
- }
-
- cfqd->cid[prio].last_rq = q_rq;
- cfqd->cid[prio].last_sectors = q_io;
- return q_rq;
-}
-
-/*
- * try to move some requests to the dispatch list. return 0 on success
- */
-static int cfq_select_requests(request_queue_t *q, struct cfq_data *cfqd)
-{
- int queued, busy_rq, busy_sectors, i;
-
- /*
- * if there's any realtime io, only schedule that
- */
- if (cfq_dispatch_requests(q, IOPRIO_RT, cfqd->cfq_quantum, cfqd->cfq_quantum_io))
- return 1;
-
- /*
- * if RT io was last serviced and grace time hasn't expired,
- * arm the timer to restart queueing if no other RT io has been
- * submitted in the mean time
- */
- if (test_bit(CFQ_WAIT_RT, &cfqd->flags)) {
- if (time_before(jiffies, cfqd->wait_end)) {
- mod_timer(&cfqd->timer, cfqd->wait_end);
- return 0;
- }
- clear_bit(CFQ_WAIT_RT, &cfqd->flags);
- }
-
- /*
- * for each priority level, calculate number of requests we
- * are allowed to put into service.
- */
- queued = 0;
- busy_rq = cfqd->busy_rq;
- busy_sectors = cfqd->busy_sectors;
- for (i = IOPRIO_RT - 1; i > IOPRIO_IDLE; i--) {
- const int o_rq = busy_rq - cfqd->cid[i].busy_rq;
- const int o_sectors = busy_sectors - cfqd->cid[i].busy_sectors;
- int q_rq = cfqd->cfq_quantum * (i + 1) / IOPRIO_NR;
- int q_io = cfqd->cfq_quantum_io * (i + 1) / IOPRIO_NR;
-
- /*
- * no need to keep iterating the list, if there are no
- * requests pending anymore
- */
- if (!cfqd->busy_rq)
- break;
-
- /*
- * find out how many requests and sectors we are allowed to
- * service
- */
- if (o_rq)
- q_rq = o_sectors * (i + 1) / IOPRIO_NR;
- if (q_rq > cfqd->cfq_quantum)
- q_rq = cfqd->cfq_quantum;
-
- if (o_sectors)
- q_io = o_sectors * (i + 1) / IOPRIO_NR;
- if (q_io > cfqd->cfq_quantum_io)
- q_io = cfqd->cfq_quantum_io;
-
- /*
- * average with last dispatched for fairness
- */
- if (cfqd->cid[i].last_rq != -1)
- q_rq = (cfqd->cid[i].last_rq + q_rq) / 2;
- if (cfqd->cid[i].last_sectors != -1)
- q_io = (cfqd->cid[i].last_sectors + q_io) / 2;
-
- queued += cfq_dispatch_requests(q, i, q_rq, q_io);
- }
-
- if (queued)
- return 1;
+ else
+ good_queues++;
- /*
- * only allow dispatch of idle io, if the queue has been idle from
- * servicing RT or normal io for the grace period
- */
- if (test_bit(CFQ_WAIT_NORM, &cfqd->flags)) {
- if (time_before(jiffies, cfqd->wait_end)) {
- mod_timer(&cfqd->timer, cfqd->wait_end);
- return 0;
- }
- clear_bit(CFQ_WAIT_NORM, &cfqd->flags);
+ queued++;
+ ret = 1;
}
- /*
- * if we found nothing to do, allow idle io to be serviced
- */
- if (cfq_dispatch_requests(q, IOPRIO_IDLE, cfqd->cfq_idle_quantum, cfqd->cfq_idle_quantum_io))
- return 1;
+ if ((queued < cfqd->cfq_quantum) && good_queues)
+ goto restart;
- return 0;
+ return ret;
}
static struct request *cfq_next_request(request_queue_t *q)
if (!list_empty(cfqd->dispatch)) {
struct cfq_rq *crq;
dispatch:
- /*
- * end grace period, we are servicing a request
- */
- del_timer(&cfqd->timer);
- clear_bit(CFQ_WAIT_RT, &cfqd->flags);
- clear_bit(CFQ_WAIT_NORM, &cfqd->flags);
-
- BUG_ON(list_empty(cfqd->dispatch));
rq = list_entry_rq(cfqd->dispatch->next);
- BUG_ON(q->last_merge == rq);
- crq = RQ_ELV_DATA(rq);
- if (crq) {
- BUG_ON(!hlist_unhashed(&crq->hash));
- list_del_init(&crq->prio_list);
- }
+ crq = RQ_DATA(rq);
+ if (crq)
+ cfq_remove_merge_hints(q, crq);
return rq;
}
- /*
- * we moved requests to dispatch list, go back end serve one
- */
- if (cfq_select_requests(q, cfqd))
+ if (cfq_dispatch_requests(q, cfqd))
goto dispatch;
return NULL;
}
static inline struct cfq_queue *
-__cfq_find_cfq_hash(struct cfq_data *cfqd, int hashkey, const int hashval)
+__cfq_find_cfq_hash(struct cfq_data *cfqd, int pid, const int hashval)
{
- struct hlist_head *hash_list = &cfqd->cfq_hash[hashval];
- struct hlist_node *entry;
+ struct list_head *hash_list = &cfqd->cfq_hash[hashval];
+ struct list_head *entry;
- hlist_for_each(entry, hash_list) {
+ list_for_each(entry, hash_list) {
struct cfq_queue *__cfqq = list_entry_qhash(entry);
- if (__cfqq->hash_key == hashkey)
+ if (__cfqq->pid == pid)
return __cfqq;
}
return NULL;
}
-
-static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *cfqd, int hashkey)
+static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *cfqd, int pid)
{
- const int hashval = hash_long(hashkey, CFQ_QHASH_SHIFT);
+ const int hashval = hash_long(current->tgid, CFQ_QHASH_SHIFT);
- return __cfq_find_cfq_hash(cfqd, hashkey, hashval);
+ return __cfq_find_cfq_hash(cfqd, pid, hashval);
}
static void cfq_put_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
cfqd->busy_queues--;
- WARN_ON(cfqd->busy_queues < 0);
-
- cfqd->cid[cfqq->ioprio].busy_queues--;
- WARN_ON(cfqd->cid[cfqq->ioprio].busy_queues < 0);
- atomic_inc(&(cfqd->cid[cfqq->ioprio].cum_queues_out));
-
list_del(&cfqq->cfq_list);
- hlist_del(&cfqq->cfq_hash);
+ list_del(&cfqq->cfq_hash);
mempool_free(cfqq, cfq_mpool);
}
-static struct cfq_queue *__cfq_get_queue(struct cfq_data *cfqd, int hashkey,
+static struct cfq_queue *__cfq_get_queue(struct cfq_data *cfqd, int pid,
int gfp_mask)
{
- const int hashval = hash_long(hashkey, CFQ_QHASH_SHIFT);
+ const int hashval = hash_long(current->tgid, CFQ_QHASH_SHIFT);
struct cfq_queue *cfqq, *new_cfqq = NULL;
request_queue_t *q = cfqd->queue;
retry:
- cfqq = __cfq_find_cfq_hash(cfqd, hashkey, hashval);
+ cfqq = __cfq_find_cfq_hash(cfqd, pid, hashval);
if (!cfqq) {
if (new_cfqq) {
} else
return NULL;
- memset(cfqq, 0, sizeof(*cfqq));
- INIT_HLIST_NODE(&cfqq->cfq_hash);
+ INIT_LIST_HEAD(&cfqq->cfq_hash);
INIT_LIST_HEAD(&cfqq->cfq_list);
- cfqq->hash_key = cfq_hash_key(current);
- cfqq->ioprio = cfq_ioprio(current);
- cfqq->avsec = 0 ;
- cfqq->lastime = sched_clock();
- cfqq->sectorate = (cfqd->cfq_epochsectors * CFQ_TEMPLIM)/100;
- hlist_add_head(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
+ RB_CLEAR_ROOT(&cfqq->sort_list);
+
+ cfqq->pid = pid;
+ cfqq->queued[0] = cfqq->queued[1] = 0;
+ list_add(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
}
if (new_cfqq)
return cfqq;
}
-static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, int hashkey,
+static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, int pid,
int gfp_mask)
{
request_queue_t *q = cfqd->queue;
struct cfq_queue *cfqq;
spin_lock_irq(q->queue_lock);
- cfqq = __cfq_get_queue(cfqd, hashkey, gfp_mask);
+ cfqq = __cfq_get_queue(cfqd, pid, gfp_mask);
spin_unlock_irq(q->queue_lock);
return cfqq;
}
-static void
-__cfq_enqueue(request_queue_t *q, struct cfq_data *cfqd, struct cfq_rq *crq)
+static void cfq_enqueue(struct cfq_data *cfqd, struct cfq_rq *crq)
{
- const int prio = crq->ioprio;
struct cfq_queue *cfqq;
- cfqq = __cfq_get_queue(cfqd, cfq_hash_key(current), GFP_ATOMIC);
+ cfqq = __cfq_get_queue(cfqd, current->tgid, GFP_ATOMIC);
if (cfqq) {
-
- /*
- * not too good...
- */
- if (prio > cfqq->ioprio) {
- printk("prio hash collision %d %d\n",
- prio, cfqq->ioprio);
- if (!list_empty(&cfqq->cfq_list)) {
- cfqd->cid[cfqq->ioprio].busy_queues--;
- WARN_ON(cfqd->cid[cfqq->ioprio].busy_queues<0);
- atomic_inc(&(cfqd->cid[cfqq->ioprio].cum_queues_out));
- cfqd->cid[prio].busy_queues++;
- atomic_inc(&(cfqd->cid[prio].cum_queues_in));
- list_move_tail(&cfqq->cfq_list,
- &cfqd->cid[prio].rr_list);
- }
- cfqq->ioprio = prio;
- }
-
cfq_add_crq_rb(cfqd, cfqq, crq);
if (list_empty(&cfqq->cfq_list)) {
- list_add_tail(&cfqq->cfq_list,
- &cfqd->cid[prio].rr_list);
- cfqd->cid[prio].busy_queues++;
- atomic_inc(&(cfqd->cid[prio].cum_queues_in));
+ list_add(&cfqq->cfq_list, &cfqd->rr_list);
cfqd->busy_queues++;
}
-
- if (rq_mergeable(crq->request)) {
- cfq_add_crq_hash(cfqd, crq);
-
- if (!q->last_merge)
- q->last_merge = crq->request;
- }
-
} else {
/*
* should can only happen if the request wasn't allocated
}
}
-static void cfq_reenqueue(request_queue_t *q, struct cfq_data *cfqd, int prio)
-{
- struct list_head *prio_list = &cfqd->cid[prio].prio_list;
- struct list_head *entry, *tmp;
-
- list_for_each_safe(entry, tmp, prio_list) {
- struct cfq_rq *crq = list_entry_prio(entry);
-
- list_del_init(entry);
- list_del_init(&crq->request->queuelist);
- __cfq_enqueue(q, cfqd, crq);
- }
-}
-
-static void
-cfq_enqueue(request_queue_t *q, struct cfq_data *cfqd, struct cfq_rq *crq)
-{
- const int prio = cfq_ioprio(current);
-
- crq->ioprio = prio;
- crq->nr_sectors = crq->request->hard_nr_sectors;
- __cfq_enqueue(q, cfqd, crq);
-
- if (prio == IOPRIO_RT) {
- int i;
-
- /*
- * realtime io gets priority, move all other io back
- */
- for (i = IOPRIO_IDLE; i < IOPRIO_RT; i++)
- cfq_reenqueue(q, cfqd, i);
- } else if (prio != IOPRIO_IDLE) {
- /*
- * check if we need to move idle io back into queue
- */
- cfq_reenqueue(q, cfqd, IOPRIO_IDLE);
- }
-}
-
static void
cfq_insert_request(request_queue_t *q, struct request *rq, int where)
{
struct cfq_data *cfqd = q->elevator.elevator_data;
- struct cfq_rq *crq = RQ_ELV_DATA(rq);
+ struct cfq_rq *crq = RQ_DATA(rq);
switch (where) {
case ELEVATOR_INSERT_BACK:
-#if 0
while (cfq_dispatch_requests(q, cfqd))
;
-#endif
list_add_tail(&rq->queuelist, cfqd->dispatch);
break;
case ELEVATOR_INSERT_FRONT:
break;
case ELEVATOR_INSERT_SORT:
BUG_ON(!blk_fs_request(rq));
- cfq_enqueue(q, cfqd, crq);
+ cfq_enqueue(cfqd, crq);
break;
default:
- printk("%s: bad insert point %d\n",
- __FUNCTION__,where);
+ printk("%s: bad insert point %d\n", __FUNCTION__,where);
return;
}
+
+ if (rq_mergeable(rq)) {
+ cfq_add_crq_hash(cfqd, crq);
+
+ if (!q->last_merge)
+ q->last_merge = rq;
+ }
}
static int cfq_queue_empty(request_queue_t *q)
{
struct cfq_data *cfqd = q->elevator.elevator_data;
- if (list_empty(cfqd->dispatch) && !cfqd->busy_queues)
+ if (list_empty(cfqd->dispatch) && list_empty(&cfqd->rr_list))
return 1;
return 0;
static struct request *
cfq_former_request(request_queue_t *q, struct request *rq)
{
- struct cfq_rq *crq = RQ_ELV_DATA(rq);
+ struct cfq_rq *crq = RQ_DATA(rq);
struct rb_node *rbprev = rb_prev(&crq->rb_node);
if (rbprev)
static struct request *
cfq_latter_request(request_queue_t *q, struct request *rq)
{
- struct cfq_rq *crq = RQ_ELV_DATA(rq);
+ struct cfq_rq *crq = RQ_DATA(rq);
struct rb_node *rbnext = rb_next(&crq->rb_node);
if (rbnext)
return NULL;
}
-static void cfq_queue_congested(request_queue_t *q)
-{
- struct cfq_data *cfqd = q->elevator.elevator_data;
-
- set_bit(cfq_ioprio(current), &cfqd->rq_starved_mask);
-}
-
static int cfq_may_queue(request_queue_t *q, int rw)
{
struct cfq_data *cfqd = q->elevator.elevator_data;
struct cfq_queue *cfqq;
- const int prio = cfq_ioprio(current);
- int limit, ret = 1;
+ int ret = 1;
if (!cfqd->busy_queues)
goto out;
- cfqq = cfq_find_cfq_hash(cfqd, cfq_hash_key(current));
- if (!cfqq)
- goto out;
-
- cfqq = cfq_find_cfq_hash(cfqd, cfq_hash_key(current));
- if (!cfqq)
- goto out;
-
- /*
- * if higher or equal prio io is sleeping waiting for a request, don't
- * allow this one to allocate one. as long as ll_rw_blk does fifo
- * waitqueue wakeups this should work...
- */
- if (cfqd->rq_starved_mask & ~((1 << prio) - 1))
- goto out;
+ cfqq = cfq_find_cfq_hash(cfqd, current->tgid);
+ if (cfqq) {
+ int limit = (q->nr_requests - cfqd->cfq_queued) / cfqd->busy_queues;
- if (cfqq->queued[rw] < cfqd->cfq_queued || !cfqd->cid[prio].busy_queues)
- goto out;
+ if (limit < 3)
+ limit = 3;
+ else if (limit > cfqd->max_queued)
+ limit = cfqd->max_queued;
- limit = q->nr_requests * (prio + 1) / IOPRIO_NR;
- limit /= cfqd->cid[prio].busy_queues;
- if (cfqq->queued[rw] > limit)
- ret = 0;
+ if (cfqq->queued[rw] > limit)
+ ret = 0;
+ }
out:
return ret;
}
static void cfq_put_request(request_queue_t *q, struct request *rq)
{
struct cfq_data *cfqd = q->elevator.elevator_data;
- struct cfq_rq *crq = RQ_ELV_DATA(rq);
+ struct cfq_rq *crq = RQ_DATA(rq);
struct request_list *rl;
int other_rw;
if (crq) {
BUG_ON(q->last_merge == rq);
- BUG_ON(!hlist_unhashed(&crq->hash));
+ BUG_ON(ON_MHASH(crq));
mempool_free(crq, cfqd->crq_pool);
rq->elevator_private = NULL;
/*
* prepare a queue up front, so cfq_enqueue() doesn't have to
*/
- cfqq = cfq_get_queue(cfqd, cfq_hash_key(current), gfp_mask);
+ cfqq = cfq_get_queue(cfqd, current->tgid, gfp_mask);
if (!cfqq)
return 1;
crq = mempool_alloc(cfqd->crq_pool, gfp_mask);
if (crq) {
- /*
- * process now has one request
- */
- clear_bit(cfq_ioprio(current), &cfqd->rq_starved_mask);
-
memset(crq, 0, sizeof(*crq));
+ RB_CLEAR(&crq->rb_node);
crq->request = rq;
- INIT_HLIST_NODE(&crq->hash);
- INIT_LIST_HEAD(&crq->prio_list);
+ crq->cfq_queue = NULL;
+ INIT_LIST_HEAD(&crq->hash);
rq->elevator_private = crq;
return 0;
}
kfree(cfqd);
}
-static void cfq_timer(unsigned long data)
-{
- struct cfq_data *cfqd = (struct cfq_data *) data;
-
- clear_bit(CFQ_WAIT_RT, &cfqd->flags);
- clear_bit(CFQ_WAIT_NORM, &cfqd->flags);
- kblockd_schedule_work(&cfqd->work);
-}
-
-static void cfq_work(void *data)
-{
- request_queue_t *q = data;
- unsigned long flags;
-
- spin_lock_irqsave(q->queue_lock, flags);
- if (cfq_next_request(q))
- q->request_fn(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
-}
-
static int cfq_init(request_queue_t *q, elevator_t *e)
{
struct cfq_data *cfqd;
return -ENOMEM;
memset(cfqd, 0, sizeof(*cfqd));
- init_timer(&cfqd->timer);
- cfqd->timer.function = cfq_timer;
- cfqd->timer.data = (unsigned long) cfqd;
-
- INIT_WORK(&cfqd->work, cfq_work, q);
-
- for (i = 0; i < IOPRIO_NR; i++) {
- struct io_prio_data *cid = &cfqd->cid[i];
-
- INIT_LIST_HEAD(&cid->rr_list);
- INIT_LIST_HEAD(&cid->prio_list);
- cid->last_rq = -1;
- cid->last_sectors = -1;
-
- atomic_set(&cid->cum_rq_in,0);
- atomic_set(&cid->cum_rq_out,0);
- atomic_set(&cid->cum_sectors_in,0);
- atomic_set(&cid->cum_sectors_out,0);
- atomic_set(&cid->cum_queues_in,0);
- atomic_set(&cid->cum_queues_out,0);
-#if 0
- atomic_set(&cid->nskip,0);
- atomic_set(&cid->navsec,0);
- atomic_set(&cid->csectorate,0);
- atomic_set(&cid->lsectorate,0);
-#endif
- }
+ INIT_LIST_HEAD(&cfqd->rr_list);
- cfqd->crq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_MHASH_ENTRIES,
- GFP_KERNEL);
+ cfqd->crq_hash = kmalloc(sizeof(struct list_head) * CFQ_MHASH_ENTRIES, GFP_KERNEL);
if (!cfqd->crq_hash)
goto out_crqhash;
- cfqd->cfq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_QHASH_ENTRIES,
- GFP_KERNEL);
+ cfqd->cfq_hash = kmalloc(sizeof(struct list_head) * CFQ_QHASH_ENTRIES, GFP_KERNEL);
if (!cfqd->cfq_hash)
goto out_cfqhash;
- cfqd->crq_pool = mempool_create(BLKDEV_MIN_RQ, mempool_alloc_slab,
- mempool_free_slab, crq_pool);
+ cfqd->crq_pool = mempool_create(BLKDEV_MIN_RQ, mempool_alloc_slab, mempool_free_slab, crq_pool);
if (!cfqd->crq_pool)
goto out_crqpool;
for (i = 0; i < CFQ_MHASH_ENTRIES; i++)
- INIT_HLIST_HEAD(&cfqd->crq_hash[i]);
+ INIT_LIST_HEAD(&cfqd->crq_hash[i]);
for (i = 0; i < CFQ_QHASH_ENTRIES; i++)
- INIT_HLIST_HEAD(&cfqd->cfq_hash[i]);
-
- cfqd->cfq_queued = cfq_queued;
- cfqd->cfq_quantum = cfq_quantum;
- cfqd->cfq_quantum_io = cfq_quantum_io;
- cfqd->cfq_idle_quantum = cfq_idle_quantum;
- cfqd->cfq_idle_quantum_io = cfq_idle_quantum_io;
- cfqd->cfq_grace_rt = cfq_grace_rt;
- cfqd->cfq_grace_idle = cfq_grace_idle;
-
- q->nr_requests <<= 2;
+ INIT_LIST_HEAD(&cfqd->cfq_hash[i]);
cfqd->dispatch = &q->queue_head;
e->elevator_data = cfqd;
cfqd->queue = q;
- cfqd->cfq_epoch = CFQ_EPOCH;
- if (q->hardsect_size)
- cfqd->cfq_epochsectors = ((CFQ_DISKBW * 1000000)/
- q->hardsect_size)* (1000000 / CFQ_EPOCH);
- else
- cfqd->cfq_epochsectors = ((CFQ_DISKBW * 1000000)/512)
- * (1000000 / CFQ_EPOCH) ;
+ /*
+ * just set it to some high value, we want anyone to be able to queue
+ * some requests. fairness is handled differently
+ */
+ cfqd->max_queued = q->nr_requests;
+ q->nr_requests = 8192;
+
+ cfqd->cfq_queued = cfq_queued;
+ cfqd->cfq_quantum = cfq_quantum;
return 0;
out_crqpool:
return cfq_var_show(__VAR, (page)); \
}
SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum);
-SHOW_FUNCTION(cfq_quantum_io_show, cfqd->cfq_quantum_io);
-SHOW_FUNCTION(cfq_idle_quantum_show, cfqd->cfq_idle_quantum);
-SHOW_FUNCTION(cfq_idle_quantum_io_show, cfqd->cfq_idle_quantum_io);
SHOW_FUNCTION(cfq_queued_show, cfqd->cfq_queued);
-SHOW_FUNCTION(cfq_grace_rt_show, cfqd->cfq_grace_rt);
-SHOW_FUNCTION(cfq_grace_idle_show, cfqd->cfq_grace_idle);
#undef SHOW_FUNCTION
#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \
return ret; \
}
STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, INT_MAX);
-STORE_FUNCTION(cfq_quantum_io_store, &cfqd->cfq_quantum_io, 4, INT_MAX);
-STORE_FUNCTION(cfq_idle_quantum_store, &cfqd->cfq_idle_quantum, 1, INT_MAX);
-STORE_FUNCTION(cfq_idle_quantum_io_store, &cfqd->cfq_idle_quantum_io, 4, INT_MAX);
STORE_FUNCTION(cfq_queued_store, &cfqd->cfq_queued, 1, INT_MAX);
-STORE_FUNCTION(cfq_grace_rt_store, &cfqd->cfq_grace_rt, 0, INT_MAX);
-STORE_FUNCTION(cfq_grace_idle_store, &cfqd->cfq_grace_idle, 0, INT_MAX);
#undef STORE_FUNCTION
-
-static ssize_t cfq_epoch_show(struct cfq_data *cfqd, char *page)
-{
- return sprintf(page, "%lu\n", cfqd->cfq_epoch);
-}
-
-static ssize_t cfq_epoch_store(struct cfq_data *cfqd, const char *page, size_t count)
-{
- char *p = (char *) page;
- cfqd->cfq_epoch = simple_strtoul(p, &p, 10);
- return count;
-}
-
-static ssize_t cfq_epochsectors_show(struct cfq_data *cfqd, char *page)
-{
- return sprintf(page, "%lu\n", cfqd->cfq_epochsectors);
-}
-
-static ssize_t
-cfq_epochsectors_store(struct cfq_data *cfqd, const char *page, size_t count)
-{
- char *p = (char *) page;
- cfqd->cfq_epochsectors = simple_strtoul(p, &p, 10);
- return count;
-}
-
-/* Additional entries to get priority level data */
-static ssize_t
-cfq_prio_show(struct cfq_data *cfqd, char *page, unsigned int priolvl)
-{
- int r1,r2,s1,s2,q1,q2;
-
- if (!(priolvl >= IOPRIO_IDLE && priolvl <= IOPRIO_RT))
- return 0;
-
- r1 = (int)atomic_read(&(cfqd->cid[priolvl].cum_rq_in));
- r2 = (int)atomic_read(&(cfqd->cid[priolvl].cum_rq_out));
- s1 = (int)atomic_read(&(cfqd->cid[priolvl].cum_sectors_in));
- s2 = (int)atomic_read(&(cfqd->cid[priolvl].cum_sectors_out));
- q1 = (int)atomic_read(&(cfqd->cid[priolvl].cum_queues_in));
- q2 = (int)atomic_read(&(cfqd->cid[priolvl].cum_queues_out));
-
- return sprintf(page,"skip %d avsec %lu rate %lu new %lu"
- "rq (%d,%d) sec (%d,%d) q (%d,%d)\n",
- cfqd->cid[priolvl].nskip,
- cfqd->cid[priolvl].navsec,
- cfqd->cid[priolvl].csectorate,
- cfqd->cid[priolvl].lsectorate,
-// atomic_read(&cfqd->cid[priolvl].nskip),
-// atomic_read(&cfqd->cid[priolvl].navsec),
-// atomic_read(&cfqd->cid[priolvl].csectorate),
-// atomic_read(&cfqd->cid[priolvl].lsectorate),
- r1,r2,
- s1,s2,
- q1,q2);
-}
-
-#define SHOW_PRIO_DATA(__PRIOLVL) \
-static ssize_t cfq_prio_##__PRIOLVL##_show(struct cfq_data *cfqd, char *page) \
-{ \
- return cfq_prio_show(cfqd,page,__PRIOLVL); \
-}
-SHOW_PRIO_DATA(0);
-SHOW_PRIO_DATA(1);
-SHOW_PRIO_DATA(2);
-SHOW_PRIO_DATA(3);
-SHOW_PRIO_DATA(4);
-SHOW_PRIO_DATA(5);
-SHOW_PRIO_DATA(6);
-SHOW_PRIO_DATA(7);
-SHOW_PRIO_DATA(8);
-SHOW_PRIO_DATA(9);
-SHOW_PRIO_DATA(10);
-SHOW_PRIO_DATA(11);
-SHOW_PRIO_DATA(12);
-SHOW_PRIO_DATA(13);
-SHOW_PRIO_DATA(14);
-SHOW_PRIO_DATA(15);
-SHOW_PRIO_DATA(16);
-SHOW_PRIO_DATA(17);
-SHOW_PRIO_DATA(18);
-SHOW_PRIO_DATA(19);
-SHOW_PRIO_DATA(20);
-#undef SHOW_PRIO_DATA
-
-
-static ssize_t cfq_prio_store(struct cfq_data *cfqd, const char *page, size_t count, int priolvl)
-{
- atomic_set(&(cfqd->cid[priolvl].cum_rq_in),0);
- atomic_set(&(cfqd->cid[priolvl].cum_rq_out),0);
- atomic_set(&(cfqd->cid[priolvl].cum_sectors_in),0);
- atomic_set(&(cfqd->cid[priolvl].cum_sectors_out),0);
- atomic_set(&(cfqd->cid[priolvl].cum_queues_in),0);
- atomic_set(&(cfqd->cid[priolvl].cum_queues_out),0);
-
- return count;
-}
-
-
-#define STORE_PRIO_DATA(__PRIOLVL) \
-static ssize_t cfq_prio_##__PRIOLVL##_store(struct cfq_data *cfqd, const char *page, size_t count) \
-{ \
- return cfq_prio_store(cfqd,page,count,__PRIOLVL); \
-}
-STORE_PRIO_DATA(0);
-STORE_PRIO_DATA(1);
-STORE_PRIO_DATA(2);
-STORE_PRIO_DATA(3);
-STORE_PRIO_DATA(4);
-STORE_PRIO_DATA(5);
-STORE_PRIO_DATA(6);
-STORE_PRIO_DATA(7);
-STORE_PRIO_DATA(8);
-STORE_PRIO_DATA(9);
-STORE_PRIO_DATA(10);
-STORE_PRIO_DATA(11);
-STORE_PRIO_DATA(12);
-STORE_PRIO_DATA(13);
-STORE_PRIO_DATA(14);
-STORE_PRIO_DATA(15);
-STORE_PRIO_DATA(16);
-STORE_PRIO_DATA(17);
-STORE_PRIO_DATA(18);
-STORE_PRIO_DATA(19);
-STORE_PRIO_DATA(20);
-#undef STORE_PRIO_DATA
-
-
static struct cfq_fs_entry cfq_quantum_entry = {
.attr = {.name = "quantum", .mode = S_IRUGO | S_IWUSR },
.show = cfq_quantum_show,
.store = cfq_quantum_store,
};
-static struct cfq_fs_entry cfq_quantum_io_entry = {
- .attr = {.name = "quantum_io", .mode = S_IRUGO | S_IWUSR },
- .show = cfq_quantum_io_show,
- .store = cfq_quantum_io_store,
-};
-static struct cfq_fs_entry cfq_idle_quantum_entry = {
- .attr = {.name = "idle_quantum", .mode = S_IRUGO | S_IWUSR },
- .show = cfq_idle_quantum_show,
- .store = cfq_idle_quantum_store,
-};
-static struct cfq_fs_entry cfq_idle_quantum_io_entry = {
- .attr = {.name = "idle_quantum_io", .mode = S_IRUGO | S_IWUSR },
- .show = cfq_idle_quantum_io_show,
- .store = cfq_idle_quantum_io_store,
-};
static struct cfq_fs_entry cfq_queued_entry = {
.attr = {.name = "queued", .mode = S_IRUGO | S_IWUSR },
.show = cfq_queued_show,
.store = cfq_queued_store,
};
-static struct cfq_fs_entry cfq_grace_rt_entry = {
- .attr = {.name = "grace_rt", .mode = S_IRUGO | S_IWUSR },
- .show = cfq_grace_rt_show,
- .store = cfq_grace_rt_store,
-};
-static struct cfq_fs_entry cfq_grace_idle_entry = {
- .attr = {.name = "grace_idle", .mode = S_IRUGO | S_IWUSR },
- .show = cfq_grace_idle_show,
- .store = cfq_grace_idle_store,
-};
-static struct cfq_fs_entry cfq_epoch_entry = {
- .attr = {.name = "epoch", .mode = S_IRUGO | S_IWUSR },
- .show = cfq_epoch_show,
- .store = cfq_epoch_store,
-};
-static struct cfq_fs_entry cfq_epochsectors_entry = {
- .attr = {.name = "epochsectors", .mode = S_IRUGO | S_IWUSR },
- .show = cfq_epochsectors_show,
- .store = cfq_epochsectors_store,
-};
-
-#define P_0_STR "p0"
-#define P_1_STR "p1"
-#define P_2_STR "p2"
-#define P_3_STR "p3"
-#define P_4_STR "p4"
-#define P_5_STR "p5"
-#define P_6_STR "p6"
-#define P_7_STR "p7"
-#define P_8_STR "p8"
-#define P_9_STR "p9"
-#define P_10_STR "p10"
-#define P_11_STR "p11"
-#define P_12_STR "p12"
-#define P_13_STR "p13"
-#define P_14_STR "p14"
-#define P_15_STR "p15"
-#define P_16_STR "p16"
-#define P_17_STR "p17"
-#define P_18_STR "p18"
-#define P_19_STR "p19"
-#define P_20_STR "p20"
-
-
-#define CFQ_PRIO_SYSFS_ENTRY(__PRIOLVL) \
-static struct cfq_fs_entry cfq_prio_##__PRIOLVL##_entry = { \
- .attr = {.name = P_##__PRIOLVL##_STR, .mode = S_IRUGO | S_IWUSR }, \
- .show = cfq_prio_##__PRIOLVL##_show, \
- .store = cfq_prio_##__PRIOLVL##_store, \
-};
-CFQ_PRIO_SYSFS_ENTRY(0);
-CFQ_PRIO_SYSFS_ENTRY(1);
-CFQ_PRIO_SYSFS_ENTRY(2);
-CFQ_PRIO_SYSFS_ENTRY(3);
-CFQ_PRIO_SYSFS_ENTRY(4);
-CFQ_PRIO_SYSFS_ENTRY(5);
-CFQ_PRIO_SYSFS_ENTRY(6);
-CFQ_PRIO_SYSFS_ENTRY(7);
-CFQ_PRIO_SYSFS_ENTRY(8);
-CFQ_PRIO_SYSFS_ENTRY(9);
-CFQ_PRIO_SYSFS_ENTRY(10);
-CFQ_PRIO_SYSFS_ENTRY(11);
-CFQ_PRIO_SYSFS_ENTRY(12);
-CFQ_PRIO_SYSFS_ENTRY(13);
-CFQ_PRIO_SYSFS_ENTRY(14);
-CFQ_PRIO_SYSFS_ENTRY(15);
-CFQ_PRIO_SYSFS_ENTRY(16);
-CFQ_PRIO_SYSFS_ENTRY(17);
-CFQ_PRIO_SYSFS_ENTRY(18);
-CFQ_PRIO_SYSFS_ENTRY(19);
-CFQ_PRIO_SYSFS_ENTRY(20);
-#undef CFQ_PRIO_SYSFS_ENTRY
static struct attribute *default_attrs[] = {
&cfq_quantum_entry.attr,
- &cfq_quantum_io_entry.attr,
- &cfq_idle_quantum_entry.attr,
- &cfq_idle_quantum_io_entry.attr,
&cfq_queued_entry.attr,
- &cfq_grace_rt_entry.attr,
- &cfq_grace_idle_entry.attr,
- &cfq_epoch_entry.attr,
- &cfq_epochsectors_entry.attr,
- &cfq_prio_0_entry.attr,
- &cfq_prio_1_entry.attr,
- &cfq_prio_2_entry.attr,
- &cfq_prio_3_entry.attr,
- &cfq_prio_4_entry.attr,
- &cfq_prio_5_entry.attr,
- &cfq_prio_6_entry.attr,
- &cfq_prio_7_entry.attr,
- &cfq_prio_8_entry.attr,
- &cfq_prio_9_entry.attr,
- &cfq_prio_10_entry.attr,
- &cfq_prio_11_entry.attr,
- &cfq_prio_12_entry.attr,
- &cfq_prio_13_entry.attr,
- &cfq_prio_14_entry.attr,
- &cfq_prio_15_entry.attr,
- &cfq_prio_16_entry.attr,
- &cfq_prio_17_entry.attr,
- &cfq_prio_18_entry.attr,
- &cfq_prio_19_entry.attr,
- &cfq_prio_20_entry.attr,
NULL,
};
.elevator_set_req_fn = cfq_set_request,
.elevator_put_req_fn = cfq_put_request,
.elevator_may_queue_fn = cfq_may_queue,
- .elevator_set_congested_fn = cfq_queue_congested,
.elevator_init_fn = cfq_init,
.elevator_exit_fn = cfq_exit,
};
+++ /dev/null
-/* linux/drivers/block/ckrm_io.c : Block I/O Resource Controller for CKRM
- *
- * Copyright (C) Shailabh Nagar, IBM Corp. 2004
- *
- *
- * Provides best-effort block I/O bandwidth control for CKRM
- * This file provides the CKRM API. The underlying scheduler is a
- * modified Complete-Fair Queueing (CFQ) iosched.
- *
- * Latest version, more details at http://ckrm.sf.net
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- */
-
-/* Changes
- *
- * 29 July 2004
- * Third complete rewrite for CKRM's current API
- *
- */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <asm/errno.h>
-#include <asm/div64.h>
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <linux/fs.h>
-
-#include <linux/ckrm_tc.h>
-#include <linux/ckrm-io.h>
-
-/* Tie to cfq priorities */
-#define CKI_IOPRIO_NORM IOPRIO_NORM
-
-/* Divisor to get fraction of bandwidth represented by an IOPRIO value */
-/* FIXME: Will not work if IOPRIO_NR > 100 */
-#define CKI_IOPRIO_DIV (IOPRIO_NR-1)
-/* Minimum ioprio value to be assigned to a class */
-#define CKI_IOPRIO_MIN 1
-
-#define CKI_IOUSAGE_UNIT 512
-
-typedef struct ckrm_io_stats{
- struct timeval epochstart ; /* all measurements relative to this
- start time */
- unsigned long blksz; /* size of bandwidth unit */
- atomic_t blkrd; /* read units submitted to DD */
- atomic_t blkwr; /* write units submitted to DD */
-
-} cki_stats_t; /* per class I/O statistics */
-
-/* Note
- * Currently local unit == CFQ I/O priority directly.
- * CFQ ionice values have an implied bandwidth share so they
- * can be added, subdivided etc. as long as the initial allocation
- * of the systemwide default's total is set to the highest CFQ ionice
- * value (== 100% of disk bandwidth)
- */
-
-typedef struct ckrm_io_class {
-
- struct ckrm_core_class *core;
- struct ckrm_core_class *parent;
-
- struct ckrm_shares shares;
- spinlock_t shares_lock; /* protect share changes */
-
- /* Absolute shares of this class
- * in local units.
- */
-
- int cnt_guarantee; /* Allocation as parent */
- int cnt_unused; /* Allocation to default subclass */
-
- /* Statistics, for class and default subclass */
- cki_stats_t stats;
- cki_stats_t mystats;
-
-} cki_icls_t;
-
-
-
-/* Internal functions */
-static inline void cki_reset_stats(cki_stats_t *usg);
-static inline void init_icls_one(cki_icls_t *icls);
-static inline int cki_div(int *a, int b, int c);
-//static inline int cki_recalc(cki_icls_t *icls, int rel2abs);
-static void cki_recalc_propagate(cki_icls_t *res, cki_icls_t *parres);
-
-/* External functions e.g. interface to ioscheduler */
-void *cki_tsk_icls (struct task_struct *tsk);
-int cki_tsk_ioprio (struct task_struct *tsk);
-
-extern void cki_cfq_set(icls_tsk_t tskicls, icls_ioprio_t tskioprio);
-
-/* CKRM Resource Controller API functions */
-static void * cki_alloc(struct ckrm_core_class *this,
- struct ckrm_core_class * parent);
-static void cki_free(void *res);
-static int cki_setshare(void *res, struct ckrm_shares * shares);
-static int cki_getshare(void *res, struct ckrm_shares * shares);
-static int cki_getstats(void *res, struct seq_file *);
-static int cki_resetstats(void *res);
-static int cki_showconfig(void *res, struct seq_file *sfile);
-static int cki_setconfig(void *res, const char *cfgstr);
-static void cki_chgcls(void *tsk, void *oldres, void *newres);
-
-
-struct ckrm_res_ctlr cki_rcbs;
-
-static inline void cki_reset_stats(cki_stats_t *stats)
-{
- if (stats) {
- atomic_set(&stats->blkrd,0);
- atomic_set(&stats->blkwr,0);
- }
-}
-
-static inline void init_icls_stats(cki_icls_t *icls)
-{
- struct timeval tv;
-
- do_gettimeofday(&tv);
- icls->stats.epochstart = icls->mystats.epochstart = tv;
- icls->stats.blksz = icls->mystats.blksz = CKI_IOUSAGE_UNIT;
- cki_reset_stats(&icls->stats);
- cki_reset_stats(&icls->mystats);
-}
-
-/* Initialize icls to default values
- * No other classes touched, locks not reinitialized.
- */
-
-static inline void init_icls_one(cki_icls_t *icls)
-{
- // Assign zero as initial guarantee otherwise creations
- // could fail due to inadequate share
-
- //icls->shares.my_guarantee =
- // (CKI_IOPRIO_MIN * CKRM_SHARE_DFLT_TOTAL_GUARANTEE) /
- // CKI_IOPRIO_DIV ;
- icls->shares.my_guarantee = 0;
- icls->shares.my_limit = CKRM_SHARE_DFLT_TOTAL_GUARANTEE;
- icls->shares.total_guarantee = CKRM_SHARE_DFLT_TOTAL_GUARANTEE;
- icls->shares.max_limit = CKRM_SHARE_DFLT_TOTAL_GUARANTEE;
-
- icls->shares.unused_guarantee = icls->shares.total_guarantee -
- icls->shares.my_guarantee;
- icls->shares.cur_max_limit = CKRM_SHARE_DFLT_TOTAL_GUARANTEE;
-
-
- icls->cnt_guarantee = icls->cnt_unused = IOPRIO_IDLE;
-
- //Same rationale icls->ioprio = CKI_IOPRIO_MIN;
- //IOPRIO_IDLE equivalence to zero my_guarantee (set above) relies
- //on former being zero.
-
- init_icls_stats(icls);
-}
-
-
-static inline int cki_div(int *a, int b, int c)
-{
- u64 temp = (u64) b * c ;
- do_div(temp,CKI_IOPRIO_DIV);
- *a = (int) temp;
-
- return 0;
-}
-
-
-/* Recalculate absolute shares from relative (rel2abs=1)
- * or vice versa (rel2abs=0)
- * Caller should have a lock on icls
- */
-
-static void cki_recalc_propagate(cki_icls_t *res, cki_icls_t *parres)
-{
-
- ckrm_core_class_t *child = NULL;
- cki_icls_t *childres;
- int resid = cki_rcbs.resid;
-
- if (parres) {
- struct ckrm_shares *par = &parres->shares;
- struct ckrm_shares *self = &res->shares;
-
-
-
- if (parres->cnt_guarantee == CKRM_SHARE_DONTCARE) {
- res->cnt_guarantee = CKRM_SHARE_DONTCARE;
- } else if (par->total_guarantee) {
- u64 temp = (u64) self->my_guarantee *
- parres->cnt_guarantee;
- do_div(temp, par->total_guarantee);
- res->cnt_guarantee = (int) temp;
- } else {
- res->cnt_guarantee = 0;
- }
-
- if (res->cnt_guarantee == CKRM_SHARE_DONTCARE) {
- res->cnt_unused = CKRM_SHARE_DONTCARE;
- } else if (self->total_guarantee) {
- u64 temp = (u64) self->unused_guarantee *
- res->cnt_guarantee;
- do_div(temp, self->total_guarantee);
- res->cnt_unused = (int) temp;
- } else {
- res->cnt_unused = 0;
- }
- }
- // propagate to children
- ckrm_lock_hier(res->core);
- while ((child = ckrm_get_next_child(res->core,child)) != NULL){
- childres = ckrm_get_res_class(child, resid,
- cki_icls_t);
-
- spin_lock(&childres->shares_lock);
- cki_recalc_propagate(childres, res);
- spin_unlock(&childres->shares_lock);
- }
- ckrm_unlock_hier(res->core);
-}
-
-#if 0
-static inline int cki_recalc(cki_icls_t *icls, int rel2abs)
-{
- u64 temp;
-
- if (icls->parent == NULL) {
- /* Root, as parent, always gets all */
-
- temp = icls->shares.my_guarantee * (IOPRIO_NR-1);
- do_div(temp, icls->shares.total_guarantee);
-
- icls->total = IOPRIO_NR-1;
- icls->ioprio = temp ;
- icls->unused = icls->total - icls->ioprio;
-// icls->unused = (IOPRIO_NR-1)-icls->ioprio;
-
- } else {
- cki_icls_t *parres;
- int partot ;
-
- parres = ckrm_get_res_class(icls->parent,
- cki_rcbs.resid,
- cki_icls_t);
- if (!parres) {
- printk(KERN_ERR "cki_recalc: error getting "
- "resclass from core \n");
- return -EINVAL;
- }
-
-
- temp = (icls->shares.my_guarantee *
- parres->total);
- do_div(temp, parres->shares.total_guarantee);
-
- icls->ioprio = temp;
- icls->unused = 0;
-
- }
-
- return 0;
-
-}
-#endif
-
-void *cki_tsk_icls(struct task_struct *tsk)
-{
- return (void *) ckrm_get_res_class(class_core(tsk->taskclass),
- cki_rcbs.resid, cki_icls_t);
-}
-
-int cki_tsk_ioprio(struct task_struct *tsk)
-{
- cki_icls_t *icls = ckrm_get_res_class(class_core(tsk->taskclass),
- cki_rcbs.resid, cki_icls_t);
- return icls->cnt_unused;
-}
-
-static void *cki_alloc(struct ckrm_core_class *core,
- struct ckrm_core_class *parent)
-{
- cki_icls_t *icls;
-
- icls = kmalloc(sizeof(cki_icls_t), GFP_ATOMIC);
- if (!icls) {
- printk(KERN_ERR "cki_res_alloc failed GFP_ATOMIC\n");
- return NULL;
- }
-
- memset(icls, 0, sizeof(cki_icls_t));
- icls->core = core;
- icls->parent = parent;
- icls->shares_lock = SPIN_LOCK_UNLOCKED;
-
- if (parent == NULL) {
-
- /* Root class gets same as "normal" CFQ priorities to
- * retain compatibility of behaviour in the absence of
- * other classes
- */
-
- icls->cnt_guarantee = icls->cnt_unused = IOPRIO_NR-1;
-
- /* Default gets normal, not minimum */
- //icls->unused = IOPRIO_NORM;
- //icls->unused = icls->guarantee-icls->myguarantee;
- //icls->limit = icls->mylimit = IOPRIO_NR;
-
- /* Compute shares in abstract units */
- icls->shares.total_guarantee = CKRM_SHARE_DFLT_TOTAL_GUARANTEE;
-
- // my_guarantee for root is meaningless. Set to default
- icls->shares.my_guarantee = CKRM_SHARE_DFLT_TOTAL_GUARANTEE;
-
- icls->shares.unused_guarantee =
- CKRM_SHARE_DFLT_TOTAL_GUARANTEE;
-
- //temp = (u64) icls->cnt_unused * icls->shares.total_guarantee;
- //do_div(temp, CKI_IOPRIO_DIV);
- // temp now has root's default's share
- //icls->shares.unused_guarantee =
- // icls->shares.total_guarantee - temp;
-
- icls->shares.my_limit = CKRM_SHARE_DFLT_TOTAL_GUARANTEE;
- icls->shares.max_limit = CKRM_SHARE_DFLT_TOTAL_GUARANTEE;
- icls->shares.cur_max_limit = CKRM_SHARE_DFLT_TOTAL_GUARANTEE;
-
- } else {
- init_icls_one(icls);
- /* No propagation to parent needed if icls'
- initial share is zero */
- }
- try_module_get(THIS_MODULE);
- return icls;
-}
-
-static void cki_free(void *res)
-{
- cki_icls_t *icls = res, *parres;
-
- if (!res)
- return;
-
- /* Deallocate CFQ queues */
-
- /* Currently CFQ queues are deallocated when empty. Since no task
- * should belong to this icls, no new requests will get added to the
- * CFQ queue.
- *
- * When CFQ switches to persistent queues, call its "put" function
- * so it gets deallocated after the last pending request is serviced.
- *
- */
-
- parres = ckrm_get_res_class(icls->parent,
- cki_rcbs.resid,
- cki_icls_t);
- if (!parres) {
- printk(KERN_ERR "cki_free: error getting "
- "resclass from core \n");
- return;
- }
-
- /* Update parent's shares */
- spin_lock(&parres->shares_lock);
- child_guarantee_changed(&parres->shares, icls->shares.my_guarantee, 0);
- parres->cnt_unused += icls->cnt_guarantee;
- spin_unlock(&parres->shares_lock);
-
- kfree(res);
- module_put(THIS_MODULE);
- return;
-}
-
-
-static int cki_setshare(void *res, struct ckrm_shares *new)
-{
- cki_icls_t *icls = res, *parres;
- struct ckrm_shares *cur, *par;
- int rc = -EINVAL, resid = cki_rcbs.resid;
-
- if (!icls) {
- printk(KERN_ERR "No class\n");
- return rc;
- }
-
- cur = &icls->shares;
-
- /* limits not supported */
- if ((new->max_limit != CKRM_SHARE_UNCHANGED)
- || (new->my_limit != CKRM_SHARE_UNCHANGED)) {
- printk(KERN_ERR "limits not supported\n");
- return -EINVAL;
- }
-
- if (icls->parent) {
- parres =
- ckrm_get_res_class(icls->parent, resid, cki_icls_t);
- if (!parres) {
- printk(KERN_ERR "cki_setshare: error getting "
- "resclass from core \n");
- return -EINVAL;
- }
- spin_lock(&parres->shares_lock);
- spin_lock(&icls->shares_lock);
- par = &parres->shares;
- } else {
- spin_lock(&icls->shares_lock);
- parres = NULL;
- par = NULL;
- }
-
- rc = set_shares(new, cur, par);
- printk(KERN_ERR "rc from set_shares %d\n", rc);
-
- if ((!rc) && parres) {
-
- if (parres->cnt_guarantee == CKRM_SHARE_DONTCARE) {
- parres->cnt_unused = CKRM_SHARE_DONTCARE;
- } else if (par->total_guarantee) {
- u64 temp = (u64) par->unused_guarantee *
- parres->cnt_guarantee;
- do_div(temp, par->total_guarantee);
- parres->cnt_unused = (int) temp;
- } else {
- parres->cnt_unused = 0;
- }
- cki_recalc_propagate(res, parres);
-
-#if 0
- int old = icls->ioprio;
-
- rc = cki_recalc(icls,0);
-
- if (!rc && parres) {
- int raise_tot = icls->ioprio - old ;
- parres->unused -= raise_tot ;
- }
-#endif
- }
- spin_unlock(&icls->shares_lock);
- if (icls->parent) {
- spin_unlock(&parres->shares_lock);
- }
- return rc;
-}
-
-static int cki_getshare(void *res, struct ckrm_shares * shares)
-{
- cki_icls_t *icls = res;
-
- if (!icls)
- return -EINVAL;
- *shares = icls->shares;
- return 0;
-}
-
-static int cki_getstats(void *res, struct seq_file *sfile)
-{
- cki_icls_t *icls = res;
-
- if (!icls)
- return -EINVAL;
-
-/*
- seq_printf(sfile, "%d my_read\n",atomic_read(&icls->mystats.blkrd));
- seq_printf(sfile, "%d my_write\n",atomic_read(&icls->mystats.blkwr));
- seq_printf(sfile, "%d total_read\n",atomic_read(&icls->stats.blkrd));
- seq_printf(sfile, "%d total_write\n",atomic_read(&icls->stats.blkwr));
-*/
-
- seq_printf(sfile, "%d total ioprio\n",icls->cnt_guarantee);
- seq_printf(sfile, "%d unused/default ioprio\n",icls->cnt_unused);
-
- return 0;
-}
-
-static int cki_resetstats(void *res)
-{
- cki_icls_t *icls = res;
-
- if (!res)
- return -EINVAL;
-
- init_icls_stats(icls);
- return 0;
-}
-
-static int cki_showconfig(void *res, struct seq_file *sfile)
-{
- return -ENOSYS;
-}
-
-static int cki_setconfig(void *res, const char *cfgstr)
-{
- return -ENOSYS;
-}
-
-static void cki_chgcls(void *tsk, void *oldres, void *newres)
-{
- /* cki_icls_t *oldicls = oldres, *newicls = newres; */
-
- /* Nothing needs to be done
- * Future requests from task will go to the new class's CFQ q
- * Old ones will continue to get satisfied from the original q
- *
- * Once CFQ moves to a persistent queue model and if refcounts on
- * icls's CFQ queues are used, a decrement op would be needed here
- */
-
- return;
-}
-
-
-
-struct ckrm_res_ctlr cki_rcbs = {
- .res_name = "io",
- .res_hdepth = 1,
- .resid = -1,
- .res_alloc = cki_alloc,
- .res_free = cki_free,
- .set_share_values = cki_setshare,
- .get_share_values = cki_getshare,
- .get_stats = cki_getstats,
- .reset_stats = cki_resetstats,
- .show_config = cki_showconfig,
- .set_config = cki_setconfig,
- .change_resclass = cki_chgcls,
-};
-
-
-
-int __init cki_init(void)
-{
- struct ckrm_classtype *clstype;
- int resid = cki_rcbs.resid;
-
- clstype = ckrm_find_classtype_by_name("taskclass");
- if (clstype == NULL) {
- printk(KERN_INFO "init_cki: classtype<taskclass> not found\n");
- return -ENOENT;
- }
-
- if (resid == -1) {
- resid = ckrm_register_res_ctlr(clstype, &cki_rcbs);
- if (resid != -1) {
- cki_rcbs.classtype = clstype;
- cki_cfq_set(cki_tsk_icls,cki_tsk_ioprio);
- }
- }
-
- return 0;
-}
-
-void __exit cki_exit(void)
-{
- ckrm_unregister_res_ctlr(&cki_rcbs);
- cki_rcbs.resid = -1;
- cki_rcbs.classtype = NULL;
- cki_cfq_set(NULL,NULL);
-}
-
-module_init(cki_init)
-module_exit(cki_exit)
-
-MODULE_AUTHOR("Shailabh Nagar <nagar@watson.ibm.com>");
-MODULE_DESCRIPTION("CKRM Disk I/O Resource Controller");
-MODULE_LICENSE("GPL");
-
+++ /dev/null
-/* ckrm-iostub.c - Stub file for ckrm_io module
- *
- * Copyright (C) Shailabh Nagar, IBM Corp. 2004
- *
- * Latest version, more details at http://ckrm.sf.net
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- */
-
-/* Changes
- *
- * 07 Aug 2004: Created
- *
- */
-
-#include <linux/spinlock.h>
-#include <linux/module.h>
-#include <linux/ckrm-io.h>
-
-static spinlock_t stub_lock = SPIN_LOCK_UNLOCKED;
-
-static icls_tsk_t tskiclstub;
-static icls_ioprio_t tskiopriostub;
-
-
-void cki_cfq_set(icls_tsk_t tskicls, icls_ioprio_t tskioprio)
-{
- spin_lock(&stub_lock);
- tskiclstub = tskicls;
- tskiopriostub = tskioprio;
- spin_unlock(&stub_lock);
-}
-
-void *cki_hash_key(struct task_struct *tsk)
-{
- void *ret;
- spin_lock(&stub_lock);
- if (tskiclstub)
- ret = (*tskiclstub)(tsk);
- else
- ret = (void *) tsk->tgid;
- spin_unlock(&stub_lock);
- return ret;
-}
-
-int cki_ioprio(struct task_struct *tsk)
-{
- int ret;
- spin_lock(&stub_lock);
- if (tskiopriostub)
- ret = (*tskiopriostub)(tsk);
- else
- ret = tsk->ioprio;
- spin_unlock(&stub_lock);
- return ret;
-}
-
-EXPORT_SYMBOL(cki_cfq_set);
-EXPORT_SYMBOL(cki_hash_key);
-EXPORT_SYMBOL(cki_ioprio);
e->elevator_put_req_fn(q, rq);
}
-void elv_set_congested(request_queue_t *q)
-{
- elevator_t *e = &q->elevator;
-
- if (e->elevator_set_congested_fn)
- e->elevator_set_congested_fn(q);
-}
-
int elv_may_queue(request_queue_t *q, int rw)
{
elevator_t *e = &q->elevator;
if (e->elevator_may_queue_fn)
return e->elevator_may_queue_fn(q, rw);
- return 1;
+ return 0;
}
void elv_completed_request(request_queue_t *q, struct request *rq)
printk("\n");
} else
DPRINT("botched floppy option\n");
- DPRINT("Read Documentation/floppy.txt\n");
+ DPRINT("Read linux/Documentation/floppy.txt\n");
return 0;
}
return 0;
}
-EXPORT_SYMBOL(blk_queue_resize_tags);
-
/**
* blk_queue_end_tag - end tag operations for a request
* @q: the request queue for the device
printk("Using %s io scheduler\n", chosen_elevator->elevator_name);
}
+ if (elevator_init(q, chosen_elevator))
+ goto out_elv;
+
q->request_fn = rfn;
q->back_merge_fn = ll_back_merge_fn;
q->front_merge_fn = ll_front_merge_fn;
blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
- /*
- * all done
- */
- if (!elevator_init(q, chosen_elevator))
- return q;
-
+ return q;
+out_elv:
blk_cleanup_queue(q);
out_init:
kmem_cache_free(requestq_cachep, q);
struct io_context *ioc = get_io_context(gfp_mask);
spin_lock_irq(q->queue_lock);
-
- if (!elv_may_queue(q, rw))
- goto out_lock;
-
if (rl->count[rw]+1 >= q->nr_requests) {
/*
* The queue will fill after this allocation, so set it as
}
}
- /*
- * The queue is full and the allocating process is not a
- * "batcher", and not exempted by the IO scheduler
- */
- if (blk_queue_full(q, rw) && !ioc_batching(ioc))
- goto out_lock;
+ if (blk_queue_full(q, rw)
+ && !ioc_batching(ioc) && !elv_may_queue(q, rw)) {
+ /*
+ * The queue is full and the allocating process is not a
+ * "batcher", and not exempted by the IO scheduler
+ */
+ spin_unlock_irq(q->queue_lock);
+ goto out;
+ }
rl->count[rw]++;
if (rl->count[rw] >= queue_congestion_on_threshold(q))
*/
spin_lock_irq(q->queue_lock);
freed_request(q, rw);
- goto out_lock;
+ spin_unlock_irq(q->queue_lock);
+ goto out;
}
if (ioc_batching(ioc))
out:
put_io_context(ioc);
return rq;
-out_lock:
- if (!rq)
- elv_set_congested(q);
- spin_unlock_irq(q->queue_lock);
- goto out;
}
/*
*
* A matching blk_rq_unmap_user() must be issued at the end of io, while
* still in process context.
- *
- * Note: The mapped bio may need to be bounced through blk_queue_bounce()
- * before being submitted to the device, as pages mapped may be out of
- * reach. It's the callers responsibility to make sure this happens. The
- * original bio must be passed back in to blk_rq_unmap_user() for proper
- * unmapping.
*/
struct request *blk_rq_map_user(request_queue_t *q, int rw, void __user *ubuf,
unsigned int len)
{
- unsigned long uaddr;
- struct request *rq;
+ struct request *rq = NULL;
+ char *buf = NULL;
struct bio *bio;
-
- if (len > (q->max_sectors << 9))
- return ERR_PTR(-EINVAL);
- if ((!len && ubuf) || (len && !ubuf))
- return ERR_PTR(-EINVAL);
+ int ret;
rq = blk_get_request(q, rw, __GFP_WAIT);
if (!rq)
return ERR_PTR(-ENOMEM);
- /*
- * if alignment requirement is satisfied, map in user pages for
- * direct dma. else, set up kernel bounce buffers
- */
- uaddr = (unsigned long) ubuf;
- if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
- bio = bio_map_user(q, NULL, uaddr, len, rw == READ);
- else
- bio = bio_copy_user(q, uaddr, len, rw == READ);
+ bio = bio_map_user(q, NULL, (unsigned long) ubuf, len, rw == READ);
+ if (!bio) {
+ int bytes = (len + 511) & ~511;
- if (!IS_ERR(bio)) {
- rq->bio = rq->biotail = bio;
- blk_rq_bio_prep(q, rq, bio);
+ buf = kmalloc(bytes, q->bounce_gfp | GFP_USER);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto fault;
+ }
- rq->buffer = rq->data = NULL;
- rq->data_len = len;
- return rq;
+ if (rw == WRITE) {
+ if (copy_from_user(buf, ubuf, len)) {
+ ret = -EFAULT;
+ goto fault;
+ }
+ } else
+ memset(buf, 0, len);
}
- /*
- * bio is the err-ptr
- */
- blk_put_request(rq);
- return (struct request *) bio;
+ rq->bio = rq->biotail = bio;
+ if (rq->bio)
+ blk_rq_bio_prep(q, rq, bio);
+
+ rq->buffer = rq->data = buf;
+ rq->data_len = len;
+ return rq;
+fault:
+ if (buf)
+ kfree(buf);
+ if (bio)
+ bio_unmap_user(bio, 1);
+ if (rq)
+ blk_put_request(rq);
+
+ return ERR_PTR(ret);
}
EXPORT_SYMBOL(blk_rq_map_user);
* Description:
* Unmap a request previously mapped by blk_rq_map_user().
*/
-int blk_rq_unmap_user(struct request *rq, struct bio *bio, unsigned int ulen)
+int blk_rq_unmap_user(struct request *rq, void __user *ubuf, struct bio *bio,
+ unsigned int ulen)
{
+ const int read = rq_data_dir(rq) == READ;
int ret = 0;
- if (bio) {
- if (bio_flagged(bio, BIO_USER_MAPPED))
- bio_unmap_user(bio);
- else
- ret = bio_uncopy_user(bio);
+ if (bio)
+ bio_unmap_user(bio, read);
+ if (rq->buffer) {
+ if (read && copy_to_user(ubuf, rq->buffer, ulen))
+ ret = -EFAULT;
+ kfree(rq->buffer);
}
blk_put_request(rq);
return queue_work(kblockd_workqueue, work);
}
-EXPORT_SYMBOL(kblockd_schedule_work);
-
void kblockd_flush(void)
{
flush_workqueue(kblockd_workqueue);
kobject_put(&disk->kobj);
}
}
-
-asmlinkage int sys_ioprio_set(int ioprio)
-{
- if (ioprio < IOPRIO_IDLE || ioprio > IOPRIO_RT)
- return -EINVAL;
- if (ioprio == IOPRIO_RT && !capable(CAP_SYS_ADMIN))
- return -EACCES;
-
- printk("%s: set ioprio %d\n", current->comm, ioprio);
- current->ioprio = ioprio;
- return 0;
-}
-
-asmlinkage int sys_ioprio_get(void)
-{
- return current->ioprio;
-}
-
static int sock_xmit(struct socket *sock, int send, void *buf, int size,
int msg_flags)
{
+ mm_segment_t oldfs;
int result;
struct msghdr msg;
- struct kvec iov;
+ struct iovec iov;
unsigned long flags;
sigset_t oldset;
+ oldfs = get_fs();
+ set_fs(get_ds());
/* Allow interception of SIGKILL only
* Don't allow other signals to interrupt the transmission */
spin_lock_irqsave(¤t->sighand->siglock, flags);
iov.iov_len = size;
msg.msg_name = NULL;
msg.msg_namelen = 0;
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
msg.msg_control = NULL;
msg.msg_controllen = 0;
msg.msg_namelen = 0;
msg.msg_flags = msg_flags | MSG_NOSIGNAL;
if (send)
- result = kernel_sendmsg(sock, &msg, &iov, 1, size);
+ result = sock_sendmsg(sock, &msg, size);
else
- result = kernel_recvmsg(sock, &msg, &iov, 1, size, 0);
+ result = sock_recvmsg(sock, &msg, size, 0);
if (signal_pending(current)) {
siginfo_t info;
recalc_sigpending();
spin_unlock_irqrestore(¤t->sighand->siglock, flags);
+ set_fs(oldfs);
return result;
}
-#define PPCSTRUCT(pi) ((Interface *)(pi->private))
+#define PPCSTRUCT(pi) ((PPC *)(pi->private))
/****************************************************************/
/*
static int bpck6_init_proto(PIA *pi)
{
- Interface *p = kmalloc(sizeof(Interface), GFP_KERNEL);
+ PPC *p = kmalloc(sizeof(PPC), GFP_KERNEL);
if (p) {
- memset(p, 0, sizeof(Interface));
+ memset(p, 0, sizeof(PPC));
pi->private = (unsigned long)p;
return 0;
}
unsigned cmd, unsigned long arg)
{
struct pcd_unit *cd = inode->i_bdev->bd_disk->private_data;
- return cdrom_ioctl(file, &cd->info, inode, cmd, arg);
+ return cdrom_ioctl(&cd->info, inode, cmd, arg);
}
static int pcd_block_media_changed(struct gendisk *disk)
u8 org_data; // original LPT data port contents
u8 org_ctrl; // original LPT control port contents
u8 cur_ctrl; // current control port contents
-} Interface;
+} PPC;
//***************************************************************************
//***************************************************************************
-static int ppc6_select(Interface *ppc);
-static void ppc6_deselect(Interface *ppc);
-static void ppc6_send_cmd(Interface *ppc, u8 cmd);
-static void ppc6_wr_data_byte(Interface *ppc, u8 data);
-static u8 ppc6_rd_data_byte(Interface *ppc);
-static u8 ppc6_rd_port(Interface *ppc, u8 port);
-static void ppc6_wr_port(Interface *ppc, u8 port, u8 data);
-static void ppc6_rd_data_blk(Interface *ppc, u8 *data, long count);
-static void ppc6_wait_for_fifo(Interface *ppc);
-static void ppc6_wr_data_blk(Interface *ppc, u8 *data, long count);
-static void ppc6_rd_port16_blk(Interface *ppc, u8 port, u8 *data, long length);
-static void ppc6_wr_port16_blk(Interface *ppc, u8 port, u8 *data, long length);
-static void ppc6_wr_extout(Interface *ppc, u8 regdata);
-static int ppc6_open(Interface *ppc);
-static void ppc6_close(Interface *ppc);
+static int ppc6_select(PPC *ppc);
+static void ppc6_deselect(PPC *ppc);
+static void ppc6_send_cmd(PPC *ppc, u8 cmd);
+static void ppc6_wr_data_byte(PPC *ppc, u8 data);
+static u8 ppc6_rd_data_byte(PPC *ppc);
+static u8 ppc6_rd_port(PPC *ppc, u8 port);
+static void ppc6_wr_port(PPC *ppc, u8 port, u8 data);
+static void ppc6_rd_data_blk(PPC *ppc, u8 *data, long count);
+static void ppc6_wait_for_fifo(PPC *ppc);
+static void ppc6_wr_data_blk(PPC *ppc, u8 *data, long count);
+static void ppc6_rd_port16_blk(PPC *ppc, u8 port, u8 *data, long length);
+static void ppc6_wr_port16_blk(PPC *ppc, u8 port, u8 *data, long length);
+static void ppc6_wr_extout(PPC *ppc, u8 regdata);
+static int ppc6_open(PPC *ppc);
+static void ppc6_close(PPC *ppc);
//***************************************************************************
-static int ppc6_select(Interface *ppc)
+static int ppc6_select(PPC *ppc)
{
u8 i, j, k;
//***************************************************************************
-static void ppc6_deselect(Interface *ppc)
+static void ppc6_deselect(PPC *ppc)
{
if (ppc->mode & 4) // EPP
ppc->cur_ctrl |= port_init;
//***************************************************************************
-static void ppc6_send_cmd(Interface *ppc, u8 cmd)
+static void ppc6_send_cmd(PPC *ppc, u8 cmd)
{
switch(ppc->mode)
{
//***************************************************************************
-static void ppc6_wr_data_byte(Interface *ppc, u8 data)
+static void ppc6_wr_data_byte(PPC *ppc, u8 data)
{
switch(ppc->mode)
{
//***************************************************************************
-static u8 ppc6_rd_data_byte(Interface *ppc)
+static u8 ppc6_rd_data_byte(PPC *ppc)
{
u8 data = 0;
//***************************************************************************
-static u8 ppc6_rd_port(Interface *ppc, u8 port)
+static u8 ppc6_rd_port(PPC *ppc, u8 port)
{
ppc6_send_cmd(ppc,(u8)(port | ACCESS_PORT | ACCESS_READ));
//***************************************************************************
-static void ppc6_wr_port(Interface *ppc, u8 port, u8 data)
+static void ppc6_wr_port(PPC *ppc, u8 port, u8 data)
{
ppc6_send_cmd(ppc,(u8)(port | ACCESS_PORT | ACCESS_WRITE));
//***************************************************************************
-static void ppc6_rd_data_blk(Interface *ppc, u8 *data, long count)
+static void ppc6_rd_data_blk(PPC *ppc, u8 *data, long count)
{
switch(ppc->mode)
{
//***************************************************************************
-static void ppc6_wait_for_fifo(Interface *ppc)
+static void ppc6_wait_for_fifo(PPC *ppc)
{
int i;
//***************************************************************************
-static void ppc6_wr_data_blk(Interface *ppc, u8 *data, long count)
+static void ppc6_wr_data_blk(PPC *ppc, u8 *data, long count)
{
switch(ppc->mode)
{
//***************************************************************************
-static void ppc6_rd_port16_blk(Interface *ppc, u8 port, u8 *data, long length)
+static void ppc6_rd_port16_blk(PPC *ppc, u8 port, u8 *data, long length)
{
length = length << 1;
//***************************************************************************
-static void ppc6_wr_port16_blk(Interface *ppc, u8 port, u8 *data, long length)
+static void ppc6_wr_port16_blk(PPC *ppc, u8 port, u8 *data, long length)
{
length = length << 1;
//***************************************************************************
-static void ppc6_wr_extout(Interface *ppc, u8 regdata)
+static void ppc6_wr_extout(PPC *ppc, u8 regdata)
{
ppc6_send_cmd(ppc,(REG_VERSION | ACCESS_REG | ACCESS_WRITE));
//***************************************************************************
-static int ppc6_open(Interface *ppc)
+static int ppc6_open(PPC *ppc)
{
int ret;
//***************************************************************************
-static void ppc6_close(Interface *ppc)
+static void ppc6_close(PPC *ppc)
{
ppc6_deselect(ppc);
}
if (size < 0)
return -EINVAL;
if (size > (q->max_sectors << 9))
- size = q->max_sectors << 9;
+ return -EINVAL;
q->sg_reserved_size = size;
return 0;
return put_user(1, p);
}
-#define CMD_READ_SAFE 0x01
-#define CMD_WRITE_SAFE 0x02
-#define safe_for_read(cmd) [cmd] = CMD_READ_SAFE
-#define safe_for_write(cmd) [cmd] = CMD_WRITE_SAFE
-
-static int verify_command(struct file *file, unsigned char *cmd)
-{
- static const unsigned char cmd_type[256] = {
-
- /* Basic read-only commands */
- safe_for_read(TEST_UNIT_READY),
- safe_for_read(REQUEST_SENSE),
- safe_for_read(READ_6),
- safe_for_read(READ_10),
- safe_for_read(READ_12),
- safe_for_read(READ_16),
- safe_for_read(READ_BUFFER),
- safe_for_read(READ_LONG),
- safe_for_read(INQUIRY),
- safe_for_read(MODE_SENSE),
- safe_for_read(MODE_SENSE_10),
- safe_for_read(START_STOP),
-
- /* Audio CD commands */
- safe_for_read(GPCMD_PLAY_CD),
- safe_for_read(GPCMD_PLAY_AUDIO_10),
- safe_for_read(GPCMD_PLAY_AUDIO_MSF),
- safe_for_read(GPCMD_PLAY_AUDIO_TI),
-
- /* CD/DVD data reading */
- safe_for_read(GPCMD_READ_CD),
- safe_for_read(GPCMD_READ_CD_MSF),
- safe_for_read(GPCMD_READ_DISC_INFO),
- safe_for_read(GPCMD_READ_CDVD_CAPACITY),
- safe_for_read(GPCMD_READ_DVD_STRUCTURE),
- safe_for_read(GPCMD_READ_HEADER),
- safe_for_read(GPCMD_READ_TRACK_RZONE_INFO),
- safe_for_read(GPCMD_READ_SUBCHANNEL),
- safe_for_read(GPCMD_READ_TOC_PMA_ATIP),
- safe_for_read(GPCMD_REPORT_KEY),
- safe_for_read(GPCMD_SCAN),
-
- /* Basic writing commands */
- safe_for_write(WRITE_6),
- safe_for_write(WRITE_10),
- safe_for_write(WRITE_VERIFY),
- safe_for_write(WRITE_12),
- safe_for_write(WRITE_VERIFY_12),
- safe_for_write(WRITE_16),
- safe_for_write(WRITE_LONG),
- };
- unsigned char type = cmd_type[cmd[0]];
-
- /* Anybody who can open the device can do a read-safe command */
- if (type & CMD_READ_SAFE)
- return 0;
-
- /* Write-safe commands just require a writable open.. */
- if (type & CMD_WRITE_SAFE) {
- if (file->f_mode & FMODE_WRITE)
- return 0;
- }
-
- /* And root can do any command.. */
- if (capable(CAP_SYS_RAWIO))
- return 0;
-
- /* Otherwise fail it with an "Operation not permitted" */
- return -EPERM;
-}
-
-static int sg_io(struct file *file, request_queue_t *q,
- struct gendisk *bd_disk, struct sg_io_hdr *hdr)
+static int sg_io(request_queue_t *q, struct gendisk *bd_disk,
+ struct sg_io_hdr *hdr)
{
unsigned long start_time;
int reading, writing;
return -EINVAL;
if (copy_from_user(cmd, hdr->cmdp, hdr->cmd_len))
return -EFAULT;
- if (verify_command(file, cmd))
- return -EPERM;
/*
* we'll do that later
rq->flags |= REQ_BLOCK_PC;
bio = rq->bio;
- /*
- * bounce this after holding a reference to the original bio, it's
- * needed for proper unmapping
- */
- if (rq->bio)
- blk_queue_bounce(q, &rq->bio);
-
rq->timeout = (hdr->timeout * HZ) / 1000;
if (!rq->timeout)
rq->timeout = q->sg_timeout;
hdr->sb_len_wr = len;
}
- if (blk_rq_unmap_user(rq, bio, hdr->dxfer_len))
+ if (blk_rq_unmap_user(rq, hdr->dxferp, bio, hdr->dxfer_len))
return -EFAULT;
/* may not have succeeded, but output values written to control
#define READ_DEFECT_DATA_TIMEOUT (60 * HZ )
#define OMAX_SB_LEN 16 /* For backward compatibility */
-static int sg_scsi_ioctl(struct file *file, request_queue_t *q,
- struct gendisk *bd_disk, Scsi_Ioctl_Command __user *sic)
+static int sg_scsi_ioctl(request_queue_t *q, struct gendisk *bd_disk,
+ Scsi_Ioctl_Command __user *sic)
{
struct request *rq;
int err, in_len, out_len, bytes, opcode, cmdlen;
if (copy_from_user(buffer, sic->data + cmdlen, in_len))
goto error;
- err = verify_command(file, rq->cmd);
- if (err)
- goto error;
-
switch (opcode) {
case SEND_DIAGNOSTIC:
case FORMAT_UNIT:
return err;
}
-int scsi_cmd_ioctl(struct file *file, struct gendisk *bd_disk, unsigned int cmd, void __user *arg)
+int scsi_cmd_ioctl(struct gendisk *bd_disk, unsigned int cmd, void __user *arg)
{
request_queue_t *q;
struct request *rq;
err = -EFAULT;
if (copy_from_user(&hdr, arg, sizeof(hdr)))
break;
- err = sg_io(file, q, bd_disk, &hdr);
+ err = sg_io(q, bd_disk, &hdr);
if (err == -EFAULT)
break;
hdr.cmdp = ((struct cdrom_generic_command __user*) arg)->cmd;
hdr.cmd_len = sizeof(cgc.cmd);
- err = sg_io(file, q, bd_disk, &hdr);
+ err = sg_io(q, bd_disk, &hdr);
if (err == -EFAULT)
break;
if (!arg)
break;
- err = sg_scsi_ioctl(file, q, bd_disk, arg);
+ err = sg_scsi_ioctl(q, bd_disk, arg);
break;
case CDROMCLOSETRAY:
close = 1;
disk->fops = &floppy_fops;
disk->private_data = &floppy_states[i];
disk->queue = swim3_queue;
- disk->flags |= GENHD_FL_REMOVABLE;
sprintf(disk->disk_name, "fd%d", i);
sprintf(disk->devfs_name, "floppy/%d", i);
set_capacity(disk, 2880);
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/sched.h>
-#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
outb(0x80, iobase + 0x30);
/* Wait some time */
- msleep(10);
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(HZ / 100);
/* Turn FPGA on */
outb(0x00, iobase + 0x30);
outb((0x0f << RTS_LEVEL_SHIFT_BITS) | 1, iobase + REG_RX_CONTROL);
/* Timeout before it is safe to send the first HCI packet */
- msleep(1250);
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout((HZ * 5) / 4); // or set it to 3/2
/* Register HCI device */
if (hci_register_dev(hdev) < 0) {
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/slab.h>
+#include <linux/delay.h>
#include <linux/types.h>
#include <linux/sched.h>
-#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
}
/* Timeout before it is safe to send the first HCI packet */
- msleep(1000);
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(HZ);
/* Register HCI device */
err = hci_register_dev(hdev);
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/sched.h>
-#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
btuart_change_speed(info, DEFAULT_BAUD_RATE);
/* Timeout before it is safe to send the first HCI packet */
- msleep(1000);
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(HZ);
/* Register HCI device */
if (hci_register_dev(hdev) < 0) {
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/sched.h>
-#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
spin_unlock_irqrestore(&(info->lock), flags);
/* Timeout before it is safe to send the first HCI packet */
- msleep(2000);
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(HZ * 2);
/* Register HCI device */
if (hci_register_dev(hdev) < 0) {
struct sk_buff *skb;
unsigned long flags;
- BT_DBG("hu %p retransmitting %u pkts", hu, bcsp->unack.qlen);
-
+ BT_ERR("Timeout, retransmitting %u pkts", bcsp->unack.qlen);
spin_lock_irqsave(&bcsp->unack.lock, flags);
while ((skb = __skb_dequeue_tail(&bcsp->unack)) != NULL) {
#define URB_ZERO_PACKET 0
#endif
-#define VERSION "2.7"
+#define VERSION "2.6"
static struct usb_driver hci_usb_driver;
/* AVM BlueFRITZ! USB v2.0 */
{ USB_DEVICE(0x057c, 0x3800) },
- /* Bluetooth Ultraport Module from IBM */
- { USB_DEVICE(0x04bf, 0x030a) },
+ /* Ericsson with non-standard id */
+ { USB_DEVICE(0x0bdb, 0x1002) },
- /* ALPS Modules with non-standard id */
- { USB_DEVICE(0x044e, 0x3001) },
+ /* ALPS Module with non-standard id */
{ USB_DEVICE(0x044e, 0x3002) },
- /* Ericsson with non-standard id */
- { USB_DEVICE(0x0bdb, 0x1002) },
+ /* Bluetooth Ultraport Module from IBM */
+ { USB_DEVICE(0x04bf, 0x030a) },
{ } /* Terminating entry */
};
/* Broadcom BCM2035 */
{ USB_DEVICE(0x0a5c, 0x200a), .driver_info = HCI_RESET },
- /* ISSC Bluetooth Adapter v3.1 */
- { USB_DEVICE(0x1131, 0x1001), .driver_info = HCI_RESET },
-
/* Digianswer device */
{ USB_DEVICE(0x08fd, 0x0001), .driver_info = HCI_DIGIANSWER },
BT_DBG("%s", hdev->name);
- for (i = 0; i < 4; i++)
+ for (i=0; i < 4; i++)
skb_queue_purge(&husb->transmit_q[i]);
return 0;
}
+static inline void hci_usb_wait_for_urb(struct urb *urb)
+{
+ while (atomic_read(&urb->kref.refcount) > 1) {
+ current->state = TASK_UNINTERRUPTIBLE;
+ schedule_timeout((5 * HZ + 999) / 1000);
+ }
+}
+
static void hci_usb_unlink_urbs(struct hci_usb *husb)
{
int i;
BT_DBG("%s", husb->hdev->name);
- for (i = 0; i < 4; i++) {
+ for (i=0; i < 4; i++) {
struct _urb *_urb;
struct urb *urb;
urb = &_urb->urb;
BT_DBG("%s unlinking _urb %p type %d urb %p",
husb->hdev->name, _urb, _urb->type, urb);
- usb_kill_urb(urb);
+ usb_unlink_urb(urb);
+ hci_usb_wait_for_urb(urb);
_urb_queue_tail(__completed_q(husb, _urb->type), _urb);
}
}
file->private_data = hci_vhci;
- return nonseekable_open(inode, file);
+ return 0;
}
static int hci_vhci_chr_close(struct inode *inode, struct file *file)
Werner Zimmermann, August 8, 1995
V1.70 Multisession support now is completed, but there is still not
enough testing done. If you can test it, please contact me. For
- details please read Documentation/cdrom/aztcd
+ details please read /usr/src/linux/Documentation/cdrom/aztcd
Werner Zimmermann, August 19, 1995
V1.80 Modification to suit the new kernel boot procedure introduced
with kernel 1.3.33. Will definitely not work with older kernels.
if (!cdrom_is_mrw(cdi, &mrw_write))
mrw = 1;
- if (CDROM_CAN(CDC_MO_DRIVE))
- ram_write = 1;
- else
- (void) cdrom_is_random_writable(cdi, &ram_write);
-
+ (void) cdrom_is_random_writable(cdi, &ram_write);
+
if (mrw)
cdi->mask &= ~CDC_MRW;
else
else if (CDROM_CAN(CDC_DVD_RAM))
ret = cdrom_dvdram_open_write(cdi);
else if (CDROM_CAN(CDC_RAM) &&
- !CDROM_CAN(CDC_CD_R|CDC_CD_RW|CDC_DVD|CDC_DVD_R|CDC_MRW|CDC_MO_DRIVE))
+ !CDROM_CAN(CDC_CD_R|CDC_CD_RW|CDC_DVD|CDC_DVD_R|CDC_MRW))
ret = cdrom_ram_open_write(cdi);
else if (CDROM_CAN(CDC_MO_DRIVE))
ret = mo_open_write(cdi);
goto err;
if (fp->f_mode & FMODE_WRITE) {
ret = -EROFS;
- if (cdrom_open_write(cdi))
- goto err;
if (!CDROM_CAN(CDC_RAM))
goto err;
+ if (cdrom_open_write(cdi))
+ goto err;
ret = 0;
}
}
struct packet_command cgc;
int nr, ret;
- cdi->last_sense = 0;
-
memset(&cgc, 0, sizeof(cgc));
/*
if (!q)
return -ENXIO;
- cdi->last_sense = 0;
-
while (nframes) {
nr = nframes;
if (cdi->cdda_method == CDDA_BPC_SINGLE)
rq->timeout = 60 * HZ;
bio = rq->bio;
- if (rq->bio)
- blk_queue_bounce(q, &rq->bio);
-
if (blk_execute_rq(q, cdi->disk, rq)) {
struct request_sense *s = rq->sense;
ret = -EIO;
cdi->last_sense = s->sense_key;
}
- if (blk_rq_unmap_user(rq, bio, len))
+ if (blk_rq_unmap_user(rq, ubuf, bio, len))
ret = -EFAULT;
if (ret)
nframes -= nr;
lba += nr;
- ubuf += len;
}
return ret;
* these days. ATAPI / SCSI specific code now mainly resides in
* mmc_ioct().
*/
-int cdrom_ioctl(struct file * file, struct cdrom_device_info *cdi,
- struct inode *ip, unsigned int cmd, unsigned long arg)
+int cdrom_ioctl(struct cdrom_device_info *cdi, struct inode *ip,
+ unsigned int cmd, unsigned long arg)
{
struct cdrom_device_ops *cdo = cdi->ops;
int ret;
/* Try the generic SCSI command ioctl's first.. */
- ret = scsi_cmd_ioctl(file, ip->i_bdev->bd_disk, cmd, (void __user *)arg);
+ ret = scsi_cmd_ioctl(ip->i_bdev->bd_disk, cmd, (void __user *)arg);
if (ret != -ENOTTY)
return ret;
} cdrom_sysctl_settings;
int cdrom_sysctl_info(ctl_table *ctl, int write, struct file * filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
int pos;
struct cdrom_device_info *cdi;
char *info = cdrom_sysctl_settings.info;
- if (!*lenp || (*ppos && !write)) {
+ if (!*lenp || (filp->f_pos && !write)) {
*lenp = 0;
return 0;
}
strcpy(info+pos,"\n\n");
- return proc_dostring(ctl, write, filp, buffer, lenp, ppos);
+ return proc_dostring(ctl, write, filp, buffer, lenp);
}
/* Unfortunately, per device settings are not implemented through
}
static int cdrom_sysctl_handler(ctl_table *ctl, int write, struct file * filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
int *valp = ctl->data;
int val = *valp;
int ret;
- ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
+ ret = proc_dointvec(ctl, write, filp, buffer, lenp);
if (write && *valp != val) {
static int scd_block_ioctl(struct inode *inode, struct file *file,
unsigned cmd, unsigned long arg)
{
- return cdrom_ioctl(file, &scd_info, inode, cmd, arg);
+ return cdrom_ioctl(&scd_info, inode, cmd, arg);
}
static int scd_block_media_changed(struct gendisk *disk)
static int cm206_block_ioctl(struct inode *inode, struct file *file,
unsigned cmd, unsigned long arg)
{
- return cdrom_ioctl(file, &cm206_info, inode, cmd, arg);
+ return cdrom_ioctl(&cm206_info, inode, cmd, arg);
}
static int cm206_block_media_changed(struct gendisk *disk)
static int mcd_block_ioctl(struct inode *inode, struct file *file,
unsigned cmd, unsigned long arg)
{
- return cdrom_ioctl(file, &mcd_info, inode, cmd, arg);
+ return cdrom_ioctl(&mcd_info, inode, cmd, arg);
}
static int mcd_block_media_changed(struct gendisk *disk)
unsigned cmd, unsigned long arg)
{
struct s_drive_stuff *p = inode->i_bdev->bd_disk->private_data;
- return cdrom_ioctl(file, &p->info, inode, cmd, arg);
+ return cdrom_ioctl(&p->info, inode, cmd, arg);
}
static int mcdx_block_media_changed(struct gendisk *disk)
#endif /* MULTISESSION */
if (disk_info.multi)
printk(KERN_WARNING "optcd: Multisession support experimental, "
- "see Documentation/cdrom/optcd\n");
+ "see linux/Documentation/cdrom/optcd\n");
DEBUG((DEBUG_TOC, "exiting update_toc"));
/*==========================================================================*/
-#ifdef FUTURE
+#if FUTURE
static DECLARE_WAIT_QUEUE_HEAD(sbp_waitq);
#endif /* FUTURE */
u_char TocEnt_number;
u_char TocEnt_format; /* em */
u_int TocEnt_address;
-#ifdef SAFE_MIXED
+#if SAFE_MIXED
char has_data;
#endif /* SAFE_MIXED */
u_char ored_ctl_adr; /* to detect if CDROM contains data tracks */
return (0);
}
/*==========================================================================*/
-#ifdef FUTURE
+#if FUTURE
static int cc_SubChanInfo(int frame, int count, u_char *buffer)
/* "frame" is a RED BOOK (msf-bin) address */
{
return (0);
}
/*==========================================================================*/
-#ifdef FUTURE
+#if FUTURE
/*
* obtain if requested service disturbs current audio state
*/
/*==========================================================================*/
-#ifdef FUTURE
+#if FUTURE
/*
* called always if driver gets entered
* returns 0 or ERROR2 or ERROR15
case CDROMREADMODE1:
msg(DBG_IOC,"ioctl: CDROMREADMODE1 requested.\n");
-#ifdef SAFE_MIXED
+#if SAFE_MIXED
if (current_drive->has_data>1) RETURN_UP(-EBUSY);
#endif /* SAFE_MIXED */
cc_ModeSelect(CD_FRAMESIZE);
case CDROMREADMODE2: /* not usable at the moment */
msg(DBG_IOC,"ioctl: CDROMREADMODE2 requested.\n");
-#ifdef SAFE_MIXED
+#if SAFE_MIXED
if (current_drive->has_data>1) RETURN_UP(-EBUSY);
#endif /* SAFE_MIXED */
cc_ModeSelect(CD_FRAMESIZE_RAW1);
if (famL_drive) RETURN_UP(-EINVAL);
if (famV_drive) RETURN_UP(-EINVAL);
if (famT_drive) RETURN_UP(-EINVAL);
-#ifdef SAFE_MIXED
+#if SAFE_MIXED
if (current_drive->has_data>1) RETURN_UP(-EBUSY);
#endif /* SAFE_MIXED */
if (current_drive->aud_buf==NULL) RETURN_UP(-EINVAL);
- if (copy_from_user(&read_audio, (void __user *)arg,
+ if (copy_from_user(&read_audio, (void *)arg,
sizeof(struct cdrom_read_audio)))
RETURN_UP(-EFAULT);
if (read_audio.nframes < 0 || read_audio.nframes>current_drive->sbp_audsiz) RETURN_UP(-EINVAL);
msg(DBG_AUD,"read_audio: cc_ReadError was necessary after read: %02X\n",i);
continue;
}
- if (copy_to_user(read_audio.buf,
- current_drive->aud_buf,
+ if (copy_to_user((u_char *)read_audio.buf,
+ (u_char *) current_drive->aud_buf,
read_audio.nframes * CD_FRAMESIZE_RAW))
RETURN_UP(-EFAULT);
msg(DBG_AUD,"read_audio: copy_to_user done.\n");
case CDROMPLAYMSF:
msg(DBG_IOC,"ioctl: CDROMPLAYMSF entered.\n");
-#ifdef SAFE_MIXED
+#if SAFE_MIXED
if (current_drive->has_data>1) RETURN_UP(-EBUSY);
#endif /* SAFE_MIXED */
if (current_drive->audio_state==audio_playing)
case CDROMPLAYTRKIND: /* Play a track. This currently ignores index. */
msg(DBG_IOC,"ioctl: CDROMPLAYTRKIND entered.\n");
-#ifdef SAFE_MIXED
+#if SAFE_MIXED
if (current_drive->has_data>1) RETURN_UP(-EBUSY);
#endif /* SAFE_MIXED */
if (current_drive->audio_state==audio_playing)
case CDROMSTOP: /* Spin down the drive */
msg(DBG_IOC,"ioctl: CDROMSTOP entered.\n");
-#ifdef SAFE_MIXED
+#if SAFE_MIXED
if (current_drive->has_data>1) RETURN_UP(-EBUSY);
#endif /* SAFE_MIXED */
i=cc_Pause_Resume(1);
goto request_loop;
}
-#ifdef FUTURE
+#if FUTURE
i=prepare(0,0); /* at moment not really a hassle check, but ... */
if (i!=0)
msg(DBG_INF,"\"prepare\" tells error %d -- ignored\n", i);
sbp_sleep(0);
if (sbp_data(req) != 0)
{
-#ifdef SAFE_MIXED
+#if SAFE_MIXED
current_drive->has_data=2; /* is really a data disk */
#endif /* SAFE_MIXED */
#ifdef DEBUG_GTL
unsigned cmd, unsigned long arg)
{
struct sbpcd_drive *p = inode->i_bdev->bd_disk->private_data;
- return cdrom_ioctl(file, p->sbpcd_infop, inode, cmd, arg);
+ return cdrom_ioctl(p->sbpcd_infop, inode, cmd, arg);
}
static int sbpcd_block_media_changed(struct gendisk *disk)
if ((current_drive->ored_ctl_adr&0x40)==0)
{
msg(DBG_INF,"CD contains no data tracks.\n");
-#ifdef SAFE_MIXED
+#if SAFE_MIXED
current_drive->has_data=0;
#endif /* SAFE_MIXED */
}
-#ifdef SAFE_MIXED
+#if SAFE_MIXED
else if (current_drive->has_data<1) current_drive->has_data=1;
#endif /* SAFE_MIXED */
}
if (p->f_eject) cc_SpinDown();
p->diskstate_flags &= ~cd_size_bit;
p->open_count=0;
-#ifdef SAFE_MIXED
+#if SAFE_MIXED
p->has_data=0;
#endif /* SAFE_MIXED */
}
if (port_index>0)
{
- msg(DBG_INF, "You should read Documentation/cdrom/sbpcd\n");
+ msg(DBG_INF, "You should read linux/Documentation/cdrom/sbpcd\n");
msg(DBG_INF, "and then configure sbpcd.h for your hardware.\n");
}
check_datarate();
if (p->drv_id==-1) continue;
switch_drive(p);
-#ifdef SAFE_MIXED
+#if SAFE_MIXED
p->has_data=0;
#endif /* SAFE_MIXED */
/*
current_drive->diskstate_flags &= ~toc_bit;
/* we *don't* need invalidate here, it's done by caller */
current_drive->diskstate_flags &= ~cd_size_bit;
-#ifdef SAFE_MIXED
+#if SAFE_MIXED
current_drive->has_data=0;
#endif /* SAFE_MIXED */
/*
* Attention! This file contains user-serviceable parts!
* I recommend to make use of it...
- * If you feel helpless, look into Documentation/cdrom/sbpcd
+ * If you feel helpless, look into linux/Documentation/cdrom/sbpcd
* (good idea anyway, at least before mailing me).
*
* The definitions for the first controller can get overridden by
unsigned cmd, unsigned long arg)
{
struct disk_info *di = inode->i_bdev->bd_disk->private_data;
- return cdrom_ioctl(file, &di->viocd_info, inode, cmd, arg);
+ return cdrom_ioctl(&di->viocd_info, inode, cmd, arg);
}
static int viocd_blk_media_changed(struct gendisk *disk)
config ISI
tristate "Multi-Tech multiport card support (EXPERIMENTAL)"
- depends on SERIAL_NONSTANDARD && PCI && EXPERIMENTAL && BROKEN_ON_SMP && m
+ depends on SERIAL_NONSTANDARD && EXPERIMENTAL && BROKEN_ON_SMP && m
help
This is a driver for the Multi-Tech cards which provide several
serial ports. The driver is experimental and can currently only be
config SYNCLINK
tristate "Microgate SyncLink card support"
- depends on SERIAL_NONSTANDARD && PCI
+ depends on SERIAL_NONSTANDARD
help
Provides support for the SyncLink ISA and PCI multiprotocol serial
adapters. These adapters support asynchronous and HDLC bit
If you have an Alchemy AU1000 processor (MIPS based) and you want
to use serial ports, say Y. Otherwise, say N.
+config SGI_L1_SERIAL
+ bool "SGI Altix L1 serial support"
+ depends on SERIAL_NONSTANDARD && IA64 && DISCONTIGMEM
+ help
+ If you have an SGI Altix and you want to use the serial port
+ connected to the system controller (you want this!), say Y.
+ Otherwise, say N.
+
+config SGI_L1_SERIAL_CONSOLE
+ bool "SGI Altix L1 serial console support"
+ depends on SGI_L1_SERIAL
+ help
+ If you have an SGI Altix and you would like to use the system
+ controller serial port as your console (you want this!),
+ say Y. Otherwise, say N.
+
config AU1000_SERIAL_CONSOLE
bool "Enable Au1000 serial console"
depends on AU1000_UART
console. This driver allows each pSeries partition to have a console
which is accessed via the HMC.
-config HVCS
- tristate "IBM Hypervisor Virtual Console Server support"
- depends on PPC_PSERIES
- help
- Partitionable IBM Power5 ppc64 machines allow hosting of
- firmware virtual consoles from one Linux partition by
- another Linux partition. This driver allows console data
- from Linux partitions to be accessed through TTY device
- interfaces in the device tree of a Linux partition running
- this driver.
-
- To compile this driver as a module, choose M here: the
- module will be called hvcs.ko. Additionally, this module
- will depend on arch specific APIs exported from hvcserver.ko
- which will also be compiled when this driver is built as a
- module.
-
config QIC02_TAPE
tristate "QIC-02 tape support"
help
config APPLICOM
tristate "Applicom intelligent fieldbus card support"
- depends on PCI
---help---
This driver provides the kernel-side support for the intelligent
fieldbus cards made by Applicom International. More information
config FTAPE
tristate "Ftape (QIC-80/Travan) support"
- depends on BROKEN_ON_SMP && (ALPHA || X86)
+ depends on BROKEN_ON_SMP
---help---
If you have a tape drive that is connected to your floppy
controller, say Y here.
is assumed the platform called hpet_alloc with the RTC IRQ values for
the HPET timers.
-config HPET_MMAP
- bool "Allow mmap of HPET"
- default y
+config HPET_NOMMAP
+ bool "HPET - Control mmap capability."
+ default n
depends on HPET
help
- If you say Y here, user applications will be able to mmap
- the HPET registers.
-
- In some hardware implementations, the page containing HPET
- registers may also contain other things that shouldn't be
- exposed to the user. If this applies to your hardware,
- say N here.
+ If you say Y here, then the mmap interface for the HPET driver returns ENOSYS.
+ Some hardware implementations might not want all the memory in the page the
+ HPET control registers reside to be exposed.
config MAX_RAW_DEVS
int "Maximum number of RAW devices to support (1-8192)"
obj-$(CONFIG_RIO) += rio/ generic_serial.o
obj-$(CONFIG_HVC_CONSOLE) += hvc_console.o
obj-$(CONFIG_RAW_DRIVER) += raw.o
+obj-$(CONFIG_SGI_L1_SERIAL) += sn_serial.o
obj-$(CONFIG_VIOCONS) += viocons.o
obj-$(CONFIG_VIOTAPE) += viotape.o
-obj-$(CONFIG_HVCS) += hvcs.o
obj-$(CONFIG_PRINTER) += lp.o
obj-$(CONFIG_TIPAR) += tipar.o
../net/scc.c
A subset of the documentation is in
- Documentation/networking/z8530drv.txt
+ ../../Documentation/networking/z8530drv.txt
This option gives you AGP support for the GLX component of XFree86 4.x
on Intel 440LX/BX/GX, 815, 820, 830, 840, 845, 850, 860, 875,
E7205 and E7505 chipsets and full support for the 810, 815, 830M, 845G,
- 852GM, 855GM, 865G and I915 integrated graphics chipsets.
+ 852GM, 855GM and 865G integrated graphics chipsets.
You should say Y here if you use XFree86 3.3.6 or 4.x and want to
use GLX or DRI, or if you have any Intel integrated graphics
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
- /* VIA K8T890 */
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_VIA,
- .device = PCI_DEVICE_ID_VIA_3238_0,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- },
- /* VIA K8T800/K8M800/K8N800 */
- {
- .class = (PCI_CLASS_BRIDGE_HOST << 8),
- .class_mask = ~0,
- .vendor = PCI_VENDOR_ID_VIA,
- .device = PCI_DEVICE_ID_VIA_838X_1,
+ .device = PCI_DEVICE_ID_VIA_8380_0,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
-
/* NForce3 */
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
/*
- * HP zx1 AGPGART routines.
- *
- * (c) Copyright 2002, 2003 Hewlett-Packard Development Company, L.P.
- * Bjorn Helgaas <bjorn.helgaas@hp.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * HP AGPGART routines.
+ * Copyright (C) 2002-2003 Hewlett-Packard Co
+ * Bjorn Helgaas <bjorn_helgaas@hp.com>
*/
#include <linux/acpi.h>
/*
* Intel(R) 855GM/852GM and 865G support added by David Dawes
* <dawes@tungstengraphics.com>.
- *
- * Intel(R) 915G support added by Alan Hourihane
- * <alanh@tungstengraphics.com>.
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
-#include <linux/pagemap.h>
#include <linux/agp_backend.h>
#include "agp.h"
#define INTEL_I850_MCHCFG 0x50
#define INTEL_I850_ERRSTS 0xc8
-/* intel 915G registers */
-#define I915_GMADDR 0x18
-#define I915_MMADDR 0x10
-#define I915_PTEADDR 0x1C
-#define I915_GMCH_GMS_STOLEN_48M (0x6 << 4)
-#define I915_GMCH_GMS_STOLEN_64M (0x7 << 4)
-
-
/* Intel 7505 registers */
#define INTEL_I7505_APSIZE 0x74
#define INTEL_I7505_NCAPID 0x60
return;
}
-/* Exists to support ARGB cursors */
-static void *i8xx_alloc_pages(void)
-{
- struct page * page;
-
- page = alloc_pages(GFP_KERNEL, 2);
- if (page == NULL) {
- return 0;
- }
- if (change_page_attr(page, 4, PAGE_KERNEL_NOCACHE) < 0) {
- __free_page(page);
- return 0;
- }
- get_page(page);
- SetPageLocked(page);
- atomic_inc(&agp_bridge->current_memory_agp);
- return page_address(page);
-}
-
-static void i8xx_destroy_pages(void *addr)
-{
- struct page *page;
-
- if (addr == NULL)
- return;
-
- page = virt_to_page(addr);
- change_page_attr(page, 4, PAGE_KERNEL);
- put_page(page);
- unlock_page(page);
- free_pages((unsigned long)addr, 2);
- atomic_dec(&agp_bridge->current_memory_agp);
-}
-
static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
int type)
{
struct agp_memory *new;
void *addr;
- if (pg_count != 1 && pg_count != 4)
+ if (pg_count != 1)
return NULL;
- switch (pg_count) {
- case 1: addr = agp_bridge->driver->agp_alloc_page();
- break;
- case 4:
- /* kludge to get 4 physical pages for ARGB cursor */
- addr = i8xx_alloc_pages();
- break;
- default:
- return NULL;
- }
-
+ addr = agp_bridge->driver->agp_alloc_page();
if (addr == NULL)
return NULL;
- new = agp_create_memory(pg_count);
+ new = agp_create_memory(1);
if (new == NULL)
return NULL;
- new->memory[0] = virt_to_phys(addr);
- if (pg_count == 4) {
- /* kludge to get 4 physical pages for ARGB cursor */
- new->memory[1] = new->memory[0] + PAGE_SIZE;
- new->memory[2] = new->memory[1] + PAGE_SIZE;
- new->memory[3] = new->memory[2] + PAGE_SIZE;
- }
- new->page_count = pg_count;
- new->num_scratch_pages = pg_count;
+ new->memory[0] = agp_bridge->driver->mask_memory(virt_to_phys(addr), type);
+ new->page_count = 1;
+ new->num_scratch_pages = 1;
new->type = AGP_PHYS_MEMORY;
new->physical = new->memory[0];
return new;
{
agp_free_key(curr->key);
if(curr->type == AGP_PHYS_MEMORY) {
- if (curr->page_count == 4)
- i8xx_destroy_pages(phys_to_virt(curr->memory[0]));
- else
- agp_bridge->driver->agp_destroy_page(
- phys_to_virt(curr->memory[0]));
+ agp_bridge->driver->agp_destroy_page(phys_to_virt(curr->memory[0]));
vfree(curr->memory);
}
kfree(curr);
{
{128, 32768, 5},
/* The 64M mode still requires a 128k gatt */
- {64, 16384, 5},
- {256, 65536, 6},
+ {64, 16384, 5}
};
static struct _intel_i830_private {
struct pci_dev *i830_dev; /* device one */
volatile u8 *registers;
- volatile u32 *gtt; /* I915G */
int gtt_entries;
} intel_i830_private;
u8 rdct;
int local = 0;
static const int ddt[4] = { 0, 16, 32, 64 };
- int size;
pci_read_config_word(agp_bridge->dev,I830_GMCH_CTRL,&gmch_ctrl);
- /* We obtain the size of the GTT, which is also stored (for some
- * reason) at the top of stolen memory. Then we add 4KB to that
- * for the video BIOS popup, which is also stored in there. */
- size = agp_bridge->driver->fetch_size() + 4;
-
if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
case I830_GMCH_GMS_STOLEN_512:
- gtt_entries = KB(512) - KB(size);
+ gtt_entries = KB(512) - KB(132);
break;
case I830_GMCH_GMS_STOLEN_1024:
- gtt_entries = MB(1) - KB(size);
+ gtt_entries = MB(1) - KB(132);
break;
case I830_GMCH_GMS_STOLEN_8192:
- gtt_entries = MB(8) - KB(size);
+ gtt_entries = MB(8) - KB(132);
break;
case I830_GMCH_GMS_LOCAL:
rdct = INREG8(intel_i830_private.registers,
} else {
switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
case I855_GMCH_GMS_STOLEN_1M:
- gtt_entries = MB(1) - KB(size);
+ gtt_entries = MB(1) - KB(132);
break;
case I855_GMCH_GMS_STOLEN_4M:
- gtt_entries = MB(4) - KB(size);
+ gtt_entries = MB(4) - KB(132);
break;
case I855_GMCH_GMS_STOLEN_8M:
- gtt_entries = MB(8) - KB(size);
+ gtt_entries = MB(8) - KB(132);
break;
case I855_GMCH_GMS_STOLEN_16M:
- gtt_entries = MB(16) - KB(size);
+ gtt_entries = MB(16) - KB(132);
break;
case I855_GMCH_GMS_STOLEN_32M:
- gtt_entries = MB(32) - KB(size);
- break;
- case I915_GMCH_GMS_STOLEN_48M:
- /* Check it's really I915G */
- if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB)
- gtt_entries = MB(48) - KB(size);
- else
- gtt_entries = 0;
+ gtt_entries = MB(32) - KB(132);
break;
- case I915_GMCH_GMS_STOLEN_64M:
- /* Check it's really I915G */
- if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB)
- gtt_entries = MB(64) - KB(size);
- else
- gtt_entries = 0;
default:
gtt_entries = 0;
break;
agp_bridge->aperture_size_idx = 0;
return(values[0].size);
} else {
- agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + 1);
+ agp_bridge->previous_size = agp_bridge->current_size = (void *) values;
agp_bridge->aperture_size_idx = 1;
return(values[1].size);
}
return(NULL);
}
-static int intel_i915_configure(void)
-{
- struct aper_size_info_fixed *current_size;
- u32 temp;
- u16 gmch_ctrl;
- int i;
-
- current_size = A_SIZE_FIX(agp_bridge->current_size);
-
- pci_read_config_dword(intel_i830_private.i830_dev, I915_GMADDR, &temp);
-
- agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
-
- pci_read_config_word(agp_bridge->dev,I830_GMCH_CTRL,&gmch_ctrl);
- gmch_ctrl |= I830_GMCH_ENABLED;
- pci_write_config_word(agp_bridge->dev,I830_GMCH_CTRL,gmch_ctrl);
-
- OUTREG32(intel_i830_private.registers,I810_PGETBL_CTL,agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED);
- global_cache_flush();
-
- if (agp_bridge->driver->needs_scratch_page) {
- for (i = intel_i830_private.gtt_entries; i < current_size->num_entries; i++)
- OUTREG32(intel_i830_private.gtt, i, agp_bridge->scratch_page);
- }
-
- return (0);
-}
-
-static void intel_i915_cleanup(void)
-{
- iounmap((void *) intel_i830_private.gtt);
- iounmap((void *) intel_i830_private.registers);
-}
-
-static int intel_i915_insert_entries(struct agp_memory *mem,off_t pg_start,
- int type)
-{
- int i,j,num_entries;
- void *temp;
-
- temp = agp_bridge->current_size;
- num_entries = A_SIZE_FIX(temp)->num_entries;
-
- if (pg_start < intel_i830_private.gtt_entries) {
- printk (KERN_DEBUG PFX "pg_start == 0x%.8lx,intel_i830_private.gtt_entries == 0x%.8x\n",
- pg_start,intel_i830_private.gtt_entries);
-
- printk (KERN_INFO PFX "Trying to insert into local/stolen memory\n");
- return (-EINVAL);
- }
-
- if ((pg_start + mem->page_count) > num_entries)
- return (-EINVAL);
-
- /* The i830 can't check the GTT for entries since its read only,
- * depend on the caller to make the correct offset decisions.
- */
-
- if ((type != 0 && type != AGP_PHYS_MEMORY) ||
- (mem->type != 0 && mem->type != AGP_PHYS_MEMORY))
- return (-EINVAL);
-
- global_cache_flush();
-
- for (i = 0, j = pg_start; i < mem->page_count; i++, j++)
- OUTREG32(intel_i830_private.gtt, j, agp_bridge->driver->mask_memory(mem->memory[i], mem->type));
-
- global_cache_flush();
-
- agp_bridge->driver->tlb_flush(mem);
-
- return(0);
-}
-
-static int intel_i915_remove_entries(struct agp_memory *mem,off_t pg_start,
- int type)
-{
- int i;
-
- global_cache_flush();
-
- if (pg_start < intel_i830_private.gtt_entries) {
- printk (KERN_INFO PFX "Trying to disable local/stolen memory\n");
- return (-EINVAL);
- }
-
- for (i = pg_start; i < (mem->page_count + pg_start); i++)
- OUTREG32(intel_i830_private.gtt, i, agp_bridge->scratch_page);
-
- global_cache_flush();
-
- agp_bridge->driver->tlb_flush(mem);
-
- return (0);
-}
-
-static int intel_i915_fetch_size(void)
-{
- struct aper_size_info_fixed *values;
- u32 temp, offset = 0;
-
-#define I915_256MB_ADDRESS_MASK (1<<27)
-
- values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
-
- pci_read_config_dword(intel_i830_private.i830_dev, I915_GMADDR, &temp);
- if (temp & I915_256MB_ADDRESS_MASK)
- offset = 0; /* 128MB aperture */
- else
- offset = 2; /* 256MB aperture */
- agp_bridge->previous_size = agp_bridge->current_size = (void *)(values + offset);
- return(values[offset].size);
-}
-
-/* The intel i915 automatically initializes the agp aperture during POST.
- * Use the memory already set aside for in the GTT.
- */
-static int intel_i915_create_gatt_table(void)
-{
- int page_order;
- struct aper_size_info_fixed *size;
- int num_entries;
- u32 temp, temp2;
-
- size = agp_bridge->current_size;
- page_order = size->page_order;
- num_entries = size->num_entries;
- agp_bridge->gatt_table_real = 0;
-
- pci_read_config_dword(intel_i830_private.i830_dev, I915_MMADDR, &temp);
- pci_read_config_dword(intel_i830_private.i830_dev, I915_PTEADDR,&temp2);
-
- intel_i830_private.gtt = (volatile u32 *) ioremap(temp2, 256 * 1024);
- if (!intel_i830_private.gtt)
- return (-ENOMEM);
-
- temp &= 0xfff80000;
-
- intel_i830_private.registers = (volatile u8 *) ioremap(temp,128 * 4096);
- if (!intel_i830_private.registers)
- return (-ENOMEM);
-
- temp = INREG32(intel_i830_private.registers,I810_PGETBL_CTL) & 0xfffff000;
- global_cache_flush();
-
- /* we have to call this as early as possible after the MMIO base address is known */
- intel_i830_init_gtt_entries();
-
- agp_bridge->gatt_table = NULL;
-
- agp_bridge->gatt_bus_addr = temp;
-
- return(0);
-}
-
static int intel_fetch_size(void)
{
int i;
.owner = THIS_MODULE,
.aperture_sizes = intel_i830_sizes,
.size_type = FIXED_APER_SIZE,
- .num_aperture_sizes = 3,
+ .num_aperture_sizes = 2,
.needs_scratch_page = TRUE,
.configure = intel_i830_configure,
.fetch_size = intel_i830_fetch_size,
.agp_destroy_page = agp_generic_destroy_page,
};
-static struct agp_bridge_driver intel_915_driver = {
- .owner = THIS_MODULE,
- .aperture_sizes = intel_i830_sizes,
- .size_type = FIXED_APER_SIZE,
- .num_aperture_sizes = 3,
- .needs_scratch_page = TRUE,
- .configure = intel_i915_configure,
- .fetch_size = intel_i915_fetch_size,
- .cleanup = intel_i915_cleanup,
- .tlb_flush = intel_i810_tlbflush,
- .mask_memory = intel_i810_mask_memory,
- .masks = intel_i810_masks,
- .agp_enable = intel_i810_agp_enable,
- .cache_flush = global_cache_flush,
- .create_gatt_table = intel_i915_create_gatt_table,
- .free_gatt_table = intel_i830_free_gatt_table,
- .insert_memory = intel_i915_insert_entries,
- .remove_memory = intel_i915_remove_entries,
- .alloc_by_type = intel_i830_alloc_by_type,
- .free_by_type = intel_i810_free_by_type,
- .agp_alloc_page = agp_generic_alloc_page,
- .agp_destroy_page = agp_generic_destroy_page,
-};
-
-
static struct agp_bridge_driver intel_7505_driver = {
.owner = THIS_MODULE,
.aperture_sizes = intel_8xx_sizes,
bridge->driver = &intel_845_driver;
name = "i875";
break;
- case PCI_DEVICE_ID_INTEL_82915G_HB:
- if (find_i830(PCI_DEVICE_ID_INTEL_82915G_IG)) {
- bridge->driver = &intel_915_driver;
- } else {
- bridge->driver = &intel_845_driver;
- }
- name = "915G";
- break;
case PCI_DEVICE_ID_INTEL_7505_0:
bridge->driver = &intel_7505_driver;
- name = "E7505";
+ name = "E7505";
break;
case PCI_DEVICE_ID_INTEL_7205_0:
bridge->driver = &intel_7505_driver;
intel_845_configure();
else if (bridge->driver == &intel_830mp_driver)
intel_830mp_configure();
- else if (bridge->driver == &intel_915_driver)
- intel_i915_configure();
return 0;
}
static struct agp_device_ids sis_agp_device_ids[] __devinitdata =
{
- {
- .device_id = PCI_DEVICE_ID_SI_5591_AGP,
- .chipset_name = "5591",
- },
{
.device_id = PCI_DEVICE_ID_SI_530,
.chipset_name = "530",
.device_id = PCI_DEVICE_ID_VIA_PX8X0_0,
.chipset_name = "PM800/PN800/PM880/PN880",
},
- /* KT880 */
- {
- .device_id = PCI_DEVICE_ID_VIA_3269_0,
- .chipset_name = "KT880",
- },
- /* KTxxx/Px8xx */
- {
- .device_id = PCI_DEVICE_ID_VIA_83_87XX_1,
- .chipset_name = "VT83xx/VT87xx/KTxxx/Px8xx",
- },
- /* P4M800 */
- {
- .device_id = PCI_DEVICE_ID_VIA_3296_0,
- .chipset_name = "P4M800",
- },
{ }, /* dummy final entry, always present */
};
ID(PCI_DEVICE_ID_VIA_8378_0),
ID(PCI_DEVICE_ID_VIA_PT880),
ID(PCI_DEVICE_ID_VIA_8783_0),
- ID(PCI_DEVICE_ID_VIA_PX8X0_0),
- ID(PCI_DEVICE_ID_VIA_3269_0),
- ID(PCI_DEVICE_ID_VIA_83_87XX_1),
- ID(PCI_DEVICE_ID_VIA_3296_0),
+ ID(PCI_DEVICE_ID_VIA_PX8X0_0),
{ }
};
int version_minor; /**< Minor version */
int version_patchlevel;/**< Patch level */
size_t name_len; /**< Length of name buffer */
- char __user *name; /**< Name of driver */
+ char *name; /**< Name of driver */
size_t date_len; /**< Length of date buffer */
- char __user *date; /**< User-space buffer to hold date */
+ char *date; /**< User-space buffer to hold date */
size_t desc_len; /**< Length of desc buffer */
- char __user *desc; /**< User-space buffer to hold desc */
+ char *desc; /**< User-space buffer to hold desc */
} drm_version_t;
*/
typedef struct drm_unique {
size_t unique_len; /**< Length of unique */
- char __user *unique; /**< Unique name for driver instantiation */
+ char *unique; /**< Unique name for driver instantiation */
} drm_unique_t;
typedef struct drm_list {
int count; /**< Length of user-space structures */
- drm_version_t __user *version;
+ drm_version_t *version;
} drm_list_t;
*/
typedef struct drm_buf_info {
int count; /**< Entries in list */
- drm_buf_desc_t __user *list;
+ drm_buf_desc_t *list;
} drm_buf_info_t;
*/
typedef struct drm_buf_free {
int count;
- int __user *list;
+ int *list;
} drm_buf_free_t;
int idx; /**< Index into the master buffer list */
int total; /**< Buffer size */
int used; /**< Amount of buffer in use (for DMA) */
- void __user *address; /**< Address of buffer */
+ void *address; /**< Address of buffer */
} drm_buf_pub_t;
*/
typedef struct drm_buf_map {
int count; /**< Length of the buffer list */
- void __user *virtual; /**< Mmap'd area in user-virtual */
- drm_buf_pub_t __user *list; /**< Buffer information */
+ void *virtual; /**< Mmap'd area in user-virtual */
+ drm_buf_pub_t *list; /**< Buffer information */
} drm_buf_map_t;
typedef struct drm_dma {
int context; /**< Context handle */
int send_count; /**< Number of buffers to send */
- int __user *send_indices; /**< List of handles to buffers */
- int __user *send_sizes; /**< Lengths of data to send */
+ int *send_indices; /**< List of handles to buffers */
+ int *send_sizes; /**< Lengths of data to send */
drm_dma_flags_t flags; /**< Flags */
int request_count; /**< Number of buffers requested */
int request_size; /**< Desired size for buffers */
- int __user *request_indices; /**< Buffer information */
- int __user *request_sizes;
+ int *request_indices; /**< Buffer information */
+ int *request_sizes;
int granted_count; /**< Number of buffers granted */
} drm_dma_t;
*/
typedef struct drm_ctx_res {
int count;
- drm_ctx_t __user *contexts;
+ drm_ctx_t *contexts;
} drm_ctx_res_t;
drm_file_t *tag; /**< associated fd private data */
} drm_ctx_list_t;
-#ifdef __HAVE_VBL_IRQ
+#if __HAVE_VBL_IRQ
typedef struct drm_vbl_sig {
struct list_head head;
struct work_struct work;
/** \name VBLANK IRQ support */
/*@{*/
-#ifdef __HAVE_VBL_IRQ
+#if __HAVE_VBL_IRQ
wait_queue_head_t vbl_queue; /**< VBLANK wait queue */
atomic_t vbl_received;
spinlock_t vbl_lock;
struct vm_area_struct *vma);
extern int DRM(mmap)(struct file *filp, struct vm_area_struct *vma);
extern unsigned int DRM(poll)(struct file *filp, struct poll_table_struct *wait);
-extern ssize_t DRM(read)(struct file *filp, char __user *buf, size_t count, loff_t *off);
+extern ssize_t DRM(read)(struct file *filp, char *buf, size_t count, loff_t *off);
/* Memory management support (drm_memory.h) */
extern void DRM(mem_init)(void);
extern void DRM(driver_irq_preinstall)( drm_device_t *dev );
extern void DRM(driver_irq_postinstall)( drm_device_t *dev );
extern void DRM(driver_irq_uninstall)( drm_device_t *dev );
-#ifdef __HAVE_VBL_IRQ
+#if __HAVE_VBL_IRQ
extern int DRM(wait_vblank)(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int DRM(vblank_wait)(drm_device_t *dev, unsigned int *vbl_seq);
extern void DRM(vbl_send_signals)( drm_device_t *dev );
#endif
-#ifdef __HAVE_IRQ_BH
+#if __HAVE_IRQ_BH
extern void DRM(irq_immediate_bh)( void *dev );
#endif
#endif
struct proc_dir_entry *root,
struct proc_dir_entry *dev_root);
-#ifdef __HAVE_SG
+#if __HAVE_SG
/* Scatter Gather Support (drm_scatter.h) */
extern void DRM(sg_cleanup)(drm_sg_mem_t *entry);
extern int DRM(sg_alloc)(struct inode *inode, struct file *filp,
info.id_vendor = kern->device->vendor;
info.id_device = kern->device->device;
- if (copy_to_user((drm_agp_info_t __user *)arg, &info, sizeof(info)))
+ if (copy_to_user((drm_agp_info_t *)arg, &info, sizeof(info)))
return -EFAULT;
return 0;
}
if (!dev->agp || !dev->agp->acquired || !drm_agp->enable)
return -EINVAL;
- if (copy_from_user(&mode, (drm_agp_mode_t __user *)arg, sizeof(mode)))
+ if (copy_from_user(&mode, (drm_agp_mode_t *)arg, sizeof(mode)))
return -EFAULT;
dev->agp->mode = mode.mode;
DRM_AGP_MEM *memory;
unsigned long pages;
u32 type;
- drm_agp_buffer_t __user *argp = (void __user *)arg;
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
- if (copy_from_user(&request, argp, sizeof(request)))
+ if (copy_from_user(&request, (drm_agp_buffer_t *)arg, sizeof(request)))
return -EFAULT;
if (!(entry = DRM(alloc)(sizeof(*entry), DRM_MEM_AGPLISTS)))
return -ENOMEM;
request.handle = entry->handle;
request.physical = memory->physical;
- if (copy_to_user(argp, &request, sizeof(request))) {
+ if (copy_to_user((drm_agp_buffer_t *)arg, &request, sizeof(request))) {
dev->agp->memory = entry->next;
dev->agp->memory->prev = NULL;
DRM(free_agp)(memory, pages);
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
- if (copy_from_user(&request, (drm_agp_binding_t __user *)arg, sizeof(request)))
+ if (copy_from_user(&request, (drm_agp_binding_t *)arg, sizeof(request)))
return -EFAULT;
if (!(entry = DRM(agp_lookup_entry)(dev, request.handle)))
return -EINVAL;
if (!dev->agp || !dev->agp->acquired || !drm_agp->bind_memory)
return -EINVAL;
- if (copy_from_user(&request, (drm_agp_binding_t __user *)arg, sizeof(request)))
+ if (copy_from_user(&request, (drm_agp_binding_t *)arg, sizeof(request)))
return -EFAULT;
if (!(entry = DRM(agp_lookup_entry)(dev, request.handle)))
return -EINVAL;
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
- if (copy_from_user(&request, (drm_agp_buffer_t __user *)arg, sizeof(request)))
+ if (copy_from_user(&request, (drm_agp_buffer_t *)arg, sizeof(request)))
return -EFAULT;
if (!(entry = DRM(agp_lookup_entry)(dev, request.handle)))
return -EINVAL;
}
DRM_DEBUG("%u\n", auth.magic);
- if (copy_to_user((drm_auth_t __user *)arg, &auth, sizeof(auth)))
+ if (copy_to_user((drm_auth_t *)arg, &auth, sizeof(auth)))
return -EFAULT;
return 0;
}
drm_auth_t auth;
drm_file_t *file;
- if (copy_from_user(&auth, (drm_auth_t __user *)arg, sizeof(auth)))
+ if (copy_from_user(&auth, (drm_auth_t *)arg, sizeof(auth)))
return -EFAULT;
DRM_DEBUG("%u\n", auth.magic);
if ((file = DRM(find_file)(dev, auth.magic))) {
int order;
unsigned long tmp;
- for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++)
- ;
+ for ( order = 0, tmp = size ; tmp >>= 1 ; ++order );
- if (size & (size - 1))
+ if ( size & ~(1 << order) )
++order;
return order;
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_map_t *map;
- drm_map_t __user *argp = (void __user *)arg;
drm_map_list_t *list;
if ( !(filp->f_mode & 3) ) return -EACCES; /* Require read/write */
if ( !map )
return -ENOMEM;
- if ( copy_from_user( map, argp, sizeof(*map) ) ) {
+ if ( copy_from_user( map, (drm_map_t *)arg, sizeof(*map) ) ) {
DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
return -EFAULT;
}
list_add(&list->head, &dev->maplist->head);
up(&dev->struct_sem);
- if ( copy_to_user( argp, map, sizeof(*map) ) )
+ if ( copy_to_user( (drm_map_t *)arg, map, sizeof(*map) ) )
return -EFAULT;
if ( map->type != _DRM_SHM ) {
- if ( copy_to_user( &argp->handle,
+ if ( copy_to_user( &((drm_map_t *)arg)->handle,
&map->offset,
sizeof(map->offset) ) )
return -EFAULT;
drm_map_t request;
int found_maps = 0;
- if (copy_from_user(&request, (drm_map_t __user *)arg,
+ if (copy_from_user(&request, (drm_map_t *)arg,
sizeof(request))) {
return -EFAULT;
}
int byte_count;
int i;
drm_buf_t **temp_buflist;
- drm_buf_desc_t __user *argp = (void __user *)arg;
if ( !dma ) return -EINVAL;
- if ( copy_from_user( &request, argp,
+ if ( copy_from_user( &request, (drm_buf_desc_t *)arg,
sizeof(request) ) )
return -EFAULT;
request.count = entry->buf_count;
request.size = size;
- if ( copy_to_user( argp, &request, sizeof(request) ) )
+ if ( copy_to_user( (drm_buf_desc_t *)arg, &request, sizeof(request) ) )
return -EFAULT;
dma->flags = _DRM_DMA_USE_AGP;
int page_count;
unsigned long *temp_pagelist;
drm_buf_t **temp_buflist;
- drm_buf_desc_t __user *argp = (void __user *)arg;
if ( !dma ) return -EINVAL;
- if ( copy_from_user( &request, argp, sizeof(request) ) )
+ if ( copy_from_user( &request, (drm_buf_desc_t *)arg,
+ sizeof(request) ) )
return -EFAULT;
count = request.count;
request.count = entry->buf_count;
request.size = size;
- if ( copy_to_user( argp, &request, sizeof(request) ) )
+ if ( copy_to_user( (drm_buf_desc_t *)arg, &request, sizeof(request) ) )
return -EFAULT;
atomic_dec( &dev->buf_alloc );
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_device_dma_t *dma = dev->dma;
- drm_buf_desc_t __user *argp = (void __user *)arg;
drm_buf_desc_t request;
drm_buf_entry_t *entry;
drm_buf_t *buf;
if ( !dma ) return -EINVAL;
- if ( copy_from_user( &request, argp, sizeof(request) ) )
+ if ( copy_from_user( &request, (drm_buf_desc_t *)arg,
+ sizeof(request) ) )
return -EFAULT;
count = request.count;
buf->waiting = 0;
buf->pending = 0;
init_waitqueue_head( &buf->dma_wait );
- buf->filp = NULL;
+ buf->filp = 0;
buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
buf->dev_private = DRM(alloc)( sizeof(DRIVER_BUF_PRIV_T),
request.count = entry->buf_count;
request.size = size;
- if ( copy_to_user( argp, &request, sizeof(request) ) )
+ if ( copy_to_user( (drm_buf_desc_t *)arg, &request, sizeof(request) ) )
return -EFAULT;
dma->flags = _DRM_DMA_USE_SG;
{
drm_buf_desc_t request;
- if ( copy_from_user( &request, (drm_buf_desc_t __user *)arg,
+ if ( copy_from_user( &request, (drm_buf_desc_t *)arg,
sizeof(request) ) )
return -EFAULT;
drm_device_t *dev = priv->dev;
drm_device_dma_t *dma = dev->dma;
drm_buf_info_t request;
- drm_buf_info_t __user *argp = (void __user *)arg;
int i;
int count;
++dev->buf_use; /* Can't allocate more after this call */
spin_unlock( &dev->count_lock );
- if ( copy_from_user( &request, argp, sizeof(request) ) )
+ if ( copy_from_user( &request,
+ (drm_buf_info_t *)arg,
+ sizeof(request) ) )
return -EFAULT;
for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
if ( request.count >= count ) {
for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
if ( dma->bufs[i].buf_count ) {
- drm_buf_desc_t __user *to = &request.list[count];
+ drm_buf_desc_t *to = &request.list[count];
drm_buf_entry_t *from = &dma->bufs[i];
drm_freelist_t *list = &dma->bufs[i].freelist;
if ( copy_to_user( &to->count,
}
request.count = count;
- if ( copy_to_user( argp, &request, sizeof(request) ) )
+ if ( copy_to_user( (drm_buf_info_t *)arg,
+ &request,
+ sizeof(request) ) )
return -EFAULT;
return 0;
if ( !dma ) return -EINVAL;
if ( copy_from_user( &request,
- (drm_buf_desc_t __user *)arg,
+ (drm_buf_desc_t *)arg,
sizeof(request) ) )
return -EFAULT;
if ( !dma ) return -EINVAL;
if ( copy_from_user( &request,
- (drm_buf_free_t __user *)arg,
+ (drm_buf_free_t *)arg,
sizeof(request) ) )
return -EFAULT;
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_device_dma_t *dma = dev->dma;
- drm_buf_map_t __user *argp = (void __user *)arg;
int retcode = 0;
const int zero = 0;
unsigned long virtual;
dev->buf_use++; /* Can't allocate more after this call */
spin_unlock( &dev->count_lock );
- if ( copy_from_user( &request, argp, sizeof(request) ) )
+ if ( copy_from_user( &request, (drm_buf_map_t *)arg,
+ sizeof(request) ) )
return -EFAULT;
if ( request.count >= dma->buf_count ) {
retcode = (signed long)virtual;
goto done;
}
- request.virtual = (void __user *)virtual;
+ request.virtual = (void *)virtual;
for ( i = 0 ; i < dma->buf_count ; i++ ) {
if ( copy_to_user( &request.list[i].idx,
request.count = dma->buf_count;
DRM_DEBUG( "%d buffers, retcode = %d\n", request.count, retcode );
- if ( copy_to_user( argp, &request, sizeof(request) ) )
+ if ( copy_to_user( (drm_buf_map_t *)arg, &request, sizeof(request) ) )
return -EFAULT;
return retcode;
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
- drm_ctx_priv_map_t __user *argp = (void __user *)arg;
drm_ctx_priv_map_t request;
drm_map_t *map;
- if (copy_from_user(&request, argp, sizeof(request)))
+ if (copy_from_user(&request,
+ (drm_ctx_priv_map_t *)arg,
+ sizeof(request)))
return -EFAULT;
down(&dev->struct_sem);
up(&dev->struct_sem);
request.handle = map->handle;
- if (copy_to_user(argp, &request, sizeof(request)))
+ if (copy_to_user((drm_ctx_priv_map_t *)arg, &request, sizeof(request)))
return -EFAULT;
return 0;
}
struct list_head *list;
if (copy_from_user(&request,
- (drm_ctx_priv_map_t __user *)arg,
+ (drm_ctx_priv_map_t *)arg,
sizeof(request)))
return -EFAULT;
unsigned int cmd, unsigned long arg )
{
drm_ctx_res_t res;
- drm_ctx_t __user *argp = (void __user *)arg;
drm_ctx_t ctx;
int i;
- if ( copy_from_user( &res, argp, sizeof(res) ) )
+ if ( copy_from_user( &res, (drm_ctx_res_t *)arg, sizeof(res) ) )
return -EFAULT;
if ( res.count >= DRM_RESERVED_CONTEXTS ) {
}
res.count = DRM_RESERVED_CONTEXTS;
- if ( copy_to_user( argp, &res, sizeof(res) ) )
+ if ( copy_to_user( (drm_ctx_res_t *)arg, &res, sizeof(res) ) )
return -EFAULT;
return 0;
}
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_ctx_list_t * ctx_entry;
- drm_ctx_t __user *argp = (void __user *)arg;
drm_ctx_t ctx;
- if ( copy_from_user( &ctx, argp, sizeof(ctx) ) )
+ if ( copy_from_user( &ctx, (drm_ctx_t *)arg, sizeof(ctx) ) )
return -EFAULT;
ctx.handle = DRM(ctxbitmap_next)( dev );
++dev->ctx_count;
up( &dev->ctxlist_sem );
- if ( copy_to_user( argp, &ctx, sizeof(ctx) ) )
+ if ( copy_to_user( (drm_ctx_t *)arg, &ctx, sizeof(ctx) ) )
return -EFAULT;
return 0;
}
int DRM(getctx)( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg )
{
- drm_ctx_t __user *argp = (void __user *)arg;
drm_ctx_t ctx;
- if ( copy_from_user( &ctx, argp, sizeof(ctx) ) )
+ if ( copy_from_user( &ctx, (drm_ctx_t*)arg, sizeof(ctx) ) )
return -EFAULT;
/* This is 0, because we don't handle any context flags */
ctx.flags = 0;
- if ( copy_to_user( argp, &ctx, sizeof(ctx) ) )
+ if ( copy_to_user( (drm_ctx_t*)arg, &ctx, sizeof(ctx) ) )
return -EFAULT;
return 0;
}
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
- if ( copy_from_user( &ctx, (drm_ctx_t __user *)arg, sizeof(ctx) ) )
+ if ( copy_from_user( &ctx, (drm_ctx_t *)arg, sizeof(ctx) ) )
return -EFAULT;
DRM_DEBUG( "%d\n", ctx.handle );
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
- if ( copy_from_user( &ctx, (drm_ctx_t __user *)arg, sizeof(ctx) ) )
+ if ( copy_from_user( &ctx, (drm_ctx_t *)arg, sizeof(ctx) ) )
return -EFAULT;
DRM_DEBUG( "%d\n", ctx.handle );
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
- if ( copy_from_user( &ctx, (drm_ctx_t __user *)arg, sizeof(ctx) ) )
+ if ( copy_from_user( &ctx, (drm_ctx_t *)arg, sizeof(ctx) ) )
return -EFAULT;
DRM_DEBUG( "%d\n", ctx.handle );
{
drm_control_t ctl;
- if ( copy_from_user( &ctl, (drm_control_t __user *)arg, sizeof(ctl) ) )
+ if ( copy_from_user( &ctl, (drm_control_t *)arg, sizeof(ctl) ) )
return -EFAULT;
switch ( ctl.func ) {
draw.handle = 0; /* NOOP */
DRM_DEBUG("%d\n", draw.handle);
- if (copy_to_user((drm_draw_t __user *)arg, &draw, sizeof(draw)))
+ if (copy_to_user((drm_draw_t *)arg, &draw, sizeof(draw)))
return -EFAULT;
return 0;
}
[DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)] = { DRM(sg_free), 1, 1 },
#endif
-#ifdef __HAVE_VBL_IRQ
+#if __HAVE_VBL_IRQ
[DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] = { DRM(wait_vblank), 0, 0 },
#endif
int DRM(version)( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg )
{
- drm_version_t __user *argp = (void __user *)arg;
drm_version_t version;
int len;
- if ( copy_from_user( &version, argp, sizeof(version) ) )
+ if ( copy_from_user( &version,
+ (drm_version_t *)arg,
+ sizeof(version) ) )
return -EFAULT;
#define DRM_COPY( name, value ) \
DRM_COPY( version.date, DRIVER_DATE );
DRM_COPY( version.desc, DRIVER_DESC );
- if ( copy_to_user( argp, &version, sizeof(version) ) )
+ if ( copy_to_user( (drm_version_t *)arg,
+ &version,
+ sizeof(version) ) )
return -EFAULT;
return 0;
}
++priv->lock_count;
- if ( copy_from_user( &lock, (drm_lock_t __user *)arg, sizeof(lock) ) )
+ if ( copy_from_user( &lock, (drm_lock_t *)arg, sizeof(lock) ) )
return -EFAULT;
if ( lock.context == DRM_KERNEL_CONTEXT ) {
drm_device_t *dev = priv->dev;
drm_lock_t lock;
- if ( copy_from_user( &lock, (drm_lock_t __user *)arg, sizeof(lock) ) )
+ if ( copy_from_user( &lock, (drm_lock_t *)arg, sizeof(lock) ) )
return -EFAULT;
if ( lock.context == DRM_KERNEL_CONTEXT ) {
* agent to request it then we should just be able to
* take it immediately and not eat the ioctl.
*/
- dev->lock.filp = NULL;
+ dev->lock.filp = 0;
{
__volatile__ unsigned int *plock = &dev->lock.hw_lock->lock;
unsigned int old, new, prev, ctx;
#if !__HAVE_DRIVER_FOPS_READ
/** No-op. */
-ssize_t DRM(read)(struct file *filp, char __user *buf, size_t count, loff_t *off)
+ssize_t DRM(read)(struct file *filp, char *buf, size_t count, loff_t *off)
{
return 0;
}
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
- drm_unique_t __user *argp = (void __user *)arg;
drm_unique_t u;
- if (copy_from_user(&u, argp, sizeof(u)))
+ if (copy_from_user(&u, (drm_unique_t *)arg, sizeof(u)))
return -EFAULT;
if (u.unique_len >= dev->unique_len) {
if (copy_to_user(u.unique, dev->unique, dev->unique_len))
return -EFAULT;
}
u.unique_len = dev->unique_len;
- if (copy_to_user(argp, &u, sizeof(u)))
+ if (copy_to_user((drm_unique_t *)arg, &u, sizeof(u)))
return -EFAULT;
return 0;
}
if (dev->unique_len || dev->unique) return -EBUSY;
- if (copy_from_user(&u, (drm_unique_t __user *)arg, sizeof(u)))
- return -EFAULT;
+ if (copy_from_user(&u, (drm_unique_t *)arg, sizeof(u))) return -EFAULT;
if (!u.unique_len || u.unique_len > 1024) return -EINVAL;
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
- drm_map_t __user *argp = (void __user *)arg;
drm_map_t map;
drm_map_list_t *r_list = NULL;
struct list_head *list;
int idx;
int i;
- if (copy_from_user(&map, argp, sizeof(map)))
+ if (copy_from_user(&map, (drm_map_t *)arg, sizeof(map)))
return -EFAULT;
idx = map.offset;
map.mtrr = r_list->map->mtrr;
up(&dev->struct_sem);
- if (copy_to_user(argp, &map, sizeof(map))) return -EFAULT;
+ if (copy_to_user((drm_map_t *)arg, &map, sizeof(map))) return -EFAULT;
return 0;
}
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
- drm_client_t __user *argp = (void __user *)arg;
drm_client_t client;
drm_file_t *pt;
int idx;
int i;
- if (copy_from_user(&client, argp, sizeof(client)))
+ if (copy_from_user(&client, (drm_client_t *)arg, sizeof(client)))
return -EFAULT;
idx = client.idx;
down(&dev->struct_sem);
client.iocs = pt->ioctl_count;
up(&dev->struct_sem);
- if (copy_to_user((drm_client_t __user *)arg, &client, sizeof(client)))
+ if (copy_to_user((drm_client_t *)arg, &client, sizeof(client)))
return -EFAULT;
return 0;
}
up(&dev->struct_sem);
- if (copy_to_user((drm_stats_t __user *)arg, &stats, sizeof(stats)))
+ if (copy_to_user((drm_stats_t *)arg, &stats, sizeof(stats)))
return -EFAULT;
return 0;
}
drm_set_version_t sv;
drm_set_version_t retv;
int if_version;
- drm_set_version_t __user *argp = (void __user *)data;
- DRM_COPY_FROM_USER_IOCTL(sv, argp, sizeof(sv));
+ DRM_COPY_FROM_USER_IOCTL(sv, (drm_set_version_t *)data, sizeof(sv));
retv.drm_di_major = DRM_IF_MAJOR;
retv.drm_di_minor = DRM_IF_MINOR;
retv.drm_dd_major = DRIVER_MAJOR;
retv.drm_dd_minor = DRIVER_MINOR;
- DRM_COPY_TO_USER_IOCTL(argp, retv, sizeof(sv));
+ DRM_COPY_TO_USER_IOCTL((drm_set_version_t *)data, retv, sizeof(sv));
if (sv.drm_di_major != -1) {
if (sv.drm_di_major != DRM_IF_MAJOR ||
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
- drm_irq_busid_t __user *argp = (void __user *)arg;
drm_irq_busid_t p;
- if (copy_from_user(&p, argp, sizeof(p)))
+ if (copy_from_user(&p, (drm_irq_busid_t *)arg, sizeof(p)))
return -EFAULT;
if ((p.busnum >> 8) != dev->pci_domain ||
DRM_DEBUG("%d:%d:%d => IRQ %d\n",
p.busnum, p.devnum, p.funcnum, p.irq);
- if (copy_to_user(argp, &p, sizeof(p)))
+ if (copy_to_user((drm_irq_busid_t *)arg, &p, sizeof(p)))
return -EFAULT;
return 0;
}
dev->dma->this_buffer = NULL;
#endif
-#ifdef __HAVE_IRQ_BH
+#if __HAVE_IRQ_BH
INIT_WORK(&dev->work, DRM(irq_immediate_bh), dev);
#endif
-#ifdef __HAVE_VBL_IRQ
+#if __HAVE_VBL_IRQ
init_waitqueue_head(&dev->vbl_queue);
spin_lock_init( &dev->vbl_lock );
drm_device_t *dev = priv->dev;
drm_control_t ctl;
- if ( copy_from_user( &ctl, (drm_control_t __user *)arg, sizeof(ctl) ) )
+ if ( copy_from_user( &ctl, (drm_control_t *)arg, sizeof(ctl) ) )
return -EFAULT;
switch ( ctl.func ) {
}
}
-#ifdef __HAVE_VBL_IRQ
+#if __HAVE_VBL_IRQ
/**
* Wait for VBLANK.
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
- drm_wait_vblank_t __user *argp = (void __user *)data;
drm_wait_vblank_t vblwait;
struct timeval now;
int ret = 0;
if (!dev->irq)
return -EINVAL;
- DRM_COPY_FROM_USER_IOCTL( vblwait, argp, sizeof(vblwait) );
+ DRM_COPY_FROM_USER_IOCTL( vblwait, (drm_wait_vblank_t *)data,
+ sizeof(vblwait) );
switch ( vblwait.request.type & ~_DRM_VBLANK_FLAGS_MASK ) {
case _DRM_VBLANK_RELATIVE:
}
done:
- DRM_COPY_TO_USER_IOCTL( argp, vblwait, sizeof(vblwait) );
+ DRM_COPY_TO_USER_IOCTL( (drm_wait_vblank_t *)data, vblwait,
+ sizeof(vblwait) );
return ret;
}
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
- drm_scatter_gather_t __user *argp = (void __user *)arg;
drm_scatter_gather_t request;
drm_sg_mem_t *entry;
unsigned long pages, i, j;
if ( dev->sg )
return -EINVAL;
- if ( copy_from_user( &request, argp, sizeof(request) ) )
+ if ( copy_from_user( &request,
+ (drm_scatter_gather_t *)arg,
+ sizeof(request) ) )
return -EFAULT;
entry = DRM(alloc)( sizeof(*entry), DRM_MEM_SGLISTS );
request.handle = entry->handle;
- if ( copy_to_user( argp, &request, sizeof(request) ) ) {
+ if ( copy_to_user( (drm_scatter_gather_t *)arg,
+ &request,
+ sizeof(request) ) ) {
DRM(sg_cleanup)( entry );
return -EFAULT;
}
drm_sg_mem_t *entry;
if ( copy_from_user( &request,
- (drm_scatter_gather_t __user *)arg,
+ (drm_scatter_gather_t *)arg,
sizeof(request) ) )
return -EFAULT;
struct drm_agp_mem *agpmem;
struct page *page;
-#ifdef __alpha__
+#if __alpha__
/*
* Adjust to a bus-relative address
*/
{
ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private;
-#ifdef DRM_DMA_HISTOGRAM
+#if DRM_DMA_HISTOGRAM
dev->ctx_start = get_cycles();
#endif
int i;
DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS);
- if (copy_from_user(&res, (drm_ctx_res_t __user *)arg, sizeof(res)))
+ if (copy_from_user(&res, (drm_ctx_res_t *)arg, sizeof(res)))
return -EFAULT;
if (res.count >= DRM_RESERVED_CONTEXTS) {
memset(&ctx, 0, sizeof(ctx));
}
}
res.count = DRM_RESERVED_CONTEXTS;
- if (copy_to_user((drm_ctx_res_t __user *)arg, &res, sizeof(res)))
+ if (copy_to_user((drm_ctx_res_t *)arg, &res, sizeof(res)))
return -EFAULT;
return 0;
}
drm_ctx_t ctx;
int idx;
- if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
+ if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
return -EFAULT;
idx = DRM(alloc_queue)(dev, (ctx.flags & _DRM_CONTEXT_2DONLY));
if (idx < 0)
DRM_DEBUG("%d\n", ctx.handle);
ctx.handle = idx;
- if (copy_to_user((drm_ctx_t __user *)arg, &ctx, sizeof(ctx)))
+ if (copy_to_user((drm_ctx_t *)arg, &ctx, sizeof(ctx)))
return -EFAULT;
return 0;
}
drm_ctx_t ctx;
int idx;
- if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
+ if (copy_from_user(&ctx, (drm_ctx_t*)arg, sizeof(ctx)))
return -EFAULT;
idx = ctx.handle;
drm_ctx_t ctx;
int idx;
- if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
+ if (copy_from_user(&ctx, (drm_ctx_t*)arg, sizeof(ctx)))
return -EFAULT;
idx = ctx.handle;
else
ctx.flags = 0;
- if (copy_to_user((drm_ctx_t __user *)arg, &ctx, sizeof(ctx)))
+ if (copy_to_user((drm_ctx_t*)arg, &ctx, sizeof(ctx)))
return -EFAULT;
return 0;
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
- if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
+ if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
return DRM(context_switch)(dev, dev->last_context, ctx.handle);
{
drm_ctx_t ctx;
- if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
+ if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private;
int idx;
- if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
+ if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
unsigned long addr = -ENOMEM;
if (!map)
- return get_unmapped_area(NULL, hint, len, pgoff, flags);
+ return get_unmapped_area(NULL, hint, len, pgoff, flags, 0);
if (map->type == _DRM_FRAME_BUFFER ||
map->type == _DRM_REGISTERS) {
#ifdef HAVE_ARCH_FB_UNMAPPED_AREA
addr = get_fb_unmapped_area(filp, hint, len, pgoff, flags);
#else
- addr = get_unmapped_area(NULL, hint, len, pgoff, flags);
+ addr = get_unmapped_area(NULL, hint, len, pgoff, flags, 0);
#endif
} else if (map->type == _DRM_SHM && SHMLBA > PAGE_SIZE) {
unsigned long slack = SHMLBA - PAGE_SIZE;
- addr = get_unmapped_area(NULL, hint, len + slack, pgoff, flags);
+ addr = get_unmapped_area(NULL, hint, len + slack, pgoff, flags, 0);
if (!(addr & ~PAGE_MASK)) {
unsigned long kvirt = (unsigned long) map->handle;
}
}
} else {
- addr = get_unmapped_area(NULL, hint, len, pgoff, flags);
+ addr = get_unmapped_area(NULL, hint, len, pgoff, flags, 0);
}
return addr;
the circular buffer), is based on Alessandro Rubini's LINUX DEVICE
DRIVERS (Cambridge: O'Reilly, 1998), pages 111-113. */
-ssize_t DRM(read)(struct file *filp, char __user *buf, size_t count, loff_t *off)
+ssize_t DRM(read)(struct file *filp, char *buf, size_t count, loff_t *off)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
int DRM(resctx)(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
- drm_ctx_res_t __user *argp = (void __user *)arg;
drm_ctx_res_t res;
drm_ctx_t ctx;
int i;
DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS);
- if (copy_from_user(&res, argp, sizeof(res)))
+ if (copy_from_user(&res, (drm_ctx_res_t *)arg, sizeof(res)))
return -EFAULT;
if (res.count >= DRM_RESERVED_CONTEXTS) {
memset(&ctx, 0, sizeof(ctx));
}
}
res.count = DRM_RESERVED_CONTEXTS;
- if (copy_to_user(argp, &res, sizeof(res)))
+ if (copy_to_user((drm_ctx_res_t *)arg, &res, sizeof(res)))
return -EFAULT;
return 0;
}
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
- drm_ctx_t __user *argp = (void __user *)arg;
- if (copy_from_user(&ctx, argp, sizeof(ctx)))
+ if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
return -EFAULT;
if ((ctx.handle = DRM(alloc_queue)(dev)) == DRM_KERNEL_CONTEXT) {
/* Init kernel's context and get a new one. */
}
DRM(init_queue)(dev, dev->queuelist[ctx.handle], &ctx);
DRM_DEBUG("%d\n", ctx.handle);
- if (copy_to_user(argp, &ctx, sizeof(ctx)))
+ if (copy_to_user((drm_ctx_t *)arg, &ctx, sizeof(ctx)))
return -EFAULT;
return 0;
}
drm_ctx_t ctx;
drm_queue_t *q;
- if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
+ if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
- drm_ctx_t __user *argp = (void __user *)arg;
drm_ctx_t ctx;
drm_queue_t *q;
- if (copy_from_user(&ctx, argp, sizeof(ctx)))
+ if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
ctx.flags = q->flags;
atomic_dec(&q->use_count);
- if (copy_to_user(argp, &ctx, sizeof(ctx)))
+ if (copy_to_user((drm_ctx_t *)arg, &ctx, sizeof(ctx)))
return -EFAULT;
return 0;
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
- if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
+ if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
return DRM(context_switch)(dev, dev->last_context, ctx.handle);
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
- if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
+ if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
DRM(context_switch_complete)(dev, ctx.handle);
drm_queue_t *q;
drm_buf_t *buf;
- if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
+ if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
drm_device_t *dev = priv->dev;
drm_device_dma_t *dma = dev->dma;
int retcode = 0;
- drm_dma_t __user *argp = (void __user *)arg;
drm_dma_t d;
- if (copy_from_user(&d, argp, sizeof(d)))
+ if (copy_from_user(&d, (drm_dma_t *)arg, sizeof(d)))
return -EFAULT;
if (d.send_count < 0 || d.send_count > dma->buf_count) {
DRM_DEBUG("%d returning, granted = %d\n",
current->pid, d.granted_count);
- if (copy_to_user(argp, &d, sizeof(d)))
+ if (copy_to_user((drm_dma_t *)arg, &d, sizeof(d)))
return -EFAULT;
return retcode;
LOCK_TEST_WITH_RETURN( dev, filp );
- if ( copy_from_user( &init, (drm_gamma_init_t __user *)arg, sizeof(init) ) )
+ if ( copy_from_user( &init, (drm_gamma_init_t *)arg, sizeof(init) ) )
return -EFAULT;
switch ( init.func ) {
drm_device_t *dev = priv->dev;
drm_gamma_copy_t copy;
- if ( copy_from_user( ©, (drm_gamma_copy_t __user *)arg, sizeof(copy) ) )
+ if ( copy_from_user( ©, (drm_gamma_copy_t *)arg, sizeof(copy) ) )
return -EFAULT;
return gamma_do_copy_dma( dev, © );
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
- drm_ctx_priv_map_t __user *argp = (void __user *)arg;
drm_ctx_priv_map_t request;
drm_map_t *map;
- if (copy_from_user(&request, argp, sizeof(request)))
+ if (copy_from_user(&request,
+ (drm_ctx_priv_map_t *)arg,
+ sizeof(request)))
return -EFAULT;
down(&dev->struct_sem);
up(&dev->struct_sem);
request.handle = map->handle;
- if (copy_to_user(argp, &request, sizeof(request)))
+ if (copy_to_user((drm_ctx_priv_map_t *)arg, &request, sizeof(request)))
return -EFAULT;
return 0;
}
struct list_head *list;
if (copy_from_user(&request,
- (drm_ctx_priv_map_t __user *)arg,
+ (drm_ctx_priv_map_t *)arg,
sizeof(request)))
return -EFAULT;
DRM_DEBUG("\n");
- if (copy_from_user(&lock, (drm_lock_t __user *)arg, sizeof(lock)))
+ if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock)))
return -EFAULT;
ret = DRM(flush_block_and_flush)(dev, lock.context, lock.flags);
DRM(flush_unblock)(dev, lock.context, lock.flags);
int idx;
int while_locked = 0;
drm_device_dma_t *dma = dev->dma;
- int *ind;
- int err;
DECLARE_WAITQUEUE(entry, current);
DRM_DEBUG("%d\n", d->send_count);
remove_wait_queue(&q->write_queue, &entry);
}
- ind = DRM(alloc)(d->send_count * sizeof(int), DRM_MEM_DRIVER);
- if (!ind)
- return -ENOMEM;
-
- if (copy_from_user(ind, d->send_indices, d->send_count * sizeof(int))) {
- err = -EFAULT;
- goto out;
- }
-
- err = -EINVAL;
for (i = 0; i < d->send_count; i++) {
- idx = ind[i];
+ idx = d->send_indices[i];
if (idx < 0 || idx >= dma->buf_count) {
+ atomic_dec(&q->use_count);
DRM_ERROR("Index %d (of %d max)\n",
- ind[i], dma->buf_count - 1);
- goto out;
+ d->send_indices[i], dma->buf_count - 1);
+ return -EINVAL;
}
buf = dma->buflist[ idx ];
if (buf->filp != filp) {
+ atomic_dec(&q->use_count);
DRM_ERROR("Process %d using buffer not owned\n",
current->pid);
- goto out;
+ return -EINVAL;
}
if (buf->list != DRM_LIST_NONE) {
+ atomic_dec(&q->use_count);
DRM_ERROR("Process %d using buffer %d on list %d\n",
current->pid, buf->idx, buf->list);
- goto out;
}
- buf->used = ind[i];
+ buf->used = d->send_sizes[i];
buf->while_locked = while_locked;
buf->context = d->context;
if (!buf->used) {
DRM_ERROR("Queueing 0 length buffer\n");
}
if (buf->pending) {
+ atomic_dec(&q->use_count);
DRM_ERROR("Queueing pending buffer:"
" buffer %d, offset %d\n",
- ind[i], i);
- goto out;
+ d->send_indices[i], i);
+ return -EINVAL;
}
if (buf->waiting) {
+ atomic_dec(&q->use_count);
DRM_ERROR("Queueing waiting buffer:"
" buffer %d, offset %d\n",
- ind[i], i);
- goto out;
+ d->send_indices[i], i);
+ return -EINVAL;
}
buf->waiting = 1;
if (atomic_read(&q->use_count) == 1
atomic_dec(&q->use_count);
return 0;
-
-out:
- DRM(free)(ind, d->send_count * sizeof(int), DRM_MEM_DRIVER);
- atomic_dec(&q->use_count);
- return err;
}
static int DRM(dma_get_buffers_of_order)(struct file *filp, drm_dma_t *d,
/* Real error */
DRM_ERROR("mmap error\n");
retcode = (signed int)buf_priv->virtual;
- buf_priv->virtual = NULL;
+ buf_priv->virtual = 0;
}
up_write( ¤t->mm->mmap_sem );
up_write(¤t->mm->mmap_sem);
buf_priv->currently_mapped = I810_BUF_UNMAPPED;
- buf_priv->virtual = NULL;
+ buf_priv->virtual = 0;
return retcode;
}
{
/* Get v1.1 init data */
- if (copy_from_user(init, (drm_i810_pre12_init_t __user *)arg,
+ if (copy_from_user(init, (drm_i810_pre12_init_t *)arg,
sizeof(drm_i810_pre12_init_t))) {
return -EFAULT;
}
/* This is a v1.2 client, just get the v1.2 init data */
DRM_INFO("Using POST v1.2 init.\n");
- if (copy_from_user(init, (drm_i810_init_t __user *)arg,
+ if (copy_from_user(init, (drm_i810_init_t *)arg,
sizeof(drm_i810_init_t))) {
return -EFAULT;
}
int retcode = 0;
/* Get only the init func */
- if (copy_from_user(&init, (void __user *)arg, sizeof(drm_i810_init_func_t)))
+ if (copy_from_user(&init, (void *)arg, sizeof(drm_i810_init_func_t)))
return -EFAULT;
switch(init.func) {
default:
case I810_INIT_DMA_1_4:
DRM_INFO("Using v1.4 init.\n");
- if (copy_from_user(&init, (drm_i810_init_t __user *)arg,
+ if (copy_from_user(&init, (drm_i810_init_t *)arg,
sizeof(drm_i810_init_t))) {
return -EFAULT;
}
dev_priv->sarea_priv;
drm_i810_vertex_t vertex;
- if (copy_from_user(&vertex, (drm_i810_vertex_t __user *)arg, sizeof(vertex)))
+ if (copy_from_user(&vertex, (drm_i810_vertex_t *)arg, sizeof(vertex)))
return -EFAULT;
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
drm_device_t *dev = priv->dev;
drm_i810_clear_t clear;
- if (copy_from_user(&clear, (drm_i810_clear_t __user *)arg, sizeof(clear)))
+ if (copy_from_user(&clear, (drm_i810_clear_t *)arg, sizeof(clear)))
return -EFAULT;
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
dev_priv->sarea_priv;
- if (copy_from_user(&d, (drm_i810_dma_t __user *)arg, sizeof(d)))
+ if (copy_from_user(&d, (drm_i810_dma_t *)arg, sizeof(d)))
return -EFAULT;
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
DRM_DEBUG("i810_dma: %d returning %d, granted = %d\n",
current->pid, retcode, d.granted);
- if (copy_to_user((drm_dma_t __user *)arg, &d, sizeof(d)))
+ if (copy_to_user((drm_dma_t *)arg, &d, sizeof(d)))
return -EFAULT;
sarea_priv->last_dispatch = (int) hw_status[5];
dev_priv->sarea_priv;
drm_i810_mc_t mc;
- if (copy_from_user(&mc, (drm_i810_mc_t __user *)arg, sizeof(mc)))
+ if (copy_from_user(&mc, (drm_i810_mc_t *)arg, sizeof(mc)))
return -EFAULT;
data.offset = dev_priv->overlay_offset;
data.physical = dev_priv->overlay_physical;
- if (copy_to_user((drm_i810_overlay_t __user *)arg,&data,sizeof(data)))
+ if (copy_to_user((drm_i810_overlay_t *)arg,&data,sizeof(data)))
return -EFAULT;
return 0;
}
drm_i830_buf_priv_t *buf_priv = buf->dev_private;
drm_i830_private_t *dev_priv = dev->dev_private;
struct file_operations *old_fops;
- unsigned long virtual;
int retcode = 0;
if(buf_priv->currently_mapped == I830_BUF_MAPPED) return -EINVAL;
old_fops = filp->f_op;
filp->f_op = &i830_buffer_fops;
dev_priv->mmap_buffer = buf;
- virtual = do_mmap(filp, 0, buf->total, PROT_READ|PROT_WRITE,
- MAP_SHARED, buf->bus_address);
+ buf_priv->virtual = (void __user *)do_mmap(filp, 0, buf->total,
+ PROT_READ|PROT_WRITE,
+ MAP_SHARED,
+ buf->bus_address);
dev_priv->mmap_buffer = NULL;
filp->f_op = old_fops;
- if (IS_ERR((void *)virtual)) { /* ugh */
+ if (IS_ERR(buf_priv->virtual)) {
/* Real error */
DRM_ERROR("mmap error\n");
- retcode = virtual;
+ retcode = PTR_ERR(buf_priv->virtual);
buf_priv->virtual = NULL;
- } else {
- buf_priv->virtual = (void __user *)virtual;
}
up_write( ¤t->mm->mmap_sem );
}
int i830_dma_init(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+ unsigned int cmd, unsigned long __user arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
}
int i830_flush_ioctl(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+ unsigned int cmd, unsigned long __user arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
}
int i830_dma_vertex(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+ unsigned int cmd, unsigned long __user arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
}
int i830_clear_bufs(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+ unsigned int cmd, unsigned long __user arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
}
int i830_swap_bufs(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+ unsigned int cmd, unsigned long __user arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
}
int i830_flip_bufs(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+ unsigned int cmd, unsigned long __user arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
}
int i830_getage(struct inode *inode, struct file *filp, unsigned int cmd,
- unsigned long arg)
+ unsigned long __user arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
}
int i830_getbuf(struct inode *inode, struct file *filp, unsigned int cmd,
- unsigned long arg)
+ unsigned long __user arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
int i830_copybuf(struct inode *inode,
struct file *filp,
unsigned int cmd,
- unsigned long arg)
+ unsigned long __user arg)
{
/* Never copy - 2.4.x doesn't need it */
return 0;
int i830_getparam( struct inode *inode, struct file *filp, unsigned int cmd,
- unsigned long arg )
+ unsigned long __user arg )
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
int i830_setparam( struct inode *inode, struct file *filp, unsigned int cmd,
- unsigned long arg )
+ unsigned long __user arg )
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
/* Needs the lock as it touches the ring.
*/
int i830_irq_emit( struct inode *inode, struct file *filp, unsigned int cmd,
- unsigned long arg )
+ unsigned long __user arg )
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
return -EINVAL;
}
- if (copy_from_user( &emit, (drm_i830_irq_emit_t __user *)arg, sizeof(emit) ))
+ if (copy_from_user( &emit, (drm_i830_irq_emit_t *)arg, sizeof(emit) ))
return -EFAULT;
result = i830_emit_irq( dev );
return -EINVAL;
}
- if (copy_from_user( &irqwait, (drm_i830_irq_wait_t __user *)arg,
+ if (copy_from_user( &irqwait, (drm_i830_irq_wait_t *)arg,
sizeof(irqwait) ))
return -EFAULT;
LOCK_TEST_WITH_RETURN( dev, filp );
- DRM_COPY_FROM_USER_IOCTL( init, (drm_mga_init_t __user *)data, sizeof(init) );
+ DRM_COPY_FROM_USER_IOCTL( init, (drm_mga_init_t *)data, sizeof(init) );
switch ( init.func ) {
case MGA_INIT_DMA:
LOCK_TEST_WITH_RETURN( dev, filp );
- DRM_COPY_FROM_USER_IOCTL( lock, (drm_lock_t __user *)data, sizeof(lock) );
+ DRM_COPY_FROM_USER_IOCTL( lock, (drm_lock_t *)data, sizeof(lock) );
DRM_DEBUG( "%s%s%s\n",
(lock.flags & _DRM_LOCK_FLUSH) ? "flush, " : "",
DRM_DEVICE;
drm_device_dma_t *dma = dev->dma;
drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
- drm_dma_t __user *argp = (void __user *)data;
drm_dma_t d;
int ret = 0;
LOCK_TEST_WITH_RETURN( dev, filp );
- DRM_COPY_FROM_USER_IOCTL( d, argp, sizeof(d) );
+ DRM_COPY_FROM_USER_IOCTL( d, (drm_dma_t *)data, sizeof(d) );
/* Please don't send us buffers.
*/
ret = mga_dma_get_buffers( filp, dev, &d );
}
- DRM_COPY_TO_USER_IOCTL( argp, d, sizeof(d) );
+ DRM_COPY_TO_USER_IOCTL( (drm_dma_t *)data, d, sizeof(d) );
return ret;
}
typedef struct drm_mga_getparam {
int param;
- void __user *value;
+ void *value;
} drm_mga_getparam_t;
#endif
LOCK_TEST_WITH_RETURN( dev, filp );
- DRM_COPY_FROM_USER_IOCTL( clear, (drm_mga_clear_t __user *)data, sizeof(clear) );
+ DRM_COPY_FROM_USER_IOCTL( clear, (drm_mga_clear_t *)data, sizeof(clear) );
if ( sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS )
sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS;
LOCK_TEST_WITH_RETURN( dev, filp );
DRM_COPY_FROM_USER_IOCTL( vertex,
- (drm_mga_vertex_t __user *)data,
+ (drm_mga_vertex_t *)data,
sizeof(vertex) );
if(vertex.idx < 0 || vertex.idx > dma->buf_count) return DRM_ERR(EINVAL);
LOCK_TEST_WITH_RETURN( dev, filp );
DRM_COPY_FROM_USER_IOCTL( indices,
- (drm_mga_indices_t __user *)data,
+ (drm_mga_indices_t *)data,
sizeof(indices) );
if(indices.idx < 0 || indices.idx > dma->buf_count) return DRM_ERR(EINVAL);
LOCK_TEST_WITH_RETURN( dev, filp );
- DRM_COPY_FROM_USER_IOCTL( iload, (drm_mga_iload_t __user *)data, sizeof(iload) );
+ DRM_COPY_FROM_USER_IOCTL( iload, (drm_mga_iload_t *)data, sizeof(iload) );
#if 0
if ( mga_do_wait_for_idle( dev_priv ) < 0 ) {
LOCK_TEST_WITH_RETURN( dev, filp );
- DRM_COPY_FROM_USER_IOCTL( blit, (drm_mga_blit_t __user *)data, sizeof(blit) );
+ DRM_COPY_FROM_USER_IOCTL( blit, (drm_mga_blit_t *)data, sizeof(blit) );
if ( sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS )
sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS;
return DRM_ERR(EINVAL);
}
- DRM_COPY_FROM_USER_IOCTL( param, (drm_mga_getparam_t __user *)data,
+ DRM_COPY_FROM_USER_IOCTL( param, (drm_mga_getparam_t *)data,
sizeof(param) );
DRM_DEBUG( "pid=%d\n", DRM_CURRENTPID );
LOCK_TEST_WITH_RETURN( dev, filp );
- DRM_COPY_FROM_USER_IOCTL( init, (drm_r128_init_t __user *)data, sizeof(init) );
+ DRM_COPY_FROM_USER_IOCTL( init, (drm_r128_init_t *)data, sizeof(init) );
switch ( init.func ) {
case R128_INIT_CCE:
LOCK_TEST_WITH_RETURN( dev, filp );
- DRM_COPY_FROM_USER_IOCTL(stop, (drm_r128_cce_stop_t __user *)data, sizeof(stop) );
+ DRM_COPY_FROM_USER_IOCTL(stop, (drm_r128_cce_stop_t *)data, sizeof(stop) );
/* Flush any pending CCE commands. This ensures any outstanding
* commands are exectuted by the engine before we turn it off.
DRM_DEVICE;
drm_device_dma_t *dma = dev->dma;
int ret = 0;
- drm_dma_t __user *argp = (void __user *)data;
drm_dma_t d;
LOCK_TEST_WITH_RETURN( dev, filp );
- DRM_COPY_FROM_USER_IOCTL( d, argp, sizeof(d) );
+ DRM_COPY_FROM_USER_IOCTL( d, (drm_dma_t *) data, sizeof(d) );
/* Please don't send us buffers.
*/
ret = r128_cce_get_buffers( filp, dev, &d );
}
- DRM_COPY_TO_USER_IOCTL(argp, d, sizeof(d) );
+ DRM_COPY_TO_USER_IOCTL((drm_dma_t *) data, d, sizeof(d) );
return ret;
}
R128_READ_PIXELS = 0x04
} func;
int n;
- int __user *x;
- int __user *y;
- unsigned int __user *buffer;
- unsigned char __user *mask;
+ int *x;
+ int *y;
+ unsigned int *buffer;
+ unsigned char *mask;
} drm_r128_depth_t;
typedef struct drm_r128_stipple {
- unsigned int __user *mask;
+ unsigned int *mask;
} drm_r128_stipple_t;
typedef struct drm_r128_indirect {
typedef struct drm_r128_getparam {
int param;
- void __user *value;
+ void *value;
} drm_r128_getparam_t;
#endif
LOCK_TEST_WITH_RETURN( dev, filp );
- DRM_COPY_FROM_USER_IOCTL( clear, (drm_r128_clear_t __user *) data,
+ DRM_COPY_FROM_USER_IOCTL( clear, (drm_r128_clear_t *) data,
sizeof(clear) );
RING_SPACE_TEST_WITH_RETURN( dev_priv );
return DRM_ERR(EINVAL);
}
- DRM_COPY_FROM_USER_IOCTL( vertex, (drm_r128_vertex_t __user *) data,
+ DRM_COPY_FROM_USER_IOCTL( vertex, (drm_r128_vertex_t *) data,
sizeof(vertex) );
DRM_DEBUG( "pid=%d index=%d count=%d discard=%d\n",
return DRM_ERR(EINVAL);
}
- DRM_COPY_FROM_USER_IOCTL( elts, (drm_r128_indices_t __user *) data,
+ DRM_COPY_FROM_USER_IOCTL( elts, (drm_r128_indices_t *) data,
sizeof(elts) );
DRM_DEBUG( "pid=%d buf=%d s=%d e=%d d=%d\n", DRM_CURRENTPID,
LOCK_TEST_WITH_RETURN( dev, filp );
- DRM_COPY_FROM_USER_IOCTL( blit, (drm_r128_blit_t __user *) data,
+ DRM_COPY_FROM_USER_IOCTL( blit, (drm_r128_blit_t *) data,
sizeof(blit) );
DRM_DEBUG( "pid=%d index=%d\n", DRM_CURRENTPID, blit.idx );
LOCK_TEST_WITH_RETURN( dev, filp );
- DRM_COPY_FROM_USER_IOCTL( depth, (drm_r128_depth_t __user *) data,
+ DRM_COPY_FROM_USER_IOCTL( depth, (drm_r128_depth_t *) data,
sizeof(depth) );
RING_SPACE_TEST_WITH_RETURN( dev_priv );
LOCK_TEST_WITH_RETURN( dev, filp );
- DRM_COPY_FROM_USER_IOCTL( stipple, (drm_r128_stipple_t __user *) data,
+ DRM_COPY_FROM_USER_IOCTL( stipple, (drm_r128_stipple_t *) data,
sizeof(stipple) );
if ( DRM_COPY_FROM_USER( &mask, stipple.mask,
return DRM_ERR(EINVAL);
}
- DRM_COPY_FROM_USER_IOCTL( indirect, (drm_r128_indirect_t __user *) data,
+ DRM_COPY_FROM_USER_IOCTL( indirect, (drm_r128_indirect_t *) data,
sizeof(indirect) );
DRM_DEBUG( "indirect: idx=%d s=%d e=%d d=%d\n",
return DRM_ERR(EINVAL);
}
- DRM_COPY_FROM_USER_IOCTL( param, (drm_r128_getparam_t __user *)data,
+ DRM_COPY_FROM_USER_IOCTL( param, (drm_r128_getparam_t *)data,
sizeof(param) );
DRM_DEBUG( "pid=%d\n", DRM_CURRENTPID );
LOCK_TEST_WITH_RETURN( dev, filp );
- DRM_COPY_FROM_USER_IOCTL( init, (drm_radeon_init_t __user *)data, sizeof(init) );
+ DRM_COPY_FROM_USER_IOCTL( init, (drm_radeon_init_t *)data, sizeof(init) );
switch ( init.func ) {
case RADEON_INIT_CP:
LOCK_TEST_WITH_RETURN( dev, filp );
- DRM_COPY_FROM_USER_IOCTL( stop, (drm_radeon_cp_stop_t __user *)data, sizeof(stop) );
+ DRM_COPY_FROM_USER_IOCTL( stop, (drm_radeon_cp_stop_t *)data, sizeof(stop) );
if (!dev_priv->cp_running)
return 0;
DRM_DEVICE;
drm_device_dma_t *dma = dev->dma;
int ret = 0;
- drm_dma_t __user *argp = (void __user *)data;
drm_dma_t d;
LOCK_TEST_WITH_RETURN( dev, filp );
- DRM_COPY_FROM_USER_IOCTL( d, argp, sizeof(d) );
+ DRM_COPY_FROM_USER_IOCTL( d, (drm_dma_t *)data, sizeof(d) );
/* Please don't send us buffers.
*/
ret = radeon_cp_get_buffers( filp, dev, &d );
}
- DRM_COPY_TO_USER_IOCTL( argp, d, sizeof(d) );
+ DRM_COPY_TO_USER_IOCTL( (drm_dma_t *)data, d, sizeof(d) );
return ret;
}
unsigned int clear_depth;
unsigned int color_mask;
unsigned int depth_mask; /* misnamed field: should be stencil */
- drm_radeon_clear_rect_t __user *depth_boxes;
+ drm_radeon_clear_rect_t *depth_boxes;
} drm_radeon_clear_t;
typedef struct drm_radeon_vertex {
int idx; /* Index of vertex buffer */
int discard; /* Client finished with buffer? */
int nr_states;
- drm_radeon_state_t __user *state;
+ drm_radeon_state_t *state;
int nr_prims;
- drm_radeon_prim_t __user *prim;
+ drm_radeon_prim_t *prim;
} drm_radeon_vertex2_t;
/* v1.3 - obsoletes drm_radeon_vertex2
*/
typedef struct drm_radeon_cmd_buffer {
int bufsz;
- char __user *buf;
+ char *buf;
int nbox;
- drm_clip_rect_t __user *boxes;
+ drm_clip_rect_t *boxes;
} drm_radeon_cmd_buffer_t;
typedef struct drm_radeon_tex_image {
unsigned int x, y; /* Blit coordinates */
unsigned int width, height;
- const void __user *data;
+ const void *data;
} drm_radeon_tex_image_t;
typedef struct drm_radeon_texture {
int format;
int width; /* Texture image coordinates */
int height;
- drm_radeon_tex_image_t __user *image;
+ drm_radeon_tex_image_t *image;
} drm_radeon_texture_t;
typedef struct drm_radeon_stipple {
- unsigned int __user *mask;
+ unsigned int *mask;
} drm_radeon_stipple_t;
typedef struct drm_radeon_indirect {
typedef struct drm_radeon_getparam {
int param;
- void __user *value;
+ void *value;
} drm_radeon_getparam_t;
/* 1.6: Set up a memory manager for regions of shared memory:
int region;
int alignment;
int size;
- int __user *region_offset; /* offset from start of fb or GART */
+ int *region_offset; /* offset from start of fb or GART */
} drm_radeon_mem_alloc_t;
typedef struct drm_radeon_mem_free {
/* 1.6: Userspace can request & wait on irq's:
*/
typedef struct drm_radeon_irq_emit {
- int __user *irq_seq;
+ int *irq_seq;
} drm_radeon_irq_emit_t;
typedef struct drm_radeon_irq_wait {
#define OUT_RING_USER_TABLE( tab, sz ) do { \
int _size = (sz); \
- int __user *_tab = (tab); \
+ int *_tab = (tab); \
\
if (write + _size > mask) { \
int i = (mask+1) - write; \
return DRM_ERR(EINVAL);
}
- DRM_COPY_FROM_USER_IOCTL( emit, (drm_radeon_irq_emit_t __user *)data,
+ DRM_COPY_FROM_USER_IOCTL( emit, (drm_radeon_irq_emit_t *)data,
sizeof(emit) );
result = radeon_emit_irq( dev );
return DRM_ERR(EINVAL);
}
- DRM_COPY_FROM_USER_IOCTL( irqwait, (drm_radeon_irq_wait_t __user*)data,
+ DRM_COPY_FROM_USER_IOCTL( irqwait, (drm_radeon_irq_wait_t *)data,
sizeof(irqwait) );
return radeon_wait_irq( dev, irqwait.irq_seq );
return DRM_ERR(EINVAL);
}
- DRM_COPY_FROM_USER_IOCTL( alloc, (drm_radeon_mem_alloc_t __user *)data,
+ DRM_COPY_FROM_USER_IOCTL( alloc, (drm_radeon_mem_alloc_t *)data,
sizeof(alloc) );
heap = get_heap( dev_priv, alloc.region );
return DRM_ERR(EINVAL);
}
- DRM_COPY_FROM_USER_IOCTL( memfree, (drm_radeon_mem_free_t __user *)data,
+ DRM_COPY_FROM_USER_IOCTL( memfree, (drm_radeon_mem_free_t *)data,
sizeof(memfree) );
heap = get_heap( dev_priv, memfree.region );
return DRM_ERR(EINVAL);
}
- DRM_COPY_FROM_USER_IOCTL( initheap, (drm_radeon_mem_init_heap_t __user *)data,
+ DRM_COPY_FROM_USER_IOCTL( initheap, (drm_radeon_mem_init_heap_t *)data,
sizeof(initheap) );
heap = get_heap( dev_priv, initheap.region );
static __inline__ int radeon_check_and_fixup_offset_user( drm_radeon_private_t *dev_priv,
drm_file_t *filp_priv,
- u32 __user *offset ) {
+ u32 *offset ) {
u32 off;
DRM_GET_USER_UNCHECKED( off, offset );
static __inline__ int radeon_check_and_fixup_packets( drm_radeon_private_t *dev_priv,
drm_file_t *filp_priv,
int id,
- u32 __user *data ) {
+ u32 *data ) {
switch ( id ) {
case RADEON_EMIT_PP_MISC:
drm_file_t *filp_priv,
drm_radeon_cmd_buffer_t *cmdbuf,
unsigned int *cmdsz ) {
- u32 tmp[4];
- u32 __user *cmd = (u32 __user *)cmdbuf->buf;
+ u32 tmp[4], *cmd = ( u32* )cmdbuf->buf;
if ( DRM_COPY_FROM_USER_UNCHECKED( tmp, cmd, sizeof( tmp ) ) ) {
DRM_ERROR( "Failed to copy data from user space\n" );
drm_buf_t *buf;
u32 format;
u32 *buffer;
- const u8 __user *data;
+ const u8 *data;
int size, dwords, tex_width, blit_width;
u32 height;
int i;
* update them for a multi-pass texture blit.
*/
height = image->height;
- data = (const u8 __user *)image->data;
+ data = (const u8 *)image->data;
size = height * blit_width;
/* Update the input parameters for next time */
image->y += height;
image->height -= height;
- image->data = (const u8 __user *)image->data + size;
+ image->data = (const u8 *)image->data + size;
} while (image->height > 0);
/* Flush the pixel cache after the blit completes. This ensures
LOCK_TEST_WITH_RETURN( dev, filp );
- DRM_COPY_FROM_USER_IOCTL( clear, (drm_radeon_clear_t __user *)data,
+ DRM_COPY_FROM_USER_IOCTL( clear, (drm_radeon_clear_t *)data,
sizeof(clear) );
RING_SPACE_TEST_WITH_RETURN( dev_priv );
DRM_GET_PRIV_WITH_RETURN( filp_priv, filp );
- DRM_COPY_FROM_USER_IOCTL( vertex, (drm_radeon_vertex_t __user *)data,
+ DRM_COPY_FROM_USER_IOCTL( vertex, (drm_radeon_vertex_t *)data,
sizeof(vertex) );
DRM_DEBUG( "pid=%d index=%d count=%d discard=%d\n",
DRM_GET_PRIV_WITH_RETURN( filp_priv, filp );
- DRM_COPY_FROM_USER_IOCTL( elts, (drm_radeon_indices_t __user *)data,
+ DRM_COPY_FROM_USER_IOCTL( elts, (drm_radeon_indices_t *)data,
sizeof(elts) );
DRM_DEBUG( "pid=%d index=%d start=%d end=%d discard=%d\n",
LOCK_TEST_WITH_RETURN( dev, filp );
- DRM_COPY_FROM_USER_IOCTL( tex, (drm_radeon_texture_t __user *)data, sizeof(tex) );
+ DRM_COPY_FROM_USER_IOCTL( tex, (drm_radeon_texture_t *)data, sizeof(tex) );
if ( tex.image == NULL ) {
DRM_ERROR( "null texture image!\n" );
}
if ( DRM_COPY_FROM_USER( &image,
- (drm_radeon_tex_image_t __user *)tex.image,
+ (drm_radeon_tex_image_t *)tex.image,
sizeof(image) ) )
return DRM_ERR(EFAULT);
LOCK_TEST_WITH_RETURN( dev, filp );
- DRM_COPY_FROM_USER_IOCTL( stipple, (drm_radeon_stipple_t __user *)data,
+ DRM_COPY_FROM_USER_IOCTL( stipple, (drm_radeon_stipple_t *)data,
sizeof(stipple) );
if ( DRM_COPY_FROM_USER( &mask, stipple.mask, 32 * sizeof(u32) ) )
return DRM_ERR(EINVAL);
}
- DRM_COPY_FROM_USER_IOCTL( indirect, (drm_radeon_indirect_t __user *)data,
+ DRM_COPY_FROM_USER_IOCTL( indirect, (drm_radeon_indirect_t *)data,
sizeof(indirect) );
DRM_DEBUG( "indirect: idx=%d s=%d e=%d d=%d\n",
DRM_GET_PRIV_WITH_RETURN( filp_priv, filp );
- DRM_COPY_FROM_USER_IOCTL( vertex, (drm_radeon_vertex2_t __user *)data,
+ DRM_COPY_FROM_USER_IOCTL( vertex, (drm_radeon_vertex2_t *)data,
sizeof(vertex) );
DRM_DEBUG( "pid=%d index=%d discard=%d\n",
{
int id = (int)header.packet.packet_id;
int sz, reg;
- int __user *data = (int __user *)cmdbuf->buf;
+ int *data = (int *)cmdbuf->buf;
RING_LOCALS;
if (id >= RADEON_MAX_STATE_PACKETS)
drm_radeon_cmd_buffer_t *cmdbuf )
{
int sz = header.scalars.count;
- int __user *data = (int __user *)cmdbuf->buf;
+ int *data = (int *)cmdbuf->buf;
int start = header.scalars.offset;
int stride = header.scalars.stride;
RING_LOCALS;
drm_radeon_cmd_buffer_t *cmdbuf )
{
int sz = header.scalars.count;
- int __user *data = (int __user *)cmdbuf->buf;
+ int *data = (int *)cmdbuf->buf;
int start = ((unsigned int)header.scalars.offset) + 0x100;
int stride = header.scalars.stride;
RING_LOCALS;
drm_radeon_cmd_buffer_t *cmdbuf )
{
int sz = header.vectors.count;
- int __user *data = (int __user *)cmdbuf->buf;
+ int *data = (int *)cmdbuf->buf;
int start = header.vectors.offset;
int stride = header.vectors.stride;
RING_LOCALS;
{
drm_radeon_private_t *dev_priv = dev->dev_private;
unsigned int cmdsz;
- int __user *cmd = (int __user *)cmdbuf->buf;
- int ret;
+ int *cmd = (int *)cmdbuf->buf, ret;
RING_LOCALS;
DRM_DEBUG("\n");
drm_radeon_private_t *dev_priv = dev->dev_private;
drm_clip_rect_t box;
unsigned int cmdsz;
- int __user *cmd = (int __user *)cmdbuf->buf;
- int ret;
- drm_clip_rect_t __user *boxes = cmdbuf->boxes;
+ int *cmd = (int *)cmdbuf->buf, ret;
+ drm_clip_rect_t *boxes = cmdbuf->boxes;
int i = 0;
RING_LOCALS;
DRM_GET_PRIV_WITH_RETURN( filp_priv, filp );
- DRM_COPY_FROM_USER_IOCTL( cmdbuf, (drm_radeon_cmd_buffer_t __user *)data,
+ DRM_COPY_FROM_USER_IOCTL( cmdbuf, (drm_radeon_cmd_buffer_t *)data,
sizeof(cmdbuf) );
RING_SPACE_TEST_WITH_RETURN( dev_priv );
while ( cmdbuf.bufsz >= sizeof(header) ) {
- if (DRM_GET_USER_UNCHECKED( header.i, (int __user *)cmdbuf.buf )) {
+ if (DRM_GET_USER_UNCHECKED( header.i, (int *)cmdbuf.buf )) {
DRM_ERROR("__get_user %p\n", cmdbuf.buf);
return DRM_ERR(EFAULT);
}
return DRM_ERR(EINVAL);
}
- DRM_COPY_FROM_USER_IOCTL( param, (drm_radeon_getparam_t __user *)data,
+ DRM_COPY_FROM_USER_IOCTL( param, (drm_radeon_getparam_t *)data,
sizeof(param) );
DRM_DEBUG( "pid=%d\n", DRM_CURRENTPID );
DRM_GET_PRIV_WITH_RETURN( filp_priv, filp );
- DRM_COPY_FROM_USER_IOCTL( sp, ( drm_radeon_setparam_t __user * )data,
+ DRM_COPY_FROM_USER_IOCTL( sp, ( drm_radeon_setparam_t* )data,
sizeof( sp ) );
switch( sp.param ) {
drm_sis_private_t *dev_priv = dev->dev_private;
drm_sis_fb_t fb;
- DRM_COPY_FROM_USER_IOCTL(fb, (drm_sis_fb_t __user *)data, sizeof(fb));
+ DRM_COPY_FROM_USER_IOCTL(fb, (drm_sis_fb_t *)data, sizeof(fb));
if (dev_priv == NULL) {
dev->dev_private = DRM(calloc)(1, sizeof(drm_sis_private_t),
{
DRM_DEVICE;
drm_sis_private_t *dev_priv = dev->dev_private;
- drm_sis_mem_t __user *argp = (void __user *)data;
drm_sis_mem_t fb;
PMemBlock block;
int retval = 0;
if (dev_priv == NULL || dev_priv->FBHeap == NULL)
return DRM_ERR(EINVAL);
- DRM_COPY_FROM_USER_IOCTL(fb, argp, sizeof(fb));
+ DRM_COPY_FROM_USER_IOCTL(fb, (drm_sis_mem_t *)data, sizeof(fb));
block = mmAllocMem(dev_priv->FBHeap, fb.size, 0, 0);
if (block) {
fb.free = 0;
}
- DRM_COPY_TO_USER_IOCTL(argp, fb, sizeof(fb));
+ DRM_COPY_TO_USER_IOCTL((drm_sis_mem_t *)data, fb, sizeof(fb));
DRM_DEBUG("alloc fb, size = %d, offset = %d\n", fb.size, fb.offset);
if (dev_priv == NULL || dev_priv->FBHeap == NULL)
return DRM_ERR(EINVAL);
- DRM_COPY_FROM_USER_IOCTL(fb, (drm_sis_mem_t __user *)data, sizeof(fb));
+ DRM_COPY_FROM_USER_IOCTL(fb, (drm_sis_mem_t *)data, sizeof(fb));
if (!mmBlockInHeap(dev_priv->FBHeap, (PMemBlock)fb.free))
return DRM_ERR(EINVAL);
if (dev_priv->AGPHeap != NULL)
return DRM_ERR(EINVAL);
- DRM_COPY_FROM_USER_IOCTL(agp, (drm_sis_agp_t __user *)data, sizeof(agp));
+ DRM_COPY_FROM_USER_IOCTL(agp, (drm_sis_agp_t *)data, sizeof(agp));
dev_priv->AGPHeap = mmInit(agp.offset, agp.size);
{
DRM_DEVICE;
drm_sis_private_t *dev_priv = dev->dev_private;
- drm_sis_mem_t __user *argp = (void __user *)data;
drm_sis_mem_t agp;
PMemBlock block;
int retval = 0;
if (dev_priv == NULL || dev_priv->AGPHeap == NULL)
return DRM_ERR(EINVAL);
- DRM_COPY_FROM_USER_IOCTL(agp, argp, sizeof(agp));
+ DRM_COPY_FROM_USER_IOCTL(agp, (drm_sis_mem_t *)data, sizeof(agp));
block = mmAllocMem(dev_priv->AGPHeap, agp.size, 0, 0);
if (block) {
agp.free = 0;
}
- DRM_COPY_TO_USER_IOCTL(argp, agp, sizeof(agp));
+ DRM_COPY_TO_USER_IOCTL((drm_sis_mem_t *)data, agp, sizeof(agp));
DRM_DEBUG("alloc agp, size = %d, offset = %d\n", agp.size, agp.offset);
if (dev_priv == NULL || dev_priv->AGPHeap == NULL)
return DRM_ERR(EINVAL);
- DRM_COPY_FROM_USER_IOCTL(agp, (drm_sis_mem_t __user *)data, sizeof(agp));
+ DRM_COPY_FROM_USER_IOCTL(agp, (drm_sis_mem_t *)data, sizeof(agp));
if (!mmBlockInHeap(dev_priv->AGPHeap, (PMemBlock)agp.free))
return DRM_ERR(EINVAL);
}
static ssize_t
-ds1620_read(struct file *file, char __user *buf, size_t count, loff_t *ptr)
+ds1620_read(struct file *file, char *buf, size_t count, loff_t *ptr)
{
signed int cur_temp;
signed char cur_temp_degF;
+ /* Can't seek (pread) on this device */
+ if (ptr != &file->f_pos)
+ return -ESPIPE;
+
cur_temp = cvt_9_to_int(ds1620_in(THERM_READ_TEMP, 9)) >> 1;
/* convert to Fahrenheit, as per wdt.c */
ds1620_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
{
struct therm therm;
- union {
- struct therm __user *therm;
- int __user *i;
- } uarg;
int i;
- uarg.i = (int __user *)arg;
-
switch(cmd) {
case CMD_SET_THERMOSTATE:
case CMD_SET_THERMOSTATE2:
return -EPERM;
if (cmd == CMD_SET_THERMOSTATE) {
- if (get_user(therm.hi, uarg.i))
+ if (get_user(therm.hi, (int *)arg))
return -EFAULT;
therm.lo = therm.hi - 3;
} else {
- if (copy_from_user(&therm, uarg.therm, sizeof(therm)))
+ if (copy_from_user(&therm, (void *)arg, sizeof(therm)))
return -EFAULT;
}
therm.hi >>= 1;
if (cmd == CMD_GET_THERMOSTATE) {
- if (put_user(therm.hi, uarg.i))
+ if (put_user(therm.hi, (int *)arg))
return -EFAULT;
} else {
- if (copy_to_user(uarg.therm, &therm, sizeof(therm)))
+ if (copy_to_user((void *)arg, &therm, sizeof(therm)))
return -EFAULT;
}
break;
if (cmd == CMD_GET_TEMPERATURE)
i >>= 1;
- return put_user(i, uarg.i) ? -EFAULT : 0;
+ return put_user(i, (int *)arg) ? -EFAULT : 0;
case CMD_GET_STATUS:
i = ds1620_in(THERM_READ_CONFIG, 8) & 0xe3;
- return put_user(i, uarg.i) ? -EFAULT : 0;
+ return put_user(i, (int *)arg) ? -EFAULT : 0;
case CMD_GET_FAN:
i = netwinder_get_fan();
- return put_user(i, uarg.i) ? -EFAULT : 0;
+ return put_user(i, (int *)arg) ? -EFAULT : 0;
case CMD_SET_FAN:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (get_user(i, uarg.i))
+ if (get_user(i, (int *)arg))
return -EFAULT;
netwinder_set_fan(i);
static struct file_operations ds1620_fops = {
.owner = THIS_MODULE,
- .open = nonseekable_open,
.read = ds1620_read,
.ioctl = ds1620_ioctl,
};
return ret;
#ifdef THERM_USE_PROC
- proc_therm_ds1620 = create_proc_entry("therm", 0, NULL);
+ proc_therm_ds1620 = create_proc_entry("therm", 0, 0);
if (proc_therm_ds1620)
proc_therm_ds1620->read_proc = proc_therm_ds1620_read;
else
}
case 2: /* 16 bit */
{
- const short *data;
+ short *data;
count /= 2;
- data = (const short *)buf;
+ data = (short*) buf;
handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_TRANSMIT,
get_user(dsp56k_host_interface.data.w[1], data+n++));
return 2*n;
}
case 4: /* 32 bit */
{
- const long *data;
+ long *data;
count /= 4;
- data = (const long *)buf;
+ data = (long*) buf;
handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_TRANSMIT,
get_user(dsp56k_host_interface.data.l, data+n++));
return 4*n;
char ch;
int i = 0, retries;
+ /* Can't seek (pread) on the DoubleTalk. */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
TRACE_TEXT("(dtlk_read");
/* printk("DoubleTalk PC - dtlk_read()\n"); */
}
#endif
+ /* Can't seek (pwrite) on the DoubleTalk. */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
if (iminor(file->f_dentry->d_inode) != DTLK_MINOR)
return -EINVAL;
{
TRACE_TEXT("(dtlk_open");
- nonseekable_open(inode, file);
switch (iminor(inode)) {
case DTLK_MINOR:
if (dtlk_busy)
return -EBUSY;
- return nonseekable_open(inode, file);
+ return 0;
default:
return -ENXIO;
#define ENABLE_PCI
#endif /* CONFIG_PCI */
-#define putUser(arg1, arg2) put_user(arg1, (unsigned long __user *)arg2)
-#define getUser(arg1, arg2) get_user(arg1, (unsigned __user *)arg2)
+#define putUser(arg1, arg2) put_user(arg1, (unsigned long *)arg2)
+#define getUser(arg1, arg2) get_user(arg1, (unsigned int *)arg2)
#ifdef ENABLE_PCI
#include <linux/pci.h>
void epca_setup(char *, int *);
void console_print(const char *);
-static int get_termio(struct tty_struct *, struct termio __user *);
+static int get_termio(struct tty_struct *, struct termio *);
static int pc_write(struct tty_struct *, int, const unsigned char *, int);
int pc_init(void);
if (bytesAvailable)
{ /* Begin bytesAvailable */
- /* ---------------------------------------------------------------
- The below function reads data from user memory. This routine
- can not be used in an interrupt routine. (Because it may
- generate a page fault) It can only be called while we can the
- user context is accessible.
-
- The prototype is :
- inline void copy_from_user(void * to, const void * from,
- unsigned long count);
-
- I also think (Check hackers guide) that optimization must
- be turned ON. (Which sounds strange to me...)
-
- Remember copy_from_user WILL generate a page fault if the
- user memory being accessed has been swapped out. This can
- cause this routine to temporarily sleep while this page
- fault is occurring.
-
- ----------------------------------------------------------------- */
- if (copy_from_user(ch->tmp_buf, buf,
- bytesAvailable))
- return -EFAULT;
+ /* Can the user buffer be accessed at the moment ? */
+ if (verify_area(VERIFY_READ, (char*)buf, bytesAvailable))
+ bytesAvailable = 0; /* Can't do; try again later */
+ else /* Evidently it can, began transmission */
+ { /* Begin if area verified */
+ /* ---------------------------------------------------------------
+ The below function reads data from user memory. This routine
+ can not be used in an interrupt routine. (Because it may
+ generate a page fault) It can only be called while we can the
+ user context is accessible.
+
+ The prototype is :
+ inline void copy_from_user(void * to, const void * from,
+ unsigned long count);
+
+ I also think (Check hackers guide) that optimization must
+ be turned ON. (Which sounds strange to me...)
+
+ Remember copy_from_user WILL generate a page fault if the
+ user memory being accessed has been swapped out. This can
+ cause this routine to temporarily sleep while this page
+ fault is occurring.
+
+ ----------------------------------------------------------------- */
+
+ if (copy_from_user(ch->tmp_buf, buf,
+ bytesAvailable))
+ return -EFAULT;
+
+ } /* End if area verified */
+
} /* End bytesAvailable */
/* ------------------------------------------------------------------
ch->boardnum = crd;
ch->channelnum = i;
ch->magic = EPCA_MAGIC;
- ch->tty = NULL;
+ ch->tty = 0;
if (shrinkmem)
{
{ /* Begin receive_data */
unchar *rptr;
- struct termios *ts = NULL;
+ struct termios *ts = 0;
struct tty_struct *tty;
volatile struct board_chan *bc;
register int dataToRead, wrapgap, bytesAvailable;
static int info_ioctl(struct tty_struct *tty, struct file * file,
unsigned int cmd, unsigned long arg)
{
+ int error;
+
switch (cmd)
{ /* Begin switch cmd */
struct digi_info di ;
int brd;
- getUser(brd, (unsigned int __user *)arg);
+ getUser(brd, (unsigned int *)arg);
+
+ if ((error = verify_area(VERIFY_WRITE, (char*)arg, sizeof(di))))
+ {
+ printk(KERN_ERR "DIGI_GETINFO : verify area size 0x%x failed\n",sizeof(di));
+ return(error);
+ }
if ((brd < 0) || (brd >= num_cards) || (num_cards == 0))
return (-ENODEV);
di.port = boards[brd].port ;
di.membase = boards[brd].membase ;
- if (copy_to_user((void __user *)arg, &di, sizeof (di)))
+ if (copy_to_user((char *)arg, &di, sizeof (di)))
return -EFAULT;
break;
epcaparam(tty,ch);
memoff(ch);
restore_flags(flags);
- return 0;
}
static int pc_ioctl(struct tty_struct *tty, struct file * file,
{ /* Begin pc_ioctl */
digiflow_t dflow;
- int retval;
+ int retval, error;
unsigned long flags;
unsigned int mflag, mstat;
unsigned char startc, stopc;
volatile struct board_chan *bc;
struct channel *ch = (struct channel *) tty->driver_data;
- void __user *argp = (void __user *)arg;
if (ch)
bc = ch->brdchan;
{ /* Begin switch cmd */
case TCGETS:
- if (copy_to_user(argp,
+ if (copy_to_user((struct termios *)arg,
tty->termios, sizeof(struct termios)))
return -EFAULT;
return(0);
case TCGETA:
- return get_termio(tty, argp);
+ return get_termio(tty, (struct termio *)arg);
case TCSBRK: /* SVID version: non-zero arg --> no break */
return 0;
case TIOCGSOFTCAR:
- if (put_user(C_CLOCAL(tty)?1:0, (unsigned long __user *)arg))
- return -EFAULT;
+
+ error = verify_area(VERIFY_WRITE, (void *) arg,sizeof(long));
+ if (error)
+ return error;
+
+ putUser(C_CLOCAL(tty) ? 1 : 0,
+ (unsigned long *) arg);
return 0;
case TIOCSSOFTCAR:
+ /*RONNIE PUT VERIFY_READ (See above) check here */
{
unsigned int value;
- if (get_user(value, (unsigned __user *)argp))
- return -EFAULT;
+ getUser(value, (unsigned int *)arg);
tty->termios->c_cflag =
((tty->termios->c_cflag & ~CLOCAL) |
(value ? CLOCAL : 0));
case TIOCMODG:
mflag = pc_tiocmget(tty, file);
- if (put_user(mflag, (unsigned long __user *)argp))
+ if (putUser(mflag, (unsigned int *) arg))
return -EFAULT;
break;
case TIOCMODS:
- if (get_user(mstat, (unsigned __user *)argp))
+ if (getUser(mstat, (unsigned int *)arg))
return -EFAULT;
return pc_tiocmset(tty, file, mstat, ~mstat);
break;
case DIGI_GETA:
- if (copy_to_user(argp, &ch->digiext, sizeof(digi_t)))
+ if (copy_to_user((char*)arg, &ch->digiext,
+ sizeof(digi_t)))
return -EFAULT;
break;
/* Fall Thru */
case DIGI_SETA:
- if (copy_from_user(&ch->digiext, argp, sizeof(digi_t)))
+ if (copy_from_user(&ch->digiext, (char*)arg,
+ sizeof(digi_t)))
return -EFAULT;
if (ch->digiext.digi_flags & DIGI_ALTPIN)
memoff(ch);
restore_flags(flags);
- if (copy_to_user(argp, &dflow, sizeof(dflow)))
+ if (copy_to_user((char*)arg, &dflow, sizeof(dflow)))
return -EFAULT;
break;
stopc = ch->stopca;
}
- if (copy_from_user(&dflow, argp, sizeof(dflow)))
+ if (copy_from_user(&dflow, (char*)arg, sizeof(dflow)))
return -EFAULT;
if (dflow.startc != startc || dflow.stopc != stopc)
/* --------------------- Begin get_termio ----------------------- */
-static int get_termio(struct tty_struct * tty, struct termio __user * termio)
+static int get_termio(struct tty_struct * tty, struct termio * termio)
{ /* Begin get_termio */
- return kernel_termios_to_user_termio(termio, tty->termios);
+ int error;
+
+ error = verify_area(VERIFY_WRITE, termio, sizeof (struct termio));
+ if (error)
+ return error;
+
+ kernel_termios_to_user_termio(termio, tty->termios);
+
+ return 0;
} /* End get_termio */
/* ---------------------- Begin epca_setup -------------------------- */
void epca_setup(char *str, int *ints)
else if (request_dma(dma, "esp serial")) {
free_pages((unsigned long)dma_buffer,
get_order(DMA_BUFFER_SZ));
- dma_buffer = NULL;
+ dma_buffer = 0;
info->stat_flags |= ESP_STAT_USE_PIO;
}
free_dma(dma);
free_pages((unsigned long)dma_buffer,
get_order(DMA_BUFFER_SZ));
- dma_buffer = NULL;
+ dma_buffer = 0;
}
}
if (info->xmit_buf) {
free_page((unsigned long) info->xmit_buf);
- info->xmit_buf = NULL;
+ info->xmit_buf = 0;
}
info->IER = 0;
*/
static int get_serial_info(struct esp_struct * info,
- struct serial_struct __user *retinfo)
+ struct serial_struct * retinfo)
{
struct serial_struct tmp;
+ if (!retinfo)
+ return -EFAULT;
memset(&tmp, 0, sizeof(tmp));
tmp.type = PORT_16550A;
tmp.line = info->line;
}
static int get_esp_config(struct esp_struct * info,
- struct hayes_esp_config __user *retinfo)
+ struct hayes_esp_config * retinfo)
{
struct hayes_esp_config tmp;
}
static int set_serial_info(struct esp_struct * info,
- struct serial_struct __user *new_info)
+ struct serial_struct * new_info)
{
struct serial_struct new_serial;
struct esp_struct old_info;
}
static int set_esp_config(struct esp_struct * info,
- struct hayes_esp_config __user * new_info)
+ struct hayes_esp_config * new_info)
{
struct hayes_esp_config new_config;
unsigned int change_dma;
* transmit holding register is empty. This functionality
* allows an RS485 driver to be written in user space.
*/
-static int get_lsr_info(struct esp_struct * info, unsigned int __user *value)
+static int get_lsr_info(struct esp_struct * info, unsigned int *value)
{
unsigned char status;
unsigned int result;
{
struct esp_struct * info = (struct esp_struct *)tty->driver_data;
struct async_icount cprev, cnow; /* kernel counter temps */
- struct serial_icounter_struct __user *p_cuser; /* user space */
- void __user *argp = (void __user *)arg;
+ struct serial_icounter_struct *p_cuser; /* user space */
if (serial_paranoia_check(info, tty->name, "rs_ioctl"))
return -ENODEV;
switch (cmd) {
case TIOCGSERIAL:
- return get_serial_info(info, argp);
+ return get_serial_info(info,
+ (struct serial_struct *) arg);
case TIOCSSERIAL:
- return set_serial_info(info, argp);
+ return set_serial_info(info,
+ (struct serial_struct *) arg);
case TIOCSERCONFIG:
/* do not reconfigure after initial configuration */
return 0;
case TIOCSERGWILD:
- return put_user(0L, (unsigned long __user *)argp);
+ return put_user(0L, (unsigned long *) arg);
case TIOCSERGETLSR: /* Get line status register */
- return get_lsr_info(info, argp);
+ return get_lsr_info(info, (unsigned int *) arg);
case TIOCSERSWILD:
if (!capable(CAP_SYS_ADMIN))
cli();
cnow = info->icount;
sti();
- p_cuser = argp;
+ p_cuser = (struct serial_icounter_struct *) arg;
if (put_user(cnow.cts, &p_cuser->cts) ||
put_user(cnow.dsr, &p_cuser->dsr) ||
put_user(cnow.rng, &p_cuser->rng) ||
return 0;
case TIOCGHAYESESP:
- return get_esp_config(info, argp);
+ return (get_esp_config(info, (struct hayes_esp_config *)arg));
case TIOCSHAYESESP:
- return set_esp_config(info, argp);
+ return (set_esp_config(info, (struct hayes_esp_config *)arg));
default:
return -ENOIOCTLCMD;
tty->ldisc.flush_buffer(tty);
tty->closing = 0;
info->event = 0;
- info->tty = NULL;
+ info->tty = 0;
if (info->blocked_open) {
if (info->close_delay) {
info->event = 0;
info->count = 0;
info->flags &= ~ASYNC_NORMAL_ACTIVE;
- info->tty = NULL;
+ info->tty = 0;
wake_up_interruptible(&info->open_wait);
}
int i, offset;
int region_start;
struct esp_struct * info;
- struct esp_struct *last_primary = NULL;
+ struct esp_struct *last_primary = 0;
int esp[] = {0x100,0x140,0x180,0x200,0x240,0x280,0x300,0x380};
esp_driver = alloc_tty_driver(NR_PORTS);
/* compress a block of memory, decompress a block of memory, or to identify */
/* itself. For more information, see the specification file "compress.h". */
-EXPORT void lzrw3_compress(
- UWORD action, /* Action to be performed. */
- UBYTE *wrk_mem, /* Address of working memory we can use.*/
- UBYTE *src_adr, /* Address of input data. */
- LONG src_len, /* Length of input data. */
- UBYTE *dst_adr, /* Address to put output data. */
- void *p_dst_len /* Address of longword for length of output data.*/
-)
+EXPORT void lzrw3_compress(action,wrk_mem,src_adr,src_len,dst_adr,p_dst_len)
+UWORD action; /* Action to be performed. */
+UBYTE *wrk_mem; /* Address of working memory we can use. */
+UBYTE *src_adr; /* Address of input data. */
+LONG src_len; /* Length of input data. */
+UBYTE *dst_adr; /* Address to put output data. */
+void *p_dst_len; /* Address of longword for length of output data. */
{
switch (action)
{
(((40543*(((*(PTR))<<8)^((*((PTR)+1))<<4)^(*((PTR)+2))))>>4) & 0xFFF)
/******************************************************************************/
-
+
+LOCAL void compress_compress
+ (p_wrk_mem,p_src_first,src_len,p_dst_first,p_dst_len)
/* Input : Hand over the required amount of working memory in p_wrk_mem. */
/* Input : Specify input block using p_src_first and src_len. */
/* Input : Point p_dst_first to the start of the output zone (OZ). */
/* Output : Output block in Mem[p_dst_first..p_dst_first+*p_dst_len-1]. May */
/* Output : write in OZ=Mem[p_dst_first..p_dst_first+src_len+MAX_CMP_GROUP-1].*/
/* Output : Upon completion guaranteed *p_dst_len<=src_len+FLAG_BYTES. */
-LOCAL void compress_compress(UBYTE *p_wrk_mem,
- UBYTE *p_src_first, ULONG src_len,
- UBYTE *p_dst_first, LONG *p_dst_len)
+UBYTE *p_wrk_mem;
+UBYTE *p_src_first;
+ULONG src_len;
+UBYTE *p_dst_first;
+LONG *p_dst_len;
{
/* p_src and p_dst step through the source and destination blocks. */
register UBYTE *p_src = p_src_first;
/* to the hash table entry corresponding to the second youngest literal. */
/* Note: p_h1=0=>p_h2=0 because zero values denote absence of a pending */
/* literal. The variables are initialized to zero meaning an empty "buffer". */
- UBYTE **p_h1=NULL;
- UBYTE **p_h2=NULL;
+ UBYTE **p_h1=0;
+ UBYTE **p_h2=0;
/* To start, we write the flag bytes. Being optimistic, we set the flag to */
/* FLAG_COMPRESS. The remaining flag bytes are zeroed so as to keep the */
/* upon the arrival of extra context bytes. */
if (p_h1!=0)
{
- if (p_h2)
- {*p_h2=p_ziv-2; p_h2=NULL;}
- *p_h1=p_ziv-1; p_h1=NULL;
+ if (p_h2!=0)
+ {*p_h2=p_ziv-2; p_h2=0;}
+ *p_h1=p_ziv-1; p_h1=0;
}
/* In any case, we can update the hash table based on the current */
/******************************************************************************/
+LOCAL void compress_decompress
+ (p_wrk_mem,p_src_first,src_len,p_dst_first,p_dst_len)
/* Input : Hand over the required amount of working memory in p_wrk_mem. */
/* Input : Specify input block using p_src_first and src_len. */
/* Input : Point p_dst_first to the start of the output zone. */
/* Output : Length of output block written to *p_dst_len. */
/* Output : Output block in Mem[p_dst_first..p_dst_first+*p_dst_len-1]. */
/* Output : Writes only in Mem[p_dst_first..p_dst_first+*p_dst_len-1]. */
-LOCAL void compress_decompress( UBYTE *p_wrk_mem,
- UBYTE *p_src_first, LONG src_len,
- UBYTE *p_dst_first, ULONG *p_dst_len)
+UBYTE *p_wrk_mem;
+UBYTE *p_src_first;
+LONG src_len;
+UBYTE *p_dst_first;
+ULONG *p_dst_len;
{
/* Byte pointers p_src and p_dst scan through the input and output blocks. */
register UBYTE *p_src = p_src_first+FLAG_BYTES;
/* forward */
static int zftc_write(int *write_cnt,
__u8 *dst_buf, const int seg_sz,
- const __u8 __user *src_buf, const int req_len,
+ const __u8 *src_buf, const int req_len,
const zft_position *pos, const zft_volinfo *volume);
static int zftc_read(int *read_cnt,
- __u8 __user *dst_buf, const int to_do,
+ __u8 *dst_buf, const int to_do,
const __u8 *src_buf, const int seg_sz,
const zft_position *pos, const zft_volinfo *volume);
static int zftc_seek(unsigned int new_block_pos,
*/
static int zftc_write(int *write_cnt,
__u8 *dst_buf, const int seg_sz,
- const __u8 __user *src_buf, const int req_len,
+ const __u8 *src_buf, const int req_len,
const zft_position *pos, const zft_volinfo *volume)
{
int req_len_left = req_len;
* be set to 0
*/
static int zftc_read (int *read_cnt,
- __u8 __user *dst_buf, const int to_do,
+ __u8 *dst_buf, const int to_do,
const __u8 *src_buf, const int seg_sz,
const zft_position *pos, const zft_volinfo *volume)
{
TRACE(ft_t_info, "ftape_init @ 0x%p", ftape_init);
/* Allocate the DMA buffers. They are deallocated at cleanup() time.
*/
-#ifdef TESTING
+#if TESTING
#ifdef MODULE
while (ftape_set_nr_buffers(CONFIG_FT_NR_BUFFERS) < 0) {
ftape_sleep(FT_SECOND/20);
ptr += get_history_info(ptr);
len = strlen(page);
- *start = NULL;
+ *start = 0;
if (off+count >= len) {
*eof = 1;
} else {
/* IOCTL routine called by kernel-interface code
*/
-int _zft_ioctl(unsigned int command, void __user * arg)
+int _zft_ioctl(unsigned int command, void * arg)
{
int result;
union { struct mtop mtop;
*/
extern int _zft_open(unsigned int dev_minor, unsigned int access_mode);
extern int _zft_close(void);
-extern int _zft_ioctl(unsigned int command, void __user *arg);
+extern int _zft_ioctl(unsigned int command, void *arg);
#endif
static int zft_ioctl(struct inode *ino, struct file *filep,
unsigned int command, unsigned long arg);
static int zft_mmap(struct file *filep, struct vm_area_struct *vma);
-static ssize_t zft_read (struct file *fp, char __user *buff,
+static ssize_t zft_read (struct file *fp, char *buff,
size_t req_len, loff_t *ppos);
-static ssize_t zft_write(struct file *fp, const char __user *buff,
+static ssize_t zft_write(struct file *fp, const char *buff,
size_t req_len, loff_t *ppos);
static struct file_operations zft_cdev =
int result;
TRACE_FUN(ft_t_flow);
- nonseekable_open(ino, filep);
TRACE(ft_t_flow, "called for minor %d", iminor(ino));
if ( test_and_set_bit(0,&busy_flag) ) {
TRACE_ABORT(-EBUSY, ft_t_warn, "failed: already busy");
old_sigmask = current->blocked; /* save mask */
sigfillset(¤t->blocked);
/* This will work as long as sizeof(void *) == sizeof(long) */
- result = _zft_ioctl(command, (void __user *) arg);
+ result = _zft_ioctl(command, (void *) arg);
current->blocked = old_sigmask; /* restore mask */
TRACE_EXIT result;
}
/* Read from floppy tape device
*/
-static ssize_t zft_read(struct file *fp, char __user *buff,
+static ssize_t zft_read(struct file *fp, char *buff,
size_t req_len, loff_t *ppos)
{
int result = -EIO;
/* Write to tape device
*/
-static ssize_t zft_write(struct file *fp, const char __user *buff,
+static ssize_t zft_write(struct file *fp, const char *buff,
size_t req_len, loff_t *ppos)
{
int result = -EIO;
struct zft_cmpr_ops {
int (*write)(int *write_cnt,
__u8 *dst_buf, const int seg_sz,
- const __u8 __user *src_buf, const int req_len,
+ const __u8 *src_buf, const int req_len,
const zft_position *pos, const zft_volinfo *volume);
int (*read)(int *read_cnt,
- __u8 __user *dst_buf, const int req_len,
+ __u8 *dst_buf, const int req_len,
const __u8 *src_buf, const int seg_sz,
const zft_position *pos, const zft_volinfo *volume);
int (*seek)(unsigned int new_block_pos,
* amount of data actually * copied to the user-buffer
*/
static int zft_simple_read (int *read_cnt,
- __u8 __user *dst_buf,
+ __u8 *dst_buf,
const int to_do,
const __u8 *src_buf,
const int seg_sz,
* req_len: how much data should be read at most.
* volume: contains information on current volume (blk_sz etc.)
*/
-static int empty_deblock_buf(__u8 __user *usr_buf, const int req_len,
+static int empty_deblock_buf(__u8 *usr_buf, const int req_len,
const __u8 *src_buf, const int seg_sz,
zft_position *pos,
const zft_volinfo *volume)
* use small block-sizes. The block-size may be 1kb (SECTOR_SIZE). In
* this case a MTFSR 28 maybe still inside the same segment.
*/
-int _zft_read(char __user *buff, int req_len)
+int _zft_read(char* buff, int req_len)
{
int req_clipped;
int result = 0;
0, FT_SEGMENT_SIZE)
/* hook for the VFS interface
*/
-extern int _zft_read(char __user *buff, int req_len);
+extern int _zft_read(char* buff, int req_len);
#endif /* _ZFTAPE_READ_H */
*/
static int zft_simple_write(int *cnt,
__u8 *dst_buf, const int seg_sz,
- const __u8 __user *src_buf, const int req_len,
+ const __u8 *src_buf, const int req_len,
const zft_position *pos,const zft_volinfo *volume)
{
int space_left;
static int fill_deblock_buf(__u8 *dst_buf, const int seg_sz,
zft_position *pos, const zft_volinfo *volume,
- const char __user *usr_buf, const int req_len)
+ const char *usr_buf, const int req_len)
{
int cnt = 0;
int result = 0;
/* called by the kernel-interface routine "zft_write()"
*/
-int _zft_write(const char __user *buff, int req_len)
+int _zft_write(const char* buff, int req_len)
{
int result = 0;
int written = 0;
/* hook for the VFS interface
*/
-extern int _zft_write(const char __user *buff, int req_len);
+extern int _zft_write(const char *buff, int req_len);
#endif /* _ZFTAPE_WRITE_H */
#define func_enter() gs_dprintk (GS_DEBUG_FLOW, "gs: enter %s\n", __FUNCTION__)
#define func_exit() gs_dprintk (GS_DEBUG_FLOW, "gs: exit %s\n", __FUNCTION__)
-#ifdef NEW_WRITE_LOCKING
+#if NEW_WRITE_LOCKING
#define DECL /* Nothing */
#define LOCKIT down (& port->port_write_sem);
#define RELEASEIT up (&port->port_write_sem);
if (port->xmit_buf) {
free_page((unsigned long) port->xmit_buf);
- port->xmit_buf = NULL;
+ port->xmit_buf = 0;
}
if (port->tty)
port->event = 0;
port->rd->close (port);
port->rd->shutdown_port (port);
- port->tty = NULL;
+ port->tty = 0;
if (port->blocked_open) {
if (port->close_delay) {
}
-int gs_setserial(struct gs_port *port, struct serial_struct __user *sp)
+int gs_setserial(struct gs_port *port, struct serial_struct *sp)
{
struct serial_struct sio;
* Generate the serial struct info.
*/
-int gs_getserial(struct gs_port *port, struct serial_struct __user *sp)
+int gs_getserial(struct gs_port *port, struct serial_struct *sp)
{
struct serial_struct sio;
{
struct proc_dir_entry *r;
- r = create_proc_read_entry("driver/rtc", 0, NULL, gen_rtc_read_proc, NULL);
+ r = create_proc_read_entry("driver/rtc", 0, 0, gen_rtc_read_proc, NULL);
if (!r)
return -ENOMEM;
return 0;
static int hangcheck_tick = DEFAULT_IOFENCE_TICK;
static int hangcheck_margin = DEFAULT_IOFENCE_MARGIN;
-static int hangcheck_reboot = 1; /* Defaults to reboot */
+static int hangcheck_reboot; /* Defaults to not reboot */
/* Driver options */
module_param(hangcheck_tick, int, 0);
/*
* Intel & MS High Precision Event Timer Implementation.
- *
- * Copyright (C) 2003 Intel Corporation
+ * Contributors:
* Venki Pallipadi
- * (c) Copyright 2004 Hewlett-Packard Development Company, L.P.
- * Bob Picco <robert.picco@hp.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * Bob Picco
*/
#include <linux/config.h>
static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
{
-#ifdef CONFIG_HPET_MMAP
+#ifdef CONFIG_HPET_NOMMAP
+ return -ENOSYS;
+#else
struct hpet_dev *devp;
unsigned long addr;
}
return 0;
-#else
- return -ENOSYS;
#endif
}
hdp->hd_nirqs = irqp->number_of_interrupts;
for (i = 0; i < hdp->hd_nirqs; i++)
+#ifdef CONFIG_IA64
hdp->hd_irq[i] =
acpi_register_gsi(irqp->interrupts[i],
irqp->edge_level,
irqp->active_high_low);
+#else
+ hdp->hd_irq[i] = irqp->interrupts[i];
+#endif
}
}
+++ /dev/null
-/*
- * IBM eServer Hypervisor Virtual Console Server Device Driver
- * Copyright (C) 2003, 2004 IBM Corp.
- * Ryan S. Arnold (rsa@us.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Author(s) : Ryan S. Arnold <rsa@us.ibm.com>
- *
- * This is the device driver for the IBM Hypervisor Virtual Console Server,
- * "hvcs". The IBM hvcs provides a tty driver interface to allow Linux
- * user space applications access to the system consoles of logically
- * partitioned operating systems, e.g. Linux, running on the same partitioned
- * Power5 ppc64 system. Physical hardware consoles per partition are not
- * practical on this hardware so system consoles are accessed by this driver
- * using inter-partition firmware interfaces to virtual terminal devices.
- *
- * A vty is known to the HMC as a "virtual serial server adapter". It is a
- * virtual terminal device that is created by firmware upon partition creation
- * to act as a partitioned OS's console device.
- *
- * Firmware dynamically (via hotplug) exposes vty-servers to a running ppc64
- * Linux system upon their creation by the HMC or their exposure during boot.
- * The non-user interactive backend of this driver is implemented as a vio
- * device driver so that it can receive notification of vty-server lifetimes
- * after it registers with the vio bus to handle vty-server probe and remove
- * callbacks.
- *
- * Many vty-servers can be configured to connect to one vty, but a vty can
- * only be actively connected to by a single vty-server, in any manner, at one
- * time. If the HMC is currently hosting the console for a target Linux
- * partition; attempts to open the tty device to the partition's console using
- * the hvcs on any partition will return -EBUSY with every open attempt until
- * the HMC frees the connection between its vty-server and the desired
- * partition's vty device. Conversely, a vty-server may only be connected to
- * a single vty at one time even though it may have several configured vty
- * partner possibilities.
- *
- * Firmware does not provide notification of vty partner changes to this
- * driver. This means that an HMC Super Admin may add or remove partner vtys
- * from a vty-server's partner list but the changes will not be signaled to
- * the vty-server. Firmware only notifies the driver when a vty-server is
- * added or removed from the system. To compensate for this deficiency, this
- * driver implements a sysfs update attribute which provides a method for
- * rescanning partner information upon a user's request.
- *
- * Each vty-server, prior to being exposed to this driver is reference counted
- * using the 2.6 Linux kernel kobject construct. This kobject is also used by
- * the vio bus to provide a vio device sysfs entry that this driver attaches
- * device specific attributes to, including partner information. The vio bus
- * framework also provides a sysfs entry for each vio driver. The hvcs driver
- * provides driver attributes in this entry.
- *
- * For direction on installation and usage of this driver please reference
- * Documentation/powerpc/hvcs.txt.
- */
-
-#include <linux/device.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/kobject.h>
-#include <linux/kthread.h>
-#include <linux/list.h>
-#include <linux/major.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/sched.h>
-#include <linux/spinlock.h>
-#include <linux/stat.h>
-#include <linux/tty.h>
-#include <linux/tty_flip.h>
-#include <asm/hvconsole.h>
-#include <asm/hvcserver.h>
-#include <asm/uaccess.h>
-#include <asm/vio.h>
-
-/*
- * 1.0.0 -> 1.1.0 Added kernel_thread scheduling methodology to driver to
- * replace wait_task constructs.
- *
- * 1.1.0 -> 1.2.0 Moved pi_buff initialization out of arch code into driver code
- * and added locking to share this buffer between hvcs_struct instances. This
- * is because the page_size kmalloc can't be done with a spin_lock held.
- *
- * Also added sysfs attribute to manually disconnect the vty-server from the vty
- * due to stupid firmware behavior when opening the connection then sending data
- * then then quickly closing the connection would cause data loss on the
- * receiving side. This required some reordering of the termination code.
- *
- * Fixed the hangup scenario and fixed memory leaks on module_exit.
- *
- * 1.2.0 -> 1.3.0 Moved from manual kernel thread creation & execution to
- * kthread construct which replaced in-kernel IPC for thread termination with
- * kthread_stop and kthread_should_stop. Explicit wait_queue handling was
- * removed because kthread handles this. Minor bug fix to postpone partner_info
- * clearing on hvcs_close until adapter removal to preserve context data for
- * printk on partner connection free. Added lock to protect hvcs_structs so
- * that hvcs_struct instances aren't added or removed during list traversal.
- * Cleaned up comment style, added spaces after commas, and broke function
- * declaration lines to be under 80 columns.
- */
-#define HVCS_DRIVER_VERSION "1.3.0"
-
-MODULE_AUTHOR("Ryan S. Arnold <rsa@us.ibm.com>");
-MODULE_DESCRIPTION("IBM hvcs (Hypervisor Virtual Console Server) Driver");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(HVCS_DRIVER_VERSION);
-
-/*
- * Since the Linux TTY code does not currently (2-04-2004) support dynamic
- * addition of tty derived devices and we shouldn't allocate thousands of
- * tty_device pointers when the number of vty-server & vty partner connections
- * will most often be much lower than this, we'll arbitrarily allocate
- * HVCS_DEFAULT_SERVER_ADAPTERS tty_structs and cdev's by default when we
- * register the tty_driver. This can be overridden using an insmod parameter.
- */
-#define HVCS_DEFAULT_SERVER_ADAPTERS 64
-
-/*
- * The user can't insmod with more than HVCS_MAX_SERVER_ADAPTERS hvcs device
- * nodes as a sanity check. Theoretically there can be over 1 Billion
- * vty-server & vty partner connections.
- */
-#define HVCS_MAX_SERVER_ADAPTERS 1024
-
-/*
- * We let Linux assign us a major number and we start the minors at zero. There
- * is no intuitive mapping between minor number and the target partition. The
- * mapping of minor number is related to the order the vty-servers are exposed
- * to this driver via the hvcs_probe function.
- */
-#define HVCS_MINOR_START 0
-
-/*
- * The hcall interface involves putting 8 chars into each of two registers.
- * We load up those 2 registers (in arch/ppc64/hvconsole.c) by casting char[16]
- * to long[2]. It would work without __ALIGNED__, but a little (tiny) bit
- * slower because an unaligned load is slower than aligned load.
- */
-#define __ALIGNED__ __attribute__((__aligned__(8)))
-
-/* Converged location code string length + 1 null terminator */
-#define CLC_LENGTH 80
-
-/*
- * How much data can firmware send with each hvc_put_chars()? Maybe this
- * should be moved into an architecture specific area.
- */
-#define HVCS_BUFF_LEN 16
-
-/*
- * This is the maximum amount of data we'll let the user send us (hvcs_write) at
- * once in a chunk as a sanity check.
- */
-#define HVCS_MAX_FROM_USER 4096
-
-/*
- * Be careful when adding flags to this line discipline. Don't add anything
- * that will cause echoing or we'll go into recursive loop echoing chars back
- * and forth with the console drivers.
- */
-static struct termios hvcs_tty_termios = {
- .c_iflag = IGNBRK | IGNPAR,
- .c_oflag = OPOST,
- .c_cflag = B38400 | CS8 | CREAD | HUPCL,
- .c_cc = INIT_C_CC
-};
-
-/*
- * This value is used to take the place of a command line parameter when the
- * module is inserted. It starts as -1 and stays as such if the user doesn't
- * specify a module insmod parameter. If they DO specify one then it is set to
- * the value of the integer passed in.
- */
-static int hvcs_parm_num_devs = -1;
-module_param(hvcs_parm_num_devs, int, 0);
-
-char hvcs_driver_name[] = "hvcs";
-char hvcs_device_node[] = "hvcs";
-char hvcs_driver_string[]
- = "IBM hvcs (Hypervisor Virtual Console Server) Driver";
-
-/* Status of partner info rescan triggered via sysfs. */
-static int hvcs_rescan_status = 0;
-
-static struct tty_driver *hvcs_tty_driver;
-
-/*
- * This is used to associate a vty-server, as it is exposed to this driver, with
- * a preallocated tty_struct.index. The dev node and hvcs index numbers are not
- * re-used after device removal otherwise removing and adding a new one would
- * link a /dev/hvcs* entry to a different vty-server than it did before the
- * removal. Incidentally, a newly exposed vty-server will always map to an
- * incrementally higher /dev/hvcs* entry than the last exposed vty-server.
- */
-static int hvcs_struct_count = -1;
-
-/*
- * Used by the khvcsd to pick up I/O operations when the kernel_thread is
- * already awake but potentially shifted to TASK_INTERRUPTIBLE state.
- */
-static int hvcs_kicked = 0;
-
-/* Used the the kthread construct for task operations */
-static struct task_struct *hvcs_task;
-
-/*
- * We allocate this for the use of all of the hvcs_structs when they fetch
- * partner info.
- */
-static unsigned long *hvcs_pi_buff;
-
-static spinlock_t hvcs_pi_lock;
-
-/* One vty-server per hvcs_struct */
-struct hvcs_struct {
- spinlock_t lock;
-
- /*
- * This index identifies this hvcs device as the complement to a
- * specific tty index.
- */
- unsigned int index;
-
- struct tty_struct *tty;
- unsigned int open_count;
-
- /*
- * Used to tell the driver kernel_thread what operations need to take
- * place upon this hvcs_struct instance.
- */
- int todo_mask;
-
- /*
- * This buffer is required so that when hvcs_write_room() reports that
- * it can send HVCS_BUFF_LEN characters that it will buffer the full
- * HVCS_BUFF_LEN characters if need be. This is essential for opost
- * writes since they do not do high level buffering and expect to be
- * able to send what the driver commits to sending buffering
- * [e.g. tab to space conversions in n_tty.c opost()].
- */
- char buffer[HVCS_BUFF_LEN];
- int chars_in_buffer;
-
- /*
- * Any variable below the kobject is valid before a tty is connected and
- * stays valid after the tty is disconnected. These shouldn't be
- * whacked until the koject refcount reaches zero though some entries
- * may be changed via sysfs initiatives.
- */
- struct kobject kobj; /* ref count & hvcs_struct lifetime */
- int connected; /* is the vty-server currently connected to a vty? */
- unsigned int p_unit_address; /* partner unit address */
- unsigned int p_partition_ID; /* partner partition ID */
- char p_location_code[CLC_LENGTH];
- struct list_head next; /* list management */
- struct vio_dev *vdev;
-};
-
-/* Required to back map a kobject to its containing object */
-#define from_kobj(kobj) container_of(kobj, struct hvcs_struct, kobj)
-
-static struct list_head hvcs_structs = LIST_HEAD_INIT(hvcs_structs);
-static spinlock_t hvcs_structs_lock;
-
-static void hvcs_unthrottle(struct tty_struct *tty);
-static void hvcs_throttle(struct tty_struct *tty);
-static irqreturn_t hvcs_handle_interrupt(int irq, void *dev_instance,
- struct pt_regs *regs);
-
-static int hvcs_write(struct tty_struct *tty, int from_user,
- const unsigned char *buf, int count);
-static int hvcs_write_room(struct tty_struct *tty);
-static int hvcs_chars_in_buffer(struct tty_struct *tty);
-
-static int hvcs_has_pi(struct hvcs_struct *hvcsd);
-static void hvcs_set_pi(struct hvcs_partner_info *pi,
- struct hvcs_struct *hvcsd);
-static int hvcs_get_pi(struct hvcs_struct *hvcsd);
-static int hvcs_rescan_devices_list(void);
-
-static int hvcs_partner_connect(struct hvcs_struct *hvcsd);
-static void hvcs_partner_free(struct hvcs_struct *hvcsd);
-
-static int hvcs_enable_device(struct hvcs_struct *hvcsd,
- uint32_t unit_address, unsigned int irq, struct vio_dev *dev);
-static void hvcs_final_close(struct hvcs_struct *hvcsd);
-
-static void destroy_hvcs_struct(struct kobject *kobj);
-static int hvcs_open(struct tty_struct *tty, struct file *filp);
-static void hvcs_close(struct tty_struct *tty, struct file *filp);
-static void hvcs_hangup(struct tty_struct * tty);
-
-static void hvcs_create_device_attrs(struct hvcs_struct *hvcsd);
-static void hvcs_remove_device_attrs(struct vio_dev *vdev);
-static void hvcs_create_driver_attrs(void);
-static void hvcs_remove_driver_attrs(void);
-
-static int __devinit hvcs_probe(struct vio_dev *dev,
- const struct vio_device_id *id);
-static int __devexit hvcs_remove(struct vio_dev *dev);
-static int __init hvcs_module_init(void);
-static void __exit hvcs_module_exit(void);
-
-#define HVCS_SCHED_READ 0x00000001
-#define HVCS_QUICK_READ 0x00000002
-#define HVCS_TRY_WRITE 0x00000004
-#define HVCS_READ_MASK (HVCS_SCHED_READ | HVCS_QUICK_READ)
-
-static void hvcs_kick(void)
-{
- hvcs_kicked = 1;
- wmb();
- wake_up_process(hvcs_task);
-}
-
-static void hvcs_unthrottle(struct tty_struct *tty)
-{
- struct hvcs_struct *hvcsd = tty->driver_data;
- unsigned long flags;
-
- spin_lock_irqsave(&hvcsd->lock, flags);
- hvcsd->todo_mask |= HVCS_SCHED_READ;
- spin_unlock_irqrestore(&hvcsd->lock, flags);
- hvcs_kick();
-}
-
-static void hvcs_throttle(struct tty_struct *tty)
-{
- struct hvcs_struct *hvcsd = tty->driver_data;
- unsigned long flags;
-
- spin_lock_irqsave(&hvcsd->lock, flags);
- vio_disable_interrupts(hvcsd->vdev);
- spin_unlock_irqrestore(&hvcsd->lock, flags);
-}
-
-/*
- * If the device is being removed we don't have to worry about this interrupt
- * handler taking any further interrupts because they are disabled which means
- * the hvcs_struct will always be valid in this handler.
- */
-static irqreturn_t hvcs_handle_interrupt(int irq, void *dev_instance,
- struct pt_regs *regs)
-{
- struct hvcs_struct *hvcsd = dev_instance;
- unsigned long flags;
-
- spin_lock_irqsave(&hvcsd->lock, flags);
- vio_disable_interrupts(hvcsd->vdev);
- hvcsd->todo_mask |= HVCS_SCHED_READ;
- spin_unlock_irqrestore(&hvcsd->lock, flags);
- hvcs_kick();
-
- return IRQ_HANDLED;
-}
-
-/* This function must be called with the hvcsd->lock held */
-static void hvcs_try_write(struct hvcs_struct *hvcsd)
-{
- unsigned int unit_address = hvcsd->vdev->unit_address;
- struct tty_struct *tty = hvcsd->tty;
- int sent;
-
- if (hvcsd->todo_mask & HVCS_TRY_WRITE) {
- /* won't send partial writes */
- sent = hvc_put_chars(unit_address,
- &hvcsd->buffer[0],
- hvcsd->chars_in_buffer );
- if (sent > 0) {
- hvcsd->chars_in_buffer = 0;
- wmb();
- hvcsd->todo_mask &= ~(HVCS_TRY_WRITE);
- wmb();
-
- /*
- * We are still obligated to deliver the data to the
- * hypervisor even if the tty has been closed because
- * we commited to delivering it. But don't try to wake
- * a non-existent tty.
- */
- if (tty) {
- if ((tty->flags & (1 << TTY_DO_WRITE_WAKEUP))
- && tty->ldisc.write_wakeup)
- (tty->ldisc.write_wakeup) (tty);
- wake_up_interruptible(&tty->write_wait);
- }
- }
- }
-}
-
-static int hvcs_io(struct hvcs_struct *hvcsd)
-{
- unsigned int unit_address;
- struct tty_struct *tty;
- char buf[HVCS_BUFF_LEN] __ALIGNED__;
- unsigned long flags;
- int got;
- int i;
-
- spin_lock_irqsave(&hvcsd->lock, flags);
-
- unit_address = hvcsd->vdev->unit_address;
- tty = hvcsd->tty;
-
- hvcs_try_write(hvcsd);
-
- if (!tty || test_bit(TTY_THROTTLED, &tty->flags)) {
- hvcsd->todo_mask &= ~(HVCS_READ_MASK);
- goto bail;
- } else if (!(hvcsd->todo_mask & (HVCS_READ_MASK)))
- goto bail;
-
- /* remove the read masks */
- hvcsd->todo_mask &= ~(HVCS_READ_MASK);
-
- if ((tty->flip.count + HVCS_BUFF_LEN) < TTY_FLIPBUF_SIZE) {
- got = hvc_get_chars(unit_address,
- &buf[0],
- HVCS_BUFF_LEN);
- for (i=0;got && i<got;i++)
- tty_insert_flip_char(tty, buf[i], TTY_NORMAL);
- }
-
- /* Give the TTY time to process the data we just sent. */
- if (got)
- hvcsd->todo_mask |= HVCS_QUICK_READ;
-
- spin_unlock_irqrestore(&hvcsd->lock, flags);
- if (tty->flip.count) {
- /* This is synch because tty->low_latency == 1 */
- tty_flip_buffer_push(tty);
- }
-
- if (!got) {
- /* Do this _after_ the flip_buffer_push */
- spin_lock_irqsave(&hvcsd->lock, flags);
- vio_enable_interrupts(hvcsd->vdev);
- spin_unlock_irqrestore(&hvcsd->lock, flags);
- }
-
- return hvcsd->todo_mask;
-
- bail:
- spin_unlock_irqrestore(&hvcsd->lock, flags);
- return hvcsd->todo_mask;
-}
-
-static int khvcsd(void *unused)
-{
- struct hvcs_struct *hvcsd = NULL;
- struct list_head *element;
- struct list_head *safe_temp;
- int hvcs_todo_mask;
- unsigned long structs_flags;
-
- __set_current_state(TASK_RUNNING);
-
- do {
- hvcs_todo_mask = 0;
- hvcs_kicked = 0;
- wmb();
-
- spin_lock_irqsave(&hvcs_structs_lock, structs_flags);
- list_for_each_safe(element, safe_temp, &hvcs_structs) {
- hvcsd = list_entry(element, struct hvcs_struct, next);
- hvcs_todo_mask |= hvcs_io(hvcsd);
- }
- spin_unlock_irqrestore(&hvcs_structs_lock, structs_flags);
-
- /*
- * If any of the hvcs adapters want to try a write or quick read
- * don't schedule(), yield a smidgen then execute the hvcs_io
- * thread again for those that want the write.
- */
- if (hvcs_todo_mask & (HVCS_TRY_WRITE | HVCS_QUICK_READ)) {
- yield();
- continue;
- }
-
- set_current_state(TASK_INTERRUPTIBLE);
- if (!hvcs_kicked)
- schedule();
- __set_current_state(TASK_RUNNING);
- } while (!kthread_should_stop());
-
- return 0;
-}
-
-static struct vio_device_id hvcs_driver_table[] __devinitdata= {
- {"serial-server", "hvterm2"},
- { 0, }
-};
-MODULE_DEVICE_TABLE(vio, hvcs_driver_table);
-
-/* callback when the kboject ref count reaches zero */
-static void destroy_hvcs_struct(struct kobject *kobj)
-{
- struct hvcs_struct *hvcsd = from_kobj(kobj);
- struct vio_dev *vdev;
- unsigned long flags;
-
- spin_lock_irqsave(&hvcsd->lock, flags);
-
- /* the list_del poisons the pointers */
- list_del(&(hvcsd->next));
-
- if (hvcsd->connected == 1) {
- hvcs_partner_free(hvcsd);
- printk(KERN_INFO "HVCS: Closed vty-server@%X and"
- " partner vty@%X:%d connection.\n",
- hvcsd->vdev->unit_address,
- hvcsd->p_unit_address,
- (unsigned int)hvcsd->p_partition_ID);
- }
- printk(KERN_INFO "HVCS: Destroyed hvcs_struct for vty-server@%X.\n",
- hvcsd->vdev->unit_address);
-
- vdev = hvcsd->vdev;
- hvcsd->vdev = NULL;
-
- hvcsd->p_unit_address = 0;
- hvcsd->p_partition_ID = 0;
- memset(&hvcsd->p_location_code[0], 0x00, CLC_LENGTH);
-
- spin_unlock_irqrestore(&hvcsd->lock, flags);
-
- hvcs_remove_device_attrs(vdev);
-
- kfree(hvcsd);
-}
-
-/* This function must be called with hvcsd->lock held. */
-static void hvcs_final_close(struct hvcs_struct *hvcsd)
-{
- vio_disable_interrupts(hvcsd->vdev);
- free_irq(hvcsd->vdev->irq, hvcsd);
-
- hvcsd->todo_mask = 0;
-
- /* These two may be redundant if the operation was a close. */
- if (hvcsd->tty) {
- hvcsd->tty->driver_data = NULL;
- hvcsd->tty = NULL;
- }
-
- hvcsd->open_count = 0;
-
- memset(&hvcsd->buffer[0], 0x00, HVCS_BUFF_LEN);
- hvcsd->chars_in_buffer = 0;
-}
-
-static struct kobj_type hvcs_kobj_type = {
- .release = destroy_hvcs_struct,
-};
-
-static int __devinit hvcs_probe(
- struct vio_dev *dev,
- const struct vio_device_id *id)
-{
- struct hvcs_struct *hvcsd;
- unsigned long structs_flags;
-
- if (!dev || !id) {
- printk(KERN_ERR "HVCS: probed with invalid parameter.\n");
- return -EPERM;
- }
-
- hvcsd = kmalloc(sizeof(*hvcsd), GFP_KERNEL);
- if (!hvcsd) {
- return -ENODEV;
- }
-
- /* hvcsd->tty is zeroed out with the memset */
- memset(hvcsd, 0x00, sizeof(*hvcsd));
-
- hvcsd->lock = SPIN_LOCK_UNLOCKED;
- /* Automatically incs the refcount the first time */
- kobject_init(&hvcsd->kobj);
- /* Set up the callback for terminating the hvcs_struct's life */
- hvcsd->kobj.ktype = &hvcs_kobj_type;
-
- hvcsd->vdev = dev;
- dev->dev.driver_data = hvcsd;
-
- hvcsd->index = ++hvcs_struct_count;
- hvcsd->chars_in_buffer = 0;
- hvcsd->todo_mask = 0;
- hvcsd->connected = 0;
-
- /*
- * This will populate the hvcs_struct's partner info fields for the
- * first time.
- */
- if (hvcs_get_pi(hvcsd)) {
- printk(KERN_ERR "HVCS: Failed to fetch partner"
- " info for vty-server@%X on device probe.\n",
- hvcsd->vdev->unit_address);
- }
-
- /*
- * If a user app opens a tty that corresponds to this vty-server before
- * the hvcs_struct has been added to the devices list then the user app
- * will get -ENODEV.
- */
-
- spin_lock_irqsave(&hvcs_structs_lock, structs_flags);
-
- list_add_tail(&(hvcsd->next), &hvcs_structs);
-
- spin_unlock_irqrestore(&hvcs_structs_lock, structs_flags);
-
- hvcs_create_device_attrs(hvcsd);
-
- printk(KERN_INFO "HVCS: Added vty-server@%X.\n", dev->unit_address);
-
- /*
- * DON'T enable interrupts here because there is no user to receive the
- * data.
- */
- return 0;
-}
-
-static int __devexit hvcs_remove(struct vio_dev *dev)
-{
- struct hvcs_struct *hvcsd = dev->dev.driver_data;
- unsigned long flags;
- struct kobject *kobjp;
- struct tty_struct *tty;
-
- if (!hvcsd)
- return -ENODEV;
-
- /* By this time the vty-server won't be getting any more interrups */
-
- spin_lock_irqsave(&hvcsd->lock, flags);
-
- tty = hvcsd->tty;
-
- kobjp = &hvcsd->kobj;
-
- spin_unlock_irqrestore(&hvcsd->lock, flags);
-
- /*
- * Let the last holder of this object cause it to be removed, which
- * would probably be tty_hangup below.
- */
- kobject_put (kobjp);
-
- /*
- * The hangup is a scheduled function which will auto chain call
- * hvcs_hangup. The tty should always be valid at this time unless a
- * simultaneous tty close already cleaned up the hvcs_struct.
- */
- if (tty)
- tty_hangup(tty);
-
- printk(KERN_INFO "HVCS: vty-server@%X removed from the"
- " vio bus.\n", dev->unit_address);
- return 0;
-};
-
-static struct vio_driver hvcs_vio_driver = {
- .name = hvcs_driver_name,
- .id_table = hvcs_driver_table,
- .probe = hvcs_probe,
- .remove = hvcs_remove,
-};
-
-/* Only called from hvcs_get_pi please */
-static void hvcs_set_pi(struct hvcs_partner_info *pi, struct hvcs_struct *hvcsd)
-{
- int clclength;
-
- hvcsd->p_unit_address = pi->unit_address;
- hvcsd->p_partition_ID = pi->partition_ID;
- clclength = strlen(&pi->location_code[0]);
- if (clclength > CLC_LENGTH - 1)
- clclength = CLC_LENGTH - 1;
-
- /* copy the null-term char too */
- strncpy(&hvcsd->p_location_code[0],
- &pi->location_code[0], clclength + 1);
-}
-
-/*
- * Traverse the list and add the partner info that is found to the hvcs_struct
- * struct entry. NOTE: At this time I know that partner info will return a
- * single entry but in the future there may be multiple partner info entries per
- * vty-server and you'll want to zero out that list and reset it. If for some
- * reason you have an old version of this driver but there IS more than one
- * partner info then hvcsd->p_* will hold the last partner info data from the
- * firmware query. A good way to update this code would be to replace the three
- * partner info fields in hvcs_struct with a list of hvcs_partner_info
- * instances.
- *
- * This function must be called with the hvcsd->lock held.
- */
-static int hvcs_get_pi(struct hvcs_struct *hvcsd)
-{
- /* struct hvcs_partner_info *head_pi = NULL; */
- struct hvcs_partner_info *pi = NULL;
- unsigned int unit_address = hvcsd->vdev->unit_address;
- struct list_head head;
- unsigned long flags;
- int retval;
-
- spin_lock_irqsave(&hvcs_pi_lock, flags);
- if (!hvcs_pi_buff) {
- spin_unlock_irqrestore(&hvcs_pi_lock, flags);
- return -EFAULT;
- }
- retval = hvcs_get_partner_info(unit_address, &head, hvcs_pi_buff);
- spin_unlock_irqrestore(&hvcs_pi_lock, flags);
- if (retval) {
- printk(KERN_ERR "HVCS: Failed to fetch partner"
- " info for vty-server@%x.\n", unit_address);
- return retval;
- }
-
- /* nixes the values if the partner vty went away */
- hvcsd->p_unit_address = 0;
- hvcsd->p_partition_ID = 0;
-
- list_for_each_entry(pi, &head, node)
- hvcs_set_pi(pi, hvcsd);
-
- hvcs_free_partner_info(&head);
- return 0;
-}
-
-/*
- * This function is executed by the driver "rescan" sysfs entry. It shouldn't
- * be executed elsewhere, in order to prevent deadlock issues.
- */
-static int hvcs_rescan_devices_list(void)
-{
- struct hvcs_struct *hvcsd = NULL;
- unsigned long flags;
- unsigned long structs_flags;
-
- spin_lock_irqsave(&hvcs_structs_lock, structs_flags);
-
- list_for_each_entry(hvcsd, &hvcs_structs, next) {
- spin_lock_irqsave(&hvcsd->lock, flags);
- hvcs_get_pi(hvcsd);
- spin_unlock_irqrestore(&hvcsd->lock, flags);
- }
-
- spin_unlock_irqrestore(&hvcs_structs_lock, structs_flags);
-
- return 0;
-}
-
-/*
- * Farm this off into its own function because it could be more complex once
- * multiple partners support is added. This function should be called with
- * the hvcsd->lock held.
- */
-static int hvcs_has_pi(struct hvcs_struct *hvcsd)
-{
- if ((!hvcsd->p_unit_address) || (!hvcsd->p_partition_ID))
- return 0;
- return 1;
-}
-
-/*
- * NOTE: It is possible that the super admin removed a partner vty and then
- * added a different vty as the new partner.
- *
- * This function must be called with the hvcsd->lock held.
- */
-static int hvcs_partner_connect(struct hvcs_struct *hvcsd)
-{
- int retval;
- unsigned int unit_address = hvcsd->vdev->unit_address;
-
- /*
- * If there wasn't any pi when the device was added it doesn't meant
- * there isn't any now. This driver isn't notified when a new partner
- * vty is added to a vty-server so we discover changes on our own.
- * Please see comments in hvcs_register_connection() for justification
- * of this bizarre code.
- */
- retval = hvcs_register_connection(unit_address,
- hvcsd->p_partition_ID,
- hvcsd->p_unit_address);
- if (!retval) {
- hvcsd->connected = 1;
- return 0;
- } else if (retval != -EINVAL)
- return retval;
-
- /*
- * As per the spec re-get the pi and try again if -EINVAL after the
- * first connection attempt.
- */
- if (hvcs_get_pi(hvcsd))
- return -ENOMEM;
-
- if (!hvcs_has_pi(hvcsd))
- return -ENODEV;
-
- retval = hvcs_register_connection(unit_address,
- hvcsd->p_partition_ID,
- hvcsd->p_unit_address);
- if (retval != -EINVAL) {
- hvcsd->connected = 1;
- return retval;
- }
-
- /*
- * EBUSY is the most likely scenario though the vty could have been
- * removed or there really could be an hcall error due to the parameter
- * data but thanks to ambiguous firmware return codes we can't really
- * tell.
- */
- printk(KERN_INFO "HVCS: vty-server or partner"
- " vty is busy. Try again later.\n");
- return -EBUSY;
-}
-
-/* This function must be called with the hvcsd->lock held */
-static void hvcs_partner_free(struct hvcs_struct *hvcsd)
-{
- int retval;
- do {
- retval = hvcs_free_connection(hvcsd->vdev->unit_address);
- } while (retval == -EBUSY);
- hvcsd->connected = 0;
-}
-
-/* This helper function must be called WITHOUT the hvcsd->lock held */
-static int hvcs_enable_device(struct hvcs_struct *hvcsd, uint32_t unit_address,
- unsigned int irq, struct vio_dev *vdev)
-{
- unsigned long flags;
-
- /*
- * It is possible that the vty-server was removed between the time that
- * the conn was registered and now.
- */
- if (!request_irq(irq, &hvcs_handle_interrupt,
- SA_INTERRUPT, "ibmhvcs", hvcsd)) {
- /*
- * It is possible the vty-server was removed after the irq was
- * requested but before we have time to enable interrupts.
- */
- if (vio_enable_interrupts(vdev) == H_Success)
- return 0;
- else {
- printk(KERN_ERR "HVCS: int enable failed for"
- " vty-server@%X.\n", unit_address);
- free_irq(irq, hvcsd);
- }
- } else
- printk(KERN_ERR "HVCS: irq req failed for"
- " vty-server@%X.\n", unit_address);
-
- spin_lock_irqsave(&hvcsd->lock, flags);
- hvcs_partner_free(hvcsd);
- spin_unlock_irqrestore(&hvcsd->lock, flags);
-
- return -ENODEV;
-
-}
-
-/*
- * This always increments the kobject ref count if the call is successful.
- * Please remember to dec when you are done with the instance.
- *
- * NOTICE: Do NOT hold either the hvcs_struct.lock or hvcs_structs_lock when
- * calling this function or you will get deadlock.
- */
-struct hvcs_struct *hvcs_get_by_index(int index)
-{
- struct hvcs_struct *hvcsd = NULL;
- struct list_head *element;
- struct list_head *safe_temp;
- unsigned long flags;
- unsigned long structs_flags;
-
- spin_lock_irqsave(&hvcs_structs_lock, structs_flags);
- /* We can immediately discard OOB requests */
- if (index >= 0 && index < HVCS_MAX_SERVER_ADAPTERS) {
- list_for_each_safe(element, safe_temp, &hvcs_structs) {
- hvcsd = list_entry(element, struct hvcs_struct, next);
- spin_lock_irqsave(&hvcsd->lock, flags);
- if (hvcsd->index == index) {
- kobject_get(&hvcsd->kobj);
- spin_unlock_irqrestore(&hvcsd->lock, flags);
- spin_unlock_irqrestore(&hvcs_structs_lock,
- structs_flags);
- return hvcsd;
- }
- spin_unlock_irqrestore(&hvcsd->lock, flags);
- }
- hvcsd = NULL;
- }
-
- spin_unlock_irqrestore(&hvcs_structs_lock, structs_flags);
- return hvcsd;
-}
-
-/*
- * This is invoked via the tty_open interface when a user app connects to the
- * /dev node.
- */
-static int hvcs_open(struct tty_struct *tty, struct file *filp)
-{
- struct hvcs_struct *hvcsd = NULL;
- int retval = 0;
- unsigned long flags;
- unsigned int irq;
- struct vio_dev *vdev;
- unsigned long unit_address;
-
- if (tty->driver_data)
- goto fast_open;
-
- /*
- * Is there a vty-server that shares the same index?
- * This function increments the kobject index.
- */
- if (!(hvcsd = hvcs_get_by_index(tty->index))) {
- printk(KERN_WARNING "HVCS: open failed, no index.\n");
- return -ENODEV;
- }
-
- spin_lock_irqsave(&hvcsd->lock, flags);
-
- if (hvcsd->connected == 0)
- if ((retval = hvcs_partner_connect(hvcsd)))
- goto error_release;
-
- hvcsd->open_count = 1;
- hvcsd->tty = tty;
- tty->driver_data = hvcsd;
-
- /*
- * Set this driver to low latency so that we actually have a chance at
- * catching a throttled TTY after we flip_buffer_push. Otherwise the
- * flush_to_async may not execute until after the kernel_thread has
- * yielded and resumed the next flip_buffer_push resulting in data
- * loss.
- */
- tty->low_latency = 1;
-
- memset(&hvcsd->buffer[0], 0x3F, HVCS_BUFF_LEN);
-
- /*
- * Save these in the spinlock for the enable operations that need them
- * outside of the spinlock.
- */
- irq = hvcsd->vdev->irq;
- vdev = hvcsd->vdev;
- unit_address = hvcsd->vdev->unit_address;
-
- hvcsd->todo_mask |= HVCS_SCHED_READ;
- spin_unlock_irqrestore(&hvcsd->lock, flags);
-
- /*
- * This must be done outside of the spinlock because it requests irqs
- * and will grab the spinlcok and free the connection if it fails.
- */
- if ((hvcs_enable_device(hvcsd, unit_address, irq, vdev))) {
- kobject_put(&hvcsd->kobj);
- printk(KERN_WARNING "HVCS: enable device failed.\n");
- return -ENODEV;
- }
-
- goto open_success;
-
-fast_open:
- hvcsd = tty->driver_data;
-
- spin_lock_irqsave(&hvcsd->lock, flags);
- if (!kobject_get(&hvcsd->kobj)) {
- spin_unlock_irqrestore(&hvcsd->lock, flags);
- printk(KERN_ERR "HVCS: Kobject of open"
- " hvcs doesn't exist.\n");
- return -EFAULT; /* Is this the right return value? */
- }
-
- hvcsd->open_count++;
-
- hvcsd->todo_mask |= HVCS_SCHED_READ;
- spin_unlock_irqrestore(&hvcsd->lock, flags);
-open_success:
- hvcs_kick();
-
- printk(KERN_INFO "HVCS: vty-server@%X opened.\n",
- hvcsd->vdev->unit_address );
-
- return 0;
-
-error_release:
- spin_unlock_irqrestore(&hvcsd->lock, flags);
- kobject_put(&hvcsd->kobj);
-
- printk(KERN_WARNING "HVCS: HVCS partner connect failed.\n");
- return retval;
-}
-
-static void hvcs_close(struct tty_struct *tty, struct file *filp)
-{
- struct hvcs_struct *hvcsd;
- unsigned long flags;
- struct kobject *kobjp;
-
- /*
- * Is someone trying to close the file associated with this device after
- * we have hung up? If so tty->driver_data wouldn't be valid.
- */
- if (tty_hung_up_p(filp))
- return;
-
- /*
- * No driver_data means that this close was probably issued after a
- * failed hvcs_open by the tty layer's release_dev() api and we can just
- * exit cleanly.
- */
- if (!tty->driver_data)
- return;
-
- hvcsd = tty->driver_data;
-
- spin_lock_irqsave(&hvcsd->lock, flags);
- if (--hvcsd->open_count == 0) {
-
- /*
- * This line is important because it tells hvcs_open that this
- * device needs to be re-configured the next time hvcs_open is
- * called.
- */
- hvcsd->tty->driver_data = NULL;
-
- /*
- * NULL this early so that the kernel_thread doesn't try to
- * execute any operations on the TTY even though it is obligated
- * to deliver any pending I/O to the hypervisor.
- */
- hvcsd->tty = NULL;
-
- /*
- * Block the close until all the buffered data has been
- * delivered.
- */
- while(hvcsd->chars_in_buffer) {
- spin_unlock_irqrestore(&hvcsd->lock, flags);
-
- /*
- * Give the kernel thread the hvcs_struct so that it can
- * try to deliver the remaining data but block the close
- * operation by spinning in this function so that other
- * tty operations have to wait.
- */
- yield();
- spin_lock_irqsave(&hvcsd->lock, flags);
- }
-
- hvcs_final_close(hvcsd);
-
- } else if (hvcsd->open_count < 0) {
- printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
- " is missmanaged.\n",
- hvcsd->vdev->unit_address, hvcsd->open_count);
- }
- kobjp = &hvcsd->kobj;
-
- spin_unlock_irqrestore(&hvcsd->lock, flags);
-
- kobject_put(kobjp);
-}
-
-static void hvcs_hangup(struct tty_struct * tty)
-{
- struct hvcs_struct *hvcsd = tty->driver_data;
- unsigned long flags;
- int temp_open_count;
- struct kobject *kobjp;
-
- spin_lock_irqsave(&hvcsd->lock, flags);
- /* Preserve this so that we know how many kobject refs to put */
- temp_open_count = hvcsd->open_count;
-
- /*
- * Don't kobject put inside the spinlock because the destruction
- * callback may use the spinlock and it may get called before the
- * spinlock has been released. Get a pointer to the kobject and
- * kobject_put on that instead.
- */
- kobjp = &hvcsd->kobj;
-
- /* Calling this will drop any buffered data on the floor. */
- hvcs_final_close(hvcsd);
-
- spin_unlock_irqrestore(&hvcsd->lock, flags);
-
- /*
- * We need to kobject_put() for every open_count we have since the
- * tty_hangup() function doesn't invoke a close per open connection on a
- * non-console device.
- */
- while(temp_open_count) {
- --temp_open_count;
- /*
- * The final put will trigger destruction of the hvcs_struct.
- * NOTE: If this hangup was signaled from user space then the
- * final put will never happen.
- */
- kobject_put(kobjp);
- }
-}
-
-/*
- * NOTE: This is almost always from_user since user level apps interact with the
- * /dev nodes. I'm trusting that if hvcs_write gets called and interrupted by
- * hvcs_remove (which removes the target device and executes tty_hangup()) that
- * tty_hangup will allow hvcs_write time to complete execution before it
- * terminates our device.
- */
-static int hvcs_write(struct tty_struct *tty, int from_user,
- const unsigned char *buf, int count)
-{
- struct hvcs_struct *hvcsd = tty->driver_data;
- unsigned int unit_address;
- unsigned char *charbuf;
- unsigned long flags;
- int total_sent = 0;
- int tosend = 0;
- int result = 0;
-
- /*
- * If they don't check the return code off of their open they may
- * attempt this even if there is no connected device.
- */
- if (!hvcsd)
- return -ENODEV;
-
- /* Reasonable size to prevent user level flooding */
- if (count > HVCS_MAX_FROM_USER) {
- printk(KERN_WARNING "HVCS write: count being truncated to"
- " HVCS_MAX_FROM_USER.\n");
- count = HVCS_MAX_FROM_USER;
- }
-
- if (!from_user)
- charbuf = (unsigned char *)buf;
- else {
- charbuf = kmalloc(count, GFP_KERNEL);
- if (!charbuf) {
- printk(KERN_WARNING "HVCS: write -ENOMEM.\n");
- return -ENOMEM;
- }
-
- if (copy_from_user(charbuf, buf, count)) {
- kfree(charbuf);
- printk(KERN_WARNING "HVCS: write -EFAULT.\n");
- return -EFAULT;
- }
- }
-
- spin_lock_irqsave(&hvcsd->lock, flags);
-
- /*
- * Somehow an open succedded but the device was removed or the
- * connection terminated between the vty-server and partner vty during
- * the middle of a write operation? This is a crummy place to do this
- * but we want to keep it all in the spinlock.
- */
- if (hvcsd->open_count <= 0) {
- spin_unlock_irqrestore(&hvcsd->lock, flags);
- if (from_user)
- kfree(charbuf);
- return -ENODEV;
- }
-
- unit_address = hvcsd->vdev->unit_address;
-
- while (count > 0) {
- tosend = min(count, (HVCS_BUFF_LEN - hvcsd->chars_in_buffer));
- /*
- * No more space, this probably means that the last call to
- * hvcs_write() didn't succeed and the buffer was filled up.
- */
- if (!tosend)
- break;
-
- memcpy(&hvcsd->buffer[hvcsd->chars_in_buffer],
- &charbuf[total_sent],
- tosend);
-
- hvcsd->chars_in_buffer += tosend;
-
- result = 0;
-
- /*
- * If this is true then we don't want to try writing to the
- * hypervisor because that is the kernel_threads job now. We'll
- * just add to the buffer.
- */
- if (!(hvcsd->todo_mask & HVCS_TRY_WRITE))
- /* won't send partial writes */
- result = hvc_put_chars(unit_address,
- &hvcsd->buffer[0],
- hvcsd->chars_in_buffer);
-
- /*
- * Since we know we have enough room in hvcsd->buffer for
- * tosend we record that it was sent regardless of whether the
- * hypervisor actually took it because we have it buffered.
- */
- total_sent+=tosend;
- count-=tosend;
- if (result == 0) {
- hvcsd->todo_mask |= HVCS_TRY_WRITE;
- hvcs_kick();
- break;
- }
-
- hvcsd->chars_in_buffer = 0;
- /*
- * Test after the chars_in_buffer reset otherwise this could
- * deadlock our writes if hvc_put_chars fails.
- */
- if (result < 0)
- break;
- }
-
- spin_unlock_irqrestore(&hvcsd->lock, flags);
- if (from_user)
- kfree(charbuf);
-
- if (result == -1)
- return -EIO;
- else
- return total_sent;
-}
-
-/*
- * This is really asking how much can we guarentee that we can send or that we
- * absolutely WILL BUFFER if we can't send it. This driver MUST honor the
- * return value, hence the reason for hvcs_struct buffering.
- */
-static int hvcs_write_room(struct tty_struct *tty)
-{
- struct hvcs_struct *hvcsd = tty->driver_data;
- unsigned long flags;
- int retval;
-
- if (!hvcsd || hvcsd->open_count <= 0)
- return 0;
-
- spin_lock_irqsave(&hvcsd->lock, flags);
- retval = HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
- spin_unlock_irqrestore(&hvcsd->lock, flags);
- return retval;
-}
-
-static int hvcs_chars_in_buffer(struct tty_struct *tty)
-{
- struct hvcs_struct *hvcsd = tty->driver_data;
- unsigned long flags;
- int retval;
-
- spin_lock_irqsave(&hvcsd->lock, flags);
- retval = hvcsd->chars_in_buffer;
- spin_unlock_irqrestore(&hvcsd->lock, flags);
- return retval;
-}
-
-static struct tty_operations hvcs_ops = {
- .open = hvcs_open,
- .close = hvcs_close,
- .hangup = hvcs_hangup,
- .write = hvcs_write,
- .write_room = hvcs_write_room,
- .chars_in_buffer = hvcs_chars_in_buffer,
- .unthrottle = hvcs_unthrottle,
- .throttle = hvcs_throttle,
-};
-
-static int __init hvcs_module_init(void)
-{
- int rc;
- int num_ttys_to_alloc;
-
- printk(KERN_INFO "Initializing %s\n", hvcs_driver_string);
-
- /* Has the user specified an overload with an insmod param? */
- if (hvcs_parm_num_devs <= 0 ||
- (hvcs_parm_num_devs > HVCS_MAX_SERVER_ADAPTERS)) {
- num_ttys_to_alloc = HVCS_DEFAULT_SERVER_ADAPTERS;
- } else
- num_ttys_to_alloc = hvcs_parm_num_devs;
-
- hvcs_tty_driver = alloc_tty_driver(num_ttys_to_alloc);
- if (!hvcs_tty_driver)
- return -ENOMEM;
-
- hvcs_tty_driver->owner = THIS_MODULE;
-
- hvcs_tty_driver->driver_name = hvcs_driver_name;
- hvcs_tty_driver->name = hvcs_device_node;
-
- /*
- * We'll let the system assign us a major number, indicated by leaving
- * it blank.
- */
-
- hvcs_tty_driver->minor_start = HVCS_MINOR_START;
- hvcs_tty_driver->type = TTY_DRIVER_TYPE_SYSTEM;
-
- /*
- * We role our own so that we DONT ECHO. We can't echo because the
- * device we are connecting to already echoes by default and this would
- * throw us into a horrible recursive echo-echo-echo loop.
- */
- hvcs_tty_driver->init_termios = hvcs_tty_termios;
- hvcs_tty_driver->flags = TTY_DRIVER_REAL_RAW;
-
- tty_set_operations(hvcs_tty_driver, &hvcs_ops);
-
- /*
- * The following call will result in sysfs entries that denote the
- * dynamically assigned major and minor numbers for our devices.
- */
- if (tty_register_driver(hvcs_tty_driver)) {
- printk(KERN_ERR "HVCS: registration "
- " as a tty driver failed.\n");
- put_tty_driver(hvcs_tty_driver);
- return rc;
- }
-
- hvcs_structs_lock = SPIN_LOCK_UNLOCKED;
-
- hvcs_pi_lock = SPIN_LOCK_UNLOCKED;
- hvcs_pi_buff = kmalloc(PAGE_SIZE, GFP_KERNEL);
-
- hvcs_task = kthread_run(khvcsd, NULL, "khvcsd");
- if (IS_ERR(hvcs_task)) {
- printk("khvcsd creation failed. Driver not loaded.\n");
- kfree(hvcs_pi_buff);
- put_tty_driver(hvcs_tty_driver);
- return -EIO;
- }
-
- rc = vio_register_driver(&hvcs_vio_driver);
-
- /*
- * This needs to be done AFTER the vio_register_driver() call or else
- * the kobjects won't be initialized properly.
- */
- hvcs_create_driver_attrs();
-
- printk(KERN_INFO "HVCS: driver module inserted.\n");
-
- return rc;
-}
-
-static void __exit hvcs_module_exit(void)
-{
- unsigned long flags;
-
- /*
- * This driver receives hvcs_remove callbacks for each device upon
- * module removal.
- */
-
- /*
- * This synchronous operation will wake the khvcsd kthread if it is
- * asleep and will return when khvcsd has terminated.
- */
- kthread_stop(hvcs_task);
-
- spin_lock_irqsave(&hvcs_pi_lock, flags);
- kfree(hvcs_pi_buff);
- hvcs_pi_buff = NULL;
- spin_unlock_irqrestore(&hvcs_pi_lock, flags);
-
- hvcs_remove_driver_attrs();
-
- vio_unregister_driver(&hvcs_vio_driver);
-
- tty_unregister_driver(hvcs_tty_driver);
-
- put_tty_driver(hvcs_tty_driver);
-
- printk(KERN_INFO "HVCS: driver module removed.\n");
-}
-
-module_init(hvcs_module_init);
-module_exit(hvcs_module_exit);
-
-static inline struct hvcs_struct *from_vio_dev(struct vio_dev *viod)
-{
- return viod->dev.driver_data;
-}
-/* The sysfs interface for the driver and devices */
-
-static ssize_t hvcs_partner_vtys_show(struct device *dev, char *buf)
-{
- struct vio_dev *viod = to_vio_dev(dev);
- struct hvcs_struct *hvcsd = from_vio_dev(viod);
- unsigned long flags;
- int retval;
-
- spin_lock_irqsave(&hvcsd->lock, flags);
- retval = sprintf(buf, "%X\n", hvcsd->p_unit_address);
- spin_unlock_irqrestore(&hvcsd->lock, flags);
- return retval;
-}
-static DEVICE_ATTR(partner_vtys, S_IRUGO, hvcs_partner_vtys_show, NULL);
-
-static ssize_t hvcs_partner_clcs_show(struct device *dev, char *buf)
-{
- struct vio_dev *viod = to_vio_dev(dev);
- struct hvcs_struct *hvcsd = from_vio_dev(viod);
- unsigned long flags;
- int retval;
-
- spin_lock_irqsave(&hvcsd->lock, flags);
- retval = sprintf(buf, "%s\n", &hvcsd->p_location_code[0]);
- spin_unlock_irqrestore(&hvcsd->lock, flags);
- return retval;
-}
-static DEVICE_ATTR(partner_clcs, S_IRUGO, hvcs_partner_clcs_show, NULL);
-
-static ssize_t hvcs_current_vty_store(struct device *dev, const char * buf,
- size_t count)
-{
- /*
- * Don't need this feature at the present time because firmware doesn't
- * yet support multiple partners.
- */
- printk(KERN_INFO "HVCS: Denied current_vty change: -EPERM.\n");
- return -EPERM;
-}
-
-static ssize_t hvcs_current_vty_show(struct device *dev, char *buf)
-{
- struct vio_dev *viod = to_vio_dev(dev);
- struct hvcs_struct *hvcsd = from_vio_dev(viod);
- unsigned long flags;
- int retval;
-
- spin_lock_irqsave(&hvcsd->lock, flags);
- retval = sprintf(buf, "%s\n", &hvcsd->p_location_code[0]);
- spin_unlock_irqrestore(&hvcsd->lock, flags);
- return retval;
-}
-
-static DEVICE_ATTR(current_vty,
- S_IRUGO | S_IWUSR, hvcs_current_vty_show, hvcs_current_vty_store);
-
-static ssize_t hvcs_vterm_state_store(struct device *dev, const char *buf,
- size_t count)
-{
- struct vio_dev *viod = to_vio_dev(dev);
- struct hvcs_struct *hvcsd = from_vio_dev(viod);
- unsigned long flags;
-
- /* writing a '0' to this sysfs entry will result in the disconnect. */
- if (simple_strtol(buf, NULL, 0) != 0)
- return -EINVAL;
-
- spin_lock_irqsave(&hvcsd->lock, flags);
-
- if (hvcsd->open_count > 0) {
- spin_unlock_irqrestore(&hvcsd->lock, flags);
- printk(KERN_INFO "HVCS: vterm state unchanged. "
- "The hvcs device node is still in use.\n");
- return -EPERM;
- }
-
- if (hvcsd->connected == 0) {
- spin_unlock_irqrestore(&hvcsd->lock, flags);
- printk(KERN_INFO "HVCS: vterm state unchanged. The"
- " vty-server is not connected to a vty.\n");
- return -EPERM;
- }
-
- hvcs_partner_free(hvcsd);
- printk(KERN_INFO "HVCS: Closed vty-server@%X and"
- " partner vty@%X:%d connection.\n",
- hvcsd->vdev->unit_address,
- hvcsd->p_unit_address,
- (unsigned int)hvcsd->p_partition_ID);
-
- spin_unlock_irqrestore(&hvcsd->lock, flags);
- return count;
-}
-
-static ssize_t hvcs_vterm_state_show(struct device *dev, char *buf)
-{
- struct vio_dev *viod = to_vio_dev(dev);
- struct hvcs_struct *hvcsd = from_vio_dev(viod);
- unsigned long flags;
- int retval;
-
- spin_lock_irqsave(&hvcsd->lock, flags);
- retval = sprintf(buf, "%d\n", hvcsd->connected);
- spin_unlock_irqrestore(&hvcsd->lock, flags);
- return retval;
-}
-static DEVICE_ATTR(vterm_state, S_IRUGO | S_IWUSR,
- hvcs_vterm_state_show, hvcs_vterm_state_store);
-
-static struct attribute *hvcs_attrs[] = {
- &dev_attr_partner_vtys.attr,
- &dev_attr_partner_clcs.attr,
- &dev_attr_current_vty.attr,
- &dev_attr_vterm_state.attr,
- NULL,
-};
-
-static struct attribute_group hvcs_attr_group = {
- .attrs = hvcs_attrs,
-};
-
-static void hvcs_create_device_attrs(struct hvcs_struct *hvcsd)
-{
- struct vio_dev *vdev = hvcsd->vdev;
- sysfs_create_group(&vdev->dev.kobj, &hvcs_attr_group);
-}
-
-static void hvcs_remove_device_attrs(struct vio_dev *vdev)
-{
- sysfs_remove_group(&vdev->dev.kobj, &hvcs_attr_group);
-}
-
-static ssize_t hvcs_rescan_show(struct device_driver *ddp, char *buf)
-{
- /* A 1 means it is updating, a 0 means it is done updating */
- return snprintf(buf, PAGE_SIZE, "%d\n", hvcs_rescan_status);
-}
-
-static ssize_t hvcs_rescan_store(struct device_driver *ddp, const char * buf,
- size_t count)
-{
- if ((simple_strtol(buf, NULL, 0) != 1)
- && (hvcs_rescan_status != 0))
- return -EINVAL;
-
- hvcs_rescan_status = 1;
- printk(KERN_INFO "HVCS: rescanning partner info for all"
- " vty-servers.\n");
- hvcs_rescan_devices_list();
- hvcs_rescan_status = 0;
- return count;
-}
-static DRIVER_ATTR(rescan,
- S_IRUGO | S_IWUSR, hvcs_rescan_show, hvcs_rescan_store);
-
-static void hvcs_create_driver_attrs(void)
-{
- struct device_driver *driverfs = &(hvcs_vio_driver.driver);
- driver_create_file(driverfs, &driver_attr_rescan);
-}
-
-static void hvcs_remove_driver_attrs(void)
-{
- struct device_driver *driverfs = &(hvcs_vio_driver.driver);
- driver_remove_file(driverfs, &driver_attr_rescan);
-}
static void set_params (i2ChanStrPtr, struct termios *);
static int set_modem_info(i2ChanStrPtr, unsigned int, unsigned int *);
-static int get_serial_info(i2ChanStrPtr, struct serial_struct __user *);
-static int set_serial_info(i2ChanStrPtr, struct serial_struct __user *);
+static int get_serial_info(i2ChanStrPtr, struct serial_struct *);
+static int set_serial_info(i2ChanStrPtr, struct serial_struct *);
-static ssize_t ip2_ipl_read(struct file *, char __user *, size_t, loff_t *);
-static ssize_t ip2_ipl_write(struct file *, const char __user *, size_t, loff_t *);
+static ssize_t ip2_ipl_read(struct file *, char *, size_t, loff_t *);
+static ssize_t ip2_ipl_write(struct file *, const char *, size_t, loff_t *);
static int ip2_ipl_ioctl(struct inode *, struct file *, UINT, ULONG);
static int ip2_ipl_open(struct inode *, struct file *);
-static int DumpTraceBuffer(char __user *, int);
-static int DumpFifoBuffer( char __user *, int);
+static int DumpTraceBuffer(char *, int);
+static int DumpFifoBuffer( char *, int);
static void ip2_init_board(int);
static unsigned short find_eisa_board(int);
/******************************************************************************/
static inline void
-service_all_boards(void)
+service_all_boards()
{
int i;
i2eBordStrPtr pB;
wait_queue_t wait;
i2ChanStrPtr pCh = DevTable[tty->index];
struct async_icount cprev, cnow; /* kernel counter temps */
- struct serial_icounter_struct __user *p_cuser;
+ struct serial_icounter_struct *p_cuser; /* user space */
int rc = 0;
unsigned long flags;
- void __user *argp = (void __user *)arg;
if ( pCh == NULL ) {
return -ENODEV;
ip2trace (CHANN, ITRC_IOCTL, 2, 1, rc );
- rc = get_serial_info(pCh, argp);
+ rc = get_serial_info(pCh, (struct serial_struct *) arg);
if (rc)
return rc;
break;
ip2trace (CHANN, ITRC_IOCTL, 3, 1, rc );
- rc = set_serial_info(pCh, argp);
+ rc = set_serial_info(pCh, (struct serial_struct *) arg);
if (rc)
return rc;
break;
ip2trace (CHANN, ITRC_IOCTL, 6, 1, rc );
- rc = put_user(C_CLOCAL(tty) ? 1 : 0, (unsigned long __user *)argp);
+ rc = put_user(C_CLOCAL(tty) ? 1 : 0, (unsigned long *) arg);
if (rc)
return rc;
break;
ip2trace (CHANN, ITRC_IOCTL, 7, 1, rc );
- rc = get_user(arg,(unsigned long __user *) argp);
+ rc = get_user(arg,(unsigned long *) arg);
if (rc)
return rc;
tty->termios->c_cflag = ((tty->termios->c_cflag & ~CLOCAL)
save_flags(flags);cli();
cnow = pCh->icount;
restore_flags(flags);
- p_cuser = argp;
+ p_cuser = (struct serial_icounter_struct *) arg;
rc = put_user(cnow.cts, &p_cuser->cts);
rc = put_user(cnow.dsr, &p_cuser->dsr);
rc = put_user(cnow.rng, &p_cuser->rng);
/* standard Linux serial structure. */
/******************************************************************************/
static int
-get_serial_info ( i2ChanStrPtr pCh, struct serial_struct __user *retinfo )
+get_serial_info ( i2ChanStrPtr pCh, struct serial_struct *retinfo )
{
struct serial_struct tmp;
+ int rc;
+
+ if ( !retinfo ) {
+ return -EFAULT;
+ }
memset ( &tmp, 0, sizeof(tmp) );
tmp.type = pCh->pMyBord->channelBtypes.bid_value[(pCh->port_index & (IP2_PORTS_PER_BOARD-1))/16];
tmp.close_delay = pCh->ClosingDelay;
tmp.closing_wait = pCh->ClosingWaitTime;
tmp.custom_divisor = pCh->BaudDivisor;
- return copy_to_user(retinfo,&tmp,sizeof(*retinfo));
+ rc = copy_to_user(retinfo,&tmp,sizeof(*retinfo));
+ return rc;
}
/******************************************************************************/
/* change the IRQ, address or type of the port the ioctl fails. */
/******************************************************************************/
static int
-set_serial_info( i2ChanStrPtr pCh, struct serial_struct __user *new_info )
+set_serial_info( i2ChanStrPtr pCh, struct serial_struct *new_info )
{
struct serial_struct ns;
int old_flags, old_baud_divisor;
- if (copy_from_user(&ns, new_info, sizeof (ns)))
+ if ( !new_info ) {
return -EFAULT;
+ }
+
+ if (copy_from_user(&ns, new_info, sizeof (ns))) {
+ return -EFAULT;
+ }
/*
* We don't allow setserial to change IRQ, board address, type or baud
static
ssize_t
-ip2_ipl_read(struct file *pFile, char __user *pData, size_t count, loff_t *off )
+ip2_ipl_read(struct file *pFile, char *pData, size_t count, loff_t *off )
{
unsigned int minor = iminor(pFile->f_dentry->d_inode);
int rc = 0;
}
static int
-DumpFifoBuffer ( char __user *pData, int count )
+DumpFifoBuffer ( char *pData, int count )
{
#ifdef DEBUG_FIFO
int rc;
}
static int
-DumpTraceBuffer ( char __user *pData, int count )
+DumpTraceBuffer ( char *pData, int count )
{
#ifdef IP2DEBUG_TRACE
int rc;
int dumpcount;
int chunk;
- int *pIndex = (int __user *)pData;
+ int *pIndex = (int*)pData;
if ( count < (sizeof(int) * 6) ) {
return -EIO;
/* */
/******************************************************************************/
static ssize_t
-ip2_ipl_write(struct file *pFile, const char __user *pData, size_t count, loff_t *off)
+ip2_ipl_write(struct file *pFile, const char *pData, size_t count, loff_t *off)
{
#ifdef IP2DEBUG_IPL
printk (KERN_DEBUG "IP2IPL: write %p, %d bytes\n", pData, count );
{
unsigned int iplminor = iminor(pInode);
int rc = 0;
- void __user *argp = (void __user *)arg;
- ULONG __user *pIndex = argp;
+ ULONG *pIndex = (ULONG*)arg;
i2eBordStrPtr pB = i2BoardPtrTable[iplminor / 4];
i2ChanStrPtr pCh;
case 65: /* Board - ip2stat */
if ( pB ) {
- rc = copy_to_user(argp, pB, sizeof(i2eBordStr));
+ rc = copy_to_user((char*)arg, (char*)pB, sizeof(i2eBordStr) );
rc = put_user(INB(pB->i2eStatus),
- (ULONG __user *)(arg + (ULONG)(&pB->i2eStatus) - (ULONG)pB ) );
+ (ULONG*)(arg + (ULONG)(&pB->i2eStatus) - (ULONG)pB ) );
} else {
rc = -ENODEV;
}
pCh = DevTable[cmd];
if ( pCh )
{
- rc = copy_to_user(argp, pCh, sizeof(i2ChanStr));
+ rc = copy_to_user((char*)arg, (char*)pCh, sizeof(i2ChanStr) );
} else {
rc = -ENODEV;
}
{
int rv;
struct ipmi_addr addr;
- struct kernel_ipmi_msg msg;
+ unsigned char *msgdata;
if (req->addr_len > sizeof(struct ipmi_addr))
return -EINVAL;
if (copy_from_user(&addr, req->addr, req->addr_len))
return -EFAULT;
- msg.netfn = req->msg.netfn;
- msg.cmd = req->msg.cmd;
- msg.data_len = req->msg.data_len;
- msg.data = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
- if (!msg.data)
+ msgdata = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
+ if (!msgdata)
return -ENOMEM;
/* From here out we cannot return, we must jump to "out" for
goto out;
}
- if (copy_from_user(msg.data,
+ if (copy_from_user(msgdata,
req->msg.data,
req->msg.data_len))
{
goto out;
}
} else {
- msg.data_len = 0;
+ req->msg.data_len = 0;
}
+ req->msg.data = msgdata;
rv = ipmi_request_settime(user,
&addr,
req->msgid,
- &msg,
+ &(req->msg),
NULL,
0,
retries,
retry_time_ms);
out:
- kfree(msg.data);
+ kfree(msgdata);
return rv;
}
}
static inline void format_ipmb_msg(struct ipmi_smi_msg *smi_msg,
- struct kernel_ipmi_msg *msg,
+ struct ipmi_msg *msg,
struct ipmi_ipmb_addr *ipmb_addr,
long msgid,
unsigned char ipmb_seq,
}
static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg,
- struct kernel_ipmi_msg *msg,
+ struct ipmi_msg *msg,
struct ipmi_lan_addr *lan_addr,
long msgid,
unsigned char ipmb_seq,
ipmi_smi_t intf,
struct ipmi_addr *addr,
long msgid,
- struct kernel_ipmi_msg *msg,
+ struct ipmi_msg *msg,
void *user_msg_data,
void *supplied_smi,
struct ipmi_recv_msg *supplied_recv,
goto out_err;
}
-#ifdef DEBUG_MSGING
+#if DEBUG_MSGING
{
int m;
for (m=0; m<smi_msg->data_size; m++)
int ipmi_request(ipmi_user_t user,
struct ipmi_addr *addr,
long msgid,
- struct kernel_ipmi_msg *msg,
+ struct ipmi_msg *msg,
void *user_msg_data,
int priority)
{
int ipmi_request_settime(ipmi_user_t user,
struct ipmi_addr *addr,
long msgid,
- struct kernel_ipmi_msg *msg,
+ struct ipmi_msg *msg,
void *user_msg_data,
int priority,
int retries,
int ipmi_request_supply_msgs(ipmi_user_t user,
struct ipmi_addr *addr,
long msgid,
- struct kernel_ipmi_msg *msg,
+ struct ipmi_msg *msg,
void *user_msg_data,
void *supplied_smi,
struct ipmi_recv_msg *supplied_recv,
int ipmi_request_with_source(ipmi_user_t user,
struct ipmi_addr *addr,
long msgid,
- struct kernel_ipmi_msg *msg,
+ struct ipmi_msg *msg,
void *user_msg_data,
int priority,
unsigned char source_address,
static int
send_channel_info_cmd(ipmi_smi_t intf, int chan)
{
- struct kernel_ipmi_msg msg;
+ struct ipmi_msg msg;
unsigned char data[1];
struct ipmi_system_interface_addr si;
msg->data[10] = ipmb_checksum(&(msg->data[6]), 4);
msg->data_size = 11;
-#ifdef DEBUG_MSGING
+#if DEBUG_MSGING
{
int m;
printk("Invalid command:");
int requeue;
int chan;
-#ifdef DEBUG_MSGING
+#if DEBUG_MSGING
int m;
printk("Recv:");
for (m=0; m<msg->rsp_size; m++)
MC, which don't get resent. */
intf->handlers->sender(intf->send_info, smi_msg, 0);
-#ifdef DEBUG_MSGING
+#if DEBUG_MSGING
{
int m;
printk("Resend: ");
static void send_panic_events(char *str)
{
- struct kernel_ipmi_msg msg;
+ struct ipmi_msg msg;
ipmi_smi_t intf;
unsigned char data[16];
int i;
200 /* priority: INT_MAX >= x >= 0 */
};
-static int ipmi_init_msghandler(void)
+static __init int ipmi_init_msghandler(void)
{
int i;
return 0;
}
-static __init int ipmi_init_msghandler_mod(void)
-{
- ipmi_init_msghandler();
- return 0;
-}
-
static __exit void cleanup_ipmi(void)
{
int count;
}
module_exit(cleanup_ipmi);
-module_init(ipmi_init_msghandler_mod);
+module_init(ipmi_init_msghandler);
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(ipmi_alloc_recv_msg);
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/ioport.h>
-#include <asm/irq.h>
+#include <linux/irq.h>
#ifdef CONFIG_HIGH_RES_TIMERS
#include <linux/hrtime.h>
# if defined(schedule_next_int)
static int acpi_failure = 0;
/* For GPE-type interrupts. */
-void ipmi_acpi_gpe(void *context)
+u32 ipmi_acpi_gpe(void *context)
{
struct smi_info *smi_info = context;
unsigned long flags;
smi_event_handler(smi_info, 0);
out:
spin_unlock_irqrestore(&(smi_info->si_lock), flags);
+ return 0;
}
static int acpi_gpe_irq_setup(struct smi_info *info)
#define WDIOC_GET_PRETIMEOUT _IOW(WATCHDOG_IOCTL_BASE, 22, int)
#endif
-#ifdef CONFIG_WATCHDOG_NOWAYOUT
-static int nowayout = 1;
-#else
-static int nowayout;
-#endif
-
static ipmi_user_t watchdog_user = NULL;
/* Default the timeout to 10 seconds. */
module_param(start_now, int, 0);
MODULE_PARM_DESC(start_now, "Set to 1 to start the watchdog as"
"soon as the driver is loaded.");
-module_param(nowayout, int, 0);
-MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=CONFIG_WATCHDOG_NOWAYOUT)");
/* Default state of the timer. */
static unsigned char ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
struct ipmi_recv_msg *recv_msg,
int *send_heartbeat_now)
{
- struct kernel_ipmi_msg msg;
+ struct ipmi_msg msg;
unsigned char data[6];
int rv;
struct ipmi_system_interface_addr addr;
static int ipmi_heartbeat(void)
{
- struct kernel_ipmi_msg msg;
+ struct ipmi_msg msg;
int rv;
struct ipmi_system_interface_addr addr;
static void panic_halt_ipmi_heartbeat(void)
{
- struct kernel_ipmi_msg msg;
+ struct ipmi_msg msg;
struct ipmi_system_interface_addr addr;
{
int rv;
+ /* Can't seek (pwrite) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
if (len) {
rv = ipmi_heartbeat();
if (rv)
int rv = 0;
wait_queue_t wait;
+ /* Can't seek (pread) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
if (count <= 0)
return 0;
/* Don't start the timer now, let it start on the
first heartbeat. */
ipmi_start_timer_on_heartbeat = 1;
- return nonseekable_open(ino, filep);
+ return(0);
default:
return (-ENODEV);
{
if (iminor(ino)==WATCHDOG_MINOR)
{
- if (!nowayout) {
- ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
- ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
- }
+#ifndef CONFIG_WATCHDOG_NOWAYOUT
+ ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
+ ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
+#endif
ipmi_wdog_open = 0;
}
static void isicom_tx(unsigned long _data);
static void isicom_start(struct tty_struct * tty);
-static unsigned char * tmp_buf;
+static unsigned char * tmp_buf = 0;
static DECLARE_MUTEX(tmp_buf_sem);
/* baud index mappings from linux defns to isi */
unsigned long t;
unsigned short word_count, base;
bin_frame frame;
- void __user *argp = (void __user *)arg;
/* exec_record exec_rec; */
- if(get_user(card, (int __user *)argp))
+ if(get_user(card, (int *)arg))
return -EFAULT;
if(card < 0 || card >= BOARD_COUNT)
return -EIO;
}
printk("-Done\n");
- return put_user(signature,(unsigned __user *)argp);
+ return put_user(signature,(unsigned int*)arg);
case MIOCTL_LOAD_FIRMWARE:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if(copy_from_user(&frame, argp, sizeof(bin_frame)))
+ if(copy_from_user(&frame, (void *) arg, sizeof(bin_frame)))
return -EFAULT;
if (WaitTillCardIsFree(base))
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if(copy_from_user(&frame, argp, sizeof(bin_header)))
+ if(copy_from_user(&frame, (void *) arg, sizeof(bin_header)))
return -EFAULT;
if (WaitTillCardIsFree(base))
return -EIO;
}
- if(copy_to_user(argp, &frame, sizeof(bin_frame)))
+ if(copy_to_user((void *) arg, &frame, sizeof(bin_frame)))
return -EFAULT;
return 0;
if (tty->ldisc.flush_buffer)
tty->ldisc.flush_buffer(tty);
tty->closing = 0;
- port->tty = NULL;
+ port->tty = 0;
if (port->blocked_open) {
if (port->close_delay) {
set_current_state(TASK_INTERRUPTIBLE);
}
static int isicom_set_serial_info(struct isi_port * port,
- struct serial_struct __user *info)
+ struct serial_struct * info)
{
struct serial_struct newinfo;
unsigned long flags;
}
static int isicom_get_serial_info(struct isi_port * port,
- struct serial_struct __user *info)
+ struct serial_struct * info)
{
struct serial_struct out_info;
unsigned int cmd, unsigned long arg)
{
struct isi_port * port = (struct isi_port *) tty->driver_data;
- void __user *argp = (void __user *)arg;
int retval;
if (isicom_paranoia_check(port, tty->name, "isicom_ioctl"))
return 0;
case TIOCGSOFTCAR:
- return put_user(C_CLOCAL(tty) ? 1 : 0, (unsigned long __user *)argp);
+ return put_user(C_CLOCAL(tty) ? 1 : 0, (unsigned long *) arg);
case TIOCSSOFTCAR:
- if(get_user(arg, (unsigned long __user *) argp))
+ if(get_user(arg, (unsigned long *) arg))
return -EFAULT;
tty->termios->c_cflag =
((tty->termios->c_cflag & ~CLOCAL) |
return 0;
case TIOCGSERIAL:
- return isicom_get_serial_info(port, argp);
+ return isicom_get_serial_info(port,
+ (struct serial_struct *) arg);
case TIOCSSERIAL:
- return isicom_set_serial_info(port, argp);
+ return isicom_set_serial_info(port,
+ (struct serial_struct *) arg);
default:
return -ENOIOCTLCMD;
isicom_shutdown_port(port);
port->count = 0;
port->flags &= ~ASYNC_NORMAL_ACTIVE;
- port->tty = NULL;
+ port->tty = 0;
wake_up_interruptible(&port->open_wait);
}
static int stli_brdinit(stlibrd_t *brdp);
static int stli_startbrd(stlibrd_t *brdp);
-static ssize_t stli_memread(struct file *fp, char __user *buf, size_t count, loff_t *offp);
-static ssize_t stli_memwrite(struct file *fp, const char __user *buf, size_t count, loff_t *offp);
+static ssize_t stli_memread(struct file *fp, char *buf, size_t count, loff_t *offp);
+static ssize_t stli_memwrite(struct file *fp, const char *buf, size_t count, loff_t *offp);
static int stli_memioctl(struct inode *ip, struct file *fp, unsigned int cmd, unsigned long arg);
static void stli_brdpoll(stlibrd_t *brdp, volatile cdkhdr_t *hdrp);
static void stli_poll(unsigned long arg);
static void stli_mkasysigs(asysigs_t *sp, int dtr, int rts);
static long stli_mktiocm(unsigned long sigvalue);
static void stli_read(stlibrd_t *brdp, stliport_t *portp);
-static int stli_getserial(stliport_t *portp, struct serial_struct __user *sp);
-static int stli_setserial(stliport_t *portp, struct serial_struct __user *sp);
-static int stli_getbrdstats(combrd_t __user *bp);
-static int stli_getportstats(stliport_t *portp, comstats_t __user *cp);
+static int stli_getserial(stliport_t *portp, struct serial_struct *sp);
+static int stli_setserial(stliport_t *portp, struct serial_struct *sp);
+static int stli_getbrdstats(combrd_t *bp);
+static int stli_getportstats(stliport_t *portp, comstats_t *cp);
static int stli_portcmdstats(stliport_t *portp);
-static int stli_clrportstats(stliport_t *portp, comstats_t __user *cp);
-static int stli_getportstruct(stliport_t __user *arg);
-static int stli_getbrdstruct(stlibrd_t __user *arg);
+static int stli_clrportstats(stliport_t *portp, comstats_t *cp);
+static int stli_getportstruct(unsigned long arg);
+static int stli_getbrdstruct(unsigned long arg);
static void *stli_memalloc(int len);
static stlibrd_t *stli_allocbrd(void);
{
unsigned long flags;
-#ifdef DEBUG
+#if DEBUG
printk("init_module()\n");
#endif
unsigned long flags;
int i, j;
-#ifdef DEBUG
+#if DEBUG
printk("cleanup_module()\n");
#endif
* Check for any arguments passed in on the module load command line.
*/
-static void stli_argbrds(void)
+static void stli_argbrds()
{
stlconf_t conf;
stlibrd_t *brdp;
int nrargs, i;
-#ifdef DEBUG
+#if DEBUG
printk("stli_argbrds()\n");
#endif
char *sp;
int nrbrdnames, i;
-#ifdef DEBUG
+#if DEBUG
printk("stli_parsebrd(confp=%x,argp=%x)\n", (int) confp, (int) argp);
#endif
unsigned int minordev;
int brdnr, portnr, rc;
-#ifdef DEBUG
+#if DEBUG
printk("stli_open(tty=%x,filp=%x): device=%s\n", (int) tty,
(int) filp, tty->name);
#endif
stliport_t *portp;
unsigned long flags;
-#ifdef DEBUG
+#if DEBUG
printk("stli_close(tty=%x,filp=%x)\n", (int) tty, (int) filp);
#endif
asyport_t aport;
int rc;
-#ifdef DEBUG
+#if DEBUG
printk("stli_initopen(brdp=%x,portp=%x)\n", (int) brdp, (int) portp);
#endif
unsigned long flags;
int rc;
-#ifdef DEBUG
+#if DEBUG
printk("stli_rawopen(brdp=%x,portp=%x,arg=%x,wait=%d)\n",
(int) brdp, (int) portp, (int) arg, wait);
#endif
unsigned long flags;
int rc;
-#ifdef DEBUG
+#if DEBUG
printk("stli_rawclose(brdp=%x,portp=%x,arg=%x,wait=%d)\n",
(int) brdp, (int) portp, (int) arg, wait);
#endif
{
unsigned long flags;
-#ifdef DEBUG
+#if DEBUG
printk("stli_cmdwait(brdp=%x,portp=%x,cmd=%x,arg=%x,size=%d,"
"copyback=%d)\n", (int) brdp, (int) portp, (int) cmd,
(int) arg, size, copyback);
stlibrd_t *brdp;
asyport_t aport;
-#ifdef DEBUG
+#if DEBUG
printk("stli_setport(portp=%x)\n", (int) portp);
#endif
static void stli_delay(int len)
{
-#ifdef DEBUG
+#if DEBUG
printk("stli_delay(len=%d)\n", len);
#endif
if (len > 0) {
unsigned long flags;
int rc, doclocal;
-#ifdef DEBUG
+#if DEBUG
printk("stli_waitcarrier(brdp=%x,portp=%x,filp=%x)\n",
(int) brdp, (int) portp, (int) filp);
#endif
unsigned int len, stlen, head, tail, size;
unsigned long flags;
-#ifdef DEBUG
+#if DEBUG
printk("stli_write(tty=%x,from_user=%d,buf=%x,count=%d)\n",
(int) tty, from_user, (int) buf, count);
#endif
static void stli_putchar(struct tty_struct *tty, unsigned char ch)
{
-#ifdef DEBUG
+#if DEBUG
printk("stli_putchar(tty=%x,ch=%x)\n", (int) tty, (int) ch);
#endif
unsigned char *buf, *shbuf;
unsigned long flags;
-#ifdef DEBUG
+#if DEBUG
printk("stli_flushchars(tty=%x)\n", (int) tty);
#endif
unsigned int head, tail, len;
unsigned long flags;
-#ifdef DEBUG
+#if DEBUG
printk("stli_writeroom(tty=%x)\n", (int) tty);
#endif
unsigned int head, tail, len;
unsigned long flags;
-#ifdef DEBUG
+#if DEBUG
printk("stli_charsinbuffer(tty=%x)\n", (int) tty);
#endif
* Generate the serial struct info.
*/
-static int stli_getserial(stliport_t *portp, struct serial_struct __user *sp)
+static int stli_getserial(stliport_t *portp, struct serial_struct *sp)
{
struct serial_struct sio;
stlibrd_t *brdp;
-#ifdef DEBUG
+#if DEBUG
printk("stli_getserial(portp=%x,sp=%x)\n", (int) portp, (int) sp);
#endif
* just quietly ignore any requests to change irq, etc.
*/
-static int stli_setserial(stliport_t *portp, struct serial_struct __user *sp)
+static int stli_setserial(stliport_t *portp, struct serial_struct *sp)
{
struct serial_struct sio;
int rc;
-#ifdef DEBUG
- printk("stli_setserial(portp=%p,sp=%p)\n", portp, sp);
+#if DEBUG
+ printk("stli_setserial(portp=%x,sp=%x)\n", (int) portp, (int) sp);
#endif
if (copy_from_user(&sio, sp, sizeof(struct serial_struct)))
stlibrd_t *brdp;
unsigned int ival;
int rc;
- void __user *argp = (void __user *)arg;
-#ifdef DEBUG
+#if DEBUG
printk("stli_ioctl(tty=%x,file=%x,cmd=%x,arg=%x)\n",
(int) tty, (int) file, cmd, (int) arg);
#endif
switch (cmd) {
case TIOCGSOFTCAR:
rc = put_user(((tty->termios->c_cflag & CLOCAL) ? 1 : 0),
- (unsigned __user *) arg);
+ (unsigned int *) arg);
break;
case TIOCSSOFTCAR:
- if ((rc = get_user(ival, (unsigned __user *) arg)) == 0)
+ if ((rc = get_user(ival, (unsigned int *) arg)) == 0)
tty->termios->c_cflag =
(tty->termios->c_cflag & ~CLOCAL) |
(ival ? CLOCAL : 0);
break;
case TIOCGSERIAL:
- rc = stli_getserial(portp, argp);
+ if ((rc = verify_area(VERIFY_WRITE, (void *) arg,
+ sizeof(struct serial_struct))) == 0)
+ rc = stli_getserial(portp, (struct serial_struct *) arg);
break;
case TIOCSSERIAL:
- rc = stli_setserial(portp, argp);
+ if ((rc = verify_area(VERIFY_READ, (void *) arg,
+ sizeof(struct serial_struct))) == 0)
+ rc = stli_setserial(portp, (struct serial_struct *)arg);
break;
case STL_GETPFLAG:
- rc = put_user(portp->pflag, (unsigned __user *)argp);
+ rc = put_user(portp->pflag, (unsigned int *) arg);
break;
case STL_SETPFLAG:
- if ((rc = get_user(portp->pflag, (unsigned __user *)argp)) == 0)
+ if ((rc = get_user(portp->pflag, (unsigned int *) arg)) == 0)
stli_setport(portp);
break;
case COM_GETPORTSTATS:
- rc = stli_getportstats(portp, argp);
+ if ((rc = verify_area(VERIFY_WRITE, (void *) arg,
+ sizeof(comstats_t))) == 0)
+ rc = stli_getportstats(portp, (comstats_t *) arg);
break;
case COM_CLRPORTSTATS:
- rc = stli_clrportstats(portp, argp);
+ if ((rc = verify_area(VERIFY_WRITE, (void *) arg,
+ sizeof(comstats_t))) == 0)
+ rc = stli_clrportstats(portp, (comstats_t *) arg);
break;
case TIOCSERCONFIG:
case TIOCSERGWILD:
struct termios *tiosp;
asyport_t aport;
-#ifdef DEBUG
+#if DEBUG
printk("stli_settermios(tty=%x,old=%x)\n", (int) tty, (int) old);
#endif
{
stliport_t *portp;
-#ifdef DEBUG
+#if DEBUG
printk("stli_throttle(tty=%x)\n", (int) tty);
#endif
{
stliport_t *portp;
-#ifdef DEBUG
+#if DEBUG
printk("stli_unthrottle(tty=%x)\n", (int) tty);
#endif
stliport_t *portp;
asyctrl_t actrl;
-#ifdef DEBUG
+#if DEBUG
printk("stli_stop(tty=%x)\n", (int) tty);
#endif
stlibrd_t *brdp;
asyctrl_t actrl;
-#ifdef DEBUG
+#if DEBUG
printk("stli_start(tty=%x)\n", (int) tty);
#endif
{
stliport_t *portp;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_dohangup(portp=%x)\n", (int) arg);
#endif
stlibrd_t *brdp;
unsigned long flags;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_hangup(tty=%x)\n", (int) tty);
#endif
stlibrd_t *brdp;
unsigned long ftype, flags;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_flushbuffer(tty=%x)\n", (int) tty);
#endif
long arg;
/* long savestate, savetime; */
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_breakctl(tty=%x,state=%d)\n", (int) tty, state);
#endif
stliport_t *portp;
unsigned long tend;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_waituntilsent(tty=%x,timeout=%x)\n", (int) tty, timeout);
#endif
stliport_t *portp;
asyctrl_t actrl;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_sendxchar(tty=%x,ch=%x)\n", (int) tty, ch);
#endif
int curoff, maxoff;
char *pos;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_readproc(page=%x,start=%x,off=%x,count=%d,eof=%x,"
"data=%x\n", (int) page, (int) start, (int) off, count,
(int) eof, (int) data);
volatile unsigned char *bits;
unsigned long flags;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_sendcmd(brdp=%x,portp=%x,cmd=%x,arg=%x,size=%d,"
"copyback=%d)\n", (int) brdp, (int) portp, (int) cmd,
(int) arg, size, copyback);
unsigned int head, tail, size;
unsigned int len, stlen;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_read(brdp=%x,portp=%d)\n",
(int) brdp, (int) portp);
#endif
unsigned long oldsigs;
int rc, donerx;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_hostcmd(brdp=%x,channr=%d)\n",
(int) brdp, channr);
#endif
static void stli_mkasyport(stliport_t *portp, asyport_t *pp, struct termios *tiosp)
{
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_mkasyport(portp=%x,pp=%x,tiosp=%d)\n",
(int) portp, (int) pp, (int) tiosp);
#endif
static void stli_mkasysigs(asysigs_t *sp, int dtr, int rts)
{
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_mkasysigs(sp=%x,dtr=%d,rts=%d)\n",
(int) sp, dtr, rts);
#endif
{
long tiocm;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_mktiocm(sigvalue=%x)\n", (int) sigvalue);
#endif
stliport_t *portp;
int i, panelnr, panelport;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_initports(brdp=%x)\n", (int) brdp);
#endif
{
unsigned long memconf;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_ecpinit(brdp=%d)\n", (int) brdp);
#endif
static void stli_ecpenable(stlibrd_t *brdp)
{
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_ecpenable(brdp=%x)\n", (int) brdp);
#endif
outb(ECP_ATENABLE, (brdp->iobase + ECP_ATCONFR));
static void stli_ecpdisable(stlibrd_t *brdp)
{
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_ecpdisable(brdp=%x)\n", (int) brdp);
#endif
outb(ECP_ATDISABLE, (brdp->iobase + ECP_ATCONFR));
void *ptr;
unsigned char val;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_ecpgetmemptr(brdp=%x,offset=%x)\n", (int) brdp,
(int) offset);
#endif
printk(KERN_ERR "STALLION: shared memory pointer=%x out of "
"range at line=%d(%d), brd=%d\n",
(int) offset, line, __LINE__, brdp->brdnr);
- ptr = NULL;
+ ptr = 0;
val = 0;
} else {
ptr = brdp->membase + (offset % ECP_ATPAGESIZE);
static void stli_ecpreset(stlibrd_t *brdp)
{
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_ecpreset(brdp=%x)\n", (int) brdp);
#endif
static void stli_ecpintr(stlibrd_t *brdp)
{
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_ecpintr(brdp=%x)\n", (int) brdp);
#endif
outb(0x1, brdp->iobase);
{
unsigned long memconf;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_ecpeiinit(brdp=%x)\n", (int) brdp);
#endif
void *ptr;
unsigned char val;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_ecpeigetmemptr(brdp=%x,offset=%x,line=%d)\n",
(int) brdp, (int) offset, line);
#endif
printk(KERN_ERR "STALLION: shared memory pointer=%x out of "
"range at line=%d(%d), brd=%d\n",
(int) offset, line, __LINE__, brdp->brdnr);
- ptr = NULL;
+ ptr = 0;
val = 0;
} else {
ptr = brdp->membase + (offset % ECP_EIPAGESIZE);
printk(KERN_ERR "STALLION: shared memory pointer=%x out of "
"range at line=%d(%d), brd=%d\n",
(int) offset, line, __LINE__, brdp->brdnr);
- ptr = NULL;
+ ptr = 0;
val = 0;
} else {
ptr = brdp->membase + (offset % ECP_MCPAGESIZE);
static void stli_ecppciinit(stlibrd_t *brdp)
{
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_ecppciinit(brdp=%x)\n", (int) brdp);
#endif
void *ptr;
unsigned char val;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_ecppcigetmemptr(brdp=%x,offset=%x,line=%d)\n",
(int) brdp, (int) offset, line);
#endif
printk(KERN_ERR "STALLION: shared memory pointer=%x out of "
"range at line=%d(%d), board=%d\n",
(int) offset, line, __LINE__, brdp->brdnr);
- ptr = NULL;
+ ptr = 0;
val = 0;
} else {
ptr = brdp->membase + (offset % ECP_PCIPAGESIZE);
{
unsigned long memconf;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_onbinit(brdp=%d)\n", (int) brdp);
#endif
static void stli_onbenable(stlibrd_t *brdp)
{
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_onbenable(brdp=%x)\n", (int) brdp);
#endif
outb((brdp->enabval | ONB_ATENABLE), (brdp->iobase + ONB_ATCONFR));
static void stli_onbdisable(stlibrd_t *brdp)
{
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_onbdisable(brdp=%x)\n", (int) brdp);
#endif
outb((brdp->enabval | ONB_ATDISABLE), (brdp->iobase + ONB_ATCONFR));
{
void *ptr;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_onbgetmemptr(brdp=%x,offset=%x)\n", (int) brdp,
(int) offset);
#endif
printk(KERN_ERR "STALLION: shared memory pointer=%x out of "
"range at line=%d(%d), brd=%d\n",
(int) offset, line, __LINE__, brdp->brdnr);
- ptr = NULL;
+ ptr = 0;
} else {
ptr = brdp->membase + (offset % ONB_ATPAGESIZE);
}
static void stli_onbreset(stlibrd_t *brdp)
{
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_onbreset(brdp=%x)\n", (int) brdp);
#endif
{
unsigned long memconf;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_onbeinit(brdp=%d)\n", (int) brdp);
#endif
static void stli_onbeenable(stlibrd_t *brdp)
{
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_onbeenable(brdp=%x)\n", (int) brdp);
#endif
outb(ONB_EIENABLE, (brdp->iobase + ONB_EICONFR));
static void stli_onbedisable(stlibrd_t *brdp)
{
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_onbedisable(brdp=%x)\n", (int) brdp);
#endif
outb(ONB_EIDISABLE, (brdp->iobase + ONB_EICONFR));
void *ptr;
unsigned char val;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_onbegetmemptr(brdp=%x,offset=%x,line=%d)\n",
(int) brdp, (int) offset, line);
#endif
printk(KERN_ERR "STALLION: shared memory pointer=%x out of "
"range at line=%d(%d), brd=%d\n",
(int) offset, line, __LINE__, brdp->brdnr);
- ptr = NULL;
+ ptr = 0;
val = 0;
} else {
ptr = brdp->membase + (offset % ONB_EIPAGESIZE);
static void stli_onbereset(stlibrd_t *brdp)
{
-#ifdef DEBUG
+#if DEBUG
printk(KERN_ERR "stli_onbereset(brdp=%x)\n", (int) brdp);
#endif
static void stli_bbyinit(stlibrd_t *brdp)
{
-#ifdef DEBUG
+#if DEBUG
printk(KERN_ERR "stli_bbyinit(brdp=%d)\n", (int) brdp);
#endif
void *ptr;
unsigned char val;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_ERR "stli_bbygetmemptr(brdp=%x,offset=%x)\n", (int) brdp,
(int) offset);
#endif
printk(KERN_ERR "STALLION: shared memory pointer=%x out of "
"range at line=%d(%d), brd=%d\n",
(int) offset, line, __LINE__, brdp->brdnr);
- ptr = NULL;
+ ptr = 0;
val = 0;
} else {
ptr = brdp->membase + (offset % BBY_PAGESIZE);
static void stli_bbyreset(stlibrd_t *brdp)
{
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_bbyreset(brdp=%x)\n", (int) brdp);
#endif
static void stli_stalinit(stlibrd_t *brdp)
{
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_stalinit(brdp=%d)\n", (int) brdp);
#endif
{
void *ptr;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_stalgetmemptr(brdp=%x,offset=%x)\n", (int) brdp,
(int) offset);
#endif
printk(KERN_ERR "STALLION: shared memory pointer=%x out of "
"range at line=%d(%d), brd=%d\n",
(int) offset, line, __LINE__, brdp->brdnr);
- ptr = NULL;
+ ptr = 0;
} else {
ptr = brdp->membase + (offset % STAL_PAGESIZE);
}
{
volatile unsigned long *vecp;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_stalreset(brdp=%x)\n", (int) brdp);
#endif
char *name;
int panelnr, nrports;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_initecp(brdp=%x)\n", (int) brdp);
#endif
char *name;
int i;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_initonb(brdp=%x)\n", (int) brdp);
#endif
stliport_t *portp;
int portnr, nrdevs, i, rc;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_startbrd(brdp=%x)\n", (int) brdp);
#endif
static int __init stli_brdinit(stlibrd_t *brdp)
{
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_brdinit(brdp=%x)\n", (int) brdp);
#endif
cdkonbsig_t onbsig, *onbsigp;
int i, foundit;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_eisamemprobe(brdp=%x)\n", (int) brdp);
#endif
if (! foundit) {
brdp->memaddr = 0;
- brdp->membase = NULL;
+ brdp->membase = 0;
printk(KERN_ERR "STALLION: failed to probe shared memory "
"region for %s in EISA slot=%d\n",
stli_brdnames[brdp->brdtype], (brdp->iobase >> 12));
return(0);
}
-static inline int stli_getbrdnr(void)
-{
- int i;
-
- for (i = 0; i < STL_MAXBRDS; i++) {
- if (!stli_brds[i]) {
- if (i >= stli_nrbrds)
- stli_nrbrds = i + 1;
- return i;
- }
- }
- return -1;
-}
-
/*****************************************************************************/
/*
* do is go probing around in the usual places hoping we can find it.
*/
-static inline int stli_findeisabrds(void)
+static inline int stli_findeisabrds()
{
stlibrd_t *brdp;
unsigned int iobase, eid;
int i;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_findeisabrds()\n");
#endif
* Find the next available board number that is free.
*/
+static inline int stli_getbrdnr()
+{
+ int i;
+
+ for (i = 0; (i < STL_MAXBRDS); i++) {
+ if (stli_brds[i] == (stlibrd_t *) NULL) {
+ if (i >= stli_nrbrds)
+ stli_nrbrds = i + 1;
+ return(i);
+ }
+ }
+ return(-1);
+}
+
/*****************************************************************************/
#ifdef CONFIG_PCI
{
stlibrd_t *brdp;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_initpcibrd(brdtype=%d,busnr=%x,devnr=%x)\n",
brdtype, dev->bus->number, dev->devfn);
#endif
}
brdp->brdtype = brdtype;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "%s(%d): BAR[]=%lx,%lx,%lx,%lx\n", __FILE__, __LINE__,
pci_resource_start(devp, 0),
pci_resource_start(devp, 1),
* one as it is found.
*/
-static inline int stli_findpcibrds(void)
+static inline int stli_findpcibrds()
{
struct pci_dev *dev = NULL;
int rc;
-#ifdef DEBUG
+#if DEBUG
printk("stli_findpcibrds()\n");
#endif
* Allocate a new board structure. Fill out the basic info in it.
*/
-static stlibrd_t *stli_allocbrd(void)
+static stlibrd_t *stli_allocbrd()
{
stlibrd_t *brdp;
* can find.
*/
-static inline int stli_initbrds(void)
+static inline int stli_initbrds()
{
stlibrd_t *brdp, *nxtbrdp;
stlconf_t *confp;
int i, j;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_initbrds()\n");
#endif
* the slave image (and debugging :-)
*/
-static ssize_t stli_memread(struct file *fp, char __user *buf, size_t count, loff_t *offp)
+static ssize_t stli_memread(struct file *fp, char *buf, size_t count, loff_t *offp)
{
unsigned long flags;
void *memptr;
stlibrd_t *brdp;
int brdnr, size, n;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_memread(fp=%x,buf=%x,count=%x,offp=%x)\n",
(int) fp, (int) buf, count, (int) offp);
#endif
* the slave image (and debugging :-)
*/
-static ssize_t stli_memwrite(struct file *fp, const char __user *buf, size_t count, loff_t *offp)
+static ssize_t stli_memwrite(struct file *fp, const char *buf, size_t count, loff_t *offp)
{
unsigned long flags;
void *memptr;
stlibrd_t *brdp;
- char __user *chbuf;
+ char *chbuf;
int brdnr, size, n;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_memwrite(fp=%x,buf=%x,count=%x,offp=%x)\n",
(int) fp, (int) buf, count, (int) offp);
#endif
if (fp->f_pos >= brdp->memsize)
return(0);
- chbuf = (char __user *) buf;
+ chbuf = (char *) buf;
size = MIN(count, (brdp->memsize - fp->f_pos));
save_flags(flags);
* Return the board stats structure to user app.
*/
-static int stli_getbrdstats(combrd_t __user *bp)
+static int stli_getbrdstats(combrd_t *bp)
{
stlibrd_t *brdp;
int i;
* what port to get stats for (used through board control device).
*/
-static int stli_getportstats(stliport_t *portp, comstats_t __user *cp)
+static int stli_getportstats(stliport_t *portp, comstats_t *cp)
{
stlibrd_t *brdp;
int rc;
- if (!portp) {
+ if (portp == (stliport_t *) NULL) {
if (copy_from_user(&stli_comstats, cp, sizeof(comstats_t)))
return -EFAULT;
portp = stli_getport(stli_comstats.brd, stli_comstats.panel,
stli_comstats.port);
- if (!portp)
- return -ENODEV;
+ if (portp == (stliport_t *) NULL)
+ return(-ENODEV);
}
brdp = stli_brds[portp->brdnr];
- if (!brdp)
- return -ENODEV;
+ if (brdp == (stlibrd_t *) NULL)
+ return(-ENODEV);
if ((rc = stli_portcmdstats(portp)) < 0)
- return rc;
+ return(rc);
return copy_to_user(cp, &stli_comstats, sizeof(comstats_t)) ?
-EFAULT : 0;
* Clear the port stats structure. We also return it zeroed out...
*/
-static int stli_clrportstats(stliport_t *portp, comstats_t __user *cp)
+static int stli_clrportstats(stliport_t *portp, comstats_t *cp)
{
stlibrd_t *brdp;
int rc;
- if (!portp) {
+ if (portp == (stliport_t *) NULL) {
if (copy_from_user(&stli_comstats, cp, sizeof(comstats_t)))
return -EFAULT;
portp = stli_getport(stli_comstats.brd, stli_comstats.panel,
stli_comstats.port);
- if (!portp)
- return -ENODEV;
+ if (portp == (stliport_t *) NULL)
+ return(-ENODEV);
}
brdp = stli_brds[portp->brdnr];
- if (!brdp)
- return -ENODEV;
+ if (brdp == (stlibrd_t *) NULL)
+ return(-ENODEV);
if (brdp->state & BST_STARTED) {
- if ((rc = stli_cmdwait(brdp, portp, A_CLEARSTATS, NULL, 0, 0)) < 0)
- return rc;
+ if ((rc = stli_cmdwait(brdp, portp, A_CLEARSTATS, 0, 0, 0)) < 0)
+ return(rc);
}
memset(&stli_comstats, 0, sizeof(comstats_t));
if (copy_to_user(cp, &stli_comstats, sizeof(comstats_t)))
return -EFAULT;
- return 0;
+ return(0);
}
/*****************************************************************************/
* Return the entire driver ports structure to a user app.
*/
-static int stli_getportstruct(stliport_t __user *arg)
+static int stli_getportstruct(unsigned long arg)
{
stliport_t *portp;
- if (copy_from_user(&stli_dummyport, arg, sizeof(stliport_t)))
+ if (copy_from_user(&stli_dummyport, (void *)arg, sizeof(stliport_t)))
return -EFAULT;
portp = stli_getport(stli_dummyport.brdnr, stli_dummyport.panelnr,
stli_dummyport.portnr);
- if (!portp)
- return -ENODEV;
- if (copy_to_user(arg, portp, sizeof(stliport_t)))
+ if (portp == (stliport_t *) NULL)
+ return(-ENODEV);
+ if (copy_to_user((void *) arg, portp, sizeof(stliport_t)))
return -EFAULT;
- return 0;
+ return(0);
}
/*****************************************************************************/
* Return the entire driver board structure to a user app.
*/
-static int stli_getbrdstruct(stlibrd_t __user *arg)
+static int stli_getbrdstruct(unsigned long arg)
{
stlibrd_t *brdp;
- if (copy_from_user(&stli_dummybrd, arg, sizeof(stlibrd_t)))
+ if (copy_from_user(&stli_dummybrd, (void *)arg, sizeof(stlibrd_t)))
return -EFAULT;
if ((stli_dummybrd.brdnr < 0) || (stli_dummybrd.brdnr >= STL_MAXBRDS))
- return -ENODEV;
+ return(-ENODEV);
brdp = stli_brds[stli_dummybrd.brdnr];
- if (!brdp)
- return -ENODEV;
- if (copy_to_user(arg, brdp, sizeof(stlibrd_t)))
+ if (brdp == (stlibrd_t *) NULL)
+ return(-ENODEV);
+ if (copy_to_user((void *) arg, brdp, sizeof(stlibrd_t)))
return -EFAULT;
- return 0;
+ return(0);
}
/*****************************************************************************/
{
stlibrd_t *brdp;
int brdnr, rc, done;
- void __user *argp = (void __user *)arg;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_memioctl(ip=%x,fp=%x,cmd=%x,arg=%x)\n",
(int) ip, (int) fp, cmd, (int) arg);
#endif
switch (cmd) {
case COM_GETPORTSTATS:
- rc = stli_getportstats(NULL, argp);
+ rc = stli_getportstats((stliport_t *)NULL, (comstats_t *)arg);
done++;
break;
case COM_CLRPORTSTATS:
- rc = stli_clrportstats(NULL, argp);
+ rc = stli_clrportstats((stliport_t *)NULL, (comstats_t *)arg);
done++;
break;
case COM_GETBRDSTATS:
- rc = stli_getbrdstats(argp);
+ rc = stli_getbrdstats((combrd_t *) arg);
done++;
break;
case COM_READPORT:
- rc = stli_getportstruct(argp);
+ rc = stli_getportstruct(arg);
done++;
break;
case COM_READBOARD:
- rc = stli_getbrdstruct(argp);
+ rc = stli_getbrdstruct(arg);
done++;
break;
}
if (brdnr >= STL_MAXBRDS)
return(-ENODEV);
brdp = stli_brds[brdnr];
- if (!brdp)
+ if (brdp == (stlibrd_t *) NULL)
return(-ENODEV);
if (brdp->state == 0)
return(-ENODEV);
}
#endif
+extern int page_is_ram(unsigned long pagenr);
+
+static inline int page_is_allowed(unsigned long pagenr)
+{
+ #ifdef CONFIG_X86
+ if (pagenr <= 256)
+ return 1;
+ if (!page_is_ram(pagenr))
+ return 1;
+ printk("Access to 0x%lx by %s denied \n", pagenr << PAGE_SHIFT, current->comm);
+ return 0;
+ #else
+ return 1;
+ #endif
+}
+
static inline int range_is_allowed(unsigned long from, unsigned long to)
{
unsigned long cursor;
cursor = from >> PAGE_SHIFT;
- while ((cursor << PAGE_SHIFT) < to) {
- if (!devmem_is_allowed(cursor))
+ while ( (cursor << PAGE_SHIFT) < to) {
+ if (!page_is_allowed(cursor))
return 0;
cursor++;
}
}
#endif
if (!range_is_allowed(realp, realp+count))
- return -EPERM;
+ return -EFAULT;
copied = copy_from_user(p, buf, count);
if (copied) {
ssize_t ret = written + (count - copied);
}
#endif
if (!range_is_allowed(p, p+count))
- return -EPERM;
+ return -EFAULT;
if (copy_to_user(buf, __va(p), count))
return -EFAULT;
read += count;
cursor = vma->vm_pgoff;
while ((cursor << PAGE_SHIFT) < offset + vma->vm_end-vma->vm_start) {
- if (!devmem_is_allowed(cursor))
- return -EPERM;
+ if (!page_is_allowed(cursor))
+ return -EFAULT;
cursor++;
}
return virtr + read;
}
+/*
+ * This function writes to the *virtual* memory as seen by the kernel.
+ */
+static ssize_t write_kmem(struct file * file, const char __user * buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long p = *ppos;
+ ssize_t wrote = 0;
+ ssize_t virtr = 0;
+ ssize_t written;
+ char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
+
+ return -EPERM;
+
+ if (p < (unsigned long) high_memory) {
+
+ wrote = count;
+ if (count > (unsigned long) high_memory - p)
+ wrote = (unsigned long) high_memory - p;
+
+ written = do_write_mem((void*)p, p, buf, wrote, ppos);
+ if (written != wrote)
+ return written;
+ wrote = written;
+ p += wrote;
+ buf += wrote;
+ count -= wrote;
+ }
+
+ if (count > 0) {
+ kbuf = (char *)__get_free_page(GFP_KERNEL);
+ if (!kbuf)
+ return wrote ? wrote : -ENOMEM;
+ while (count > 0) {
+ int len = count;
+
+ if (len > PAGE_SIZE)
+ len = PAGE_SIZE;
+ if (len) {
+ written = copy_from_user(kbuf, buf, len);
+ if (written) {
+ ssize_t ret;
+
+ free_page((unsigned long)kbuf);
+ ret = wrote + virtr + (len - written);
+ return ret ? ret : -EFAULT;
+ }
+ }
+ len = vwrite(kbuf, (char *)p, len);
+ count -= len;
+ buf += len;
+ virtr += len;
+ p += len;
+ }
+ free_page((unsigned long)kbuf);
+ }
+
+ *ppos = p;
+ return virtr + wrote;
+}
+
#if defined(CONFIG_ISA) || !defined(__mc68000__)
static ssize_t read_port(struct file * file, char __user * buf,
size_t count, loff_t *ppos)
static struct file_operations kmem_fops = {
.llseek = memory_lseek,
.read = read_kmem,
+ .write = write_kmem,
.mmap = mmap_kmem,
.open = open_kmem,
};
"CP-204J series",
};
-#ifdef CONFIG_PCI
static struct pci_device_id moxa_pcibrds[] = {
{ PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_C218, PCI_ANY_ID, PCI_ANY_ID,
0, 0, MOXA_BOARD_C218_PCI },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, moxa_pcibrds);
-#endif /* CONFIG_PCI */
typedef struct _moxa_isa_board_conf {
int boardType;
static int verbose = 0;
static int ttymajor = MOXAMAJOR;
/* Variables for insmod */
-#ifdef MODULE
static int baseaddr[] = {0, 0, 0, 0};
static int type[] = {0, 0, 0, 0};
static int numports[] = {0, 0, 0, 0};
-#endif
MODULE_AUTHOR("William Chen");
MODULE_DESCRIPTION("MOXA Intellio Family Multiport Board Device Driver");
/*
* static functions:
*/
+static int moxa_get_PCI_conf(struct pci_dev *, int, moxa_board_conf *);
static void do_moxa_softint(void *);
static int moxa_open(struct tty_struct *, struct file *);
static void moxa_close(struct tty_struct *, struct file *);
static void MoxaPortTxEnable(int);
static int MoxaPortResetBrkCnt(int);
static void MoxaPortSendBreak(int, int);
-static int moxa_get_serial_info(struct moxa_str *, struct serial_struct __user *);
-static int moxa_set_serial_info(struct moxa_str *, struct serial_struct __user *);
+static int moxa_get_serial_info(struct moxa_str *, struct serial_struct *);
+static int moxa_set_serial_info(struct moxa_str *, struct serial_struct *);
static void MoxaSetFifo(int port, int enable);
static struct tty_operations moxa_ops = {
.tiocmset = moxa_tiocmset,
};
-#ifdef CONFIG_PCI
-static int moxa_get_PCI_conf(struct pci_dev *p, int board_type, moxa_board_conf * board)
-{
- board->baseAddr = pci_resource_start (p, 2);
- board->boardType = board_type;
- switch (board_type) {
- case MOXA_BOARD_C218_ISA:
- case MOXA_BOARD_C218_PCI:
- board->numPorts = 8;
- break;
-
- case MOXA_BOARD_CP204J:
- board->numPorts = 4;
- break;
- default:
- board->numPorts = 0;
- break;
- }
- board->busType = MOXA_BUS_TYPE_PCI;
- board->pciInfo.busNum = p->bus->number;
- board->pciInfo.devNum = p->devfn >> 3;
-
- return (0);
-}
-#endif /* CONFIG_PCI */
-
static int __init moxa_init(void)
{
int i, numBoards;
moxaDriver->flags = TTY_DRIVER_REAL_RAW;
tty_set_operations(moxaDriver, &moxa_ops);
- moxaXmitBuff = NULL;
+ moxaXmitBuff = 0;
for (i = 0, ch = moxaChannels; i < MAX_PORTS; i++, ch++) {
ch->type = PORT_16550A;
ch->port = i;
INIT_WORK(&ch->tqueue, do_moxa_softint, ch);
- ch->tty = NULL;
+ ch->tty = 0;
ch->close_delay = 5 * HZ / 10;
ch->closing_wait = 30 * HZ;
ch->count = 0;
module_init(moxa_init);
module_exit(moxa_exit);
+static int moxa_get_PCI_conf(struct pci_dev *p, int board_type, moxa_board_conf * board)
+{
+ board->baseAddr = pci_resource_start (p, 2);
+ board->boardType = board_type;
+ switch (board_type) {
+ case MOXA_BOARD_C218_ISA:
+ case MOXA_BOARD_C218_PCI:
+ board->numPorts = 8;
+ break;
+
+ case MOXA_BOARD_CP204J:
+ board->numPorts = 4;
+ break;
+ default:
+ board->numPorts = 0;
+ break;
+ }
+ board->busType = MOXA_BUS_TYPE_PCI;
+ board->pciInfo.busNum = p->bus->number;
+ board->pciInfo.devNum = p->devfn >> 3;
+
+ return (0);
+}
+
static void do_moxa_softint(void *private_)
{
struct moxa_str *ch = (struct moxa_str *) private_;
tty->ldisc.flush_buffer(tty);
tty->closing = 0;
ch->event = 0;
- ch->tty = NULL;
+ ch->tty = 0;
if (ch->blocked_open) {
if (ch->close_delay) {
set_current_state(TASK_INTERRUPTIBLE);
{
struct moxa_str *ch = (struct moxa_str *) tty->driver_data;
register int port;
- void __user *argp = (void __user *)arg;
int retval;
port = PORTNO(tty);
MoxaPortSendBreak(ch->port, arg);
return (0);
case TIOCGSOFTCAR:
- return put_user(C_CLOCAL(tty) ? 1 : 0, (unsigned long __user *) argp);
+ return put_user(C_CLOCAL(tty) ? 1 : 0, (unsigned long *) arg);
case TIOCSSOFTCAR:
- if(get_user(retval, (unsigned long __user *) argp))
+ if(get_user(retval, (unsigned long *) arg))
return -EFAULT;
arg = retval;
tty->termios->c_cflag = ((tty->termios->c_cflag & ~CLOCAL) |
ch->asyncflags |= ASYNC_CHECK_CD;
return (0);
case TIOCGSERIAL:
- return moxa_get_serial_info(ch, argp);
+ return (moxa_get_serial_info(ch, (struct serial_struct *) arg));
case TIOCSSERIAL:
- return moxa_set_serial_info(ch, argp);
+ return (moxa_set_serial_info(ch, (struct serial_struct *) arg));
default:
retval = MoxaDriverIoctl(cmd, arg, port);
}
ch->event = 0;
ch->count = 0;
ch->asyncflags &= ~ASYNC_NORMAL_ACTIVE;
- ch->tty = NULL;
+ ch->tty = 0;
wake_up_interruptible(&ch->open_wait);
}
unsigned char *charptr, *flagptr;
unsigned long flags;
- ts = NULL;
+ ts = 0;
tp = ch->tty;
if (tp)
ts = tp->termios;
static void moxafunc(unsigned long, int, ushort);
static void wait_finish(unsigned long);
static void low_water_check(unsigned long);
-static int moxaloadbios(int, unsigned char __user *, int);
+static int moxaloadbios(int, unsigned char *, int);
static int moxafindcard(int);
-static int moxaload320b(int, unsigned char __user *, int);
-static int moxaloadcode(int, unsigned char __user *, int);
+static int moxaload320b(int, unsigned char *, int);
+static int moxaloadcode(int, unsigned char *, int);
static int moxaloadc218(int, unsigned long, int);
static int moxaloadc320(int, unsigned long, int, int *);
};
struct dl_str {
- char __user *buf;
+ char *buf;
int len;
int cardno;
};
int i;
int status;
int MoxaPortTxQueue(int), MoxaPortRxQueue(int);
- void __user *argp = (void __user *)arg;
if (port == QueryPort) {
if ((cmd != MOXA_GET_CONF) && (cmd != MOXA_INIT_DRIVER) &&
}
switch (cmd) {
case MOXA_GET_CONF:
- if(copy_to_user(argp, &moxa_boards, MAX_BOARDS * sizeof(moxa_board_conf)))
+ if(copy_to_user((void *)arg, &moxa_boards, MAX_BOARDS * sizeof(moxa_board_conf)))
return -EFAULT;
return (0);
case MOXA_INIT_DRIVER:
return (0);
case MOXA_GETDATACOUNT:
moxaLog.tick = jiffies;
- if(copy_to_user(argp, &moxaLog, sizeof(mon_st)))
+ if(copy_to_user((void *)arg, &moxaLog, sizeof(mon_st)))
return -EFAULT;
return (0);
case MOXA_FLUSH_QUEUE:
temp_queue[i].outq = MoxaPortTxQueue(i);
}
}
- if(copy_to_user(argp, temp_queue, sizeof(struct moxaq_str) * MAX_PORTS))
+ if(copy_to_user((void *)arg, temp_queue, sizeof(struct moxaq_str) * MAX_PORTS))
return -EFAULT;
return (0);
case MOXA_GET_OQUEUE:
i = MoxaPortTxQueue(port);
- return put_user(i, (unsigned long __user *)argp);
+ return put_user(i, (unsigned long *) arg);
case MOXA_GET_IQUEUE:
i = MoxaPortRxQueue(port);
- return put_user(i, (unsigned long __user *)argp);
+ return put_user(i, (unsigned long *) arg);
case MOXA_GET_MAJOR:
- if(copy_to_user(argp, &ttymajor, sizeof(int)))
+ if(copy_to_user((void *)arg, &ttymajor, sizeof(int)))
return -EFAULT;
return 0;
case MOXA_GET_CUMAJOR:
i = 0;
- if(copy_to_user(argp, &i, sizeof(int)))
+ if(copy_to_user((void *)arg, &i, sizeof(int)))
return -EFAULT;
return 0;
case MOXA_GETMSTATUS:
else
GMStatus[i].cflag = moxaChannels[i].tty->termios->c_cflag;
}
- if(copy_to_user(argp, GMStatus, sizeof(struct mxser_mstatus) * MAX_PORTS))
+ if(copy_to_user((void *)arg, GMStatus, sizeof(struct mxser_mstatus) * MAX_PORTS))
return -EFAULT;
return 0;
default:
break;
}
- if(copy_from_user(&dltmp, argp, sizeof(struct dl_str)))
+ if(copy_from_user(&dltmp, (void *)arg, sizeof(struct dl_str)))
return -EFAULT;
if(dltmp.cardno < 0 || dltmp.cardno >= MAX_BOARDS)
return -EINVAL;
}
static int moxa_get_serial_info(struct moxa_str *info,
- struct serial_struct __user *retinfo)
+ struct serial_struct *retinfo)
{
struct serial_struct tmp;
+ if (!retinfo)
+ return (-EFAULT);
memset(&tmp, 0, sizeof(tmp));
tmp.type = info->type;
tmp.line = info->port;
static int moxa_set_serial_info(struct moxa_str *info,
- struct serial_struct __user *new_info)
+ struct serial_struct *new_info)
{
struct serial_struct new_serial;
}
}
-static int moxaloadbios(int cardno, unsigned char __user *tmp, int len)
+static int moxaloadbios(int cardno, unsigned char *tmp, int len)
{
unsigned long baseAddr;
int i;
return (0);
}
-static int moxaload320b(int cardno, unsigned char __user *tmp, int len)
+static int moxaload320b(int cardno, unsigned char * tmp, int len)
{
unsigned long baseAddr;
int i;
return (0);
}
-static int moxaloadcode(int cardno, unsigned char __user *tmp, int len)
+static int moxaloadcode(int cardno, unsigned char * tmp, int len)
{
unsigned long baseAddr, ofsAddr;
int retval, port, i;
unsigned int retval = 0;
PRINTK_3(TRACE_MWAVE,
- "mwavedd::mwave_open, entry inode %p file %p\n",
- inode, file);
+ "mwavedd::mwave_open, entry inode %x file %x\n",
+ (int) inode, (int) file);
PRINTK_2(TRACE_MWAVE,
"mwavedd::mwave_open, exit return retval %x\n", retval);
unsigned int retval = 0;
PRINTK_3(TRACE_MWAVE,
- "mwavedd::mwave_close, entry inode %p file %p\n",
- inode, file);
+ "mwavedd::mwave_close, entry inode %x file %x\n",
+ (int) inode, (int) file);
PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_close, exit retval %x\n",
retval);
void __user *arg = (void __user *)ioarg;
PRINTK_5(TRACE_MWAVE,
- "mwavedd::mwave_ioctl, entry inode %p file %p cmd %x arg %x\n",
- inode, file, iocmd, (int) ioarg);
+ "mwavedd::mwave_ioctl, entry inode %x file %x cmd %x arg %x\n",
+ (int) inode, (int) file, iocmd, (int) ioarg);
switch (iocmd) {
loff_t * ppos)
{
PRINTK_5(TRACE_MWAVE,
- "mwavedd::mwave_read entry file %p, buf %p, count %zx ppos %p\n",
+ "mwavedd::mwave_read entry file %p, buf %p, count %x ppos %p\n",
file, buf, count, ppos);
return -EINVAL;
{
PRINTK_5(TRACE_MWAVE,
"mwavedd::mwave_write entry file %p, buf %p,"
- " count %zx ppos %p\n",
+ " count %x ppos %p\n",
file, buf, count, ppos);
return -EINVAL;
static irqreturn_t UartInterrupt(int irq, void *dev_id, struct pt_regs *regs)
{
PRINTK_3(TRACE_TP3780I,
- "tp3780i::UartInterrupt entry irq %x dev_id %p\n", irq, dev_id);
+ "tp3780i::UartInterrupt entry irq %x dev_id %x\n", irq, (int) dev_id);
return IRQ_HANDLED;
}
unsigned short usIPCSource = 0, usIsolationMask, usPCNum;
PRINTK_3(TRACE_TP3780I,
- "tp3780i::DspInterrupt entry irq %x dev_id %p\n", irq, dev_id);
+ "tp3780i::DspInterrupt entry irq %x dev_id %x\n", irq, (int) dev_id);
if (dsp3780I_GetIPCSource(usDspBaseIO, &usIPCSource) == 0) {
PRINTK_2(TRACE_TP3780I,
pSettings->bPllBypass = TP_CFG_PllBypass;
pSettings->usChipletEnable = TP_CFG_ChipletEnable;
- if (request_irq(pSettings->usUartIrq, &UartInterrupt, 0, "mwave_uart", NULL)) {
+ if (request_irq(pSettings->usUartIrq, &UartInterrupt, 0, "mwave_uart", 0)) {
PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: Could not get UART IRQ %x\n", pSettings->usUartIrq);
goto exit_cleanup;
} else { /* no conflict just release */
free_irq(pSettings->usUartIrq, NULL);
}
- if (request_irq(pSettings->usDspIrq, &DspInterrupt, 0, "mwave_3780i", NULL)) {
+ if (request_irq(pSettings->usDspIrq, &DspInterrupt, 0, "mwave_3780i", 0)) {
PRINTK_ERROR("tp3780i::tp3780I_EnableDSP: Error: Could not get 3780i IRQ %x\n", pSettings->usDspIrq);
goto exit_cleanup;
} else {
#define MOXA_GET_CUMAJOR (MOXA + 64)
#define MOXA_GETMSTATUS (MOXA + 65)
-#ifdef CONFIG_PCI
static struct pci_device_id mxser_pcibrds[] = {
{ PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_C168, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
MXSER_BOARD_C168_PCI },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, mxser_pcibrds);
-#endif /* CONFIG_PCI */
static int ioaddr[MXSER_BOARDS];
static int ttymajor = MXSERMAJOR;
static void mxser_getcfg(int board, struct mxser_hwconf *hwconf);
static int mxser_get_ISA_conf(int, struct mxser_hwconf *);
+static int mxser_get_PCI_conf(struct pci_dev *, int, struct mxser_hwconf *);
static void mxser_do_softint(void *);
static int mxser_open(struct tty_struct *, struct file *);
static void mxser_close(struct tty_struct *, struct file *);
static int mxser_startup(struct mxser_struct *);
static void mxser_shutdown(struct mxser_struct *);
static int mxser_change_speed(struct mxser_struct *, struct termios *old_termios);
-static int mxser_get_serial_info(struct mxser_struct *, struct serial_struct __user *);
-static int mxser_set_serial_info(struct mxser_struct *, struct serial_struct __user *);
-static int mxser_get_lsr_info(struct mxser_struct *, unsigned int __user *);
+static int mxser_get_serial_info(struct mxser_struct *, struct serial_struct *);
+static int mxser_set_serial_info(struct mxser_struct *, struct serial_struct *);
+static int mxser_get_lsr_info(struct mxser_struct *, unsigned int *);
static void mxser_send_break(struct mxser_struct *, int);
static int mxser_tiocmget(struct tty_struct *, struct file *);
static int mxser_tiocmset(struct tty_struct *, struct file *, unsigned int, unsigned int);
mxsercfg[board] = *hwconf;
}
-#ifdef CONFIG_PCI
static int mxser_get_PCI_conf(struct pci_dev *pdev, int board_type, struct mxser_hwconf *hwconf)
{
int i;
}
return (0);
}
-#endif /* CONFIG_PCI */
static struct tty_operations mxser_ops = {
.open = mxser_open,
tty->ldisc.flush_buffer(tty);
tty->closing = 0;
info->event = 0;
- info->tty = NULL;
+ info->tty = 0;
if (info->blocked_open) {
if (info->close_delay) {
set_current_state(TASK_INTERRUPTIBLE);
struct mxser_struct *info = (struct mxser_struct *) tty->driver_data;
int retval;
struct async_icount cprev, cnow; /* kernel counter temps */
- struct serial_icounter_struct __user *p_cuser;
+ struct serial_icounter_struct *p_cuser; /* user space */
unsigned long templ;
- void __user *argp = (void __user *)arg;
if (PORTNO(tty) == MXSER_PORTS)
return (mxser_ioctl_special(cmd, arg));
mxser_send_break(info, arg ? arg * (HZ / 10) : HZ / 4);
return (0);
case TIOCGSOFTCAR:
- return put_user(C_CLOCAL(tty) ? 1 : 0, (unsigned long __user *)argp);
+ return put_user(C_CLOCAL(tty) ? 1 : 0, (unsigned long *) arg);
case TIOCSSOFTCAR:
- if(get_user(templ, (unsigned long __user *) arg))
+ if(get_user(templ, (unsigned long *) arg))
return -EFAULT;
arg = templ;
tty->termios->c_cflag = ((tty->termios->c_cflag & ~CLOCAL) |
(arg ? CLOCAL : 0));
return (0);
case TIOCGSERIAL:
- return mxser_get_serial_info(info, argp);
+ return (mxser_get_serial_info(info, (struct serial_struct *) arg));
case TIOCSSERIAL:
- return mxser_set_serial_info(info, argp);
+ return (mxser_set_serial_info(info, (struct serial_struct *) arg));
case TIOCSERGETLSR: /* Get line status register */
- return mxser_get_lsr_info(info, argp);
+ return (mxser_get_lsr_info(info, (unsigned int *) arg));
/*
* Wait for any of the 4 modem inputs (DCD,RI,DSR,CTS) to change
* - mask passed in arg for lines of interest
cli();
cnow = info->icount;
restore_flags(flags);
- p_cuser = argp;
+ p_cuser = (struct serial_icounter_struct *) arg;
if(put_user(cnow.cts, &p_cuser->cts))
return -EFAULT;
if(put_user(cnow.dsr, &p_cuser->dsr))
return -EFAULT;
return put_user(cnow.dcd, &p_cuser->dcd);
case MOXA_HighSpeedOn:
- return put_user(info->baud_base != 115200 ? 1 : 0, (int __user *)argp);
+ return put_user(info->baud_base != 115200 ? 1 : 0, (int *) arg);
default:
return (-ENOIOCTLCMD);
}
static int mxser_ioctl_special(unsigned int cmd, unsigned long arg)
{
int i, result, status;
- void __user *argp = (void __user *)arg;
switch (cmd) {
case MOXA_GET_CONF:
- if(copy_to_user(argp, mxsercfg,
+ if(copy_to_user((struct mxser_hwconf *) arg, mxsercfg,
sizeof(struct mxser_hwconf) * 4))
return -EFAULT;
return 0;
case MOXA_GET_MAJOR:
- if(copy_to_user(argp, &ttymajor, sizeof(int)))
+ if(copy_to_user((int *) arg, &ttymajor, sizeof(int)))
return -EFAULT;
return 0;
case MOXA_GET_CUMAJOR:
result = 0;
- if(copy_to_user(argp, &result, sizeof(int)))
+ if(copy_to_user((int *) arg, &result, sizeof(int)))
return -EFAULT;
return 0;
if (mxvar_table[i].base)
result |= (1 << i);
}
- return put_user(result, (unsigned long __user *) argp);
+ return put_user(result, (unsigned long *) arg);
case MOXA_GETDATACOUNT:
- if (copy_to_user(argp, &mxvar_log, sizeof(mxvar_log)))
+ if(copy_to_user((struct mxser_log *) arg, &mxvar_log, sizeof(mxvar_log)))
return -EFAULT;
return (0);
case MOXA_GETMSTATUS:
else
GMStatus[i].cts = 0;
}
- if(copy_to_user(argp, GMStatus,
+ if(copy_to_user((struct mxser_mstatus *) arg, GMStatus,
sizeof(struct mxser_mstatus) * MXSER_PORTS))
return -EFAULT;
return 0;
info->event = 0;
info->count = 0;
info->flags &= ~ASYNC_NORMAL_ACTIVE;
- info->tty = NULL;
+ info->tty = 0;
wake_up_interruptible(&info->open_wait);
}
int pass_counter = 0;
int handled = 0;
- port = NULL;
+ port = 0;
for (i = 0; i < MXSER_BOARDS; i++) {
if (dev_id == &(mxvar_table[i * MXSER_PORTS_PER_BOARD])) {
port = dev_id;
/*
* and set the speed of the serial port
*/
- mxser_change_speed(info, NULL);
+ mxser_change_speed(info, 0);
info->flags |= ASYNC_INITIALIZED;
restore_flags(flags);
*/
if (info->xmit_buf) {
free_page((unsigned long) info->xmit_buf);
- info->xmit_buf = NULL;
+ info->xmit_buf = 0;
}
info->IER = 0;
outb(0x00, info->base + UART_IER); /* disable all intrs */
* ------------------------------------------------------------
*/
static int mxser_get_serial_info(struct mxser_struct *info,
- struct serial_struct __user *retinfo)
+ struct serial_struct *retinfo)
{
struct serial_struct tmp;
}
static int mxser_set_serial_info(struct mxser_struct *info,
- struct serial_struct __user *new_info)
+ struct serial_struct *new_info)
{
struct serial_struct new_serial;
unsigned int flags;
if (info->flags & ASYNC_INITIALIZED) {
if (flags != (info->flags & ASYNC_SPD_MASK)) {
- mxser_change_speed(info, NULL);
+ mxser_change_speed(info, 0);
}
} else
retval = mxser_startup(info);
* transmit holding register is empty. This functionality
* allows an RS485 driver to be written in user space.
*/
-static int mxser_get_lsr_info(struct mxser_struct *info, unsigned int __user *value)
+static int mxser_get_lsr_info(struct mxser_struct *info, unsigned int *value)
{
unsigned char status;
unsigned int result;
#endif
tty->disc_data = NULL;
if (tty == n_hdlc->backup_tty)
- n_hdlc->backup_tty = NULL;
+ n_hdlc->backup_tty = 0;
if (tty != n_hdlc->tty)
return;
if (n_hdlc->backup_tty) {
struct n_hdlc *n_hdlc = kmalloc(sizeof(*n_hdlc), GFP_KERNEL);
if (!n_hdlc)
- return NULL;
+ return 0;
memset(n_hdlc, 0, sizeof(*n_hdlc));
static inline unsigned char *alloc_buf(void)
{
+ unsigned char *p;
int prio = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
- if (PAGE_SIZE != N_TTY_BUF_SIZE)
- return kmalloc(N_TTY_BUF_SIZE, prio);
- else
- return (unsigned char *)__get_free_page(prio);
+ if (PAGE_SIZE != N_TTY_BUF_SIZE) {
+ p = kmalloc(N_TTY_BUF_SIZE, prio);
+ if (p)
+ memset(p, 0, N_TTY_BUF_SIZE);
+ } else
+ p = (unsigned char *)get_zeroed_page(prio);
+
+ return p;
}
static inline void free_buf(unsigned char *buf)
NVRAM_MINOR);
goto out;
}
- if (!create_proc_read_entry("driver/nvram", 0, NULL, nvram_read_proc,
+ if (!create_proc_read_entry("driver/nvram", 0, 0, nvram_read_proc,
NULL)) {
printk(KERN_ERR "nvram: can't create /proc/driver/nvram\n");
ret = -ENOMEM;
static void __exit
nvram_cleanup_module(void)
{
- remove_proc_entry("driver/nvram", NULL);
+ remove_proc_entry("driver/nvram", 0);
misc_deregister(&nvram_dev);
}
* device at any one time.
*/
-static int button_read (struct file *filp, char __user *buffer,
+static int button_read (struct file *filp, char *buffer,
size_t count, loff_t *ppos)
{
interruptible_sleep_on (&button_wait_queue);
static void button_sequence_finished (unsigned long parameters);
static irqreturn_t button_handler (int irq, void *dev_id, struct pt_regs *regs);
+static int button_read (struct file *filp, char *buffer,
+ size_t count, loff_t *ppos);
int button_init (void);
int button_add_callback (void (*callback) (void), int count);
int button_del_callback (void (*callback) (void));
static void kick_open(void);
static int get_flash_id(void);
static int erase_block(int nBlock);
-static int write_block(unsigned long p, const char __user *buf, int count);
+static int write_block(unsigned long p, const char *buf, int count);
+static int flash_ioctl(struct inode *inodep, struct file *filep, unsigned int cmd, unsigned long arg);
+static ssize_t flash_read(struct file *file, char *buf, size_t count, loff_t * ppos);
+static ssize_t flash_write(struct file *file, const char *buf, size_t count, loff_t * ppos);
+static loff_t flash_llseek(struct file *file, loff_t offset, int orig);
#define KFLASH_SIZE 1024*1024 //1 Meg
#define KFLASH_SIZE4 4*1024*1024 //4 Meg
return 0;
}
-static ssize_t flash_read(struct file *file, char __user *buf, size_t size,
- loff_t *ppos)
+static ssize_t flash_read(struct file *file, char *buf, size_t size, loff_t * ppos)
{
unsigned long p = *ppos;
unsigned int count = size;
int ret = 0;
if (flashdebug)
- printk(KERN_DEBUG "flash_read: flash_read: offset=0x%lX, "
- "buffer=%p, count=0x%X.\n", p, buf, count);
+ printk(KERN_DEBUG "flash_read: flash_read: offset=0x%lX, buffer=%p, count=0x%X.\n",
+ p, buf, count);
if (count)
ret = -ENXIO;
return ret;
}
-static ssize_t flash_write(struct file *file, const char __user *buf,
- size_t size, loff_t * ppos)
+static ssize_t flash_write(struct file *file, const char *buf, size_t size, loff_t * ppos)
{
unsigned long p = *ppos;
unsigned int count = size;
break;
}
if (flashdebug)
- printk(KERN_DEBUG "flash_write: writing offset %lX, "
- "from buf %p, bytes left %X.\n", p, buf,
- count - written);
+ printk(KERN_DEBUG "flash_write: writing offset %lX, from buf "
+ "%p, bytes left %X.\n", p, buf, count - written);
/*
* write_block will limit write to space left in this block
/*
* write_block will limit number of bytes written to the space in this block
*/
-static int write_block(unsigned long p, const char __user *buf, int count)
+static int write_block(unsigned long p, const char *buf, int count)
{
volatile unsigned int c1;
volatile unsigned int c2;
#define CHA 0x00 /* channel A offset */
#define CHB 0x40 /* channel B offset */
-
-/*
- * FIXME: PPC has PVR defined in asm/reg.h. For now we just undef it.
- */
-#undef PVR
#define RXFIFO 0
#define TXFIFO 0
static BOOLEAN wait_command_complete(MGSLPC_INFO *info, unsigned char channel)
{
int i = 0;
+ unsigned char status;
/* wait for command completion */
- while (read_reg(info, (unsigned char)(channel+STAR)) & BIT2) {
+ while ((status = read_reg(info, (unsigned char)(channel+STAR)) & BIT2)) {
udelay(1);
if (i++ == 1000)
return FALSE;
} else {
time = jiffies;
}
-#elif defined (__sparc_v9__)
- unsigned long tick = tick_ops->get_tick();
-
- time = (unsigned int) tick;
- num ^= (tick >> 32UL);
#else
time = jiffies;
#endif
}
static int proc_do_poolsize(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
int ret;
sysctl_poolsize = random_state->poolinfo.POOLBYTES;
- ret = proc_dointvec(table, write, filp, buffer, lenp, ppos);
+ ret = proc_dointvec(table, write, filp, buffer, lenp);
if (ret || !write ||
(sysctl_poolsize == random_state->poolinfo.POOLBYTES))
return ret;
* sysctl system call, it is returned as 16 bytes of binary data.
*/
static int proc_do_uuid(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
ctl_table fake_table;
unsigned char buf[64], tmp_uuid[16], *uuid;
fake_table.data = buf;
fake_table.maxlen = sizeof(buf);
- return proc_dostring(&fake_table, write, filp, buffer, lenp, ppos);
+ return proc_dostring(&fake_table, write, filp, buffer, lenp);
}
static int uuid_strategy(ctl_table *table, int __user *name, int nlen,
return (cookie - tmp[17]) & COOKIEMASK; /* Leaving the data behind */
}
#endif
-
-/*
- * Get a random word:
- */
-unsigned int get_random_int(void)
-{
- unsigned int val = 0;
-
- if (!exec_shield_randomize)
- return 0;
-
-#ifdef CONFIG_X86_HAS_TSC
- rdtscl(val);
-#endif
- val += current->pid + jiffies + (int)&val;
-
- /*
- * Use IP's RNG. It suits our purpose perfectly: it re-keys itself
- * every second, from the entropy pool (and thus creates a limited
- * drain on it), and uses halfMD4Transform within the second. We
- * also spice it with the TSC (if available), jiffies, PID and the
- * stack address:
- */
- return secure_ip_id(val);
-}
-
-unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len)
-{
- unsigned long range = end - len - start;
- if (end <= start + len)
- return 0;
- return PAGE_ALIGN(get_random_int() % range + start);
-}
tty->ldisc.flush_buffer(tty);
tty->closing = 0;
port->event = 0;
- port->tty = NULL;
+ port->tty = 0;
if (port->blocked_open) {
if (port->close_delay) {
current->state = TASK_INTERRUPTIBLE;
}
static inline int rc_set_serial_info(struct riscom_port * port,
- struct serial_struct __user * newinfo)
+ struct serial_struct * newinfo)
{
struct serial_struct tmp;
struct riscom_board *bp = port_Board(port);
}
static inline int rc_get_serial_info(struct riscom_port * port,
- struct serial_struct __user *retinfo)
+ struct serial_struct * retinfo)
{
struct serial_struct tmp;
struct riscom_board *bp = port_Board(port);
{
struct riscom_port *port = (struct riscom_port *)tty->driver_data;
- void __user *argp = (void __user *)arg;
int retval;
if (rc_paranoia_check(port, tty->name, "rc_ioctl"))
rc_send_break(port, arg ? arg*(HZ/10) : HZ/4);
break;
case TIOCGSOFTCAR:
- return put_user(C_CLOCAL(tty) ? 1 : 0, (unsigned __user *)argp);
+ return put_user(C_CLOCAL(tty) ? 1 : 0, (unsigned int *) arg);
case TIOCSSOFTCAR:
- if (get_user(arg,(unsigned __user *) argp))
+ if (get_user(arg,(unsigned int *) arg))
return -EFAULT;
tty->termios->c_cflag =
((tty->termios->c_cflag & ~CLOCAL) |
(arg ? CLOCAL : 0));
break;
case TIOCGSERIAL:
- return rc_get_serial_info(port, argp);
+ return rc_get_serial_info(port, (struct serial_struct *) arg);
case TIOCSSERIAL:
- return rc_set_serial_info(port, argp);
+ return rc_set_serial_info(port, (struct serial_struct *) arg);
default:
return -ENOIOCTLCMD;
}
port->event = 0;
port->count = 0;
port->flags &= ~ASYNC_NORMAL_ACTIVE;
- port->tty = NULL;
+ port->tty = 0;
wake_up_interruptible(&port->open_wait);
}
unsigned m = iminor(file->f_dentry->d_inode);
size_t i;
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
for (i = 0; i < len; ++i) {
char c;
if (get_user(c, data+i))
unsigned m = iminor(file->f_dentry->d_inode);
int value;
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
value = scx200_gpio_get(m);
if (put_user(value ? '1' : '0', buf))
return -EFAULT;
unsigned m = iminor(inode);
if (m > 63)
return -EINVAL;
- return nonseekable_open(inode, file);
+ return 0;
}
static int scx200_gpio_release(struct inode *inode, struct file *file)
/************************* End of Includes **************************/
/***************************** Prototypes ***************************/
+/* Helper functions */
+static __inline__ volatile struct a2232status *a2232stat(unsigned int board,
+ unsigned int portonboard);
+static __inline__ volatile struct a2232memory *a2232mem (unsigned int board);
+static __inline__ void a2232_receive_char( struct a2232_port *port,
+ int ch, int err );
/* The interrupt service routine */
static irqreturn_t a2232_vbl_inter(int irq, void *data, struct pt_regs *fp);
/* Initialize the port structures */
static struct zorro_dev *zd_a2232[MAX_A2232_BOARDS];
/***************************** End of Global variables **************/
-/* Helper functions */
-
-static inline volatile struct a2232memory *a2232mem(unsigned int board)
-{
- return (volatile struct a2232memory *)ZTWO_VADDR(zd_a2232[board]->resource.start);
-}
-
-static inline volatile struct a2232status *a2232stat(unsigned int board,
- unsigned int portonboard)
-{
- volatile struct a2232memory *mem = a2232mem(board);
- return &(mem->Status[portonboard]);
-}
-
-static inline void a2232_receive_char(struct a2232_port *port, int ch, int err)
-{
-/* Mostly stolen from other drivers.
- Maybe one could implement a more efficient version by not only
- transferring one character at a time.
-*/
- struct tty_struct *tty = port->gs.tty;
-
- if (tty->flip.count >= TTY_FLIPBUF_SIZE)
- return;
-
- tty->flip.count++;
-
-#if 0
- switch(err) {
- case TTY_BREAK:
- break;
- case TTY_PARITY:
- break;
- case TTY_OVERRUN:
- break;
- case TTY_FRAME:
- break;
- }
-#endif
-
- *tty->flip.flag_buf_ptr++ = err;
- *tty->flip.char_buf_ptr++ = ch;
- tty_flip_buffer_push(tty);
-}
-
/***************************** Functions ****************************/
/*** BEGIN OF REAL_DRIVER FUNCTIONS ***/
}
/*** END OF FUNCTIONS EXPECTED BY TTY DRIVER STRUCTS ***/
+static __inline__ volatile struct a2232status *a2232stat(unsigned int board, unsigned int portonboard)
+{
+ volatile struct a2232memory *mem = a2232mem(board);
+ return &(mem->Status[portonboard]);
+}
+
+static __inline__ volatile struct a2232memory *a2232mem (unsigned int board)
+{
+ return (volatile struct a2232memory *) ZTWO_VADDR( zd_a2232[board]->resource.start );
+}
+
+static __inline__ void a2232_receive_char( struct a2232_port *port,
+ int ch, int err )
+{
+/* Mostly stolen from other drivers.
+ Maybe one could implement a more efficient version by not only
+ transferring one character at a time.
+*/
+ struct tty_struct *tty = port->gs.tty;
+
+ if (tty->flip.count >= TTY_FLIPBUF_SIZE)
+ return;
+
+ tty->flip.count++;
+
+#if 0
+ switch(err) {
+ case TTY_BREAK:
+ break;
+ case TTY_PARITY:
+ break;
+ case TTY_OVERRUN:
+ break;
+ case TTY_FRAME:
+ break;
+ }
+#endif
+
+ *tty->flip.flag_buf_ptr++ = err;
+ *tty->flip.char_buf_ptr++ = ch;
+ tty_flip_buffer_push(tty);
+}
+
static irqreturn_t a2232_vbl_inter(int irq, void *data, struct pt_regs *fp)
{
#if A2232_IOBUFLEN != 256
--- /dev/null
+/*
+ * C-Brick Serial Port (and console) driver for SGI Altix machines.
+ *
+ * This driver is NOT suitable for talking to the l1-controller for
+ * anything other than 'console activities' --- please use the l1
+ * driver for that.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <linux/config.h>
+#include <linux/interrupt.h>
+#include <linux/tty.h>
+#include <linux/serial.h>
+#include <linux/console.h>
+#include <linux/module.h>
+#include <linux/sysrq.h>
+#include <linux/circ_buf.h>
+#include <linux/serial_reg.h>
+#include <asm/uaccess.h>
+#include <asm/sn/sgi.h>
+#include <asm/sn/sn_sal.h>
+#include <asm/sn/pci/pciio.h>
+#include <asm/sn/simulator.h>
+#include <asm/sn/sn2/sn_private.h>
+
+#if defined(CONFIG_SGI_L1_SERIAL_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+static char sysrq_serial_str[] = "\eSYS";
+static char *sysrq_serial_ptr = sysrq_serial_str;
+static unsigned long sysrq_requested;
+#endif /* CONFIG_SGI_L1_SERIAL_CONSOLE && CONFIG_MAGIC_SYSRQ */
+
+/* minor device number */
+#define SN_SAL_MINOR 64
+
+/* number of characters left in xmit buffer before we ask for more */
+#define WAKEUP_CHARS 128
+
+/* number of characters we can transmit to the SAL console at a time */
+#define SN_SAL_MAX_CHARS 120
+
+#define SN_SAL_EVENT_WRITE_WAKEUP 0
+
+/* 64K, when we're asynch, it must be at least printk's LOG_BUF_LEN to
+ * avoid losing chars, (always has to be a power of 2) */
+#define SN_SAL_BUFFER_SIZE (64 * (1 << 10))
+
+#define SN_SAL_UART_FIFO_DEPTH 16
+#define SN_SAL_UART_FIFO_SPEED_CPS 9600/10
+
+/* we don't kmalloc/get_free_page these as we want them available
+ * before either of those are initialized */
+static char sn_xmit_buff_mem[SN_SAL_BUFFER_SIZE];
+
+struct volatile_circ_buf {
+ char *cb_buf;
+ int cb_head;
+ int cb_tail;
+};
+
+static struct volatile_circ_buf xmit = { .cb_buf = sn_xmit_buff_mem };
+static char sn_tmp_buffer[SN_SAL_BUFFER_SIZE];
+
+static struct tty_struct *sn_sal_tty;
+
+static struct timer_list sn_sal_timer;
+static int sn_sal_event; /* event type for task queue */
+
+static int sn_sal_is_asynch;
+static int sn_sal_irq;
+static spinlock_t sn_sal_lock = SPIN_LOCK_UNLOCKED;
+static int sn_total_tx_count;
+static int sn_total_rx_count;
+
+static void sn_sal_tasklet_action(unsigned long data);
+static DECLARE_TASKLET(sn_sal_tasklet, sn_sal_tasklet_action, 0);
+
+static unsigned long sn_interrupt_timeout;
+
+extern u64 master_node_bedrock_address;
+
+#undef DEBUG
+#ifdef DEBUG
+static int sn_debug_printf(const char *fmt, ...);
+#define DPRINTF(x...) sn_debug_printf(x)
+#else
+#define DPRINTF(x...) do { } while (0)
+#endif
+
+struct sn_sal_ops {
+ int (*sal_puts)(const char *s, int len);
+ int (*sal_getc)(void);
+ int (*sal_input_pending)(void);
+ void (*sal_wakeup_transmit)(void);
+};
+
+/* This is the pointer used. It is assigned to point to one of
+ * the tables below.
+ */
+static struct sn_sal_ops *sn_func;
+
+/* Prototypes */
+static int snt_hw_puts(const char *, int);
+static int snt_poll_getc(void);
+static int snt_poll_input_pending(void);
+static int snt_sim_puts(const char *, int);
+static int snt_sim_getc(void);
+static int snt_sim_input_pending(void);
+static int snt_intr_getc(void);
+static int snt_intr_input_pending(void);
+static void sn_intr_transmit_chars(void);
+
+/* A table for polling */
+static struct sn_sal_ops poll_ops = {
+ .sal_puts = snt_hw_puts,
+ .sal_getc = snt_poll_getc,
+ .sal_input_pending = snt_poll_input_pending
+};
+
+/* A table for the simulator */
+static struct sn_sal_ops sim_ops = {
+ .sal_puts = snt_sim_puts,
+ .sal_getc = snt_sim_getc,
+ .sal_input_pending = snt_sim_input_pending
+};
+
+/* A table for interrupts enabled */
+static struct sn_sal_ops intr_ops = {
+ .sal_puts = snt_hw_puts,
+ .sal_getc = snt_intr_getc,
+ .sal_input_pending = snt_intr_input_pending,
+ .sal_wakeup_transmit = sn_intr_transmit_chars
+};
+
+
+/* the console does output in two distinctly different ways:
+ * synchronous and asynchronous (buffered). initally, early_printk
+ * does synchronous output. any data written goes directly to the SAL
+ * to be output (incidentally, it is internally buffered by the SAL)
+ * after interrupts and timers are initialized and available for use,
+ * the console init code switches to asynchronous output. this is
+ * also the earliest opportunity to begin polling for console input.
+ * after console initialization, console output and tty (serial port)
+ * output is buffered and sent to the SAL asynchronously (either by
+ * timer callback or by UART interrupt) */
+
+
+/* routines for running the console in polling mode */
+
+static int
+snt_hw_puts(const char *s, int len)
+{
+ /* looking at the PROM source code, putb calls the flush
+ * routine, so if we send characters in FIFO sized chunks, it
+ * should go out by the next time the timer gets called */
+ return ia64_sn_console_putb(s, len);
+}
+
+static int
+snt_poll_getc(void)
+{
+ int ch;
+ ia64_sn_console_getc(&ch);
+ return ch;
+}
+
+static int
+snt_poll_input_pending(void)
+{
+ int status, input;
+
+ status = ia64_sn_console_check(&input);
+ return !status && input;
+}
+
+
+/* routines for running the console on the simulator */
+
+static int
+snt_sim_puts(const char *str, int count)
+{
+ int counter = count;
+
+#ifdef FLAG_DIRECT_CONSOLE_WRITES
+ /* This is an easy way to pre-pend the output to know whether the output
+ * was done via sal or directly */
+ writeb('[', master_node_bedrock_address + (UART_TX << 3));
+ writeb('+', master_node_bedrock_address + (UART_TX << 3));
+ writeb(']', master_node_bedrock_address + (UART_TX << 3));
+ writeb(' ', master_node_bedrock_address + (UART_TX << 3));
+#endif /* FLAG_DIRECT_CONSOLE_WRITES */
+ while (counter > 0) {
+ writeb(*str, master_node_bedrock_address + (UART_TX << 3));
+ counter--;
+ str++;
+ }
+
+ return count;
+}
+
+static int
+snt_sim_getc(void)
+{
+ return readb(master_node_bedrock_address + (UART_RX << 3));
+}
+
+static int
+snt_sim_input_pending(void)
+{
+ return readb(master_node_bedrock_address + (UART_LSR << 3)) & UART_LSR_DR;
+}
+
+
+/* routines for an interrupt driven console (normal) */
+
+static int
+snt_intr_getc(void)
+{
+ return ia64_sn_console_readc();
+}
+
+static int
+snt_intr_input_pending(void)
+{
+ return ia64_sn_console_intr_status() & SAL_CONSOLE_INTR_RECV;
+}
+
+/* The early printk (possible setup) and function call */
+
+void
+early_printk_sn_sal(const char *s, unsigned count)
+{
+ extern void early_sn_setup(void);
+
+ if (!sn_func) {
+ if (IS_RUNNING_ON_SIMULATOR())
+ sn_func = &sim_ops;
+ else
+ sn_func = &poll_ops;
+
+ early_sn_setup();
+ }
+ sn_func->sal_puts(s, count);
+}
+
+#ifdef DEBUG
+/* this is as "close to the metal" as we can get, used when the driver
+ * itself may be broken */
+static int
+sn_debug_printf(const char *fmt, ...)
+{
+ static char printk_buf[1024];
+ int printed_len;
+ va_list args;
+
+ va_start(args, fmt);
+ printed_len = vscnprintf(printk_buf, sizeof(printk_buf), fmt, args);
+ early_printk_sn_sal(printk_buf, printed_len);
+ va_end(args);
+ return printed_len;
+}
+#endif /* DEBUG */
+
+/*
+ * Interrupt handling routines.
+ */
+
+static void
+sn_sal_sched_event(int event)
+{
+ sn_sal_event |= (1 << event);
+ tasklet_schedule(&sn_sal_tasklet);
+}
+
+/* sn_receive_chars can be called before sn_sal_tty is initialized. in
+ * that case, its only use is to trigger sysrq and kdb */
+static void
+sn_receive_chars(struct pt_regs *regs, unsigned long *flags)
+{
+ int ch;
+
+ while (sn_func->sal_input_pending()) {
+ ch = sn_func->sal_getc();
+ if (ch < 0) {
+ printk(KERN_ERR "sn_serial: An error occured while "
+ "obtaining data from the console (0x%0x)\n", ch);
+ break;
+ }
+#if defined(CONFIG_SGI_L1_SERIAL_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+ if (sysrq_requested) {
+ unsigned long sysrq_timeout = sysrq_requested + HZ*5;
+
+ sysrq_requested = 0;
+ if (ch && time_before(jiffies, sysrq_timeout)) {
+ spin_unlock_irqrestore(&sn_sal_lock, *flags);
+ handle_sysrq(ch, regs, NULL);
+ spin_lock_irqsave(&sn_sal_lock, *flags);
+ /* don't record this char */
+ continue;
+ }
+ }
+ if (ch == *sysrq_serial_ptr) {
+ if (!(*++sysrq_serial_ptr)) {
+ sysrq_requested = jiffies;
+ sysrq_serial_ptr = sysrq_serial_str;
+ }
+ }
+ else
+ sysrq_serial_ptr = sysrq_serial_str;
+#endif /* CONFIG_SGI_L1_SERIAL_CONSOLE && CONFIG_MAGIC_SYSRQ */
+
+ /* record the character to pass up to the tty layer */
+ if (sn_sal_tty) {
+ *sn_sal_tty->flip.char_buf_ptr = ch;
+ sn_sal_tty->flip.char_buf_ptr++;
+ sn_sal_tty->flip.count++;
+ if (sn_sal_tty->flip.count == TTY_FLIPBUF_SIZE)
+ break;
+ }
+ sn_total_rx_count++;
+ }
+
+ if (sn_sal_tty)
+ tty_flip_buffer_push((struct tty_struct *)sn_sal_tty);
+}
+
+
+/* synch_flush_xmit must be called with sn_sal_lock */
+static void
+synch_flush_xmit(void)
+{
+ int xmit_count, tail, head, loops, ii;
+ int result;
+ char *start;
+
+ if (xmit.cb_head == xmit.cb_tail)
+ return; /* Nothing to do. */
+
+ head = xmit.cb_head;
+ tail = xmit.cb_tail;
+ start = &xmit.cb_buf[tail];
+
+ /* twice around gets the tail to the end of the buffer and
+ * then to the head, if needed */
+ loops = (head < tail) ? 2 : 1;
+
+ for (ii = 0; ii < loops; ii++) {
+ xmit_count = (head < tail) ? (SN_SAL_BUFFER_SIZE - tail) : (head - tail);
+
+ if (xmit_count > 0) {
+ result = sn_func->sal_puts((char *)start, xmit_count);
+ if (!result)
+ DPRINTF("\n*** synch_flush_xmit failed to flush\n");
+ if (result > 0) {
+ xmit_count -= result;
+ sn_total_tx_count += result;
+ tail += result;
+ tail &= SN_SAL_BUFFER_SIZE - 1;
+ xmit.cb_tail = tail;
+ start = (char *)&xmit.cb_buf[tail];
+ }
+ }
+ }
+}
+
+/* must be called with a lock protecting the circular buffer and
+ * sn_sal_tty */
+static void
+sn_poll_transmit_chars(void)
+{
+ int xmit_count, tail, head;
+ int result;
+ char *start;
+
+ BUG_ON(!sn_sal_is_asynch);
+
+ if (xmit.cb_head == xmit.cb_tail ||
+ (sn_sal_tty && (sn_sal_tty->stopped || sn_sal_tty->hw_stopped))) {
+ /* Nothing to do. */
+ return;
+ }
+
+ head = xmit.cb_head;
+ tail = xmit.cb_tail;
+ start = &xmit.cb_buf[tail];
+
+ xmit_count = (head < tail) ? (SN_SAL_BUFFER_SIZE - tail) : (head - tail);
+
+ if (xmit_count == 0)
+ DPRINTF("\n*** empty xmit_count\n");
+
+ /* use the ops, as we could be on the simulator */
+ result = sn_func->sal_puts((char *)start, xmit_count);
+ if (!result)
+ DPRINTF("\n*** error in synchronous sal_puts\n");
+ /* XXX chadt clean this up */
+ if (result > 0) {
+ xmit_count -= result;
+ sn_total_tx_count += result;
+ tail += result;
+ tail &= SN_SAL_BUFFER_SIZE - 1;
+ xmit.cb_tail = tail;
+ start = &xmit.cb_buf[tail];
+ }
+
+ /* if there's few enough characters left in the xmit buffer
+ * that we could stand for the upper layer to send us some
+ * more, ask for it. */
+ if (sn_sal_tty)
+ if (CIRC_CNT(xmit.cb_head, xmit.cb_tail, SN_SAL_BUFFER_SIZE) < WAKEUP_CHARS)
+ sn_sal_sched_event(SN_SAL_EVENT_WRITE_WAKEUP);
+}
+
+
+/* must be called with a lock protecting the circular buffer and
+ * sn_sal_tty */
+static void
+sn_intr_transmit_chars(void)
+{
+ int xmit_count, tail, head, loops, ii;
+ int result;
+ char *start;
+
+ BUG_ON(!sn_sal_is_asynch);
+
+ if (xmit.cb_head == xmit.cb_tail ||
+ (sn_sal_tty && (sn_sal_tty->stopped || sn_sal_tty->hw_stopped))) {
+ /* Nothing to do. */
+ return;
+ }
+
+ head = xmit.cb_head;
+ tail = xmit.cb_tail;
+ start = &xmit.cb_buf[tail];
+
+ /* twice around gets the tail to the end of the buffer and
+ * then to the head, if needed */
+ loops = (head < tail) ? 2 : 1;
+
+ for (ii = 0; ii < loops; ii++) {
+ xmit_count = (head < tail) ?
+ (SN_SAL_BUFFER_SIZE - tail) : (head - tail);
+
+ if (xmit_count > 0) {
+ result = ia64_sn_console_xmit_chars((char *)start, xmit_count);
+#ifdef DEBUG
+ if (!result)
+ DPRINTF("`");
+#endif
+ if (result > 0) {
+ xmit_count -= result;
+ sn_total_tx_count += result;
+ tail += result;
+ tail &= SN_SAL_BUFFER_SIZE - 1;
+ xmit.cb_tail = tail;
+ start = &xmit.cb_buf[tail];
+ }
+ }
+ }
+
+ /* if there's few enough characters left in the xmit buffer
+ * that we could stand for the upper layer to send us some
+ * more, ask for it. */
+ if (sn_sal_tty)
+ if (CIRC_CNT(xmit.cb_head, xmit.cb_tail, SN_SAL_BUFFER_SIZE) < WAKEUP_CHARS)
+ sn_sal_sched_event(SN_SAL_EVENT_WRITE_WAKEUP);
+}
+
+
+static irqreturn_t
+sn_sal_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ /* this call is necessary to pass the interrupt back to the
+ * SAL, since it doesn't intercept the UART interrupts
+ * itself */
+ int status = ia64_sn_console_intr_status();
+ unsigned long flags;
+
+ spin_lock_irqsave(&sn_sal_lock, flags);
+ if (status & SAL_CONSOLE_INTR_RECV)
+ sn_receive_chars(regs, &flags);
+ if (status & SAL_CONSOLE_INTR_XMIT)
+ sn_intr_transmit_chars();
+ spin_unlock_irqrestore(&sn_sal_lock, flags);
+ return IRQ_HANDLED;
+}
+
+
+/* returns the console irq if interrupt is successfully registered,
+ * else 0 */
+static int
+sn_sal_connect_interrupt(void)
+{
+ cpuid_t intr_cpuid;
+ unsigned int intr_cpuloc;
+ nasid_t console_nasid;
+ unsigned int console_irq;
+ int result;
+
+ console_nasid = ia64_sn_get_console_nasid();
+ intr_cpuid = first_cpu(node_to_cpumask(nasid_to_cnodeid(console_nasid)));
+ intr_cpuloc = cpu_physical_id(intr_cpuid);
+ console_irq = CPU_VECTOR_TO_IRQ(intr_cpuloc, SGI_UART_VECTOR);
+
+ result = intr_connect_level(intr_cpuid, SGI_UART_VECTOR);
+ BUG_ON(result != SGI_UART_VECTOR);
+
+ result = request_irq(console_irq, sn_sal_interrupt, SA_INTERRUPT, "SAL console driver", &sn_sal_tty);
+ if (result >= 0)
+ return console_irq;
+
+ printk(KERN_WARNING "sn_serial: console proceeding in polled mode\n");
+ return 0;
+}
+
+static void
+sn_sal_tasklet_action(unsigned long data)
+{
+ unsigned long flags;
+
+ if (sn_sal_tty) {
+ spin_lock_irqsave(&sn_sal_lock, flags);
+ if (sn_sal_tty) {
+ if (test_and_clear_bit(SN_SAL_EVENT_WRITE_WAKEUP, &sn_sal_event)) {
+ if ((sn_sal_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) && sn_sal_tty->ldisc.write_wakeup)
+ (sn_sal_tty->ldisc.write_wakeup)((struct tty_struct *)sn_sal_tty);
+ wake_up_interruptible((wait_queue_head_t *)&sn_sal_tty->write_wait);
+ }
+ }
+ spin_unlock_irqrestore(&sn_sal_lock, flags);
+ }
+}
+
+
+/*
+ * This function handles polled mode.
+ */
+static void
+sn_sal_timer_poll(unsigned long dummy)
+{
+ unsigned long flags;
+
+ if (!sn_sal_irq) {
+ spin_lock_irqsave(&sn_sal_lock, flags);
+ sn_receive_chars(NULL, &flags);
+ sn_poll_transmit_chars();
+ spin_unlock_irqrestore(&sn_sal_lock, flags);
+ mod_timer(&sn_sal_timer, jiffies + sn_interrupt_timeout);
+ }
+}
+
+
+/*
+ * User-level console routines
+ */
+
+static int
+sn_sal_open(struct tty_struct *tty, struct file *filp)
+{
+ unsigned long flags;
+
+ DPRINTF("sn_sal_open: sn_sal_tty = %p, tty = %p, filp = %p\n",
+ sn_sal_tty, tty, filp);
+
+ spin_lock_irqsave(&sn_sal_lock, flags);
+ if (!sn_sal_tty)
+ sn_sal_tty = tty;
+ spin_unlock_irqrestore(&sn_sal_lock, flags);
+
+ return 0;
+}
+
+
+/* We're keeping all our resources. We're keeping interrupts turned
+ * on. Maybe just let the tty layer finish its stuff...? GMSH
+ */
+static void
+sn_sal_close(struct tty_struct *tty, struct file * filp)
+{
+ if (tty->count == 1) {
+ unsigned long flags;
+ tty->closing = 1;
+ if (tty->driver->flush_buffer)
+ tty->driver->flush_buffer(tty);
+ if (tty->ldisc.flush_buffer)
+ tty->ldisc.flush_buffer(tty);
+ tty->closing = 0;
+ spin_lock_irqsave(&sn_sal_lock, flags);
+ sn_sal_tty = NULL;
+ spin_unlock_irqrestore(&sn_sal_lock, flags);
+ }
+}
+
+
+static int
+sn_sal_write(struct tty_struct *tty, int from_user,
+ const unsigned char *buf, int count)
+{
+ int c, ret = 0;
+ unsigned long flags;
+
+ if (from_user) {
+ while (1) {
+ int c1;
+ c = CIRC_SPACE_TO_END(xmit.cb_head, xmit.cb_tail,
+ SN_SAL_BUFFER_SIZE);
+
+ if (count < c)
+ c = count;
+ if (c <= 0)
+ break;
+
+ c -= copy_from_user(sn_tmp_buffer, buf, c);
+ if (!c) {
+ if (!ret)
+ ret = -EFAULT;
+ break;
+ }
+
+ /* Turn off interrupts and see if the xmit buffer has
+ * moved since the last time we looked.
+ */
+ spin_lock_irqsave(&sn_sal_lock, flags);
+ c1 = CIRC_SPACE_TO_END(xmit.cb_head, xmit.cb_tail, SN_SAL_BUFFER_SIZE);
+
+ if (c1 < c)
+ c = c1;
+
+ memcpy(xmit.cb_buf + xmit.cb_head, sn_tmp_buffer, c);
+ xmit.cb_head = ((xmit.cb_head + c) & (SN_SAL_BUFFER_SIZE - 1));
+ spin_unlock_irqrestore(&sn_sal_lock, flags);
+
+ buf += c;
+ count -= c;
+ ret += c;
+ }
+ }
+ else {
+ /* The buffer passed in isn't coming from userland,
+ * so cut out the middleman (sn_tmp_buffer).
+ */
+ spin_lock_irqsave(&sn_sal_lock, flags);
+ while (1) {
+ c = CIRC_SPACE_TO_END(xmit.cb_head, xmit.cb_tail, SN_SAL_BUFFER_SIZE);
+
+ if (count < c)
+ c = count;
+ if (c <= 0) {
+ break;
+ }
+ memcpy(xmit.cb_buf + xmit.cb_head, buf, c);
+ xmit.cb_head = ((xmit.cb_head + c) & (SN_SAL_BUFFER_SIZE - 1));
+ buf += c;
+ count -= c;
+ ret += c;
+ }
+ spin_unlock_irqrestore(&sn_sal_lock, flags);
+ }
+
+ spin_lock_irqsave(&sn_sal_lock, flags);
+ if (xmit.cb_head != xmit.cb_tail && !(tty && (tty->stopped || tty->hw_stopped)))
+ if (sn_func->sal_wakeup_transmit)
+ sn_func->sal_wakeup_transmit();
+ spin_unlock_irqrestore(&sn_sal_lock, flags);
+
+ return ret;
+}
+
+
+static void
+sn_sal_put_char(struct tty_struct *tty, unsigned char ch)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&sn_sal_lock, flags);
+ if (CIRC_SPACE(xmit.cb_head, xmit.cb_tail, SN_SAL_BUFFER_SIZE) != 0) {
+ xmit.cb_buf[xmit.cb_head] = ch;
+ xmit.cb_head = (xmit.cb_head + 1) & (SN_SAL_BUFFER_SIZE-1);
+ if ( sn_func->sal_wakeup_transmit )
+ sn_func->sal_wakeup_transmit();
+ }
+ spin_unlock_irqrestore(&sn_sal_lock, flags);
+}
+
+
+static void
+sn_sal_flush_chars(struct tty_struct *tty)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&sn_sal_lock, flags);
+ if (CIRC_CNT(xmit.cb_head, xmit.cb_tail, SN_SAL_BUFFER_SIZE))
+ if (sn_func->sal_wakeup_transmit)
+ sn_func->sal_wakeup_transmit();
+ spin_unlock_irqrestore(&sn_sal_lock, flags);
+}
+
+
+static int
+sn_sal_write_room(struct tty_struct *tty)
+{
+ unsigned long flags;
+ int space;
+
+ spin_lock_irqsave(&sn_sal_lock, flags);
+ space = CIRC_SPACE(xmit.cb_head, xmit.cb_tail, SN_SAL_BUFFER_SIZE);
+ spin_unlock_irqrestore(&sn_sal_lock, flags);
+ return space;
+}
+
+
+static int
+sn_sal_chars_in_buffer(struct tty_struct *tty)
+{
+ unsigned long flags;
+ int space;
+
+ spin_lock_irqsave(&sn_sal_lock, flags);
+ space = CIRC_CNT(xmit.cb_head, xmit.cb_tail, SN_SAL_BUFFER_SIZE);
+ DPRINTF("<%d>", space);
+ spin_unlock_irqrestore(&sn_sal_lock, flags);
+ return space;
+}
+
+
+static void
+sn_sal_flush_buffer(struct tty_struct *tty)
+{
+ unsigned long flags;
+
+ /* drop everything */
+ spin_lock_irqsave(&sn_sal_lock, flags);
+ xmit.cb_head = xmit.cb_tail = 0;
+ spin_unlock_irqrestore(&sn_sal_lock, flags);
+
+ /* wake up tty level */
+ wake_up_interruptible(&tty->write_wait);
+ if ((tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) && tty->ldisc.write_wakeup)
+ (tty->ldisc.write_wakeup)(tty);
+}
+
+
+static void
+sn_sal_hangup(struct tty_struct *tty)
+{
+ sn_sal_flush_buffer(tty);
+}
+
+
+static void
+sn_sal_wait_until_sent(struct tty_struct *tty, int timeout)
+{
+ /* this is SAL's problem */
+ DPRINTF("<sn_serial: should wait until sent>");
+}
+
+
+/*
+ * sn_sal_read_proc
+ *
+ * Console /proc interface
+ */
+
+static int
+sn_sal_read_proc(char *page, char **start, off_t off, int count,
+ int *eof, void *data)
+{
+ int len = 0;
+ off_t begin = 0;
+
+ len += sprintf(page, "sn_serial: nasid:%ld irq:%d tx:%d rx:%d\n",
+ ia64_sn_get_console_nasid(), sn_sal_irq,
+ sn_total_tx_count, sn_total_rx_count);
+ *eof = 1;
+
+ if (off >= len+begin)
+ return 0;
+ *start = page + (off-begin);
+
+ return count < begin+len-off ? count : begin+len-off;
+}
+
+
+static struct tty_operations sn_sal_driver_ops = {
+ .open = sn_sal_open,
+ .close = sn_sal_close,
+ .write = sn_sal_write,
+ .put_char = sn_sal_put_char,
+ .flush_chars = sn_sal_flush_chars,
+ .write_room = sn_sal_write_room,
+ .chars_in_buffer = sn_sal_chars_in_buffer,
+ .hangup = sn_sal_hangup,
+ .wait_until_sent = sn_sal_wait_until_sent,
+ .read_proc = sn_sal_read_proc,
+};
+static struct tty_driver *sn_sal_driver;
+
+/* sn_sal_init wishlist:
+ * - allocate sn_tmp_buffer
+ * - fix up the tty_driver struct
+ * - turn on receive interrupts
+ * - do any termios twiddling once and for all
+ */
+
+/*
+ * Boot-time initialization code
+ */
+
+static void __init
+sn_sal_switch_to_asynch(void)
+{
+ unsigned long flags;
+
+ /* without early_printk, we may be invoked late enough to race
+ * with other cpus doing console IO at this point, however
+ * console interrupts will never be enabled */
+ spin_lock_irqsave(&sn_sal_lock, flags);
+
+ if (sn_sal_is_asynch) {
+ spin_unlock_irqrestore(&sn_sal_lock, flags);
+ return;
+ }
+
+ DPRINTF("sn_serial: switch to asynchronous console\n");
+
+ /* early_printk invocation may have done this for us */
+ if (!sn_func) {
+ if (IS_RUNNING_ON_SIMULATOR())
+ sn_func = &sim_ops;
+ else
+ sn_func = &poll_ops;
+ }
+
+ /* we can't turn on the console interrupt (as request_irq
+ * calls kmalloc, which isn't set up yet), so we rely on a
+ * timer to poll for input and push data from the console
+ * buffer.
+ */
+ init_timer(&sn_sal_timer);
+ sn_sal_timer.function = sn_sal_timer_poll;
+
+ if (IS_RUNNING_ON_SIMULATOR())
+ sn_interrupt_timeout = 6;
+ else {
+ /* 960cps / 16 char FIFO = 60HZ
+ * HZ / (SN_SAL_FIFO_SPEED_CPS / SN_SAL_FIFO_DEPTH) */
+ sn_interrupt_timeout = HZ * SN_SAL_UART_FIFO_DEPTH / SN_SAL_UART_FIFO_SPEED_CPS;
+ }
+ mod_timer(&sn_sal_timer, jiffies + sn_interrupt_timeout);
+
+ sn_sal_is_asynch = 1;
+ spin_unlock_irqrestore(&sn_sal_lock, flags);
+}
+
+static void __init
+sn_sal_switch_to_interrupts(void)
+{
+ int irq;
+
+ DPRINTF("sn_serial: switching to interrupt driven console\n");
+
+ irq = sn_sal_connect_interrupt();
+ if (irq) {
+ unsigned long flags;
+ spin_lock_irqsave(&sn_sal_lock, flags);
+
+ /* sn_sal_irq is a global variable. When it's set to
+ * a non-zero value, we stop polling for input (since
+ * interrupts should now be enabled). */
+ sn_sal_irq = irq;
+ sn_func = &intr_ops;
+
+ /* turn on receive interrupts */
+ ia64_sn_console_intr_enable(SAL_CONSOLE_INTR_RECV);
+ spin_unlock_irqrestore(&sn_sal_lock, flags);
+ }
+}
+
+static int __init
+sn_sal_module_init(void)
+{
+ int retval;
+
+ DPRINTF("sn_serial: sn_sal_module_init\n");
+
+ if (!ia64_platform_is("sn2"))
+ return -ENODEV;
+
+ sn_sal_driver = alloc_tty_driver(1);
+ if ( !sn_sal_driver )
+ return -ENOMEM;
+
+ sn_sal_driver->owner = THIS_MODULE;
+ sn_sal_driver->driver_name = "sn_serial";
+ sn_sal_driver->name = "ttyS";
+ sn_sal_driver->major = TTY_MAJOR;
+ sn_sal_driver->minor_start = SN_SAL_MINOR;
+ sn_sal_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ sn_sal_driver->subtype = SERIAL_TYPE_NORMAL;
+ sn_sal_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_NO_DEVFS;
+
+ tty_set_operations(sn_sal_driver, &sn_sal_driver_ops);
+
+ /* when this driver is compiled in, the console initialization
+ * will have already switched us into asynchronous operation
+ * before we get here through the module initcalls */
+ sn_sal_switch_to_asynch();
+
+ /* at this point (module_init) we can try to turn on interrupts */
+ if (!IS_RUNNING_ON_SIMULATOR())
+ sn_sal_switch_to_interrupts();
+
+ sn_sal_driver->init_termios = tty_std_termios;
+ sn_sal_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL;
+
+ if ((retval = tty_register_driver(sn_sal_driver))) {
+ printk(KERN_ERR "sn_serial: Unable to register tty driver\n");
+ return retval;
+ }
+ return 0;
+}
+
+
+static void __exit
+sn_sal_module_exit(void)
+{
+ del_timer_sync(&sn_sal_timer);
+ tty_unregister_driver(sn_sal_driver);
+ put_tty_driver(sn_sal_driver);
+}
+
+module_init(sn_sal_module_init);
+module_exit(sn_sal_module_exit);
+
+/*
+ * Kernel console definitions
+ */
+
+#ifdef CONFIG_SGI_L1_SERIAL_CONSOLE
+/*
+ * Print a string to the SAL console. The console_lock must be held
+ * when we get here.
+ */
+static void
+sn_sal_console_write(struct console *co, const char *s, unsigned count)
+{
+ unsigned long flags;
+ const char *s1;
+
+ BUG_ON(!sn_sal_is_asynch);
+
+ /* somebody really wants this output, might be an
+ * oops, kdb, panic, etc. make sure they get it. */
+ if (spin_is_locked(&sn_sal_lock)) {
+ synch_flush_xmit();
+ /* Output '\r' before each '\n' */
+ while ((s1 = memchr(s, '\n', count)) != NULL) {
+ sn_func->sal_puts(s, s1 - s);
+ sn_func->sal_puts("\r\n", 2);
+ count -= s1 + 1 - s;
+ s = s1 + 1;
+ }
+ sn_func->sal_puts(s, count);
+ }
+ else if (in_interrupt()) {
+ spin_lock_irqsave(&sn_sal_lock, flags);
+ synch_flush_xmit();
+ spin_unlock_irqrestore(&sn_sal_lock, flags);
+ /* Output '\r' before each '\n' */
+ while ((s1 = memchr(s, '\n', count)) != NULL) {
+ sn_func->sal_puts(s, s1 - s);
+ sn_func->sal_puts("\r\n", 2);
+ count -= s1 + 1 - s;
+ s = s1 + 1;
+ }
+ sn_func->sal_puts(s, count);
+ }
+ else {
+ /* Output '\r' before each '\n' */
+ while ((s1 = memchr(s, '\n', count)) != NULL) {
+ sn_sal_write(NULL, 0, s, s1 - s);
+ sn_sal_write(NULL, 0, "\r\n", 2);
+ count -= s1 + 1 - s;
+ s = s1 + 1;
+ }
+ sn_sal_write(NULL, 0, s, count);
+ }
+}
+
+static struct tty_driver *
+sn_sal_console_device(struct console *c, int *index)
+{
+ *index = c->index;
+ return sn_sal_driver;
+}
+
+static int __init
+sn_sal_console_setup(struct console *co, char *options)
+{
+ return 0;
+}
+
+
+static struct console sal_console = {
+ .name = "ttyS",
+ .write = sn_sal_console_write,
+ .device = sn_sal_console_device,
+ .setup = sn_sal_console_setup,
+ .index = -1
+};
+
+static int __init
+sn_sal_serial_console_init(void)
+{
+ if (ia64_platform_is("sn2")) {
+ sn_sal_switch_to_asynch();
+ DPRINTF("sn_sal_serial_console_init : register console\n");
+ register_console(&sal_console);
+ }
+ return 0;
+}
+console_initcall(sn_sal_serial_console_init);
+
+#endif /* CONFIG_SGI_L1_SERIAL_CONSOLE */
{ SONYPI_DEVICE_MODEL_TYPE2, 0x41, SONYPI_BATTERY_MASK, sonypi_batteryev },
{ SONYPI_DEVICE_MODEL_TYPE2, 0x31, SONYPI_PKEY_MASK, sonypi_pkeyev },
- { 0 }
+ { 0, 0, 0, 0 }
};
#define SONYPI_BUF_SIZE 128
/*
* There is a bunch of documentation about the card, jumpers, config
* settings, restrictions, cables, device names and numbers in
- * Documentation/specialix.txt
+ * ../../Documentation/specialix.txt
*/
#include <linux/config.h>
tty->ldisc.flush_buffer(tty);
tty->closing = 0;
port->event = 0;
- port->tty = NULL;
+ port->tty = 0;
if (port->blocked_open) {
if (port->close_delay) {
current->state = TASK_INTERRUPTIBLE;
static inline int sx_set_serial_info(struct specialix_port * port,
- struct serial_struct __user * newinfo)
+ struct serial_struct * newinfo)
{
struct serial_struct tmp;
struct specialix_board *bp = port_Board(port);
int change_speed;
unsigned long flags;
+ int error;
+ error = verify_area(VERIFY_READ, (void *) newinfo, sizeof(tmp));
+ if (error)
+ return error;
+
if (copy_from_user(&tmp, newinfo, sizeof(tmp)))
return -EFAULT;
static inline int sx_get_serial_info(struct specialix_port * port,
- struct serial_struct __user *retinfo)
+ struct serial_struct * retinfo)
{
struct serial_struct tmp;
struct specialix_board *bp = port_Board(port);
+ int error;
+ error = verify_area(VERIFY_WRITE, (void *) retinfo, sizeof(tmp));
+ if (error)
+ return error;
+
memset(&tmp, 0, sizeof(tmp));
tmp.type = PORT_CIRRUS;
tmp.line = port - sx_port;
unsigned int cmd, unsigned long arg)
{
struct specialix_port *port = (struct specialix_port *)tty->driver_data;
+ int error;
int retval;
- void __user *argp = (void __user *)arg;
if (sx_paranoia_check(port, tty->name, "sx_ioctl"))
return -ENODEV;
sx_send_break(port, arg ? arg*(HZ/10) : HZ/4);
return 0;
case TIOCGSOFTCAR:
- if (put_user(C_CLOCAL(tty)?1:0, (unsigned long __user *)argp))
- return -EFAULT;
+ error = verify_area(VERIFY_WRITE, (void *) arg, sizeof(long));
+ if (error)
+ return error;
+ put_user(C_CLOCAL(tty) ? 1 : 0,
+ (unsigned long *) arg);
return 0;
case TIOCSSOFTCAR:
- if (get_user(arg, (unsigned long __user *) argp))
- return -EFAULT;
+ get_user(arg, (unsigned long *) arg);
tty->termios->c_cflag =
((tty->termios->c_cflag & ~CLOCAL) |
(arg ? CLOCAL : 0));
return 0;
case TIOCGSERIAL:
- return sx_get_serial_info(port, argp);
+ return sx_get_serial_info(port, (struct serial_struct *) arg);
case TIOCSSERIAL:
- return sx_set_serial_info(port, argp);
+ return sx_set_serial_info(port, (struct serial_struct *) arg);
default:
return -ENOIOCTLCMD;
}
port->event = 0;
port->count = 0;
port->flags &= ~ASYNC_NORMAL_ACTIVE;
- port->tty = NULL;
+ port->tty = 0;
wake_up_interruptible(&port->open_wait);
}
static int stl_brdinit(stlbrd_t *brdp);
static int stl_initports(stlbrd_t *brdp, stlpanel_t *panelp);
static int stl_mapirq(int irq, char *name);
-static int stl_getserial(stlport_t *portp, struct serial_struct __user *sp);
-static int stl_setserial(stlport_t *portp, struct serial_struct __user *sp);
-static int stl_getbrdstats(combrd_t __user *bp);
-static int stl_getportstats(stlport_t *portp, comstats_t __user *cp);
-static int stl_clrportstats(stlport_t *portp, comstats_t __user *cp);
-static int stl_getportstruct(stlport_t __user *arg);
-static int stl_getbrdstruct(stlbrd_t __user *arg);
+static int stl_getserial(stlport_t *portp, struct serial_struct *sp);
+static int stl_setserial(stlport_t *portp, struct serial_struct *sp);
+static int stl_getbrdstats(combrd_t *bp);
+static int stl_getportstats(stlport_t *portp, comstats_t *cp);
+static int stl_clrportstats(stlport_t *portp, comstats_t *cp);
+static int stl_getportstruct(unsigned long arg);
+static int stl_getbrdstruct(unsigned long arg);
static int stl_waitcarrier(stlport_t *portp, struct file *filp);
static void stl_delay(int len);
static void stl_eiointr(stlbrd_t *brdp);
{
unsigned long flags;
-#ifdef DEBUG
+#if DEBUG
printk("init_module()\n");
#endif
unsigned long flags;
int i, j, k;
-#ifdef DEBUG
+#if DEBUG
printk("cleanup_module()\n");
#endif
* Check for any arguments passed in on the module load command line.
*/
-static void stl_argbrds(void)
+static void stl_argbrds()
{
stlconf_t conf;
stlbrd_t *brdp;
int nrargs, i;
-#ifdef DEBUG
+#if DEBUG
printk("stl_argbrds()\n");
#endif
char *sp;
int nrbrdnames, i;
-#ifdef DEBUG
+#if DEBUG
printk("stl_parsebrd(confp=%x,argp=%x)\n", (int) confp, (int) argp);
#endif
* Allocate a new board structure. Fill out the basic info in it.
*/
-static stlbrd_t *stl_allocbrd(void)
+static stlbrd_t *stl_allocbrd()
{
stlbrd_t *brdp;
unsigned int minordev;
int brdnr, panelnr, portnr, rc;
-#ifdef DEBUG
+#if DEBUG
printk("stl_open(tty=%x,filp=%x): device=%s\n", (int) tty,
(int) filp, tty->name);
#endif
unsigned long flags;
int rc, doclocal;
-#ifdef DEBUG
+#if DEBUG
printk("stl_waitcarrier(portp=%x,filp=%x)\n", (int) portp, (int) filp);
#endif
stlport_t *portp;
unsigned long flags;
-#ifdef DEBUG
+#if DEBUG
printk("stl_close(tty=%x,filp=%x)\n", (int) tty, (int) filp);
#endif
static void stl_delay(int len)
{
-#ifdef DEBUG
+#if DEBUG
printk("stl_delay(len=%d)\n", len);
#endif
if (len > 0) {
unsigned char *chbuf;
char *head, *tail;
-#ifdef DEBUG
+#if DEBUG
printk("stl_write(tty=%x,from_user=%d,buf=%x,count=%d)\n",
(int) tty, from_user, (int) buf, count);
#endif
unsigned int len;
char *head, *tail;
-#ifdef DEBUG
+#if DEBUG
printk("stl_putchar(tty=%x,ch=%x)\n", (int) tty, (int) ch);
#endif
{
stlport_t *portp;
-#ifdef DEBUG
+#if DEBUG
printk("stl_flushchars(tty=%x)\n", (int) tty);
#endif
stlport_t *portp;
char *head, *tail;
-#ifdef DEBUG
+#if DEBUG
printk("stl_writeroom(tty=%x)\n", (int) tty);
#endif
unsigned int size;
char *head, *tail;
-#ifdef DEBUG
+#if DEBUG
printk("stl_charsinbuffer(tty=%x)\n", (int) tty);
#endif
* Generate the serial struct info.
*/
-static int stl_getserial(stlport_t *portp, struct serial_struct __user *sp)
+static int stl_getserial(stlport_t *portp, struct serial_struct *sp)
{
struct serial_struct sio;
stlbrd_t *brdp;
-#ifdef DEBUG
+#if DEBUG
printk("stl_getserial(portp=%x,sp=%x)\n", (int) portp, (int) sp);
#endif
* just quietly ignore any requests to change irq, etc.
*/
-static int stl_setserial(stlport_t *portp, struct serial_struct __user *sp)
+static int stl_setserial(stlport_t *portp, struct serial_struct *sp)
{
struct serial_struct sio;
-#ifdef DEBUG
+#if DEBUG
printk("stl_setserial(portp=%x,sp=%x)\n", (int) portp, (int) sp);
#endif
stlport_t *portp;
unsigned int ival;
int rc;
- void __user *argp = (void __user *)arg;
-#ifdef DEBUG
+#if DEBUG
printk("stl_ioctl(tty=%x,file=%x,cmd=%x,arg=%x)\n",
(int) tty, (int) file, cmd, (int) arg);
#endif
switch (cmd) {
case TIOCGSOFTCAR:
rc = put_user(((tty->termios->c_cflag & CLOCAL) ? 1 : 0),
- (unsigned __user *) argp);
+ (unsigned int *) arg);
break;
case TIOCSSOFTCAR:
- if (get_user(ival, (unsigned int __user *) arg))
- return -EFAULT;
- tty->termios->c_cflag =
+ if ((rc = verify_area(VERIFY_READ, (void *) arg,
+ sizeof(int))) == 0) {
+ get_user(ival, (unsigned int *) arg);
+ tty->termios->c_cflag =
(tty->termios->c_cflag & ~CLOCAL) |
(ival ? CLOCAL : 0);
+ }
break;
case TIOCGSERIAL:
- rc = stl_getserial(portp, argp);
+ if ((rc = verify_area(VERIFY_WRITE, (void *) arg,
+ sizeof(struct serial_struct))) == 0)
+ rc = stl_getserial(portp, (struct serial_struct *) arg);
break;
case TIOCSSERIAL:
- rc = stl_setserial(portp, argp);
+ if ((rc = verify_area(VERIFY_READ, (void *) arg,
+ sizeof(struct serial_struct))) == 0)
+ rc = stl_setserial(portp, (struct serial_struct *) arg);
break;
case COM_GETPORTSTATS:
- rc = stl_getportstats(portp, argp);
+ if ((rc = verify_area(VERIFY_WRITE, (void *) arg,
+ sizeof(comstats_t))) == 0)
+ rc = stl_getportstats(portp, (comstats_t *) arg);
break;
case COM_CLRPORTSTATS:
- rc = stl_clrportstats(portp, argp);
+ if ((rc = verify_area(VERIFY_WRITE, (void *) arg,
+ sizeof(comstats_t))) == 0)
+ rc = stl_clrportstats(portp, (comstats_t *) arg);
break;
case TIOCSERCONFIG:
case TIOCSERGWILD:
stlport_t *portp;
struct termios *tiosp;
-#ifdef DEBUG
+#if DEBUG
printk("stl_settermios(tty=%x,old=%x)\n", (int) tty, (int) old);
#endif
{
stlport_t *portp;
-#ifdef DEBUG
+#if DEBUG
printk("stl_throttle(tty=%x)\n", (int) tty);
#endif
{
stlport_t *portp;
-#ifdef DEBUG
+#if DEBUG
printk("stl_unthrottle(tty=%x)\n", (int) tty);
#endif
{
stlport_t *portp;
-#ifdef DEBUG
+#if DEBUG
printk("stl_stop(tty=%x)\n", (int) tty);
#endif
{
stlport_t *portp;
-#ifdef DEBUG
+#if DEBUG
printk("stl_start(tty=%x)\n", (int) tty);
#endif
{
stlport_t *portp;
-#ifdef DEBUG
+#if DEBUG
printk("stl_hangup(tty=%x)\n", (int) tty);
#endif
{
stlport_t *portp;
-#ifdef DEBUG
+#if DEBUG
printk("stl_flushbuffer(tty=%x)\n", (int) tty);
#endif
{
stlport_t *portp;
-#ifdef DEBUG
+#if DEBUG
printk("stl_breakctl(tty=%x,state=%d)\n", (int) tty, state);
#endif
stlport_t *portp;
unsigned long tend;
-#ifdef DEBUG
+#if DEBUG
printk("stl_waituntilsent(tty=%x,timeout=%d)\n", (int) tty, timeout);
#endif
{
stlport_t *portp;
-#ifdef DEBUG
+#if DEBUG
printk("stl_sendxchar(tty=%x,ch=%x)\n", (int) tty, ch);
#endif
int curoff, maxoff;
char *pos;
-#ifdef DEBUG
+#if DEBUG
printk("stl_readproc(page=%x,start=%x,off=%x,count=%d,eof=%x,"
"data=%x\n", (int) page, (int) start, (int) off, count,
(int) eof, (int) data);
int i;
int handled = 0;
-#ifdef DEBUG
+#if DEBUG
printk("stl_intr(irq=%d,regs=%x)\n", irq, (int) regs);
#endif
portp = private;
-#ifdef DEBUG
+#if DEBUG
printk("stl_offintr(portp=%x)\n", (int) portp);
#endif
{
int rc, i;
-#ifdef DEBUG
+#if DEBUG
printk("stl_mapirq(irq=%d,name=%s)\n", irq, name);
#endif
stlport_t *portp;
int chipmask, i;
-#ifdef DEBUG
+#if DEBUG
printk("stl_initports(brdp=%x,panelp=%x)\n", (int) brdp, (int) panelp);
#endif
char *name;
int rc;
-#ifdef DEBUG
+#if DEBUG
printk("stl_initeio(brdp=%x)\n", (int) brdp);
#endif
int panelnr, banknr, i;
char *name;
-#ifdef DEBUG
+#if DEBUG
printk("stl_initech(brdp=%x)\n", (int) brdp);
#endif
{
int i;
-#ifdef DEBUG
+#if DEBUG
printk("stl_brdinit(brdp=%x)\n", (int) brdp);
#endif
* Find the next available board number that is free.
*/
-static inline int stl_getbrdnr(void)
+static inline int stl_getbrdnr()
{
int i;
{
stlbrd_t *brdp;
-#ifdef DEBUG
+#if DEBUG
printk("stl_initpcibrd(brdtype=%d,busnr=%x,devnr=%x)\n", brdtype,
devp->bus->number, devp->devfn);
#endif
* Different Stallion boards use the BAR registers in different ways,
* so set up io addresses based on board type.
*/
-#ifdef DEBUG
+#if DEBUG
printk("%s(%d): BAR[]=%x,%x,%x,%x IRQ=%x\n", __FILE__, __LINE__,
pci_resource_start(devp, 0), pci_resource_start(devp, 1),
pci_resource_start(devp, 2), pci_resource_start(devp, 3), devp->irq);
*/
-static inline int stl_findpcibrds(void)
+static inline int stl_findpcibrds()
{
struct pci_dev *dev = NULL;
int i, rc;
-#ifdef DEBUG
+#if DEBUG
printk("stl_findpcibrds()\n");
#endif
* since the initial search and setup is too different.
*/
-static inline int stl_initbrds(void)
+static inline int stl_initbrds()
{
stlbrd_t *brdp;
stlconf_t *confp;
int i;
-#ifdef DEBUG
+#if DEBUG
printk("stl_initbrds()\n");
#endif
* Return the board stats structure to user app.
*/
-static int stl_getbrdstats(combrd_t __user *bp)
+static int stl_getbrdstats(combrd_t *bp)
{
stlbrd_t *brdp;
stlpanel_t *panelp;
* what port to get stats for (used through board control device).
*/
-static int stl_getportstats(stlport_t *portp, comstats_t __user *cp)
+static int stl_getportstats(stlport_t *portp, comstats_t *cp)
{
unsigned char *head, *tail;
unsigned long flags;
- if (!portp) {
+ if (portp == (stlport_t *) NULL) {
if (copy_from_user(&stl_comstats, cp, sizeof(comstats_t)))
return -EFAULT;
portp = stl_getport(stl_comstats.brd, stl_comstats.panel,
* Clear the port stats structure. We also return it zeroed out...
*/
-static int stl_clrportstats(stlport_t *portp, comstats_t __user *cp)
+static int stl_clrportstats(stlport_t *portp, comstats_t *cp)
{
- if (!portp) {
+ if (portp == (stlport_t *) NULL) {
if (copy_from_user(&stl_comstats, cp, sizeof(comstats_t)))
return -EFAULT;
portp = stl_getport(stl_comstats.brd, stl_comstats.panel,
* Return the entire driver ports structure to a user app.
*/
-static int stl_getportstruct(stlport_t __user *arg)
+static int stl_getportstruct(unsigned long arg)
{
stlport_t *portp;
- if (copy_from_user(&stl_dummyport, arg, sizeof(stlport_t)))
+ if (copy_from_user(&stl_dummyport, (void *) arg, sizeof(stlport_t)))
return -EFAULT;
portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
stl_dummyport.portnr);
- if (!portp)
- return -ENODEV;
- return copy_to_user(arg, portp, sizeof(stlport_t)) ? -EFAULT : 0;
+ if (portp == (stlport_t *) NULL)
+ return(-ENODEV);
+ return copy_to_user((void *)arg, portp,
+ sizeof(stlport_t)) ? -EFAULT : 0;
}
/*****************************************************************************/
* Return the entire driver board structure to a user app.
*/
-static int stl_getbrdstruct(stlbrd_t __user *arg)
+static int stl_getbrdstruct(unsigned long arg)
{
stlbrd_t *brdp;
- if (copy_from_user(&stl_dummybrd, arg, sizeof(stlbrd_t)))
+ if (copy_from_user(&stl_dummybrd, (void *) arg, sizeof(stlbrd_t)))
return -EFAULT;
if ((stl_dummybrd.brdnr < 0) || (stl_dummybrd.brdnr >= STL_MAXBRDS))
- return -ENODEV;
+ return(-ENODEV);
brdp = stl_brds[stl_dummybrd.brdnr];
- if (!brdp)
+ if (brdp == (stlbrd_t *) NULL)
return(-ENODEV);
- return copy_to_user(arg, brdp, sizeof(stlbrd_t)) ? -EFAULT : 0;
+ return copy_to_user((void *)arg, brdp, sizeof(stlbrd_t)) ? -EFAULT : 0;
}
/*****************************************************************************/
static int stl_memioctl(struct inode *ip, struct file *fp, unsigned int cmd, unsigned long arg)
{
int brdnr, rc;
- void __user *argp = (void __user *)arg;
-#ifdef DEBUG
+#if DEBUG
printk("stl_memioctl(ip=%x,fp=%x,cmd=%x,arg=%x)\n", (int) ip,
(int) fp, cmd, (int) arg);
#endif
switch (cmd) {
case COM_GETPORTSTATS:
- rc = stl_getportstats(NULL, argp);
+ if ((rc = verify_area(VERIFY_WRITE, (void *) arg,
+ sizeof(comstats_t))) == 0)
+ rc = stl_getportstats((stlport_t *) NULL,
+ (comstats_t *) arg);
break;
case COM_CLRPORTSTATS:
- rc = stl_clrportstats(NULL, argp);
+ if ((rc = verify_area(VERIFY_WRITE, (void *) arg,
+ sizeof(comstats_t))) == 0)
+ rc = stl_clrportstats((stlport_t *) NULL,
+ (comstats_t *) arg);
break;
case COM_GETBRDSTATS:
- rc = stl_getbrdstats(argp);
+ if ((rc = verify_area(VERIFY_WRITE, (void *) arg,
+ sizeof(combrd_t))) == 0)
+ rc = stl_getbrdstats((combrd_t *) arg);
break;
case COM_READPORT:
- rc = stl_getportstruct(argp);
+ if ((rc = verify_area(VERIFY_WRITE, (void *) arg,
+ sizeof(stlport_t))) == 0)
+ rc = stl_getportstruct(arg);
break;
case COM_READBOARD:
- rc = stl_getbrdstruct(argp);
+ if ((rc = verify_area(VERIFY_WRITE, (void *) arg,
+ sizeof(stlbrd_t))) == 0)
+ rc = stl_getbrdstruct(arg);
break;
default:
rc = -ENOIOCTLCMD;
int chipmask, i, j;
int nrchips, uartaddr, ioaddr;
-#ifdef DEBUG
+#if DEBUG
printk("stl_panelinit(brdp=%x,panelp=%x)\n", (int) brdp, (int) panelp);
#endif
static void stl_cd1400portinit(stlbrd_t *brdp, stlpanel_t *panelp, stlport_t *portp)
{
-#ifdef DEBUG
+#if DEBUG
printk("stl_cd1400portinit(brdp=%x,panelp=%x,portp=%x)\n",
(int) brdp, (int) panelp, (int) portp);
#endif
* them all up.
*/
-#ifdef DEBUG
+#if DEBUG
printk("SETPORT: portnr=%d panelnr=%d brdnr=%d\n",
portp->portnr, portp->panelnr, portp->brdnr);
printk(" cor1=%x cor2=%x cor3=%x cor4=%x cor5=%x\n",
unsigned char msvr1, msvr2;
unsigned long flags;
-#ifdef DEBUG
+#if DEBUG
printk("stl_cd1400setsignals(portp=%x,dtr=%d,rts=%d)\n",
(int) portp, dtr, rts);
#endif
unsigned long flags;
int sigs;
-#ifdef DEBUG
+#if DEBUG
printk("stl_cd1400getsignals(portp=%x)\n", (int) portp);
#endif
unsigned char ccr;
unsigned long flags;
-#ifdef DEBUG
+#if DEBUG
printk("stl_cd1400enablerxtx(portp=%x,rx=%d,tx=%d)\n",
(int) portp, rx, tx);
#endif
unsigned char sreron, sreroff;
unsigned long flags;
-#ifdef DEBUG
+#if DEBUG
printk("stl_cd1400startrxtx(portp=%x,rx=%d,tx=%d)\n",
(int) portp, rx, tx);
#endif
{
unsigned long flags;
-#ifdef DEBUG
+#if DEBUG
printk("stl_cd1400disableintrs(portp=%x)\n", (int) portp);
#endif
save_flags(flags);
{
unsigned long flags;
-#ifdef DEBUG
+#if DEBUG
printk("stl_cd1400sendbreak(portp=%x,len=%d)\n", (int) portp, len);
#endif
struct tty_struct *tty;
unsigned long flags;
-#ifdef DEBUG
+#if DEBUG
printk("stl_cd1400flowctrl(portp=%x,state=%x)\n", (int) portp, state);
#endif
struct tty_struct *tty;
unsigned long flags;
-#ifdef DEBUG
+#if DEBUG
printk("stl_cd1400sendflow(portp=%x,state=%x)\n", (int) portp, state);
#endif
{
unsigned long flags;
-#ifdef DEBUG
+#if DEBUG
printk("stl_cd1400flush(portp=%x)\n", (int) portp);
#endif
static int stl_cd1400datastate(stlport_t *portp)
{
-#ifdef DEBUG
+#if DEBUG
printk("stl_cd1400datastate(portp=%x)\n", (int) portp);
#endif
{
unsigned char svrtype;
-#ifdef DEBUG
+#if DEBUG
printk("stl_cd1400eiointr(panelp=%x,iobase=%x)\n",
(int) panelp, iobase);
#endif
{
unsigned char svrtype;
-#ifdef DEBUG
+#if DEBUG
printk("stl_cd1400echintr(panelp=%x,iobase=%x)\n", (int) panelp,
iobase);
#endif
char *head, *tail;
unsigned char ioack, srer;
-#ifdef DEBUG
+#if DEBUG
printk("stl_cd1400txisr(panelp=%x,ioaddr=%x)\n", (int) panelp, ioaddr);
#endif
unsigned char status;
char ch;
-#ifdef DEBUG
+#if DEBUG
printk("stl_cd1400rxisr(panelp=%x,ioaddr=%x)\n", (int) panelp, ioaddr);
#endif
unsigned int ioack;
unsigned char misr;
-#ifdef DEBUG
+#if DEBUG
printk("stl_cd1400mdmisr(panelp=%x)\n", (int) panelp);
#endif
int chipmask, i;
int nrchips, ioaddr;
-#ifdef DEBUG
+#if DEBUG
printk("stl_sc26198panelinit(brdp=%x,panelp=%x)\n",
(int) brdp, (int) panelp);
#endif
static void stl_sc26198portinit(stlbrd_t *brdp, stlpanel_t *panelp, stlport_t *portp)
{
-#ifdef DEBUG
+#if DEBUG
printk("stl_sc26198portinit(brdp=%x,panelp=%x,portp=%x)\n",
(int) brdp, (int) panelp, (int) portp);
#endif
* them all up.
*/
-#ifdef DEBUG
+#if DEBUG
printk("SETPORT: portnr=%d panelnr=%d brdnr=%d\n",
portp->portnr, portp->panelnr, portp->brdnr);
printk(" mr0=%x mr1=%x mr2=%x clk=%x\n", mr0, mr1, mr2, clk);
unsigned char iopioron, iopioroff;
unsigned long flags;
-#ifdef DEBUG
+#if DEBUG
printk("stl_sc26198setsignals(portp=%x,dtr=%d,rts=%d)\n",
(int) portp, dtr, rts);
#endif
unsigned long flags;
int sigs;
-#ifdef DEBUG
+#if DEBUG
printk("stl_sc26198getsignals(portp=%x)\n", (int) portp);
#endif
unsigned char ccr;
unsigned long flags;
-#ifdef DEBUG
+#if DEBUG
printk("stl_sc26198enablerxtx(portp=%x,rx=%d,tx=%d)\n",
(int) portp, rx, tx);
#endif
unsigned char imr;
unsigned long flags;
-#ifdef DEBUG
+#if DEBUG
printk("stl_sc26198startrxtx(portp=%x,rx=%d,tx=%d)\n",
(int) portp, rx, tx);
#endif
{
unsigned long flags;
-#ifdef DEBUG
+#if DEBUG
printk("stl_sc26198disableintrs(portp=%x)\n", (int) portp);
#endif
{
unsigned long flags;
-#ifdef DEBUG
+#if DEBUG
printk("stl_sc26198sendbreak(portp=%x,len=%d)\n", (int) portp, len);
#endif
unsigned long flags;
unsigned char mr0;
-#ifdef DEBUG
+#if DEBUG
printk("stl_sc26198flowctrl(portp=%x,state=%x)\n", (int) portp, state);
#endif
unsigned long flags;
unsigned char mr0;
-#ifdef DEBUG
+#if DEBUG
printk("stl_sc26198sendflow(portp=%x,state=%x)\n", (int) portp, state);
#endif
{
unsigned long flags;
-#ifdef DEBUG
+#if DEBUG
printk("stl_sc26198flush(portp=%x)\n", (int) portp);
#endif
unsigned long flags;
unsigned char sr;
-#ifdef DEBUG
+#if DEBUG
printk("stl_sc26198datastate(portp=%x)\n", (int) portp);
#endif
{
int i;
-#ifdef DEBUG
+#if DEBUG
printk("stl_sc26198wait(portp=%x)\n", (int) portp);
#endif
int len, stlen;
char *head, *tail;
-#ifdef DEBUG
+#if DEBUG
printk("stl_sc26198txisr(portp=%x)\n", (int) portp);
#endif
struct tty_struct *tty;
unsigned int len, buflen, ioaddr;
-#ifdef DEBUG
+#if DEBUG
printk("stl_sc26198rxisr(portp=%x,iack=%x)\n", (int) portp, iack);
#endif
{
unsigned char cir, ipr, xisr;
-#ifdef DEBUG
+#if DEBUG
printk("stl_sc26198otherisr(portp=%x,iack=%x)\n", (int) portp, iack);
#endif
#define PCI_DEVICE_ID_SPECIALIX_SX_XIO_IO8 0x2000
#endif
-#ifdef CONFIG_PCI
static struct pci_device_id sx_pci_tbl[] = {
{ PCI_VENDOR_ID_SPECIALIX, PCI_DEVICE_ID_SPECIALIX_SX_XIO_IO8, PCI_ANY_ID, PCI_ANY_ID },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, sx_pci_tbl);
-#endif /* CONFIG_PCI */
/* Configurable options:
(Don't be too sure that it'll work if you toggle them) */
unsigned int cmd, unsigned long arg)
{
int rc = 0;
- int __user *descr = (int __user *)arg;
- int i;
+ int *descr = (int *)arg, i;
static struct sx_board *board = NULL;
int nbytes, offset;
unsigned long data;
get_user (data, descr++);
while (nbytes && data) {
for (i=0;i<nbytes;i += SX_CHUNK_SIZE) {
- if (copy_from_user(tmp, (char __user *)data+i,
+ if (copy_from_user(tmp, (char *)data + i,
(i + SX_CHUNK_SIZE >
nbytes) ? nbytes - i :
SX_CHUNK_SIZE)) {
{
int rc;
struct sx_port *port = tty->driver_data;
- void __user *argp = (void __user *)arg;
int ival;
/* func_enter2(); */
switch (cmd) {
case TIOCGSOFTCAR:
rc = put_user(((tty->termios->c_cflag & CLOCAL) ? 1 : 0),
- (unsigned __user *) argp);
+ (unsigned int *) arg);
break;
case TIOCSSOFTCAR:
- if ((rc = get_user(ival, (unsigned __user *) argp)) == 0) {
+ if ((rc = get_user(ival, (unsigned int *) arg)) == 0) {
tty->termios->c_cflag =
(tty->termios->c_cflag & ~CLOCAL) |
(ival ? CLOCAL : 0);
}
break;
case TIOCGSERIAL:
- rc = gs_getserial(&port->gs, argp);
+ if ((rc = verify_area(VERIFY_WRITE, (void *) arg,
+ sizeof(struct serial_struct))) == 0)
+ rc = gs_getserial(&port->gs, (struct serial_struct *) arg);
break;
case TIOCSSERIAL:
- rc = gs_setserial(&port->gs, argp);
+ if ((rc = verify_area(VERIFY_READ, (void *) arg,
+ sizeof(struct serial_struct))) == 0)
+ rc = gs_setserial(&port->gs, (struct serial_struct *) arg);
break;
default:
rc = -ENOIOCTLCMD;
if (info->xmit_buf) {
free_page((unsigned long) info->xmit_buf);
- info->xmit_buf = NULL;
+ info->xmit_buf = 0;
}
spin_lock_irqsave(&info->irq_spinlock,flags);
shutdown(info);
tty->closing = 0;
- info->tty = NULL;
+ info->tty = 0;
if (info->blocked_open) {
if (info->close_delay) {
info->count = 0;
info->flags &= ~ASYNC_NORMAL_ACTIVE;
- info->tty = NULL;
+ info->tty = 0;
wake_up_interruptible(&info->open_wait);
cleanup:
if (retval) {
if (tty->count == 1)
- info->tty = NULL;/* tty layer will release tty struct */
+ info->tty = 0; /* tty layer will release tty struct */
if(info->count)
info->count--;
}
}
if (info->memory_base){
iounmap(info->memory_base);
- info->memory_base = NULL;
+ info->memory_base = 0;
}
if (info->lcr_base){
iounmap(info->lcr_base - info->lcr_offset);
- info->lcr_base = NULL;
+ info->lcr_base = 0;
}
if ( debug_level >= DEBUG_LEVEL_INFO )
#define TMCS 0x64
#define TEPR 0x65
-/*
- * FIXME: DAR here clashed with asm-ppc/reg.h and asm-sh/.../dma.h
- */
-#undef DAR
/* DMA Controller Register macros */
#define DAR 0x80
#define DARL 0x80
cleanup:
if (retval) {
if (tty->count == 1)
- info->tty = NULL;/* tty layer will release tty struct */
+ info->tty = 0; /* tty layer will release tty struct */
if(info->count)
info->count--;
}
shutdown(info);
tty->closing = 0;
- info->tty = NULL;
+ info->tty = 0;
if (info->blocked_open) {
if (info->close_delay) {
info->count = 0;
info->flags &= ~ASYNC_NORMAL_ACTIVE;
- info->tty = NULL;
+ info->tty = 0;
wake_up_interruptible(&info->open_wait);
}
if (info->tx_buf) {
kfree(info->tx_buf);
- info->tx_buf = NULL;
+ info->tx_buf = 0;
}
spin_lock_irqsave(&info->lock,flags);
if (info->memory_base){
iounmap(info->memory_base);
- info->memory_base = NULL;
+ info->memory_base = 0;
}
if (info->sca_base) {
iounmap(info->sca_base - info->sca_offset);
- info->sca_base=NULL;
+ info->sca_base=0;
}
if (info->statctrl_base) {
iounmap(info->statctrl_base - info->statctrl_offset);
- info->statctrl_base=NULL;
+ info->statctrl_base=0;
}
if (info->lcr_base){
iounmap(info->lcr_base - info->lcr_offset);
- info->lcr_base = NULL;
+ info->lcr_base = 0;
}
if ( debug_level >= DEBUG_LEVEL_INFO )
u32 speed = info->params.clock_speed;
info->params.clock_speed = 3686400;
- info->tty = NULL;
+ info->tty = 0;
/* assume failure */
info->init_error = DiagStatus_DmaFailure;
init_ti_parallel(minor);
parport_release(table[minor].dev);
- return nonseekable_open(inode, file);
+ return 0;
}
static int
if (count == 0)
return 0;
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
parport_claim_or_block(table[minor].dev);
while (n < count) {
printk(TPQIC02_NAME ": ll_do_qic_cmd(%x, %ld) failed\n", cmd, (long) timeout);
return -EIO;
}
-#ifdef OBSOLETE
+#if OBSOLETE
/* wait for ready since it may not be active immediately after reading status */
while ((inb_p(QIC02_STAT_PORT) & QIC02_STAT_READY) != 0)
cpu_relax();
if (stat != TE_OK)
return stat;
-#ifdef OBSOLETE
+#if OBSOLETE
/************* not needed iff rd_status() would wait for ready!!!!!! **********/
if (wait_for_ready(TIM_S) != TE_OK) { /*** not sure this is needed ***/
tpqputs(TPQD_ALWAYS, "wait_for_ready failed in start_dma");
release_region(QIC02_TAPE_PORT, QIC02_TAPE_PORT_RANGE);
if (buffaddr)
free_pages((unsigned long) buffaddr, get_order(TPQBUF_SIZE));
- buffaddr = NULL; /* Better to cause a panic than overwite someone else */
+ buffaddr = 0; /* Better to cause a panic than overwite someone else */
status_zombie = YES;
} /* qic02_release_resources */
ssize_t redirected_tty_write(struct file *, const char __user *, size_t, loff_t *);
static unsigned int tty_poll(struct file *, poll_table *);
static int tty_open(struct inode *, struct file *);
-static int ptmx_open(struct inode *, struct file *);
static int tty_release(struct inode *, struct file *);
int tty_ioctl(struct inode * inode, struct file * file,
unsigned int cmd, unsigned long arg);
static ssize_t hung_up_tty_read(struct file * file, char __user * buf,
size_t count, loff_t *ppos)
{
+ /* Can't seek (pread) on ttys. */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
return 0;
}
static ssize_t hung_up_tty_write(struct file * file, const char __user * buf,
size_t count, loff_t *ppos)
{
+ /* Can't seek (pwrite) on ttys. */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
return -EIO;
}
.fasync = tty_fasync,
};
-#ifdef CONFIG_UNIX98_PTYS
-static struct file_operations ptmx_fops = {
- .llseek = no_llseek,
- .read = tty_read,
- .write = tty_write,
- .poll = tty_poll,
- .ioctl = tty_ioctl,
- .open = ptmx_open,
- .release = tty_release,
- .fasync = tty_fasync,
-};
-#endif
-
static struct file_operations console_fops = {
.llseek = no_llseek,
.read = tty_read,
struct tty_struct * tty;
struct inode *inode;
+ /* Can't seek (pread) on ttys. */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
tty = (struct tty_struct *)file->private_data;
inode = file->f_dentry->d_inode;
if (tty_paranoia_check(tty, inode, "tty_read"))
struct tty_struct * tty;
struct inode *inode = file->f_dentry->d_inode;
+ /* Can't seek (pwrite) on ttys. */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
tty = (struct tty_struct *)file->private_data;
if (tty_paranoia_check(tty, inode, "tty_write"))
return -EIO;
if (p) {
ssize_t res;
+ /* Can't seek (pwrite) on ttys. */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
res = vfs_write(p, buf, count, &p->f_pos);
fput(p);
return res;
{
struct tty_struct *tty, *o_tty;
int pty_master, tty_closing, o_tty_closing, do_sleep;
- int devpts_master, devpts;
+ int devpts_master;
int idx;
char buf[64];
idx = tty->index;
pty_master = (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
tty->driver->subtype == PTY_TYPE_MASTER);
- devpts = (tty->driver->flags & TTY_DRIVER_DEVPTS_MEM) != 0;
- devpts_master = pty_master && devpts;
+ devpts_master = pty_master && (tty->driver->flags & TTY_DRIVER_DEVPTS_MEM);
o_tty = tty->link;
#ifdef TTY_PARANOIA_CHECK
#ifdef CONFIG_UNIX98_PTYS
/* Make this pty number available for reallocation */
- if (devpts) {
+ if (devpts_master) {
down(&allocated_ptys_lock);
idr_remove(&allocated_ptys, idx);
up(&allocated_ptys_lock);
dev_t device = inode->i_rdev;
unsigned short saved_flags = filp->f_flags;
- nonseekable_open(inode, filp);
retry_open:
noctty = filp->f_flags & O_NOCTTY;
index = -1;
return -ENODEV;
}
- driver = get_tty_driver(device, &index);
- if (!driver)
- return -ENODEV;
+#ifdef CONFIG_UNIX98_PTYS
+ if (device == MKDEV(TTYAUX_MAJOR,2)) {
+ int idr_ret;
+
+ /* find a device that is not in use. */
+ down(&allocated_ptys_lock);
+ if (!idr_pre_get(&allocated_ptys, GFP_KERNEL)) {
+ up(&allocated_ptys_lock);
+ return -ENOMEM;
+ }
+ idr_ret = idr_get_new(&allocated_ptys, NULL, &index);
+ if (idr_ret < 0) {
+ up(&allocated_ptys_lock);
+ if (idr_ret == -EAGAIN)
+ return -ENOMEM;
+ return -EIO;
+ }
+ if (index >= pty_limit) {
+ idr_remove(&allocated_ptys, index);
+ up(&allocated_ptys_lock);
+ return -EIO;
+ }
+ up(&allocated_ptys_lock);
+
+ driver = ptm_driver;
+ retval = init_dev(driver, index, &tty);
+ if (retval) {
+ down(&allocated_ptys_lock);
+ idr_remove(&allocated_ptys, index);
+ up(&allocated_ptys_lock);
+ return retval;
+ }
+
+ set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
+ if (devpts_pty_new(tty->link))
+ retval = -ENOMEM;
+ } else
+#endif
+ {
+ driver = get_tty_driver(device, &index);
+ if (!driver)
+ return -ENODEV;
got_driver:
- retval = init_dev(driver, index, &tty);
- if (retval)
- return retval;
+ retval = init_dev(driver, index, &tty);
+ if (retval)
+ return retval;
+ }
filp->private_data = tty;
file_move(filp, &tty->tty_files);
printk(KERN_DEBUG "error %d in opening %s...", retval,
tty->name);
#endif
+
+#ifdef CONFIG_UNIX98_PTYS
+ if (index != -1) {
+ down(&allocated_ptys_lock);
+ idr_remove(&allocated_ptys, index);
+ up(&allocated_ptys_lock);
+ }
+#endif
+
release_dev(filp);
if (retval != -ERESTARTSYS)
return retval;
return 0;
}
-#ifdef CONFIG_UNIX98_PTYS
-static int ptmx_open(struct inode * inode, struct file * filp)
-{
- struct tty_struct *tty;
- int retval;
- int index;
- int idr_ret;
-
- nonseekable_open(inode, filp);
-
- /* find a device that is not in use. */
- down(&allocated_ptys_lock);
- if (!idr_pre_get(&allocated_ptys, GFP_KERNEL)) {
- up(&allocated_ptys_lock);
- return -ENOMEM;
- }
- idr_ret = idr_get_new(&allocated_ptys, NULL, &index);
- if (idr_ret < 0) {
- up(&allocated_ptys_lock);
- if (idr_ret == -EAGAIN)
- return -ENOMEM;
- return -EIO;
- }
- if (index >= pty_limit) {
- idr_remove(&allocated_ptys, index);
- up(&allocated_ptys_lock);
- return -EIO;
- }
- up(&allocated_ptys_lock);
-
- retval = init_dev(ptm_driver, index, &tty);
- if (retval)
- goto out;
-
- set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
- filp->private_data = tty;
- file_move(filp, &tty->tty_files);
-
- retval = -ENOMEM;
- if (devpts_pty_new(tty->link))
- goto out1;
-
- check_tty_count(tty, "tty_open");
- retval = ptm_driver->open(tty, filp);
- if (!retval)
- return 0;
-out1:
- release_dev(filp);
-out:
- down(&allocated_ptys_lock);
- idr_remove(&allocated_ptys, index);
- up(&allocated_ptys_lock);
- return retval;
-}
-#endif
-
static int tty_release(struct inode * inode, struct file * filp)
{
lock_kernel();
class_simple_device_add(tty_class, MKDEV(TTYAUX_MAJOR, 1), NULL, "console");
#ifdef CONFIG_UNIX98_PTYS
- cdev_init(&ptmx_cdev, &ptmx_fops);
+ cdev_init(&ptmx_cdev, &tty_fops);
if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
register_chrdev_region(MKDEV(TTYAUX_MAJOR, 2), 1, "/dev/ptmx") < 0)
panic("Couldn't register /dev/ptmx driver\n");
#include <linux/workqueue.h>
#include <linux/bootmem.h>
#include <linux/pm.h>
-#include <linux/font.h>
#include <asm/io.h>
#include <asm/system.h>
#define max_font_size 65536
-int con_font_get(int currcons, struct console_font_op *op)
+int con_font_op(int currcons, struct console_font_op *op)
{
- struct console_font font;
int rc = -EINVAL;
- int c;
+ int size = max_font_size, set;
+ u8 *temp = NULL;
+ struct console_font_op old_op;
if (vt_cons[currcons]->vc_mode != KD_TEXT)
- return -EINVAL;
-
+ goto quit;
+ memcpy(&old_op, op, sizeof(old_op));
+ if (op->op == KD_FONT_OP_SET) {
+ if (!op->data)
+ return -EINVAL;
+ if (op->charcount > 512)
+ goto quit;
+ if (!op->height) { /* Need to guess font height [compat] */
+ int h, i;
+ u8 __user *charmap = op->data;
+ u8 tmp;
+
+ /* If from KDFONTOP ioctl, don't allow things which can be done in userland,
+ so that we can get rid of this soon */
+ if (!(op->flags & KD_FONT_FLAG_OLD))
+ goto quit;
+ rc = -EFAULT;
+ for (h = 32; h > 0; h--)
+ for (i = 0; i < op->charcount; i++) {
+ if (get_user(tmp, &charmap[32*i+h-1]))
+ goto quit;
+ if (tmp)
+ goto nonzero;
+ }
+ rc = -EINVAL;
+ goto quit;
+ nonzero:
+ rc = -EINVAL;
+ op->height = h;
+ }
+ if (op->width > 32 || op->height > 32)
+ goto quit;
+ size = (op->width+7)/8 * 32 * op->charcount;
+ if (size > max_font_size)
+ return -ENOSPC;
+ set = 1;
+ } else if (op->op == KD_FONT_OP_GET)
+ set = 0;
+ else {
+ acquire_console_sem();
+ rc = sw->con_font_op(vc_cons[currcons].d, op);
+ release_console_sem();
+ return rc;
+ }
if (op->data) {
- font.data = kmalloc(max_font_size, GFP_KERNEL);
- if (!font.data)
+ temp = kmalloc(size, GFP_KERNEL);
+ if (!temp)
return -ENOMEM;
- } else
- font.data = NULL;
-
- acquire_console_sem();
- if (sw->con_font_get)
- rc = sw->con_font_get(vc_cons[currcons].d, &font);
- else
- rc = -ENOSYS;
- release_console_sem();
-
- if (rc)
- goto out;
-
- c = (font.width+7)/8 * 32 * font.charcount;
-
- if (op->data && font.charcount > op->charcount)
- rc = -ENOSPC;
- if (!(op->flags & KD_FONT_FLAG_OLD)) {
- if (font.width > op->width || font.height > op->height)
- rc = -ENOSPC;
- } else {
- if (font.width != 8)
- rc = -EIO;
- else if ((op->height && font.height > op->height) ||
- font.height > 32)
- rc = -ENOSPC;
+ if (set && copy_from_user(temp, op->data, size)) {
+ rc = -EFAULT;
+ goto quit;
+ }
+ op->data = temp;
}
- if (rc)
- goto out;
-
- if (op->data && copy_to_user(op->data, font.data, c))
- rc = -EFAULT;
-
-out:
- kfree(font.data);
- return rc;
-}
-
-int con_font_set(int currcons, struct console_font_op *op)
-{
- struct console_font font;
- int rc = -EINVAL;
- int size;
- if (vt_cons[currcons]->vc_mode != KD_TEXT)
- return -EINVAL;
- if (!op->data)
- return -EINVAL;
- if (op->charcount > 512)
- return -EINVAL;
- if (!op->height) { /* Need to guess font height [compat] */
- int h, i;
- u8 __user *charmap = op->data;
- u8 tmp;
-
- /* If from KDFONTOP ioctl, don't allow things which can be done in userland,
- so that we can get rid of this soon */
- if (!(op->flags & KD_FONT_FLAG_OLD))
- return -EINVAL;
- for (h = 32; h > 0; h--)
- for (i = 0; i < op->charcount; i++) {
- if (get_user(tmp, &charmap[32*i+h-1]))
- return -EFAULT;
- if (tmp)
- goto nonzero;
- }
- return -EINVAL;
- nonzero:
- op->height = h;
- }
- if (op->width <= 0 || op->width > 32 || op->height > 32)
- return -EINVAL;
- size = (op->width+7)/8 * 32 * op->charcount;
- if (size > max_font_size)
- return -ENOSPC;
- font.charcount = op->charcount;
- font.height = op->height;
- font.width = op->width;
- font.data = kmalloc(size, GFP_KERNEL);
- if (!font.data)
- return -ENOMEM;
- if (copy_from_user(font.data, op->data, size)) {
- kfree(font.data);
- return -EFAULT;
- }
acquire_console_sem();
- if (sw->con_font_set)
- rc = sw->con_font_set(vc_cons[currcons].d, &font, op->flags);
- else
- rc = -ENOSYS;
+ rc = sw->con_font_op(vc_cons[currcons].d, op);
release_console_sem();
- kfree(font.data);
- return rc;
-}
-
-int con_font_default(int currcons, struct console_font_op *op)
-{
- struct console_font font = {.width = op->width, .height = op->height};
- char name[MAX_FONT_NAME];
- char *s = name;
- int rc;
-
- if (vt_cons[currcons]->vc_mode != KD_TEXT)
- return -EINVAL;
-
- if (!op->data)
- s = NULL;
- else if (strncpy_from_user(name, op->data, MAX_FONT_NAME - 1) < 0)
- return -EFAULT;
- else
- name[MAX_FONT_NAME - 1] = 0;
- acquire_console_sem();
- if (sw->con_font_default)
- rc = sw->con_font_default(vc_cons[currcons].d, &font, s);
- else
- rc = -ENOSYS;
- release_console_sem();
- if (!rc) {
- op->width = font.width;
- op->height = font.height;
+ op->data = old_op.data;
+ if (!rc && !set) {
+ int c = (op->width+7)/8 * 32 * op->charcount;
+
+ if (op->data && op->charcount > old_op.charcount)
+ rc = -ENOSPC;
+ if (!(op->flags & KD_FONT_FLAG_OLD)) {
+ if (op->width > old_op.width ||
+ op->height > old_op.height)
+ rc = -ENOSPC;
+ } else {
+ if (op->width != 8)
+ rc = -EIO;
+ else if ((old_op.height && op->height > old_op.height) ||
+ op->height > 32)
+ rc = -ENOSPC;
+ }
+ if (!rc && op->data && copy_to_user(op->data, temp, c))
+ rc = -EFAULT;
}
+quit: if (temp)
+ kfree(temp);
return rc;
}
-int con_font_copy(int currcons, struct console_font_op *op)
-{
- int con = op->height;
- struct vc_data *vc;
- int rc;
-
- if (vt_cons[currcons]->vc_mode != KD_TEXT)
- return -EINVAL;
-
- acquire_console_sem();
- vc = vc_cons[currcons].d;
- if (!sw->con_font_copy)
- rc = -ENOSYS;
- else if (con < 0 || !vc_cons_allocated(con))
- rc = -ENOTTY;
- else if (con == vc->vc_num) /* nothing to do */
- rc = 0;
- else
- rc = sw->con_font_copy(vc, con);
- release_console_sem();
- return rc;
-}
-
-int con_font_op(int currcons, struct console_font_op *op)
-{
- switch (op->op) {
- case KD_FONT_OP_SET:
- return con_font_set(currcons, op);
- case KD_FONT_OP_GET:
- return con_font_get(currcons, op);
- case KD_FONT_OP_SET_DEFAULT:
- return con_font_default(currcons, op);
- case KD_FONT_OP_COPY:
- return con_font_copy(currcons, op);
- }
- return -ENOSYS;
-}
-
/*
* Interface exported to selection and vcs.
*/
op.width = 8;
op.height = 0;
op.charcount = 256;
- op.data = up;
+ op.data = (char *) arg;
return con_font_op(fg_console, &op);
}
op.width = 8;
op.height = 32;
op.charcount = 256;
- op.data = up;
+ op.data = (char *) arg;
return con_font_op(fg_console, &op);
}
Say N if you are unsure.
-config IXP2000_WATCHDOG
- tristate "IXP2000 Watchdog"
- depends on WATCHDOG && ARCH_IXP2000
- help
- Say Y here if to include support for the watchdog timer
- in the Intel IXP2000(2400, 2800, 2850) network processors.
- This driver can be built as a module by choosing M. The module
- will be called ixp2000_wdt.
-
- Say N if you are unsure.
-
config SA1100_WATCHDOG
tristate "SA1100/PXA2xx watchdog"
depends on WATCHDOG && ( ARCH_SA1100 || ARCH_PXA )
obj-$(CONFIG_PCIPCWATCHDOG) += pcwd_pci.o
obj-$(CONFIG_USBPCWATCHDOG) += pcwd_usb.o
obj-$(CONFIG_IXP4XX_WATCHDOG) += ixp4xx_wdt.o
-obj-$(CONFIG_IXP2000_WATCHDOG) += ixp2000_wdt.o
static ssize_t acq_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
+ /* Can't seek (pwrite) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
/* See if we got the magic character 'V' and reload the timer */
if(count) {
if (!nowayout) {
/* Activate */
acq_keepalive();
- return nonseekable_open(inode, file);
+ return 0;
}
static int acq_close(struct inode *inode, struct file *file)
static ssize_t
advwdt_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
+ /* Can't seek (pwrite) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
if (count) {
if (!nowayout) {
size_t i;
*/
advwdt_ping();
- return nonseekable_open(inode, file);
+ return 0;
}
static int
static ssize_t ali_write(struct file *file, const char __user *data,
size_t len, loff_t * ppos)
{
+ /* Can't seek (pwrite) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
/* See if we got the magic character 'V' and reload the timer */
if (len) {
if (!nowayout) {
/* Activate */
ali_start();
- return nonseekable_open(inode, file);
+ return 0;
}
/*
static ssize_t fop_write(struct file * file, const char __user * buf, size_t count, loff_t * ppos)
{
+ /* We can't seek */
+ if(ppos != &file->f_pos)
+ return -ESPIPE;
+
/* See if we got the magic character 'V' and reload the timer */
if(count) {
if (!nowayout) {
return -EBUSY;
/* Good, fire up the show */
wdt_startup();
- return nonseekable_open(inode, file);
+ return 0;
}
static int fop_close(struct inode * inode, struct file * file)
static ssize_t eurwdt_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
+ /* Can't seek (pwrite) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
if (count) {
if (!nowayout) {
size_t i;
eurwdt_timeout = WDT_TIMEOUT; /* initial timeout */
/* Activate the WDT */
eurwdt_activate_timer();
- return nonseekable_open(inode, file);
+ return 0;
}
/**
*/
tco_timer_keepalive ();
tco_timer_start ();
- return nonseekable_open(inode, file);
+ return 0;
}
static int i8xx_tco_release (struct inode *inode, struct file *file)
static ssize_t i8xx_tco_write (struct file *file, const char __user *data,
size_t len, loff_t * ppos)
{
+ /* Can't seek (pwrite) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
/* See if we got the magic character 'V' and reload the timer */
if (len) {
if (!nowayout) {
static ssize_t
ibwdt_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
+ /* Can't seek (pwrite) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
if (count) {
if (!nowayout) {
size_t i;
/* Activate */
ibwdt_ping();
spin_unlock(&ibwdt_lock);
- return nonseekable_open(inode, file);
+ return 0;
}
static int
indydog_alive = 1;
printk(KERN_INFO "Started watchdog timer.\n");
- return nonseekable_open(inode, file);
+ return 0;
}
static int indydog_release(struct inode *inode, struct file *file)
static ssize_t indydog_write(struct file *file, const char *data, size_t len, loff_t *ppos)
{
+ /* Can't seek (pwrite) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
/* Refresh the timer. */
if (len) {
indydog_ping();
+++ /dev/null
-/*
- * drivers/watchdog/ixp2000_wdt.c
- *
- * Watchdog driver for Intel IXP2000 network processors
- *
- * Adapted from the IXP4xx watchdog driver by Lennert Buytenhek.
- * The original version carries these notices:
- *
- * Author: Deepak Saxena <dsaxena@plexity.net>
- *
- * Copyright 2004 (c) MontaVista, Software, Inc.
- * Based on sa1100 driver, Copyright (C) 2000 Oleg Drokin <green@crimea.edu>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/fs.h>
-#include <linux/miscdevice.h>
-#include <linux/watchdog.h>
-#include <linux/init.h>
-
-#include <asm/hardware.h>
-#include <asm/bitops.h>
-#include <asm/uaccess.h>
-
-#ifdef CONFIG_WATCHDOG_NOWAYOUT
-static int nowayout = 1;
-#else
-static int nowayout = 0;
-#endif
-static unsigned int heartbeat = 60; /* (secs) Default is 1 minute */
-static unsigned long wdt_status;
-
-#define WDT_IN_USE 0
-#define WDT_OK_TO_CLOSE 1
-
-static unsigned long wdt_tick_rate;
-
-static void
-wdt_enable(void)
-{
- ixp2000_reg_write(IXP2000_RESET0, *(IXP2000_RESET0) | WDT_RESET_ENABLE);
- ixp2000_reg_write(IXP2000_TWDE, WDT_ENABLE);
- ixp2000_reg_write(IXP2000_T4_CLD, heartbeat * wdt_tick_rate);
- ixp2000_reg_write(IXP2000_T4_CTL, TIMER_DIVIDER_256 | TIMER_ENABLE);
-}
-
-static void
-wdt_disable(void)
-{
- ixp2000_reg_write(IXP2000_T4_CTL, 0);
-}
-
-static void
-wdt_keepalive(void)
-{
- ixp2000_reg_write(IXP2000_T4_CLD, heartbeat * wdt_tick_rate);
-}
-
-static int
-ixp2000_wdt_open(struct inode *inode, struct file *file)
-{
- if (test_and_set_bit(WDT_IN_USE, &wdt_status))
- return -EBUSY;
-
- clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
-
- wdt_enable();
-
- return nonseekable_open(inode, file);
-}
-
-static ssize_t
-ixp2000_wdt_write(struct file *file, const char *data, size_t len, loff_t *ppos)
-{
- if (len) {
- if (!nowayout) {
- size_t i;
-
- clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
-
- for (i = 0; i != len; i++) {
- char c;
-
- if (get_user(c, data + i))
- return -EFAULT;
- if (c == 'V')
- set_bit(WDT_OK_TO_CLOSE, &wdt_status);
- }
- }
- wdt_keepalive();
- }
-
- return len;
-}
-
-
-static struct watchdog_info ident = {
- .options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT |
- WDIOF_KEEPALIVEPING,
- .identity = "IXP2000 Watchdog",
-};
-
-static int
-ixp2000_wdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- int ret = -ENOIOCTLCMD;
- int time;
-
- switch (cmd) {
- case WDIOC_GETSUPPORT:
- ret = copy_to_user((struct watchdog_info *)arg, &ident,
- sizeof(ident)) ? -EFAULT : 0;
- break;
-
- case WDIOC_GETSTATUS:
- ret = put_user(0, (int *)arg);
- break;
-
- case WDIOC_GETBOOTSTATUS:
- ret = put_user(0, (int *)arg);
- break;
-
- case WDIOC_SETTIMEOUT:
- ret = get_user(time, (int *)arg);
- if (ret)
- break;
-
- if (time <= 0 || time > 60) {
- ret = -EINVAL;
- break;
- }
-
- heartbeat = time;
- wdt_keepalive();
- /* Fall through */
-
- case WDIOC_GETTIMEOUT:
- ret = put_user(heartbeat, (int *)arg);
- break;
-
- case WDIOC_KEEPALIVE:
- wdt_enable();
- ret = 0;
- break;
- }
-
- return ret;
-}
-
-static int
-ixp2000_wdt_release(struct inode *inode, struct file *file)
-{
- if (test_bit(WDT_OK_TO_CLOSE, &wdt_status)) {
- wdt_disable();
- } else {
- printk(KERN_CRIT "WATCHDOG: Device closed unexpectdly - "
- "timer will not stop\n");
- }
-
- clear_bit(WDT_IN_USE, &wdt_status);
- clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
-
- return 0;
-}
-
-
-static struct file_operations ixp2000_wdt_fops =
-{
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .write = ixp2000_wdt_write,
- .ioctl = ixp2000_wdt_ioctl,
- .open = ixp2000_wdt_open,
- .release = ixp2000_wdt_release,
-};
-
-static struct miscdevice ixp2000_wdt_miscdev =
-{
- .minor = WATCHDOG_MINOR,
- .name = "IXP2000 Watchdog",
- .fops = &ixp2000_wdt_fops,
-};
-
-static int __init ixp2000_wdt_init(void)
-{
- wdt_tick_rate = (*IXP2000_T1_CLD * HZ)/ 256;;
-
- return misc_register(&ixp2000_wdt_miscdev);
-}
-
-static void __exit ixp2000_wdt_exit(void)
-{
- misc_deregister(&ixp2000_wdt_miscdev);
-}
-
-module_init(ixp2000_wdt_init);
-module_exit(ixp2000_wdt_exit);
-
-MODULE_AUTHOR("Deepak Saxena <dsaxena@plexity.net">);
-MODULE_DESCRIPTION("IXP2000 Network Processor Watchdog");
-
-module_param(heartbeat, int, 0);
-MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds (default 60s)");
-
-module_param(nowayout, int, 0);
-MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started");
-
-MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
-
wdt_enable();
- return nonseekable_open(inode, file);
+ return 0;
}
static ssize_t
ixp4xx_wdt_write(struct file *file, const char *data, size_t len, loff_t *ppos)
{
+ /* Can't seek (pwrite) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
if (len) {
if (!nowayout) {
size_t i;
static ssize_t zf_write(struct file *file, const char __user *buf, size_t count,
loff_t *ppos)
{
+ /* Can't seek (pwrite) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
/* See if we got the magic character */
if(count){
zf_timer_on();
- return nonseekable_open(inode, file);
+ return 0;
}
static int zf_close(struct inode *inode, struct file *file)
mixcomwd_timer_alive=0;
}
}
- return nonseekable_open(inode, file);
+ return 0;
}
static int mixcomwd_release(struct inode *inode, struct file *file)
static ssize_t mixcomwd_write(struct file *file, const char __user *data, size_t len, loff_t *ppos)
{
+ if (ppos != &file->f_pos) {
+ return -ESPIPE;
+ }
+
if(len)
{
if (!nowayout) {
static ssize_t pcwd_write(struct file *file, const char __user *buf, size_t len,
loff_t *ppos)
{
+ /* Can't seek (pwrite) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
if (len) {
if (!nowayout) {
size_t i;
/* Activate */
pcwd_start();
pcwd_keepalive();
- return nonseekable_open(inode, file);
+ return(0);
}
static int pcwd_close(struct inode *inode, struct file *file)
{
int temperature;
+ /* Can't seek (pread) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
if (pcwd_get_temperature(&temperature))
return -EFAULT;
if (!supports_temp)
return -ENODEV;
- return nonseekable_open(inode, file);
+ return 0;
}
static int pcwd_temp_close(struct inode *inode, struct file *file)
static ssize_t pcipcwd_write(struct file *file, const char __user *data,
size_t len, loff_t *ppos)
{
+ /* Can't seek (pwrite) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
/* See if we got the magic character 'V' and reload the timer */
if (len) {
if (!nowayout) {
/* Activate */
pcipcwd_start();
pcipcwd_keepalive();
- return nonseekable_open(inode, file);
+ return 0;
}
static int pcipcwd_release(struct inode *inode, struct file *file)
{
int temperature;
+ /* Can't seek (pwrite) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
if (pcipcwd_get_temperature(&temperature))
return -EFAULT;
if (!pcipcwd_private.supports_temp)
return -ENODEV;
- return nonseekable_open(inode, file);
+ return 0;
}
static int pcipcwd_temp_release(struct inode *inode, struct file *file)
static ssize_t usb_pcwd_write(struct file *file, const char __user *data,
size_t len, loff_t *ppos)
{
+ /* Can't seek (pwrite) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
/* See if we got the magic character 'V' and reload the timer */
if (len) {
if (!nowayout) {
/* Activate */
usb_pcwd_start(usb_pcwd_device);
usb_pcwd_keepalive(usb_pcwd_device);
- return nonseekable_open(inode, file);
+ return 0;
}
static int usb_pcwd_release(struct inode *inode, struct file *file)
{
int temperature;
+ /* Can't seek (pwrite) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
if (usb_pcwd_get_temperature(usb_pcwd_device, &temperature))
return -EFAULT;
static int usb_pcwd_temperature_open(struct inode *inode, struct file *file)
{
- return nonseekable_open(inode, file);
+ return 0;
}
static int usb_pcwd_temperature_release(struct inode *inode, struct file *file)
*/
static int sa1100dog_open(struct inode *inode, struct file *file)
{
- nonseekable_open(inode, file);
if (test_and_set_bit(1,&sa1100wdt_users))
return -EBUSY;
static ssize_t sa1100dog_write(struct file *file, const char *data, size_t len, loff_t *ppos)
{
+ /* Can't seek (pwrite) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
if (len) {
if (!nowayout) {
size_t i;
static ssize_t fop_write(struct file * file, const char __user * buf, size_t count, loff_t * ppos)
{
+ /* We can't seek */
+ if(ppos != &file->f_pos)
+ return -ESPIPE;
+
/* See if we got the magic character 'V' and reload the timer */
if(count)
{
static int fop_open(struct inode * inode, struct file * file)
{
- nonseekable_open(inode, file);
-
/* Just in case we're already talking to someone... */
if(test_and_set_bit(0, &wdt_is_open))
return -EBUSY;
static int sc1200wdt_open(struct inode *inode, struct file *file)
{
- nonseekable_open(inode, file);
-
/* allow one at a time */
if (down_trylock(&open_sem))
return -EBUSY;
static ssize_t sc1200wdt_write(struct file *file, const char __user *data, size_t len, loff_t *ppos)
{
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
if (len) {
if (!nowayout) {
size_t i;
static ssize_t fop_write(struct file * file, const char __user * buf, size_t count, loff_t * ppos)
{
+ /* We can't seek */
+ if(ppos != &file->f_pos)
+ return -ESPIPE;
+
/* See if we got the magic character 'V' and reload the timer */
if(count) {
if (!nowayout) {
static int fop_open(struct inode * inode, struct file * file)
{
- nonseekable_open(inode, file);
-
/* Just in case we're already talking to someone... */
if(test_and_set_bit(0, &wdt_is_open))
return -EBUSY;
return -EBUSY;
scx200_wdt_enable();
- return nonseekable_open(inode, file);
+ return 0;
}
static int scx200_wdt_release(struct inode *inode, struct file *file)
static ssize_t scx200_wdt_write(struct file *file, const char __user *data,
size_t len, loff_t *ppos)
{
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
/* check for a magic close character */
if (len)
{
sh_wdt_start();
- return nonseekable_open(inode, file);
+ return 0;
}
/**
static ssize_t sh_wdt_write(struct file *file, const char *buf,
size_t count, loff_t *ppos)
{
+ /* Can't seek (pwrite) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
if (count) {
if (!nowayout) {
size_t i;
* Activate timer
*/
softdog_keepalive();
- return nonseekable_open(inode, file);
+ return 0;
}
static int softdog_release(struct inode *inode, struct file *file)
static ssize_t softdog_write(struct file *file, const char __user *data, size_t len, loff_t *ppos)
{
+ /* Can't seek (pwrite) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
/*
* Refresh the timer.
*/
static ssize_t
wdt_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
+ /* Can't seek (pwrite) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
if (count) {
if (!nowayout) {
size_t i;
*/
wdt_ping();
- return nonseekable_open(inode, file);
+ return 0;
}
static int
static ssize_t fop_write(struct file * file, const char __user * buf, size_t count, loff_t * ppos)
{
+ /* We can't seek */
+ if(ppos != &file->f_pos)
+ return -ESPIPE;
+
/* See if we got the magic character 'V' and reload the timer */
if(count)
{
/* Good, fire up the show */
wdt_startup();
- return nonseekable_open(inode, file);
+ return 0;
}
static int fop_close(struct inode * inode, struct file * file)
static ssize_t wafwdt_write(struct file *file, const char __user *buf, size_t count, loff_t * ppos)
{
+ /* Can't seek (pwrite) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
/* See if we got the magic character 'V' and reload the timer */
if (count) {
if (!nowayout) {
* Activate
*/
wafwdt_start();
- return nonseekable_open(inode, file);
+ return 0;
}
static int
static ssize_t wdt_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
+ /* Can't seek (pwrite) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
if(count) {
if (!nowayout) {
size_t i;
* Activate
*/
wdt_start();
- return nonseekable_open(inode, file);
+ return 0;
}
/**
{
int temperature;
+ /* Can't seek (pread) on this device */
+ if (ptr != &file->f_pos)
+ return -ESPIPE;
+
if (wdt_get_temperature(&temperature))
return -EFAULT;
static int wdt_temp_open(struct inode *inode, struct file *file)
{
- return nonseekable_open(inode, file);
+ return 0;
}
/**
ret = 0;
#endif
- nonseekable_open(inode, file);
return ret;
}
static ssize_t
watchdog_write(struct file *file, const char *data, size_t len, loff_t *ppos)
{
+ /* Can't seek (pwrite) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
/*
* Refresh the timer.
*/
__module_get(THIS_MODULE);
wdt977_start();
- return nonseekable_open(inode, file);
+ return 0;
}
static int wdt977_release(struct inode *inode, struct file *file)
* write of data will do, as we we don't define content meaning.
*/
-static ssize_t wdt977_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
+static ssize_t wdt977_write(struct file *file, const char *buf, size_t count, loff_t *ppos)
{
+ /* Can't seek (pwrite) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
if (count) {
if (!nowayout) {
size_t i;
int status;
int new_options, retval = -EINVAL;
int new_timeout;
- union {
- struct watchdog_info __user *ident;
- int __user *i;
- } uarg;
-
- uarg.i = (int __user *)arg;
switch(cmd)
{
return -ENOIOCTLCMD;
case WDIOC_GETSUPPORT:
- return copy_to_user(uarg.ident, &ident,
+ return copy_to_user((struct watchdog_info *)arg, &ident,
sizeof(ident)) ? -EFAULT : 0;
case WDIOC_GETSTATUS:
wdt977_get_status(&status);
- return put_user(status, uarg.i);
+ return put_user(status, (int *) arg);
case WDIOC_GETBOOTSTATUS:
- return put_user(0, uarg.i);
+ return put_user(0, (int *) arg);
case WDIOC_KEEPALIVE:
wdt977_keepalive();
return 0;
case WDIOC_SETOPTIONS:
- if (get_user (new_options, uarg.i))
+ if (get_user (new_options, (int *) arg))
return -EFAULT;
if (new_options & WDIOS_DISABLECARD) {
return retval;
case WDIOC_SETTIMEOUT:
- if (get_user(new_timeout, uarg.i))
+ if (get_user(new_timeout, (int *) arg))
return -EFAULT;
if (wdt977_set_timeout(new_timeout))
/* Fall */
case WDIOC_GETTIMEOUT:
- return put_user(timeout, uarg.i);
+ return put_user(timeout, (int *)arg);
}
}
static ssize_t wdtpci_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
+ /* Can't seek (pwrite) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
if (count) {
if (!nowayout) {
size_t i;
* Activate
*/
wdtpci_start();
- return nonseekable_open(inode, file);
+ return 0;
}
/**
{
int temperature;
+ /* Can't seek (pread) on this device */
+ if (ptr != &file->f_pos)
+ return -ESPIPE;
+
if (wdtpci_get_temperature(&temperature))
return -EFAULT;
static int wdtpci_temp_open(struct inode *inode, struct file *file)
{
- return nonseekable_open(inode, file);
+ return 0;
}
/**
/*********************** cpufreq_sysctl interface ********************/
static int
cpufreq_procctl(ctl_table *ctl, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
char buf[16], *p;
int cpu = (long) ctl->extra1;
unsigned int len, left = *lenp;
- if (!left || (*ppos && !write) || !cpu_online(cpu)) {
+ if (!left || (filp->f_pos && !write) || !cpu_online(cpu)) {
*lenp = 0;
return 0;
}
}
*lenp = len;
- *ppos += len;
+ filp->f_pos += len;
return 0;
}
+++ /dev/null
-#
-# Makefile for the dump device drivers.
-#
-
-dump-y := dump_setup.o dump_fmt.o dump_filters.o dump_scheme.o dump_execute.o
-dump-$(CONFIG_X86) += dump_i386.o
-dump-$(CONFIG_ARM) += dump_arm.o
-dump-$(CONFIG_PPC64) += dump_ppc64.o
-dump-$(CONFIG_CRASH_DUMP_MEMDEV) += dump_memdev.o dump_overlay.o
-dump-objs += $(dump-y)
-
-obj-$(CONFIG_CRASH_DUMP) += dump.o
-obj-$(CONFIG_CRASH_DUMP_BLOCKDEV) += dump_blockdev.o
-obj-$(CONFIG_CRASH_DUMP_NETDEV) += dump_netdev.o
-obj-$(CONFIG_CRASH_DUMP_COMPRESS_RLE) += dump_rle.o
-obj-$(CONFIG_CRASH_DUMP_COMPRESS_GZIP) += dump_gzip.o
+++ /dev/null
-/*
- * Architecture specific (ARM/XScale) functions for Linux crash dumps.
- *
- * Created by: Fleming Feng (fleming.feng@intel.com)
- *
- * Copyright(C) 2003 Intel Corp. All rights reserved.
- *
- * This code is released under version 2 of the GNU GPL.
- */
-
-/*
- * The hooks for dumping the kernel virtual memory to disk are in this
- * file. Any time a modification is made to the virtual memory mechanism,
- * these routines must be changed to use the new mechanisms.
- */
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/smp.h>
-#include <linux/fs.h>
-#include <linux/vmalloc.h>
-#include <linux/dump.h>
-#include <linux/mm.h>
-#include <asm/processor.h>
-#include <asm/hardirq.h>
-#include <asm/kdebug.h>
-
-static __s32 saved_irq_count; /* saved preempt_count() flags */
-
-static int alloc_dha_stack(void)
-{
- int i;
- void *ptr;
-
- if (dump_header_asm.dha_stack[0])
- return 0;
-
- ptr = vmalloc(THREAD_SIZE * num_online_cpus());
- if (!ptr) {
- printk("vmalloc for dha_stacks failed\n");
- return -ENOMEM;
- }
-
- for( i = 0; i < num_online_cpus(); i++){
- dump_header_asm.dha_stack[i] = (u32)((unsigned long)ptr +
- (i * THREAD_SIZE));
- }
-
- return 0;
-}
-
-static int free_dha_stack(void)
-{
- if (dump_header_asm.dha_stack[0]){
- vfree((void*)dump_header_asm.dha_stack[0]);
- dump_header_asm.dha_stack[0] = 0;
- }
- return 0;
-}
-
-void __dump_save_regs(struct pt_regs* dest_regs, const struct pt_regs* regs)
-{
-
- /* Here, because the arm version uses _dump_regs_t,
- * instead of pt_regs in dump_header_asm, while the
- * the function is defined inside architecture independent
- * header file include/linux/dump.h, the size of block of
- * memory copied is not equal to pt_regs.
- */
-
- memcpy(dest_regs, regs, sizeof(_dump_regs_t));
-
-}
-
-#ifdef CONFIG_SMP
-/* FIXME: This is reserved for possible future usage for SMP system
- * based on ARM/XScale. Currently, there is no information for an
- * SMP system based on ARM/XScale, they are not used!
- */
-/* save registers on other processor */
-void
-__dump_save_other_cpus(void)
-{
-
- /* Dummy now! */
-
- return;
-
-}
-#else /* !CONFIG_SMP */
-#define save_other_cpu_state() do { } while (0)
-#endif /* !CONFIG_SMP */
-
-/*
- * Kludge - dump from interrupt context is unreliable (Fixme)
- *
- * We do this so that softirqs initiated for dump i/o
- * get processed and we don't hang while waiting for i/o
- * to complete or in any irq synchronization attempt.
- *
- * This is not quite legal of course, as it has the side
- * effect of making all interrupts & softirqs triggered
- * while dump is in progress complete before currently
- * pending softirqs and the currently executing interrupt
- * code.
- */
-static inline void
-irq_bh_save(void)
-{
- saved_irq_count = irq_count();
- preempt_count() &= ~(HARDIRQ_MASK|SOFTIRQ_MASK);
-}
-
-static inline void
-irq_bh_restore(void)
-{
- preempt_count() |= saved_irq_count;
-}
-
-/*
- * Name: __dump_irq_enable
- * Func: Reset system so interrupts are enabled.
- * This is used for dump methods that requires interrupts
- * Eventually, all methods will have interrupts disabled
- * and this code can be removed.
- *
- * Re-enable interrupts
- */
-int
-__dump_irq_enable(void)
-{
- irq_bh_save();
- local_irq_enable();
- return 0;
-}
-
-/* Name: __dump_irq_restore
- * Func: Resume the system state in an architecture-specific way.
- */
-void
-__dump_irq_restore(void)
-{
- local_irq_disable();
- irq_bh_restore();
-}
-
-
-/*
- * Name: __dump_configure_header()
- * Func: Meant to fill in arch specific header fields except per-cpu state
- * already captured in dump_lcrash_configure_header.
- */
-int
-__dump_configure_header(const struct pt_regs *regs)
-{
- return (0);
-}
-
-/*
- * Name: dump_die_event
- * Func: Called from notify_die
- */
-static int dump_die_event(struct notifier_block* this,
- unsigned long event,
- void* arg)
-{
- const struct die_args* args = (const struct die_args*)arg;
-
- switch(event){
- case DIE_PANIC:
- case DIE_OOPS:
- case DIE_WATCHDOG:
- dump_execute(args->str, args->regs);
- break;
- }
- return NOTIFY_DONE;
-
-}
-
-static struct notifier_block dump_die_block = {
- .notifier_call = dump_die_event,
-};
-
-/* Name: __dump_init()
- * Func: Initialize the dumping routine process.
- */
-void
-__dump_init(uint64_t local_memory_start)
-{
- /* hook into PANIC and OOPS */
- register_die_notifier(&dump_die_block);
-}
-
-/*
- * Name: __dump_open()
- * Func: Open the dump device (architecture specific). This is in
- * case it's necessary in the future.
- */
-void
-__dump_open(void)
-{
-
- alloc_dha_stack();
-
- return;
-}
-
-/*
- * Name: __dump_cleanup()
- * Func: Free any architecture specific data structures. This is called
- * when the dump module is being removed.
- */
-void
-__dump_cleanup(void)
-{
- free_dha_stack();
- unregister_die_notifier(&dump_die_block);
-
- /* return */
- return;
-}
-
-/*
- * Name: __dump_page_valid()
- * Func: Check if page is valid to dump.
- */
-int
-__dump_page_valid(unsigned long index)
-{
- if(!pfn_valid(index))
- return 0;
- else
- return 1;
-}
-
-/*
- * Name: manual_handle_crashdump
- * Func: Interface for the lkcd dump command. Calls dump_execute()
- */
-int
-manual_handle_crashdump(void) {
-
- _dump_regs_t regs;
-
- get_current_general_regs(®s);
- get_current_cp14_regs(®s);
- get_current_cp15_regs(®s);
- dump_execute("manual", ®s);
- return 0;
-}
+++ /dev/null
-/*
- * Implements the dump driver interface for saving a dump to
- * a block device through the kernel's generic low level block i/o
- * routines.
- *
- * Started: June 2002 - Mohamed Abbas <mohamed.abbas@intel.com>
- * Moved original lkcd kiobuf dump i/o code from dump_base.c
- * to use generic dump device interfaces
- *
- * Sept 2002 - Bharata B. Rao <bharata@in.ibm.com>
- * Convert dump i/o to directly use bio instead of kiobuf for 2.5
- *
- * Oct 2002 - Suparna Bhattacharya <suparna@in.ibm.com>
- * Rework to new dumpdev.h structures, implement open/close/
- * silence, misc fixes (blocknr removal, bio_add_page usage)
- *
- * Copyright (C) 1999 - 2002 Silicon Graphics, Inc. All rights reserved.
- * Copyright (C) 2001 - 2002 Matt D. Robinson. All rights reserved.
- * Copyright (C) 2002 International Business Machines Corp.
- *
- * This code is released under version 2 of the GNU GPL.
- */
-
-#include <linux/types.h>
-#include <linux/proc_fs.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/blkdev.h>
-#include <linux/bio.h>
-#include <asm/hardirq.h>
-#include <linux/dump.h>
-#include "dump_methods.h"
-
-extern void *dump_page_buf;
-
-/* The end_io callback for dump i/o completion */
-static int
-dump_bio_end_io(struct bio *bio, unsigned int bytes_done, int error)
-{
- struct dump_blockdev *dump_bdev;
-
- if (bio->bi_size) {
- /* some bytes still left to transfer */
- return 1; /* not complete */
- }
-
- dump_bdev = (struct dump_blockdev *)bio->bi_private;
- if (error) {
- printk("IO error while writing the dump, aborting\n");
- }
-
- dump_bdev->err = error;
-
- /* no wakeup needed, since caller polls for completion */
- return 0;
-}
-
-/* Check if the dump bio is already mapped to the specified buffer */
-static int
-dump_block_map_valid(struct dump_blockdev *dev, struct page *page,
- int len)
-{
- struct bio *bio = dev->bio;
- unsigned long bsize = 0;
-
- if (!bio->bi_vcnt)
- return 0; /* first time, not mapped */
-
-
- if ((bio_page(bio) != page) || (len > bio->bi_vcnt << PAGE_SHIFT))
- return 0; /* buffer not mapped */
-
- bsize = bdev_hardsect_size(bio->bi_bdev);
- if ((len & (PAGE_SIZE - 1)) || (len & bsize))
- return 0; /* alignment checks needed */
-
- /* quick check to decide if we need to redo bio_add_page */
- if (bdev_get_queue(bio->bi_bdev)->merge_bvec_fn)
- return 0; /* device may have other restrictions */
-
- return 1; /* already mapped */
-}
-
-/*
- * Set up the dump bio for i/o from the specified buffer
- * Return value indicates whether the full buffer could be mapped or not
- */
-static int
-dump_block_map(struct dump_blockdev *dev, void *buf, int len)
-{
- struct page *page = virt_to_page(buf);
- struct bio *bio = dev->bio;
- unsigned long bsize = 0;
-
- bio->bi_bdev = dev->bdev;
- bio->bi_sector = (dev->start_offset + dev->ddev.curr_offset) >> 9;
- bio->bi_idx = 0; /* reset index to the beginning */
-
- if (dump_block_map_valid(dev, page, len)) {
- /* already mapped and usable rightaway */
- bio->bi_size = len; /* reset size to the whole bio */
- } else {
- /* need to map the bio */
- bio->bi_size = 0;
- bio->bi_vcnt = 0;
- bsize = bdev_hardsect_size(bio->bi_bdev);
-
- /* first a few sanity checks */
- if (len < bsize) {
- printk("map: len less than hardsect size \n");
- return -EINVAL;
- }
-
- if ((unsigned long)buf & bsize) {
- printk("map: not aligned \n");
- return -EINVAL;
- }
-
- /* assume contig. page aligned low mem buffer( no vmalloc) */
- if ((page_address(page) != buf) || (len & (PAGE_SIZE - 1))) {
- printk("map: invalid buffer alignment!\n");
- return -EINVAL;
- }
- /* finally we can go ahead and map it */
- while (bio->bi_size < len)
- if (bio_add_page(bio, page++, PAGE_SIZE, 0) == 0) {
- break;
- }
-
- bio->bi_end_io = dump_bio_end_io;
- bio->bi_private = dev;
- }
-
- if (bio->bi_size != len) {
- printk("map: bio size = %d not enough for len = %d!\n",
- bio->bi_size, len);
- return -E2BIG;
- }
- return 0;
-}
-
-static void
-dump_free_bio(struct bio *bio)
-{
- if (bio)
- kfree(bio->bi_io_vec);
- kfree(bio);
-}
-
-/*
- * Prepares the dump device so we can take a dump later.
- * The caller is expected to have filled up the dev_id field in the
- * block dump dev structure.
- *
- * At dump time when dump_block_write() is invoked it will be too
- * late to recover, so as far as possible make sure obvious errors
- * get caught right here and reported back to the caller.
- */
-static int
-dump_block_open(struct dump_dev *dev, unsigned long arg)
-{
- struct dump_blockdev *dump_bdev = DUMP_BDEV(dev);
- struct block_device *bdev;
- int retval = 0;
- struct bio_vec *bvec;
-
- /* make sure this is a valid block device */
- if (!arg) {
- retval = -EINVAL;
- goto err;
- }
-
- /* Convert it to the new dev_t format */
- arg = MKDEV((arg >> OLDMINORBITS), (arg & OLDMINORMASK));
-
- /* get a corresponding block_dev struct for this */
- bdev = bdget((dev_t)arg);
- if (!bdev) {
- retval = -ENODEV;
- goto err;
- }
-
- /* get the block device opened */
- if ((retval = blkdev_get(bdev, O_RDWR | O_LARGEFILE, 0))) {
- goto err1;
- }
-
- if ((dump_bdev->bio = kmalloc(sizeof(struct bio), GFP_KERNEL))
- == NULL) {
- printk("Cannot allocate bio\n");
- retval = -ENOMEM;
- goto err2;
- }
-
- bio_init(dump_bdev->bio);
-
- if ((bvec = kmalloc(sizeof(struct bio_vec) *
- (DUMP_BUFFER_SIZE >> PAGE_SHIFT), GFP_KERNEL)) == NULL) {
- retval = -ENOMEM;
- goto err3;
- }
-
- /* assign the new dump dev structure */
- dump_bdev->dev_id = (dev_t)arg;
- dump_bdev->bdev = bdev;
-
- /* make a note of the limit */
- dump_bdev->limit = bdev->bd_inode->i_size;
-
- /* now make sure we can map the dump buffer */
- dump_bdev->bio->bi_io_vec = bvec;
- dump_bdev->bio->bi_max_vecs = DUMP_BUFFER_SIZE >> PAGE_SHIFT;
-
- retval = dump_block_map(dump_bdev, dump_config.dumper->dump_buf,
- DUMP_BUFFER_SIZE);
-
- if (retval) {
- printk("open: dump_block_map failed, ret %d\n", retval);
- goto err3;
- }
-
- printk("Block device (%d,%d) successfully configured for dumping\n",
- MAJOR(dump_bdev->dev_id),
- MINOR(dump_bdev->dev_id));
-
-
- /* after opening the block device, return */
- return retval;
-
-err3: dump_free_bio(dump_bdev->bio);
- dump_bdev->bio = NULL;
-err2: if (bdev) blkdev_put(bdev);
- goto err;
-err1: if (bdev) bdput(bdev);
- dump_bdev->bdev = NULL;
-err: return retval;
-}
-
-/*
- * Close the dump device and release associated resources
- * Invoked when unconfiguring the dump device.
- */
-static int
-dump_block_release(struct dump_dev *dev)
-{
- struct dump_blockdev *dump_bdev = DUMP_BDEV(dev);
-
- /* release earlier bdev if present */
- if (dump_bdev->bdev) {
- blkdev_put(dump_bdev->bdev);
- dump_bdev->bdev = NULL;
- }
-
- dump_free_bio(dump_bdev->bio);
- dump_bdev->bio = NULL;
-
- return 0;
-}
-
-
-/*
- * Prepare the dump device for use (silence any ongoing activity
- * and quiesce state) when the system crashes.
- */
-static int
-dump_block_silence(struct dump_dev *dev)
-{
- struct dump_blockdev *dump_bdev = DUMP_BDEV(dev);
- struct request_queue *q = bdev_get_queue(dump_bdev->bdev);
- int ret;
-
- /* If we can't get request queue lock, refuse to take the dump */
- if (!spin_trylock(q->queue_lock))
- return -EBUSY;
-
- ret = elv_queue_empty(q);
- spin_unlock(q->queue_lock);
-
- /* For now we assume we have the device to ourselves */
- /* Just a quick sanity check */
- if (!ret) {
- /* Warn the user and move on */
- printk(KERN_ALERT "Warning: Non-empty request queue\n");
- printk(KERN_ALERT "I/O requests in flight at dump time\n");
- }
-
- /*
- * Move to a softer level of silencing where no spin_lock_irqs
- * are held on other cpus
- */
- dump_silence_level = DUMP_SOFT_SPIN_CPUS;
-
- ret = __dump_irq_enable();
- if (ret) {
- return ret;
- }
-
- printk("Dumping to block device (%d,%d) on CPU %d ...\n",
- MAJOR(dump_bdev->dev_id), MINOR(dump_bdev->dev_id),
- smp_processor_id());
-
- return 0;
-}
-
-/*
- * Invoked when dumping is done. This is the time to put things back
- * (i.e. undo the effects of dump_block_silence) so the device is
- * available for normal use.
- */
-static int
-dump_block_resume(struct dump_dev *dev)
-{
- __dump_irq_restore();
- return 0;
-}
-
-
-/*
- * Seek to the specified offset in the dump device.
- * Makes sure this is a valid offset, otherwise returns an error.
- */
-static int
-dump_block_seek(struct dump_dev *dev, loff_t off)
-{
- struct dump_blockdev *dump_bdev = DUMP_BDEV(dev);
- loff_t offset = off + dump_bdev->start_offset;
-
- if (offset & ( PAGE_SIZE - 1)) {
- printk("seek: non-page aligned\n");
- return -EINVAL;
- }
-
- if (offset & (bdev_hardsect_size(dump_bdev->bdev) - 1)) {
- printk("seek: not sector aligned \n");
- return -EINVAL;
- }
-
- if (offset > dump_bdev->limit) {
- printk("seek: not enough space left on device!\n");
- return -ENOSPC;
- }
- dev->curr_offset = off;
- return 0;
-}
-
-/*
- * Write out a buffer after checking the device limitations,
- * sector sizes, etc. Assumes the buffer is in directly mapped
- * kernel address space (not vmalloc'ed).
- *
- * Returns: number of bytes written or -ERRNO.
- */
-static int
-dump_block_write(struct dump_dev *dev, void *buf,
- unsigned long len)
-{
- struct dump_blockdev *dump_bdev = DUMP_BDEV(dev);
- loff_t offset = dev->curr_offset + dump_bdev->start_offset;
- int retval = -ENOSPC;
-
- if (offset >= dump_bdev->limit) {
- printk("write: not enough space left on device!\n");
- goto out;
- }
-
- /* don't write more blocks than our max limit */
- if (offset + len > dump_bdev->limit)
- len = dump_bdev->limit - offset;
-
-
- retval = dump_block_map(dump_bdev, buf, len);
- if (retval){
- printk("write: dump_block_map failed! err %d\n", retval);
- goto out;
- }
-
- /*
- * Write out the data to disk.
- * Assumes the entire buffer mapped to a single bio, which we can
- * submit and wait for io completion. In the future, may consider
- * increasing the dump buffer size and submitting multiple bio s
- * for better throughput.
- */
- dump_bdev->err = -EAGAIN;
- submit_bio(WRITE, dump_bdev->bio);
-
- dump_bdev->ddev.curr_offset += len;
- retval = len;
- out:
- return retval;
-}
-
-/*
- * Name: dump_block_ready()
- * Func: check if the last dump i/o is over and ready for next request
- */
-static int
-dump_block_ready(struct dump_dev *dev, void *buf)
-{
- struct dump_blockdev *dump_bdev = DUMP_BDEV(dev);
- request_queue_t *q = bdev_get_queue(dump_bdev->bio->bi_bdev);
-
- /* check for io completion */
- if (dump_bdev->err == -EAGAIN) {
- q->unplug_fn(q);
- return -EAGAIN;
- }
-
- if (dump_bdev->err) {
- printk("dump i/o err\n");
- return dump_bdev->err;
- }
-
- return 0;
-}
-
-
-struct dump_dev_ops dump_blockdev_ops = {
- .open = dump_block_open,
- .release = dump_block_release,
- .silence = dump_block_silence,
- .resume = dump_block_resume,
- .seek = dump_block_seek,
- .write = dump_block_write,
- /* .read not implemented */
- .ready = dump_block_ready
-};
-
-static struct dump_blockdev default_dump_blockdev = {
- .ddev = {.type_name = "blockdev", .ops = &dump_blockdev_ops,
- .curr_offset = 0},
- /*
- * leave enough room for the longest swap header possibly written
- * written by mkswap (likely the largest page size supported by
- * the arch
- */
- .start_offset = DUMP_HEADER_OFFSET,
- .err = 0
- /* assume the rest of the fields are zeroed by default */
-};
-
-struct dump_blockdev *dump_blockdev = &default_dump_blockdev;
-
-static int __init
-dump_blockdev_init(void)
-{
- if (dump_register_device(&dump_blockdev->ddev) < 0) {
- printk("block device driver registration failed\n");
- return -1;
- }
-
- printk("block device driver for LKCD registered\n");
- return 0;
-}
-
-static void __exit
-dump_blockdev_cleanup(void)
-{
- dump_unregister_device(&dump_blockdev->ddev);
- printk("block device driver for LKCD unregistered\n");
-}
-
-MODULE_AUTHOR("LKCD Development Team <lkcd-devel@lists.sourceforge.net>");
-MODULE_DESCRIPTION("Block Dump Driver for Linux Kernel Crash Dump (LKCD)");
-MODULE_LICENSE("GPL");
-
-module_init(dump_blockdev_init);
-module_exit(dump_blockdev_cleanup);
+++ /dev/null
-/*
- * The file has the common/generic dump execution code
- *
- * Started: Oct 2002 - Suparna Bhattacharya <suparna@in.ibm.com>
- * Split and rewrote high level dump execute code to make use
- * of dump method interfaces.
- *
- * Derived from original code in dump_base.c created by
- * Matt Robinson <yakker@sourceforge.net>)
- *
- * Copyright (C) 1999 - 2002 Silicon Graphics, Inc. All rights reserved.
- * Copyright (C) 2001 - 2002 Matt D. Robinson. All rights reserved.
- * Copyright (C) 2002 International Business Machines Corp.
- *
- * Assumes dumper and dump config settings are in place
- * (invokes corresponding dumper specific routines as applicable)
- *
- * This code is released under version 2 of the GNU GPL.
- */
-#include <linux/kernel.h>
-#include <linux/notifier.h>
-#include <linux/dump.h>
-#include <linux/delay.h>
-#include <linux/reboot.h>
-#include "dump_methods.h"
-
-struct notifier_block *dump_notifier_list; /* dump started/ended callback */
-
-extern int panic_timeout;
-
-/* Dump progress indicator */
-void
-dump_speedo(int i)
-{
- static const char twiddle[4] = { '|', '\\', '-', '/' };
- printk("%c\b", twiddle[i&3]);
-}
-
-/* Make the device ready and write out the header */
-int dump_begin(void)
-{
- int err = 0;
-
- /* dump_dev = dump_config.dumper->dev; */
- dumper_reset();
- if ((err = dump_dev_silence())) {
- /* quiesce failed, can't risk continuing */
- /* Todo/Future: switch to alternate dump scheme if possible */
- printk("dump silence dev failed ! error %d\n", err);
- return err;
- }
-
- pr_debug("Writing dump header\n");
- if ((err = dump_update_header())) {
- printk("dump update header failed ! error %d\n", err);
- dump_dev_resume();
- return err;
- }
-
- dump_config.dumper->curr_offset = DUMP_BUFFER_SIZE;
-
- return 0;
-}
-
-/*
- * Write the dump terminator, a final header update and let go of
- * exclusive use of the device for dump.
- */
-int dump_complete(void)
-{
- int ret = 0;
-
- if (dump_config.level != DUMP_LEVEL_HEADER) {
- if ((ret = dump_update_end_marker())) {
- printk("dump update end marker error %d\n", ret);
- }
- if ((ret = dump_update_header())) {
- printk("dump update header error %d\n", ret);
- }
- }
- ret = dump_dev_resume();
-
- if ((panic_timeout > 0) && (!(dump_config.flags & (DUMP_FLAGS_SOFTBOOT | DUMP_FLAGS_NONDISRUPT)))) {
- printk(KERN_EMERG "Rebooting in %d seconds..",panic_timeout);
-#ifdef CONFIG_SMP
- smp_send_stop();
-#endif
- mdelay(panic_timeout * 1000);
- machine_restart(NULL);
- }
-
- return ret;
-}
-
-/* Saves all dump data */
-int dump_execute_savedump(void)
-{
- int ret = 0, err = 0;
-
- if ((ret = dump_begin())) {
- return ret;
- }
-
- if (dump_config.level != DUMP_LEVEL_HEADER) {
- ret = dump_sequencer();
- }
- if ((err = dump_complete())) {
- printk("Dump complete failed. Error %d\n", err);
- }
-
- return ret;
-}
-
-extern void dump_calc_bootmap_pages(void);
-
-/* Does all the real work: Capture and save state */
-int dump_generic_execute(const char *panic_str, const struct pt_regs *regs)
-{
- int ret = 0;
-
- if ((ret = dump_configure_header(panic_str, regs))) {
- printk("dump config header failed ! error %d\n", ret);
- return ret;
- }
-
- dump_calc_bootmap_pages();
- /* tell interested parties that a dump is about to start */
- notifier_call_chain(&dump_notifier_list, DUMP_BEGIN,
- &dump_config.dump_device);
-
- if (dump_config.level != DUMP_LEVEL_NONE)
- ret = dump_execute_savedump();
-
- pr_debug("dumped %ld blocks of %d bytes each\n",
- dump_config.dumper->count, DUMP_BUFFER_SIZE);
-
- /* tell interested parties that a dump has completed */
- notifier_call_chain(&dump_notifier_list, DUMP_END,
- &dump_config.dump_device);
-
- return ret;
-}
+++ /dev/null
-/*
- * Default filters to select data to dump for various passes.
- *
- * Started: Oct 2002 - Suparna Bhattacharya <suparna@in.ibm.com>
- * Split and rewrote default dump selection logic to generic dump
- * method interfaces
- * Derived from a portion of dump_base.c created by
- * Matt Robinson <yakker@sourceforge.net>)
- *
- * Copyright (C) 1999 - 2002 Silicon Graphics, Inc. All rights reserved.
- * Copyright (C) 2001 - 2002 Matt D. Robinson. All rights reserved.
- * Copyright (C) 2002 International Business Machines Corp.
- *
- * Used during single-stage dumping and during stage 1 of the 2-stage scheme
- * (Stage 2 of the 2-stage scheme uses the fully transparent filters
- * i.e. passthru filters in dump_overlay.c)
- *
- * Future: Custom selective dump may involve a different set of filters.
- *
- * This code is released under version 2 of the GNU GPL.
- */
-
-#include <linux/kernel.h>
-#include <linux/bootmem.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/dump.h>
-#include "dump_methods.h"
-
-#define DUMP_PFN_SAFETY_MARGIN 1024 /* 4 MB */
-static unsigned long bootmap_pages;
-
-/* Copied from mm/bootmem.c - FIXME */
-/* return the number of _pages_ that will be allocated for the boot bitmap */
-void dump_calc_bootmap_pages (void)
-{
- unsigned long mapsize;
- unsigned long pages = num_physpages;
-
- mapsize = (pages+7)/8;
- mapsize = (mapsize + ~PAGE_MASK) & PAGE_MASK;
- mapsize >>= PAGE_SHIFT;
- bootmap_pages = mapsize + DUMP_PFN_SAFETY_MARGIN + 1;
-}
-
-
-/* temporary */
-extern unsigned long min_low_pfn;
-
-
-int dump_low_page(struct page *p)
-{
- return ((page_to_pfn(p) >= min_low_pfn) &&
- (page_to_pfn(p) < (min_low_pfn + bootmap_pages)));
-}
-
-static inline int kernel_page(struct page *p)
-{
- /* FIXME: Need to exclude hugetlb pages. Clue: reserved but inuse */
- return (PageReserved(p) && !PageInuse(p)) || (!PageLRU(p) && PageInuse(p));
-}
-
-static inline int user_page(struct page *p)
-{
- return PageInuse(p) && (!PageReserved(p) && PageLRU(p));
-}
-
-static inline int unreferenced_page(struct page *p)
-{
- return !PageInuse(p) && !PageReserved(p);
-}
-
-
-/* loc marks the beginning of a range of pages */
-int dump_filter_kernpages(int pass, unsigned long loc, unsigned long sz)
-{
- struct page *page = (struct page *)loc;
- /* if any of the pages is a kernel page, select this set */
- while (sz) {
- if (dump_low_page(page) || kernel_page(page))
- return 1;
- sz -= PAGE_SIZE;
- page++;
- }
- return 0;
-}
-
-
-/* loc marks the beginning of a range of pages */
-int dump_filter_userpages(int pass, unsigned long loc, unsigned long sz)
-{
- struct page *page = (struct page *)loc;
- int ret = 0;
- /* select if the set has any user page, and no kernel pages */
- while (sz) {
- if (user_page(page) && !dump_low_page(page)) {
- ret = 1;
- } else if (kernel_page(page) || dump_low_page(page)) {
- return 0;
- }
- page++;
- sz -= PAGE_SIZE;
- }
- return ret;
-}
-
-
-
-/* loc marks the beginning of a range of pages */
-int dump_filter_unusedpages(int pass, unsigned long loc, unsigned long sz)
-{
- struct page *page = (struct page *)loc;
-
- /* select if the set does not have any used pages */
- while (sz) {
- if (!unreferenced_page(page) || dump_low_page(page)) {
- return 0;
- }
- page++;
- sz -= PAGE_SIZE;
- }
- return 1;
-}
-
-/* dummy: last (non-existent) pass */
-int dump_filter_none(int pass, unsigned long loc, unsigned long sz)
-{
- return 0;
-}
-
-/* TBD: resolve level bitmask ? */
-struct dump_data_filter dump_filter_table[] = {
- { .name = "kern", .selector = dump_filter_kernpages,
- .level_mask = DUMP_MASK_KERN},
- { .name = "user", .selector = dump_filter_userpages,
- .level_mask = DUMP_MASK_USED},
- { .name = "unused", .selector = dump_filter_unusedpages,
- .level_mask = DUMP_MASK_UNUSED},
- { .name = "none", .selector = dump_filter_none,
- .level_mask = DUMP_MASK_REST},
- { .name = "", .selector = NULL, .level_mask = 0}
-};
-
+++ /dev/null
-/*
- * Implements the routines which handle the format specific
- * aspects of dump for the default dump format.
- *
- * Used in single stage dumping and stage 1 of soft-boot based dumping
- * Saves data in LKCD (lcrash) format
- *
- * Previously a part of dump_base.c
- *
- * Started: Oct 2002 - Suparna Bhattacharya <suparna@in.ibm.com>
- * Split off and reshuffled LKCD dump format code around generic
- * dump method interfaces.
- *
- * Derived from original code created by
- * Matt Robinson <yakker@sourceforge.net>)
- *
- * Contributions from SGI, IBM, HP, MCL, and others.
- *
- * Copyright (C) 1999 - 2002 Silicon Graphics, Inc. All rights reserved.
- * Copyright (C) 2000 - 2002 TurboLinux, Inc. All rights reserved.
- * Copyright (C) 2001 - 2002 Matt D. Robinson. All rights reserved.
- * Copyright (C) 2002 International Business Machines Corp.
- *
- * This code is released under version 2 of the GNU GPL.
- */
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/time.h>
-#include <linux/sched.h>
-#include <linux/ptrace.h>
-#include <linux/utsname.h>
-#include <asm/dump.h>
-#include <linux/dump.h>
-#include "dump_methods.h"
-
-/*
- * SYSTEM DUMP LAYOUT
- *
- * System dumps are currently the combination of a dump header and a set
- * of data pages which contain the system memory. The layout of the dump
- * (for full dumps) is as follows:
- *
- * +-----------------------------+
- * | generic dump header |
- * +-----------------------------+
- * | architecture dump header |
- * +-----------------------------+
- * | page header |
- * +-----------------------------+
- * | page data |
- * +-----------------------------+
- * | page header |
- * +-----------------------------+
- * | page data |
- * +-----------------------------+
- * | | |
- * | | |
- * | | |
- * | | |
- * | V |
- * +-----------------------------+
- * | PAGE_END header |
- * +-----------------------------+
- *
- * There are two dump headers, the first which is architecture
- * independent, and the other which is architecture dependent. This
- * allows different architectures to dump different data structures
- * which are specific to their chipset, CPU, etc.
- *
- * After the dump headers come a succession of dump page headers along
- * with dump pages. The page header contains information about the page
- * size, any flags associated with the page (whether it's compressed or
- * not), and the address of the page. After the page header is the page
- * data, which is either compressed (or not). Each page of data is
- * dumped in succession, until the final dump header (PAGE_END) is
- * placed at the end of the dump, assuming the dump device isn't out
- * of space.
- *
- * This mechanism allows for multiple compression types, different
- * types of data structures, different page ordering, etc., etc., etc.
- * It's a very straightforward mechanism for dumping system memory.
- */
-
-struct __dump_header dump_header; /* the primary dump header */
-struct __dump_header_asm dump_header_asm; /* the arch-specific dump header */
-
-/*
- * Set up common header fields (mainly the arch indep section)
- * Per-cpu state is handled by lcrash_save_context
- * Returns the size of the header in bytes.
- */
-static int lcrash_init_dump_header(const char *panic_str)
-{
- struct timeval dh_time;
- unsigned long temp_dha_stack[DUMP_MAX_NUM_CPUS];
- u64 temp_memsz = dump_header.dh_memory_size;
-
- /* make sure the dump header isn't TOO big */
- if ((sizeof(struct __dump_header) +
- sizeof(struct __dump_header_asm)) > DUMP_BUFFER_SIZE) {
- printk("lcrash_init_header(): combined "
- "headers larger than DUMP_BUFFER_SIZE!\n");
- return -E2BIG;
- }
-
- /* initialize the dump headers to zero */
- /* save dha_stack pointer because it may contains pointer for stack! */
- memcpy(&(temp_dha_stack[0]), &(dump_header_asm.dha_stack[0]),
- DUMP_MAX_NUM_CPUS * sizeof(unsigned long));
- memset(&dump_header, 0, sizeof(dump_header));
- memset(&dump_header_asm, 0, sizeof(dump_header_asm));
- dump_header.dh_memory_size = temp_memsz;
- memcpy(&(dump_header_asm.dha_stack[0]), &(temp_dha_stack[0]),
- DUMP_MAX_NUM_CPUS * sizeof(unsigned long));
-
- /* configure dump header values */
- dump_header.dh_magic_number = DUMP_MAGIC_NUMBER;
- dump_header.dh_version = DUMP_VERSION_NUMBER;
- dump_header.dh_memory_start = PAGE_OFFSET;
- dump_header.dh_memory_end = DUMP_MAGIC_NUMBER;
- dump_header.dh_header_size = sizeof(struct __dump_header);
- dump_header.dh_page_size = PAGE_SIZE;
- dump_header.dh_dump_level = dump_config.level;
- dump_header.dh_current_task = (unsigned long) current;
- dump_header.dh_dump_compress = dump_config.dumper->compress->
- compress_type;
- dump_header.dh_dump_flags = dump_config.flags;
- dump_header.dh_dump_device = dump_config.dumper->dev->device_id;
-
-#if DUMP_DEBUG >= 6
- dump_header.dh_num_bytes = 0;
-#endif
- dump_header.dh_num_dump_pages = 0;
- do_gettimeofday(&dh_time);
- dump_header.dh_time.tv_sec = dh_time.tv_sec;
- dump_header.dh_time.tv_usec = dh_time.tv_usec;
-
- memcpy((void *)&(dump_header.dh_utsname_sysname),
- (const void *)&(system_utsname.sysname), __NEW_UTS_LEN + 1);
- memcpy((void *)&(dump_header.dh_utsname_nodename),
- (const void *)&(system_utsname.nodename), __NEW_UTS_LEN + 1);
- memcpy((void *)&(dump_header.dh_utsname_release),
- (const void *)&(system_utsname.release), __NEW_UTS_LEN + 1);
- memcpy((void *)&(dump_header.dh_utsname_version),
- (const void *)&(system_utsname.version), __NEW_UTS_LEN + 1);
- memcpy((void *)&(dump_header.dh_utsname_machine),
- (const void *)&(system_utsname.machine), __NEW_UTS_LEN + 1);
- memcpy((void *)&(dump_header.dh_utsname_domainname),
- (const void *)&(system_utsname.domainname), __NEW_UTS_LEN + 1);
-
- if (panic_str) {
- memcpy((void *)&(dump_header.dh_panic_string),
- (const void *)panic_str, DUMP_PANIC_LEN);
- }
-
- dump_header_asm.dha_magic_number = DUMP_ASM_MAGIC_NUMBER;
- dump_header_asm.dha_version = DUMP_ASM_VERSION_NUMBER;
- dump_header_asm.dha_header_size = sizeof(dump_header_asm);
-#ifdef CONFIG_ARM
- dump_header_asm.dha_physaddr_start = PHYS_OFFSET;
-#endif
-
- dump_header_asm.dha_smp_num_cpus = num_online_cpus();
- pr_debug("smp_num_cpus in header %d\n",
- dump_header_asm.dha_smp_num_cpus);
-
- dump_header_asm.dha_dumping_cpu = smp_processor_id();
-
- return sizeof(dump_header) + sizeof(dump_header_asm);
-}
-
-
-int dump_lcrash_configure_header(const char *panic_str,
- const struct pt_regs *regs)
-{
- int retval = 0;
-
- dump_config.dumper->header_len = lcrash_init_dump_header(panic_str);
-
- /* capture register states for all processors */
- dump_save_this_cpu(regs);
- __dump_save_other_cpus(); /* side effect:silence cpus */
-
- /* configure architecture-specific dump header values */
- if ((retval = __dump_configure_header(regs)))
- return retval;
-
- dump_config.dumper->header_dirty++;
- return 0;
-}
-
-/* save register and task context */
-void dump_lcrash_save_context(int cpu, const struct pt_regs *regs,
- struct task_struct *tsk)
-{
- dump_header_asm.dha_smp_current_task[cpu] = (unsigned long)tsk;
-
- __dump_save_regs(&dump_header_asm.dha_smp_regs[cpu], regs);
-
- /* take a snapshot of the stack */
- /* doing this enables us to tolerate slight drifts on this cpu */
- if (dump_header_asm.dha_stack[cpu]) {
- memcpy((void *)dump_header_asm.dha_stack[cpu],
- tsk->thread_info, THREAD_SIZE);
- }
- dump_header_asm.dha_stack_ptr[cpu] = (unsigned long)(tsk->thread_info);
-}
-
-/* write out the header */
-int dump_write_header(void)
-{
- int retval = 0, size;
- void *buf = dump_config.dumper->dump_buf;
-
- /* accounts for DUMP_HEADER_OFFSET if applicable */
- if ((retval = dump_dev_seek(0))) {
- printk("Unable to seek to dump header offset: %d\n",
- retval);
- return retval;
- }
-
- memcpy(buf, (void *)&dump_header, sizeof(dump_header));
- size = sizeof(dump_header);
- memcpy(buf + size, (void *)&dump_header_asm, sizeof(dump_header_asm));
- size += sizeof(dump_header_asm);
- size = PAGE_ALIGN(size);
- retval = dump_ll_write(buf , size);
-
- if (retval < size)
- return (retval >= 0) ? ENOSPC : retval;
- return 0;
-}
-
-int dump_generic_update_header(void)
-{
- int err = 0;
-
- if (dump_config.dumper->header_dirty) {
- if ((err = dump_write_header())) {
- printk("dump write header failed !err %d\n", err);
- } else {
- dump_config.dumper->header_dirty = 0;
- }
- }
-
- return err;
-}
-
-static inline int is_curr_stack_page(struct page *page, unsigned long size)
-{
- unsigned long thread_addr = (unsigned long)current_thread_info();
- unsigned long addr = (unsigned long)page_address(page);
-
- return !PageHighMem(page) && (addr < thread_addr + THREAD_SIZE)
- && (addr + size > thread_addr);
-}
-
-static inline int is_dump_page(struct page *page, unsigned long size)
-{
- unsigned long addr = (unsigned long)page_address(page);
- unsigned long dump_buf = (unsigned long)dump_config.dumper->dump_buf;
-
- return !PageHighMem(page) && (addr < dump_buf + DUMP_BUFFER_SIZE)
- && (addr + size > dump_buf);
-}
-
-int dump_allow_compress(struct page *page, unsigned long size)
-{
- /*
- * Don't compress the page if any part of it overlaps
- * with the current stack or dump buffer (since the contents
- * in these could be changing while compression is going on)
- */
- return !is_curr_stack_page(page, size) && !is_dump_page(page, size);
-}
-
-void lcrash_init_pageheader(struct __dump_page *dp, struct page *page,
- unsigned long sz)
-{
- memset(dp, sizeof(struct __dump_page), 0);
- dp->dp_flags = 0;
- dp->dp_size = 0;
- if (sz > 0)
- dp->dp_address = (loff_t)page_to_pfn(page) << PAGE_SHIFT;
-
-#if DUMP_DEBUG > 6
- dp->dp_page_index = dump_header.dh_num_dump_pages;
- dp->dp_byte_offset = dump_header.dh_num_bytes + DUMP_BUFFER_SIZE
- + DUMP_HEADER_OFFSET; /* ?? */
-#endif /* DUMP_DEBUG */
-}
-
-int dump_lcrash_add_data(unsigned long loc, unsigned long len)
-{
- struct page *page = (struct page *)loc;
- void *addr, *buf = dump_config.dumper->curr_buf;
- struct __dump_page *dp = (struct __dump_page *)buf;
- int bytes, size;
-
- if (buf > dump_config.dumper->dump_buf + DUMP_BUFFER_SIZE)
- return -ENOMEM;
-
- lcrash_init_pageheader(dp, page, len);
- buf += sizeof(struct __dump_page);
-
- while (len) {
- addr = kmap_atomic(page, KM_DUMP);
- size = bytes = (len > PAGE_SIZE) ? PAGE_SIZE : len;
- /* check for compression */
- if (dump_allow_compress(page, bytes)) {
- size = dump_compress_data((char *)addr, bytes, (char *)buf);
- }
- /* set the compressed flag if the page did compress */
- if (size && (size < bytes)) {
- dp->dp_flags |= DUMP_DH_COMPRESSED;
- } else {
- /* compression failed -- default to raw mode */
- dp->dp_flags |= DUMP_DH_RAW;
- memcpy(buf, addr, bytes);
- size = bytes;
- }
- /* memset(buf, 'A', size); temporary: testing only !! */
- kunmap_atomic(addr, KM_DUMP);
- dp->dp_size += size;
- buf += size;
- len -= bytes;
- page++;
- }
-
- /* now update the header */
-#if DUMP_DEBUG > 6
- dump_header.dh_num_bytes += dp->dp_size + sizeof(*dp);
-#endif
- dump_header.dh_num_dump_pages++;
- dump_config.dumper->header_dirty++;
-
- dump_config.dumper->curr_buf = buf;
-
- return len;
-}
-
-int dump_lcrash_update_end_marker(void)
-{
- struct __dump_page *dp =
- (struct __dump_page *)dump_config.dumper->curr_buf;
- unsigned long left;
- int ret = 0;
-
- lcrash_init_pageheader(dp, NULL, 0);
- dp->dp_flags |= DUMP_DH_END; /* tbd: truncation test ? */
-
- /* now update the header */
-#if DUMP_DEBUG > 6
- dump_header.dh_num_bytes += sizeof(*dp);
-#endif
- dump_config.dumper->curr_buf += sizeof(*dp);
- left = dump_config.dumper->curr_buf - dump_config.dumper->dump_buf;
-
- printk("\n");
-
- while (left) {
- if ((ret = dump_dev_seek(dump_config.dumper->curr_offset))) {
- printk("Seek failed at offset 0x%llx\n",
- dump_config.dumper->curr_offset);
- return ret;
- }
-
- if (DUMP_BUFFER_SIZE > left)
- memset(dump_config.dumper->curr_buf, 'm',
- DUMP_BUFFER_SIZE - left);
-
- if ((ret = dump_ll_write(dump_config.dumper->dump_buf,
- DUMP_BUFFER_SIZE)) < DUMP_BUFFER_SIZE) {
- return (ret < 0) ? ret : -ENOSPC;
- }
-
- dump_config.dumper->curr_offset += DUMP_BUFFER_SIZE;
-
- if (left > DUMP_BUFFER_SIZE) {
- left -= DUMP_BUFFER_SIZE;
- memcpy(dump_config.dumper->dump_buf,
- dump_config.dumper->dump_buf + DUMP_BUFFER_SIZE, left);
- dump_config.dumper->curr_buf -= DUMP_BUFFER_SIZE;
- } else {
- left = 0;
- }
- }
- return 0;
-}
-
-
-/* Default Formatter (lcrash) */
-struct dump_fmt_ops dump_fmt_lcrash_ops = {
- .configure_header = dump_lcrash_configure_header,
- .update_header = dump_generic_update_header,
- .save_context = dump_lcrash_save_context,
- .add_data = dump_lcrash_add_data,
- .update_end_marker = dump_lcrash_update_end_marker
-};
-
-struct dump_fmt dump_fmt_lcrash = {
- .name = "lcrash",
- .ops = &dump_fmt_lcrash_ops
-};
-
+++ /dev/null
-/*
- * GZIP Compression functions for kernel crash dumps.
- *
- * Created by: Matt Robinson (yakker@sourceforge.net)
- * Copyright 2001 Matt D. Robinson. All rights reserved.
- *
- * This code is released under version 2 of the GNU GPL.
- */
-
-/* header files */
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/fs.h>
-#include <linux/file.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/dump.h>
-#include <linux/zlib.h>
-#include <linux/vmalloc.h>
-
-static void *deflate_workspace;
-
-/*
- * Name: dump_compress_gzip()
- * Func: Compress a DUMP_PAGE_SIZE page using gzip-style algorithms (the.
- * deflate functions similar to what's used in PPP).
- */
-static u16
-dump_compress_gzip(const u8 *old, u16 oldsize, u8 *new, u16 newsize)
-{
- /* error code and dump stream */
- int err;
- z_stream dump_stream;
-
- dump_stream.workspace = deflate_workspace;
-
- if ((err = zlib_deflateInit(&dump_stream, Z_BEST_COMPRESSION)) != Z_OK) {
- /* fall back to RLE compression */
- printk("dump_compress_gzip(): zlib_deflateInit() "
- "failed (%d)!\n", err);
- return 0;
- }
-
- /* use old (page of memory) and size (DUMP_PAGE_SIZE) as in-streams */
- dump_stream.next_in = (u8 *) old;
- dump_stream.avail_in = oldsize;
-
- /* out streams are new (dpcpage) and new size (DUMP_DPC_PAGE_SIZE) */
- dump_stream.next_out = new;
- dump_stream.avail_out = newsize;
-
- /* deflate the page -- check for error */
- err = zlib_deflate(&dump_stream, Z_FINISH);
- if (err != Z_STREAM_END) {
- /* zero is return code here */
- (void)zlib_deflateEnd(&dump_stream);
- printk("dump_compress_gzip(): zlib_deflate() failed (%d)!\n",
- err);
- return 0;
- }
-
- /* let's end the deflated compression stream */
- if ((err = zlib_deflateEnd(&dump_stream)) != Z_OK) {
- printk("dump_compress_gzip(): zlib_deflateEnd() "
- "failed (%d)!\n", err);
- }
-
- /* return the compressed byte total (if it's smaller) */
- if (dump_stream.total_out >= oldsize) {
- return oldsize;
- }
- return dump_stream.total_out;
-}
-
-/* setup the gzip compression functionality */
-static struct __dump_compress dump_gzip_compression = {
- .compress_type = DUMP_COMPRESS_GZIP,
- .compress_func = dump_compress_gzip,
- .compress_name = "GZIP",
-};
-
-/*
- * Name: dump_compress_gzip_init()
- * Func: Initialize gzip as a compression mechanism.
- */
-static int __init
-dump_compress_gzip_init(void)
-{
- deflate_workspace = vmalloc(zlib_deflate_workspacesize());
- if (!deflate_workspace) {
- printk("dump_compress_gzip_init(): Failed to "
- "alloc %d bytes for deflate workspace\n",
- zlib_deflate_workspacesize());
- return -ENOMEM;
- }
- dump_register_compression(&dump_gzip_compression);
- return 0;
-}
-
-/*
- * Name: dump_compress_gzip_cleanup()
- * Func: Remove gzip as a compression mechanism.
- */
-static void __exit
-dump_compress_gzip_cleanup(void)
-{
- vfree(deflate_workspace);
- dump_unregister_compression(DUMP_COMPRESS_GZIP);
-}
-
-/* module initialization */
-module_init(dump_compress_gzip_init);
-module_exit(dump_compress_gzip_cleanup);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("LKCD Development Team <lkcd-devel@lists.sourceforge.net>");
-MODULE_DESCRIPTION("Gzip compression module for crash dump driver");
+++ /dev/null
-/*
- * Architecture specific (i386) functions for Linux crash dumps.
- *
- * Created by: Matt Robinson (yakker@sgi.com)
- *
- * Copyright 1999 Silicon Graphics, Inc. All rights reserved.
- *
- * 2.3 kernel modifications by: Matt D. Robinson (yakker@turbolinux.com)
- * Copyright 2000 TurboLinux, Inc. All rights reserved.
- *
- * This code is released under version 2 of the GNU GPL.
- */
-
-/*
- * The hooks for dumping the kernel virtual memory to disk are in this
- * file. Any time a modification is made to the virtual memory mechanism,
- * these routines must be changed to use the new mechanisms.
- */
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/smp.h>
-#include <linux/fs.h>
-#include <linux/vmalloc.h>
-#include <linux/mm.h>
-#include <linux/dump.h>
-#include "dump_methods.h"
-#include <linux/irq.h>
-
-#include <asm/processor.h>
-#include <asm/e820.h>
-#include <asm/hardirq.h>
-#include <asm/nmi.h>
-
-static __s32 saved_irq_count; /* saved preempt_count() flags */
-
-static int
-alloc_dha_stack(void)
-{
- int i;
- void *ptr;
-
- if (dump_header_asm.dha_stack[0])
- return 0;
-
- ptr = vmalloc(THREAD_SIZE * num_online_cpus());
- if (!ptr) {
- printk("vmalloc for dha_stacks failed\n");
- return -ENOMEM;
- }
-
- for (i = 0; i < num_online_cpus(); i++) {
- dump_header_asm.dha_stack[i] = (u32)((unsigned long)ptr +
- (i * THREAD_SIZE));
- }
- return 0;
-}
-
-static int
-free_dha_stack(void)
-{
- if (dump_header_asm.dha_stack[0]) {
- vfree((void *)dump_header_asm.dha_stack[0]);
- dump_header_asm.dha_stack[0] = 0;
- }
- return 0;
-}
-
-
-void
-__dump_save_regs(struct pt_regs *dest_regs, const struct pt_regs *regs)
-{
- *dest_regs = *regs;
-
- /* In case of panic dumps, we collects regs on entry to panic.
- * so, we shouldn't 'fix' ssesp here again. But it is hard to
- * tell just looking at regs whether ssesp need fixing. We make
- * this decision by looking at xss in regs. If we have better
- * means to determine that ssesp are valid (by some flag which
- * tells that we are here due to panic dump), then we can use
- * that instead of this kludge.
- */
- if (!user_mode(regs)) {
- if ((0xffff & regs->xss) == __KERNEL_DS)
- /* already fixed up */
- return;
- dest_regs->esp = (unsigned long)&(regs->esp);
- __asm__ __volatile__ ("movw %%ss, %%ax;"
- :"=a"(dest_regs->xss));
- }
-}
-
-
-#ifdef CONFIG_SMP
-extern cpumask_t irq_affinity[];
-extern irq_desc_t irq_desc[];
-extern void dump_send_ipi(void);
-
-static int dump_expect_ipi[NR_CPUS];
-static atomic_t waiting_for_dump_ipi;
-static cpumask_t saved_affinity[NR_IRQS];
-
-extern void stop_this_cpu(void *); /* exported by i386 kernel */
-
-static int
-dump_nmi_callback(struct pt_regs *regs, int cpu)
-{
- if (!dump_expect_ipi[cpu])
- return 0;
-
- dump_expect_ipi[cpu] = 0;
-
- dump_save_this_cpu(regs);
- atomic_dec(&waiting_for_dump_ipi);
-
- level_changed:
- switch (dump_silence_level) {
- case DUMP_HARD_SPIN_CPUS: /* Spin until dump is complete */
- while (dump_oncpu) {
- barrier(); /* paranoia */
- if (dump_silence_level != DUMP_HARD_SPIN_CPUS)
- goto level_changed;
-
- cpu_relax(); /* kill time nicely */
- }
- break;
-
- case DUMP_HALT_CPUS: /* Execute halt */
- stop_this_cpu(NULL);
- break;
-
- case DUMP_SOFT_SPIN_CPUS:
- /* Mark the task so it spins in schedule */
- set_tsk_thread_flag(current, TIF_NEED_RESCHED);
- break;
- }
-
- return 1;
-}
-
-/* save registers on other processors */
-void
-__dump_save_other_cpus(void)
-{
- int i, cpu = smp_processor_id();
- int other_cpus = num_online_cpus()-1;
-
- if (other_cpus > 0) {
- atomic_set(&waiting_for_dump_ipi, other_cpus);
-
- for (i = 0; i < NR_CPUS; i++) {
- dump_expect_ipi[i] = (i != cpu && cpu_online(i));
- }
-
- /* short circuit normal NMI handling temporarily */
- set_nmi_callback(dump_nmi_callback);
- wmb();
-
- dump_send_ipi();
- /* may be we dont need to wait for NMI to be processed.
- just write out the header at the end of dumping, if
- this IPI is not processed until then, there probably
- is a problem and we just fail to capture state of
- other cpus. */
- while(atomic_read(&waiting_for_dump_ipi) > 0) {
- cpu_relax();
- }
-
- unset_nmi_callback();
- }
-}
-
-/*
- * Routine to save the old irq affinities and change affinities of all irqs to
- * the dumping cpu.
- */
-static void
-set_irq_affinity(void)
-{
- int i;
- cpumask_t cpu = CPU_MASK_NONE;
-
- cpu_set(smp_processor_id(), cpu);
- memcpy(saved_affinity, irq_affinity, NR_IRQS * sizeof(unsigned long));
- for (i = 0; i < NR_IRQS; i++) {
- if (irq_desc[i].handler == NULL)
- continue;
- irq_affinity[i] = cpu;
- if (irq_desc[i].handler->set_affinity != NULL)
- irq_desc[i].handler->set_affinity(i, irq_affinity[i]);
- }
-}
-
-/*
- * Restore old irq affinities.
- */
-static void
-reset_irq_affinity(void)
-{
- int i;
-
- memcpy(irq_affinity, saved_affinity, NR_IRQS * sizeof(unsigned long));
- for (i = 0; i < NR_IRQS; i++) {
- if (irq_desc[i].handler == NULL)
- continue;
- if (irq_desc[i].handler->set_affinity != NULL)
- irq_desc[i].handler->set_affinity(i, saved_affinity[i]);
- }
-}
-
-#else /* !CONFIG_SMP */
-#define set_irq_affinity() do { } while (0)
-#define reset_irq_affinity() do { } while (0)
-#define save_other_cpu_states() do { } while (0)
-#endif /* !CONFIG_SMP */
-
-/*
- * Kludge - dump from interrupt context is unreliable (Fixme)
- *
- * We do this so that softirqs initiated for dump i/o
- * get processed and we don't hang while waiting for i/o
- * to complete or in any irq synchronization attempt.
- *
- * This is not quite legal of course, as it has the side
- * effect of making all interrupts & softirqs triggered
- * while dump is in progress complete before currently
- * pending softirqs and the currently executing interrupt
- * code.
- */
-static inline void
-irq_bh_save(void)
-{
- saved_irq_count = irq_count();
- preempt_count() &= ~(HARDIRQ_MASK|SOFTIRQ_MASK);
-}
-
-static inline void
-irq_bh_restore(void)
-{
- preempt_count() |= saved_irq_count;
-}
-
-/*
- * Name: __dump_irq_enable
- * Func: Reset system so interrupts are enabled.
- * This is used for dump methods that require interrupts
- * Eventually, all methods will have interrupts disabled
- * and this code can be removed.
- *
- * Change irq affinities
- * Re-enable interrupts
- */
-int
-__dump_irq_enable(void)
-{
- set_irq_affinity();
- irq_bh_save();
- local_irq_enable();
- return 0;
-}
-
-/*
- * Name: __dump_irq_restore
- * Func: Resume the system state in an architecture-specific way.
-
- */
-void
-__dump_irq_restore(void)
-{
- local_irq_disable();
- reset_irq_affinity();
- irq_bh_restore();
-}
-
-/*
- * Name: __dump_configure_header()
- * Func: Meant to fill in arch specific header fields except per-cpu state
- * already captured via __dump_save_context for all CPUs.
- */
-int
-__dump_configure_header(const struct pt_regs *regs)
-{
- return (0);
-}
-
-/*
- * Name: __dump_init()
- * Func: Initialize the dumping routine process.
- */
-void
-__dump_init(uint64_t local_memory_start)
-{
- return;
-}
-
-/*
- * Name: __dump_open()
- * Func: Open the dump device (architecture specific).
- */
-void
-__dump_open(void)
-{
- alloc_dha_stack();
-}
-
-/*
- * Name: __dump_cleanup()
- * Func: Free any architecture specific data structures. This is called
- * when the dump module is being removed.
- */
-void
-__dump_cleanup(void)
-{
- free_dha_stack();
-}
-
-extern int pfn_is_ram(unsigned long);
-
-/*
- * Name: __dump_page_valid()
- * Func: Check if page is valid to dump.
- */
-int
-__dump_page_valid(unsigned long index)
-{
- if (!pfn_valid(index))
- return 0;
-
- return pfn_is_ram(index);
-}
-
-/*
- * Name: manual_handle_crashdump()
- * Func: Interface for the lkcd dump command. Calls dump_execute()
- */
-int
-manual_handle_crashdump(void) {
-
- struct pt_regs regs;
-
- get_current_regs(®s);
- dump_execute("manual", ®s);
- return 0;
-}
+++ /dev/null
-/*
- * Implements the dump driver interface for saving a dump in available
- * memory areas. The saved pages may be written out to persistent storage
- * after a soft reboot.
- *
- * Started: Oct 2002 - Suparna Bhattacharya <suparna@in.ibm.com>
- *
- * Copyright (C) 2002 International Business Machines Corp.
- *
- * This code is released under version 2 of the GNU GPL.
- *
- * The approach of tracking pages containing saved dump using map pages
- * allocated as needed has been derived from the Mission Critical Linux
- * mcore dump implementation.
- *
- * Credits and a big thanks for letting the lkcd project make use of
- * the excellent piece of work and also helping with clarifications
- * and tips along the way are due to:
- * Dave Winchell <winchell@mclx.com> (primary author of mcore)
- * Jeff Moyer <moyer@mclx.com>
- * Josh Huber <huber@mclx.com>
- *
- * For those familiar with the mcore code, the main differences worth
- * noting here (besides the dump device abstraction) result from enabling
- * "high" memory pages (pages not permanently mapped in the kernel
- * address space) to be used for saving dump data (because of which a
- * simple virtual address based linked list cannot be used anymore for
- * managing free pages), an added level of indirection for faster
- * lookups during the post-boot stage, and the idea of pages being
- * made available as they get freed up while dump to memory progresses
- * rather than one time before starting the dump. The last point enables
- * a full memory snapshot to be saved starting with an initial set of
- * bootstrap pages given a good compression ratio. (See dump_overlay.c)
- *
- */
-
-/*
- * -----------------MEMORY LAYOUT ------------------
- * The memory space consists of a set of discontiguous pages, and
- * discontiguous map pages as well, rooted in a chain of indirect
- * map pages (also discontiguous). Except for the indirect maps
- * (which must be preallocated in advance), the rest of the pages
- * could be in high memory.
- *
- * root
- * | --------- -------- --------
- * --> | . . +|--->| . +|------->| . . | indirect
- * --|--|--- ---|---- --|-|--- maps
- * | | | | |
- * ------ ------ ------- ------ -------
- * | . | | . | | . . | | . | | . . | maps
- * --|--- --|--- --|--|-- --|--- ---|-|--
- * page page page page page page page data
- * pages
- *
- * Writes to the dump device happen sequentially in append mode.
- * The main reason for the existence of the indirect map is
- * to enable a quick way to lookup a specific logical offset in
- * the saved data post-soft-boot, e.g. to writeout pages
- * with more critical data first, even though such pages
- * would have been compressed and copied last, being the lowest
- * ranked candidates for reuse due to their criticality.
- * (See dump_overlay.c)
- */
-#include <linux/mm.h>
-#include <linux/highmem.h>
-#include <linux/bootmem.h>
-#include <linux/dump.h>
-#include "dump_methods.h"
-
-#define DUMP_MAP_SZ (PAGE_SIZE / sizeof(unsigned long)) /* direct map size */
-#define DUMP_IND_MAP_SZ DUMP_MAP_SZ - 1 /* indirect map size */
-#define DUMP_NR_BOOTSTRAP 64 /* no of bootstrap pages */
-
-extern int dump_low_page(struct page *);
-
-/* check if the next entry crosses a page boundary */
-static inline int is_last_map_entry(unsigned long *map)
-{
- unsigned long addr = (unsigned long)(map + 1);
-
- return (!(addr & (PAGE_SIZE - 1)));
-}
-
-/* Todo: should have some validation checks */
-/* The last entry in the indirect map points to the next indirect map */
-/* Indirect maps are referred to directly by virtual address */
-static inline unsigned long *next_indirect_map(unsigned long *map)
-{
- return (unsigned long *)map[DUMP_IND_MAP_SZ];
-}
-
-#ifdef CONFIG_CRASH_DUMP_SOFTBOOT
-/* Called during early bootup - fixme: make this __init */
-void dump_early_reserve_map(struct dump_memdev *dev)
-{
- unsigned long *map1, *map2;
- loff_t off = 0, last = dev->last_used_offset >> PAGE_SHIFT;
- int i, j;
-
- printk("Reserve bootmap space holding previous dump of %lld pages\n",
- last);
- map1= (unsigned long *)dev->indirect_map_root;
-
- while (map1 && (off < last)) {
- reserve_bootmem(virt_to_phys((void *)map1), PAGE_SIZE);
- for (i=0; (i < DUMP_MAP_SZ - 1) && map1[i] && (off < last);
- i++, off += DUMP_MAP_SZ) {
- pr_debug("indirect map[%d] = 0x%lx\n", i, map1[i]);
- if (map1[i] >= max_low_pfn)
- continue;
- reserve_bootmem(map1[i] << PAGE_SHIFT, PAGE_SIZE);
- map2 = pfn_to_kaddr(map1[i]);
- for (j = 0 ; (j < DUMP_MAP_SZ) && map2[j] &&
- (off + j < last); j++) {
- pr_debug("\t map[%d][%d] = 0x%lx\n", i, j,
- map2[j]);
- if (map2[j] < max_low_pfn) {
- reserve_bootmem(map2[j] << PAGE_SHIFT,
- PAGE_SIZE);
- }
- }
- }
- map1 = next_indirect_map(map1);
- }
- dev->nr_free = 0; /* these pages don't belong to this boot */
-}
-#endif
-
-/* mark dump pages so that they aren't used by this kernel */
-void dump_mark_map(struct dump_memdev *dev)
-{
- unsigned long *map1, *map2;
- loff_t off = 0, last = dev->last_used_offset >> PAGE_SHIFT;
- struct page *page;
- int i, j;
-
- printk("Dump: marking pages in use by previous dump\n");
- map1= (unsigned long *)dev->indirect_map_root;
-
- while (map1 && (off < last)) {
- page = virt_to_page(map1);
- set_page_count(page, 1);
- for (i=0; (i < DUMP_MAP_SZ - 1) && map1[i] && (off < last);
- i++, off += DUMP_MAP_SZ) {
- pr_debug("indirect map[%d] = 0x%lx\n", i, map1[i]);
- page = pfn_to_page(map1[i]);
- set_page_count(page, 1);
- map2 = kmap_atomic(page, KM_DUMP);
- for (j = 0 ; (j < DUMP_MAP_SZ) && map2[j] &&
- (off + j < last); j++) {
- pr_debug("\t map[%d][%d] = 0x%lx\n", i, j,
- map2[j]);
- page = pfn_to_page(map2[j]);
- set_page_count(page, 1);
- }
- }
- map1 = next_indirect_map(map1);
- }
-}
-
-
-/*
- * Given a logical offset into the mem device lookup the
- * corresponding page
- * loc is specified in units of pages
- * Note: affects curr_map (even in the case where lookup fails)
- */
-struct page *dump_mem_lookup(struct dump_memdev *dump_mdev, unsigned long loc)
-{
- unsigned long *map;
- unsigned long i, index = loc / DUMP_MAP_SZ;
- struct page *page = NULL;
- unsigned long curr_pfn, curr_map, *curr_map_ptr = NULL;
-
- map = (unsigned long *)dump_mdev->indirect_map_root;
- if (!map)
- return NULL;
-
- if (loc > dump_mdev->last_offset >> PAGE_SHIFT)
- return NULL;
-
- /*
- * first locate the right indirect map
- * in the chain of indirect maps
- */
- for (i = 0; i + DUMP_IND_MAP_SZ < index ; i += DUMP_IND_MAP_SZ) {
- if (!(map = next_indirect_map(map)))
- return NULL;
- }
- /* then the right direct map */
- /* map entries are referred to by page index */
- if ((curr_map = map[index - i])) {
- page = pfn_to_page(curr_map);
- /* update the current traversal index */
- /* dump_mdev->curr_map = &map[index - i];*/
- curr_map_ptr = &map[index - i];
- }
-
- if (page)
- map = kmap_atomic(page, KM_DUMP);
- else
- return NULL;
-
- /* and finally the right entry therein */
- /* data pages are referred to by page index */
- i = index * DUMP_MAP_SZ;
- if ((curr_pfn = map[loc - i])) {
- page = pfn_to_page(curr_pfn);
- dump_mdev->curr_map = curr_map_ptr;
- dump_mdev->curr_map_offset = loc - i;
- dump_mdev->ddev.curr_offset = loc << PAGE_SHIFT;
- } else {
- page = NULL;
- }
- kunmap_atomic(map, KM_DUMP);
-
- return page;
-}
-
-/*
- * Retrieves a pointer to the next page in the dump device
- * Used during the lookup pass post-soft-reboot
- */
-struct page *dump_mem_next_page(struct dump_memdev *dev)
-{
- unsigned long i;
- unsigned long *map;
- struct page *page = NULL;
-
- if (dev->ddev.curr_offset + PAGE_SIZE >= dev->last_offset) {
- return NULL;
- }
-
- if ((i = (unsigned long)(++dev->curr_map_offset)) >= DUMP_MAP_SZ) {
- /* move to next map */
- if (is_last_map_entry(++dev->curr_map)) {
- /* move to the next indirect map page */
- printk("dump_mem_next_page: go to next indirect map\n");
- dev->curr_map = (unsigned long *)*dev->curr_map;
- if (!dev->curr_map)
- return NULL;
- }
- i = dev->curr_map_offset = 0;
- pr_debug("dump_mem_next_page: next map 0x%lx, entry 0x%lx\n",
- dev->curr_map, *dev->curr_map);
-
- };
-
- if (*dev->curr_map) {
- map = kmap_atomic(pfn_to_page(*dev->curr_map), KM_DUMP);
- if (map[i])
- page = pfn_to_page(map[i]);
- kunmap_atomic(map, KM_DUMP);
- dev->ddev.curr_offset += PAGE_SIZE;
- };
-
- return page;
-}
-
-/* Copied from dump_filters.c */
-static inline int kernel_page(struct page *p)
-{
- /* FIXME: Need to exclude hugetlb pages. Clue: reserved but inuse */
- return (PageReserved(p) && !PageInuse(p)) || (!PageLRU(p) && PageInuse(p));
-}
-
-static inline int user_page(struct page *p)
-{
- return PageInuse(p) && (!PageReserved(p) && PageLRU(p));
-}
-
-int dump_reused_by_boot(struct page *page)
-{
- /* Todo
- * Checks:
- * if PageReserved
- * if < __end + bootmem_bootmap_pages for this boot + allowance
- * if overwritten by initrd (how to check ?)
- * Also, add more checks in early boot code
- * e.g. bootmem bootmap alloc verify not overwriting dump, and if
- * so then realloc or move the dump pages out accordingly.
- */
-
- /* Temporary proof of concept hack, avoid overwriting kern pages */
-
- return (kernel_page(page) || dump_low_page(page) || user_page(page));
-}
-
-
-/* Uses the free page passed in to expand available space */
-int dump_mem_add_space(struct dump_memdev *dev, struct page *page)
-{
- struct page *map_page;
- unsigned long *map;
- unsigned long i;
-
- if (!dev->curr_map)
- return -ENOMEM; /* must've exhausted indirect map */
-
- if (!*dev->curr_map || dev->curr_map_offset >= DUMP_MAP_SZ) {
- /* add map space */
- *dev->curr_map = page_to_pfn(page);
- dev->curr_map_offset = 0;
- return 0;
- }
-
- /* add data space */
- i = dev->curr_map_offset;
- map_page = pfn_to_page(*dev->curr_map);
- map = (unsigned long *)kmap_atomic(map_page, KM_DUMP);
- map[i] = page_to_pfn(page);
- kunmap_atomic(map, KM_DUMP);
- dev->curr_map_offset = ++i;
- dev->last_offset += PAGE_SIZE;
- if (i >= DUMP_MAP_SZ) {
- /* move to next map */
- if (is_last_map_entry(++dev->curr_map)) {
- /* move to the next indirect map page */
- pr_debug("dump_mem_add_space: using next"
- "indirect map\n");
- dev->curr_map = (unsigned long *)*dev->curr_map;
- }
- }
- return 0;
-}
-
-
-/* Caution: making a dest page invalidates existing contents of the page */
-int dump_check_and_free_page(struct dump_memdev *dev, struct page *page)
-{
- int err = 0;
-
- /*
- * the page can be used as a destination only if we are sure
- * it won't get overwritten by the soft-boot, and is not
- * critical for us right now.
- */
- if (dump_reused_by_boot(page))
- return 0;
-
- if ((err = dump_mem_add_space(dev, page))) {
- printk("Warning: Unable to extend memdev space. Err %d\n",
- err);
- return 0;
- }
-
- dev->nr_free++;
- return 1;
-}
-
-
-/* Set up the initial maps and bootstrap space */
-/* Must be called only after any previous dump is written out */
-int dump_mem_open(struct dump_dev *dev, unsigned long devid)
-{
- struct dump_memdev *dump_mdev = DUMP_MDEV(dev);
- unsigned long nr_maps, *map, *prev_map = &dump_mdev->indirect_map_root;
- void *addr;
- struct page *page;
- unsigned long i = 0;
- int err = 0;
-
- /* Todo: sanity check for unwritten previous dump */
-
- /* allocate pages for indirect map (non highmem area) */
- nr_maps = num_physpages / DUMP_MAP_SZ; /* maps to cover entire mem */
- for (i = 0; i < nr_maps; i += DUMP_IND_MAP_SZ) {
- if (!(map = (unsigned long *)dump_alloc_mem(PAGE_SIZE))) {
- printk("Unable to alloc indirect map %ld\n",
- i / DUMP_IND_MAP_SZ);
- return -ENOMEM;
- }
- clear_page(map);
- *prev_map = (unsigned long)map;
- prev_map = &map[DUMP_IND_MAP_SZ];
- };
-
- dump_mdev->curr_map = (unsigned long *)dump_mdev->indirect_map_root;
- dump_mdev->curr_map_offset = 0;
-
- /*
- * allocate a few bootstrap pages: at least 1 map and 1 data page
- * plus enough to save the dump header
- */
- i = 0;
- do {
- if (!(addr = dump_alloc_mem(PAGE_SIZE))) {
- printk("Unable to alloc bootstrap page %ld\n", i);
- return -ENOMEM;
- }
-
- page = virt_to_page(addr);
- if (dump_low_page(page)) {
- dump_free_mem(addr);
- continue;
- }
-
- if (dump_mem_add_space(dump_mdev, page)) {
- printk("Warning: Unable to extend memdev "
- "space. Err %d\n", err);
- dump_free_mem(addr);
- continue;
- }
- i++;
- } while (i < DUMP_NR_BOOTSTRAP);
-
- printk("dump memdev init: %ld maps, %ld bootstrap pgs, %ld free pgs\n",
- nr_maps, i, dump_mdev->last_offset >> PAGE_SHIFT);
-
- dump_mdev->last_bs_offset = dump_mdev->last_offset;
-
- return 0;
-}
-
-/* Releases all pre-alloc'd pages */
-int dump_mem_release(struct dump_dev *dev)
-{
- struct dump_memdev *dump_mdev = DUMP_MDEV(dev);
- struct page *page, *map_page;
- unsigned long *map, *prev_map;
- void *addr;
- int i;
-
- if (!dump_mdev->nr_free)
- return 0;
-
- pr_debug("dump_mem_release\n");
- page = dump_mem_lookup(dump_mdev, 0);
- for (i = 0; page && (i < DUMP_NR_BOOTSTRAP - 1); i++) {
- if (PageHighMem(page))
- break;
- addr = page_address(page);
- if (!addr) {
- printk("page_address(%p) = NULL\n", page);
- break;
- }
- pr_debug("Freeing page at 0x%lx\n", addr);
- dump_free_mem(addr);
- if (dump_mdev->curr_map_offset >= DUMP_MAP_SZ - 1) {
- map_page = pfn_to_page(*dump_mdev->curr_map);
- if (PageHighMem(map_page))
- break;
- page = dump_mem_next_page(dump_mdev);
- addr = page_address(map_page);
- if (!addr) {
- printk("page_address(%p) = NULL\n",
- map_page);
- break;
- }
- pr_debug("Freeing map page at 0x%lx\n", addr);
- dump_free_mem(addr);
- i++;
- } else {
- page = dump_mem_next_page(dump_mdev);
- }
- }
-
- /* now for the last used bootstrap page used as a map page */
- if ((i < DUMP_NR_BOOTSTRAP) && (*dump_mdev->curr_map)) {
- map_page = pfn_to_page(*dump_mdev->curr_map);
- if ((map_page) && !PageHighMem(map_page)) {
- addr = page_address(map_page);
- if (!addr) {
- printk("page_address(%p) = NULL\n", map_page);
- } else {
- pr_debug("Freeing map page at 0x%lx\n", addr);
- dump_free_mem(addr);
- i++;
- }
- }
- }
-
- printk("Freed %d bootstrap pages\n", i);
-
- /* free the indirect maps */
- map = (unsigned long *)dump_mdev->indirect_map_root;
-
- i = 0;
- while (map) {
- prev_map = map;
- map = next_indirect_map(map);
- dump_free_mem(prev_map);
- i++;
- }
-
- printk("Freed %d indirect map(s)\n", i);
-
- /* Reset the indirect map */
- dump_mdev->indirect_map_root = 0;
- dump_mdev->curr_map = 0;
-
- /* Reset the free list */
- dump_mdev->nr_free = 0;
-
- dump_mdev->last_offset = dump_mdev->ddev.curr_offset = 0;
- dump_mdev->last_used_offset = 0;
- dump_mdev->curr_map = NULL;
- dump_mdev->curr_map_offset = 0;
- return 0;
-}
-
-/*
- * Long term:
- * It is critical for this to be very strict. Cannot afford
- * to have anything running and accessing memory while we overwrite
- * memory (potential risk of data corruption).
- * If in doubt (e.g if a cpu is hung and not responding) just give
- * up and refuse to proceed with this scheme.
- *
- * Note: I/O will only happen after soft-boot/switchover, so we can
- * safely disable interrupts and force stop other CPUs if this is
- * going to be a disruptive dump, no matter what they
- * are in the middle of.
- */
-/*
- * ATM Most of this is already taken care of in the nmi handler
- * We may halt the cpus rightaway if we know this is going to be disruptive
- * For now, since we've limited ourselves to overwriting free pages we
- * aren't doing much here. Eventually, we'd have to wait to make sure other
- * cpus aren't using memory we could be overwriting
- */
-int dump_mem_silence(struct dump_dev *dev)
-{
- struct dump_memdev *dump_mdev = DUMP_MDEV(dev);
-
- if (dump_mdev->last_offset > dump_mdev->last_bs_offset) {
- /* prefer to run lkcd config & start with a clean slate */
- return -EEXIST;
- }
- return 0;
-}
-
-extern int dump_overlay_resume(void);
-
-/* Trigger the next stage of dumping */
-int dump_mem_resume(struct dump_dev *dev)
-{
- dump_overlay_resume();
- return 0;
-}
-
-/*
- * Allocate mem dev pages as required and copy buffer contents into it.
- * Fails if the no free pages are available
- * Keeping it simple and limited for starters (can modify this over time)
- * Does not handle holes or a sparse layout
- * Data must be in multiples of PAGE_SIZE
- */
-int dump_mem_write(struct dump_dev *dev, void *buf, unsigned long len)
-{
- struct dump_memdev *dump_mdev = DUMP_MDEV(dev);
- struct page *page;
- unsigned long n = 0;
- void *addr;
- unsigned long *saved_curr_map, saved_map_offset;
- int ret = 0;
-
- pr_debug("dump_mem_write: offset 0x%llx, size %ld\n",
- dev->curr_offset, len);
-
- if (dev->curr_offset + len > dump_mdev->last_offset) {
- printk("Out of space to write\n");
- return -ENOSPC;
- }
-
- if ((len & (PAGE_SIZE - 1)) || (dev->curr_offset & (PAGE_SIZE - 1)))
- return -EINVAL; /* not aligned in units of page size */
-
- saved_curr_map = dump_mdev->curr_map;
- saved_map_offset = dump_mdev->curr_map_offset;
- page = dump_mem_lookup(dump_mdev, dev->curr_offset >> PAGE_SHIFT);
-
- for (n = len; (n > 0) && page; n -= PAGE_SIZE, buf += PAGE_SIZE ) {
- addr = kmap_atomic(page, KM_DUMP);
- /* memset(addr, 'x', PAGE_SIZE); */
- memcpy(addr, buf, PAGE_SIZE);
- kunmap_atomic(addr, KM_DUMP);
- /* dev->curr_offset += PAGE_SIZE; */
- page = dump_mem_next_page(dump_mdev);
- }
-
- dump_mdev->curr_map = saved_curr_map;
- dump_mdev->curr_map_offset = saved_map_offset;
-
- if (dump_mdev->last_used_offset < dev->curr_offset)
- dump_mdev->last_used_offset = dev->curr_offset;
-
- return (len - n) ? (len - n) : ret ;
-}
-
-/* dummy - always ready */
-int dump_mem_ready(struct dump_dev *dev, void *buf)
-{
- return 0;
-}
-
-/*
- * Should check for availability of space to write upto the offset
- * affects only the curr_offset; last_offset untouched
- * Keep it simple: Only allow multiples of PAGE_SIZE for now
- */
-int dump_mem_seek(struct dump_dev *dev, loff_t offset)
-{
- struct dump_memdev *dump_mdev = DUMP_MDEV(dev);
-
- if (offset & (PAGE_SIZE - 1))
- return -EINVAL; /* allow page size units only for now */
-
- /* Are we exceeding available space ? */
- if (offset > dump_mdev->last_offset) {
- printk("dump_mem_seek failed for offset 0x%llx\n",
- offset);
- return -ENOSPC;
- }
-
- dump_mdev->ddev.curr_offset = offset;
- return 0;
-}
-
-struct dump_dev_ops dump_memdev_ops = {
- .open = dump_mem_open,
- .release = dump_mem_release,
- .silence = dump_mem_silence,
- .resume = dump_mem_resume,
- .seek = dump_mem_seek,
- .write = dump_mem_write,
- .read = NULL, /* not implemented at the moment */
- .ready = dump_mem_ready
-};
-
-static struct dump_memdev default_dump_memdev = {
- .ddev = {.type_name = "memdev", .ops = &dump_memdev_ops,
- .device_id = 0x14}
- /* assume the rest of the fields are zeroed by default */
-};
-
-/* may be overwritten if a previous dump exists */
-struct dump_memdev *dump_memdev = &default_dump_memdev;
-
+++ /dev/null
-/*
- * Generic interfaces for flexible system dump
- *
- * Started: Oct 2002 - Suparna Bhattacharya (suparna@in.ibm.com)
- *
- * Copyright (C) 2002 International Business Machines Corp.
- *
- * This code is released under version 2 of the GNU GPL.
- */
-
-#ifndef _LINUX_DUMP_METHODS_H
-#define _LINUX_DUMP_METHODS_H
-
-/*
- * Inspired by Matt Robinson's suggestion of introducing dump
- * methods as a way to enable different crash dump facilities to
- * coexist where each employs its own scheme or dumping policy.
- *
- * The code here creates a framework for flexible dump by defining
- * a set of methods and providing associated helpers that differentiate
- * between the underlying mechanism (how to dump), overall scheme
- * (sequencing of stages and data dumped and associated quiescing),
- * output format (what the dump output looks like), target type
- * (where to save the dump; see dumpdev.h), and selection policy
- * (state/data to dump).
- *
- * These sets of interfaces can be mixed and matched to build a
- * dumper suitable for a given situation, allowing for
- * flexibility as well appropriate degree of code reuse.
- * For example all features and options of lkcd (including
- * granular selective dumping in the near future) should be
- * available even when say, the 2 stage soft-boot based mechanism
- * is used for taking disruptive dumps.
- *
- * Todo: Additionally modules or drivers may supply their own
- * custom dumpers which extend dump with module specific
- * information or hardware state, and can even tweak the
- * mechanism when it comes to saving state relevant to
- * them.
- */
-
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/highmem.h>
-#include <linux/dumpdev.h>
-
-#define MAX_PASSES 6
-#define MAX_DEVS 4
-
-
-/* To customise selection of pages to be dumped in a given pass/group */
-struct dump_data_filter{
- char name[32];
- int (*selector)(int, unsigned long, unsigned long);
- ulong level_mask; /* dump level(s) for which this filter applies */
- loff_t start[MAX_NUMNODES], end[MAX_NUMNODES]; /* location range applicable */
- ulong num_mbanks; /* Number of memory banks. Greater than one for discontig memory (NUMA) */
-};
-
-
-/*
- * Determined by the kind of dump mechanism and appropriate
- * overall scheme
- */
-struct dump_scheme_ops {
- /* sets aside memory, inits data structures etc */
- int (*configure)(unsigned long devid);
- /* releases resources */
- int (*unconfigure)(void);
-
- /* ordering of passes, invoking iterator */
- int (*sequencer)(void);
- /* iterates over system data, selects and acts on data to dump */
- int (*iterator)(int, int (*)(unsigned long, unsigned long),
- struct dump_data_filter *);
- /* action when data is selected for dump */
- int (*save_data)(unsigned long, unsigned long);
- /* action when data is to be excluded from dump */
- int (*skip_data)(unsigned long, unsigned long);
- /* policies for space, multiple dump devices etc */
- int (*write_buffer)(void *, unsigned long);
-};
-
-struct dump_scheme {
- /* the name serves as an anchor to locate the scheme after reboot */
- char name[32];
- struct dump_scheme_ops *ops;
- struct list_head list;
-};
-
-/* Quiescing/Silence levels (controls IPI callback behaviour) */
-extern enum dump_silence_levels {
- DUMP_SOFT_SPIN_CPUS = 1,
- DUMP_HARD_SPIN_CPUS = 2,
- DUMP_HALT_CPUS = 3,
-} dump_silence_level;
-
-/* determined by the dump (file) format */
-struct dump_fmt_ops {
- /* build header */
- int (*configure_header)(const char *, const struct pt_regs *);
- int (*update_header)(void); /* update header and write it out */
- /* save curr context */
- void (*save_context)(int, const struct pt_regs *,
- struct task_struct *);
- /* typically called by the save_data action */
- /* add formatted data to the dump buffer */
- int (*add_data)(unsigned long, unsigned long);
- int (*update_end_marker)(void);
-};
-
-struct dump_fmt {
- unsigned long magic;
- char name[32]; /* lcrash, crash, elf-core etc */
- struct dump_fmt_ops *ops;
- struct list_head list;
-};
-
-/*
- * Modules will be able add their own data capture schemes by
- * registering their own dumpers. Typically they would use the
- * primary dumper as a template and tune it with their routines.
- * Still Todo.
- */
-
-/* The combined dumper profile (mechanism, scheme, dev, fmt) */
-struct dumper {
- char name[32]; /* singlestage, overlay (stg1), passthru(stg2), pull */
- struct dump_scheme *scheme;
- struct dump_fmt *fmt;
- struct __dump_compress *compress;
- struct dump_data_filter *filter;
- struct dump_dev *dev;
- /* state valid only for active dumper(s) - per instance */
- /* run time state/context */
- int curr_pass;
- unsigned long count;
- loff_t curr_offset; /* current logical offset into dump device */
- loff_t curr_loc; /* current memory location */
- void *curr_buf; /* current position in the dump buffer */
- void *dump_buf; /* starting addr of dump buffer */
- int header_dirty; /* whether the header needs to be written out */
- int header_len;
- struct list_head dumper_list; /* links to other dumpers */
-};
-
-/* Starting point to get to the current configured state */
-struct dump_config {
- ulong level;
- ulong flags;
- struct dumper *dumper;
- unsigned long dump_device;
- unsigned long dump_addr; /* relevant only for in-memory dumps */
- struct list_head dump_dev_list;
-};
-
-extern struct dump_config dump_config;
-
-/* Used to save the dump config across a reboot for 2-stage dumps:
- *
- * Note: The scheme, format, compression and device type should be
- * registered at bootup, for this config to be sharable across soft-boot.
- * The function addresses could have changed and become invalid, and
- * need to be set up again.
- */
-struct dump_config_block {
- u64 magic; /* for a quick sanity check after reboot */
- struct dump_memdev memdev; /* handle to dump stored in memory */
- struct dump_config config;
- struct dumper dumper;
- struct dump_scheme scheme;
- struct dump_fmt fmt;
- struct __dump_compress compress;
- struct dump_data_filter filter_table[MAX_PASSES];
- struct dump_anydev dev[MAX_DEVS]; /* target dump device */
-};
-
-
-/* Wrappers that invoke the methods for the current (active) dumper */
-
-/* Scheme operations */
-
-static inline int dump_sequencer(void)
-{
- return dump_config.dumper->scheme->ops->sequencer();
-}
-
-static inline int dump_iterator(int pass, int (*action)(unsigned long,
- unsigned long), struct dump_data_filter *filter)
-{
- return dump_config.dumper->scheme->ops->iterator(pass, action, filter);
-}
-
-#define dump_save_data dump_config.dumper->scheme->ops->save_data
-#define dump_skip_data dump_config.dumper->scheme->ops->skip_data
-
-static inline int dump_write_buffer(void *buf, unsigned long len)
-{
- return dump_config.dumper->scheme->ops->write_buffer(buf, len);
-}
-
-static inline int dump_configure(unsigned long devid)
-{
- return dump_config.dumper->scheme->ops->configure(devid);
-}
-
-static inline int dump_unconfigure(void)
-{
- return dump_config.dumper->scheme->ops->unconfigure();
-}
-
-/* Format operations */
-
-static inline int dump_configure_header(const char *panic_str,
- const struct pt_regs *regs)
-{
- return dump_config.dumper->fmt->ops->configure_header(panic_str, regs);
-}
-
-static inline void dump_save_context(int cpu, const struct pt_regs *regs,
- struct task_struct *tsk)
-{
- dump_config.dumper->fmt->ops->save_context(cpu, regs, tsk);
-}
-
-static inline int dump_save_this_cpu(const struct pt_regs *regs)
-{
- int cpu = smp_processor_id();
-
- dump_save_context(cpu, regs, current);
- return 1;
-}
-
-static inline int dump_update_header(void)
-{
- return dump_config.dumper->fmt->ops->update_header();
-}
-
-static inline int dump_update_end_marker(void)
-{
- return dump_config.dumper->fmt->ops->update_end_marker();
-}
-
-static inline int dump_add_data(unsigned long loc, unsigned long sz)
-{
- return dump_config.dumper->fmt->ops->add_data(loc, sz);
-}
-
-/* Compression operation */
-static inline int dump_compress_data(char *src, int slen, char *dst)
-{
- return dump_config.dumper->compress->compress_func(src, slen,
- dst, DUMP_DPC_PAGE_SIZE);
-}
-
-
-/* Prototypes of some default implementations of dump methods */
-
-extern struct __dump_compress dump_none_compression;
-
-/* Default scheme methods (dump_scheme.c) */
-
-extern int dump_generic_sequencer(void);
-extern int dump_page_iterator(int pass, int (*action)(unsigned long, unsigned
- long), struct dump_data_filter *filter);
-extern int dump_generic_save_data(unsigned long loc, unsigned long sz);
-extern int dump_generic_skip_data(unsigned long loc, unsigned long sz);
-extern int dump_generic_write_buffer(void *buf, unsigned long len);
-extern int dump_generic_configure(unsigned long);
-extern int dump_generic_unconfigure(void);
-
-/* Default scheme template */
-extern struct dump_scheme dump_scheme_singlestage;
-
-/* Default dump format methods */
-
-extern int dump_lcrash_configure_header(const char *panic_str,
- const struct pt_regs *regs);
-extern void dump_lcrash_save_context(int cpu, const struct pt_regs *regs,
- struct task_struct *tsk);
-extern int dump_generic_update_header(void);
-extern int dump_lcrash_add_data(unsigned long loc, unsigned long sz);
-extern int dump_lcrash_update_end_marker(void);
-
-/* Default format (lcrash) template */
-extern struct dump_fmt dump_fmt_lcrash;
-
-/* Default dump selection filter table */
-
-/*
- * Entries listed in order of importance and correspond to passes
- * The last entry (with a level_mask of zero) typically reflects data that
- * won't be dumped -- this may for example be used to identify data
- * that will be skipped for certain so the corresponding memory areas can be
- * utilized as scratch space.
- */
-extern struct dump_data_filter dump_filter_table[];
-
-/* Some pre-defined dumpers */
-extern struct dumper dumper_singlestage;
-extern struct dumper dumper_stage1;
-extern struct dumper dumper_stage2;
-
-/* These are temporary */
-#define DUMP_MASK_HEADER DUMP_LEVEL_HEADER
-#define DUMP_MASK_KERN DUMP_LEVEL_KERN
-#define DUMP_MASK_USED DUMP_LEVEL_USED
-#define DUMP_MASK_UNUSED DUMP_LEVEL_ALL_RAM
-#define DUMP_MASK_REST 0 /* dummy for now */
-
-/* Helpers - move these to dump.h later ? */
-
-int dump_generic_execute(const char *panic_str, const struct pt_regs *regs);
-extern int dump_ll_write(void *buf, unsigned long len);
-int dump_check_and_free_page(struct dump_memdev *dev, struct page *page);
-
-static inline void dumper_reset(void)
-{
- dump_config.dumper->curr_buf = dump_config.dumper->dump_buf;
- dump_config.dumper->curr_loc = 0;
- dump_config.dumper->curr_offset = 0;
- dump_config.dumper->count = 0;
- dump_config.dumper->curr_pass = 0;
-}
-
-/*
- * May later be moulded to perform boot-time allocations so we can dump
- * earlier during bootup
- */
-static inline void *dump_alloc_mem(unsigned long size)
-{
- return kmalloc(size, GFP_KERNEL);
-}
-
-static inline void dump_free_mem(void *buf)
-{
- struct page *page;
-
- /* ignore reserved pages (e.g. post soft boot stage) */
- if (buf && (page = virt_to_page(buf))) {
- if (PageReserved(page))
- return;
- }
-
- kfree(buf);
-}
-
-
-#endif /* _LINUX_DUMP_METHODS_H */
+++ /dev/null
-/*
- * Implements the dump driver interface for saving a dump via network
- * interface.
- *
- * Some of this code has been taken/adapted from Ingo Molnar's netconsole
- * code. LKCD team expresses its thanks to Ingo.
- *
- * Started: June 2002 - Mohamed Abbas <mohamed.abbas@intel.com>
- * Adapted netconsole code to implement LKCD dump over the network.
- *
- * Nov 2002 - Bharata B. Rao <bharata@in.ibm.com>
- * Innumerable code cleanups, simplification and some fixes.
- * Netdump configuration done by ioctl instead of using module parameters.
- *
- * Copyright (C) 2001 Ingo Molnar <mingo@redhat.com>
- * Copyright (C) 2002 International Business Machines Corp.
- *
- * This code is released under version 2 of the GNU GPL.
- */
-
-#include <net/tcp.h>
-#include <net/udp.h>
-#include <linux/delay.h>
-#include <linux/random.h>
-#include <linux/reboot.h>
-#include <linux/module.h>
-#include <linux/dump.h>
-#include <linux/dump_netdev.h>
-#include <linux/percpu.h>
-
-#include <asm/unaligned.h>
-
-static int startup_handshake;
-static int page_counter;
-static struct net_device *dump_ndev;
-static struct in_device *dump_in_dev;
-static u16 source_port, target_port;
-static u32 source_ip, target_ip;
-static unsigned char daddr[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} ;
-static spinlock_t dump_skb_lock = SPIN_LOCK_UNLOCKED;
-static int dump_nr_skbs;
-static struct sk_buff *dump_skb;
-static unsigned long flags_global;
-static int netdump_in_progress;
-static char device_name[IFNAMSIZ];
-
-/*
- * security depends on the trusted path between the netconsole
- * server and netconsole client, since none of the packets are
- * encrypted. The random magic number protects the protocol
- * against spoofing.
- */
-static u64 dump_magic;
-
-#define MAX_UDP_CHUNK 1460
-#define MAX_PRINT_CHUNK (MAX_UDP_CHUNK-HEADER_LEN)
-
-/*
- * We maintain a small pool of fully-sized skbs,
- * to make sure the message gets out even in
- * extreme OOM situations.
- */
-#define DUMP_MAX_SKBS 32
-
-#define MAX_SKB_SIZE \
- (MAX_UDP_CHUNK + sizeof(struct udphdr) + \
- sizeof(struct iphdr) + sizeof(struct ethhdr))
-
-static void
-dump_refill_skbs(void)
-{
- struct sk_buff *skb;
- unsigned long flags;
-
- spin_lock_irqsave(&dump_skb_lock, flags);
- while (dump_nr_skbs < DUMP_MAX_SKBS) {
- skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
- if (!skb)
- break;
- if (dump_skb)
- skb->next = dump_skb;
- else
- skb->next = NULL;
- dump_skb = skb;
- dump_nr_skbs++;
- }
- spin_unlock_irqrestore(&dump_skb_lock, flags);
-}
-
-static struct
-sk_buff * dump_get_skb(void)
-{
- struct sk_buff *skb;
- unsigned long flags;
-
- spin_lock_irqsave(&dump_skb_lock, flags);
- skb = dump_skb;
- if (skb) {
- dump_skb = skb->next;
- skb->next = NULL;
- dump_nr_skbs--;
- }
- spin_unlock_irqrestore(&dump_skb_lock, flags);
-
- return skb;
-}
-
-/*
- * Zap completed output skbs.
- */
-static void
-zap_completion_queue(void)
-{
- int count;
- unsigned long flags;
- struct softnet_data *sd;
-
- count=0;
- sd = &__get_cpu_var(softnet_data);
- if (sd->completion_queue) {
- struct sk_buff *clist;
-
- local_irq_save(flags);
- clist = sd->completion_queue;
- sd->completion_queue = NULL;
- local_irq_restore(flags);
-
- while (clist != NULL) {
- struct sk_buff *skb = clist;
- clist = clist->next;
- __kfree_skb(skb);
- count++;
- if (count > 10000)
- printk("Error in sk list\n");
- }
- }
-}
-
-static void
-dump_send_skb(struct net_device *dev, const char *msg, unsigned int msg_len,
- reply_t *reply)
-{
- int once = 1;
- int total_len, eth_len, ip_len, udp_len, count = 0;
- struct sk_buff *skb;
- struct udphdr *udph;
- struct iphdr *iph;
- struct ethhdr *eth;
-
- udp_len = msg_len + HEADER_LEN + sizeof(*udph);
- ip_len = eth_len = udp_len + sizeof(*iph);
- total_len = eth_len + ETH_HLEN;
-
-repeat_loop:
- zap_completion_queue();
- if (dump_nr_skbs < DUMP_MAX_SKBS)
- dump_refill_skbs();
-
- skb = alloc_skb(total_len, GFP_ATOMIC);
- if (!skb) {
- skb = dump_get_skb();
- if (!skb) {
- count++;
- if (once && (count == 1000000)) {
- printk("possibly FATAL: out of netconsole "
- "skbs!!! will keep retrying.\n");
- once = 0;
- }
- dev->poll_controller(dev);
- goto repeat_loop;
- }
- }
-
- atomic_set(&skb->users, 1);
- skb_reserve(skb, total_len - msg_len - HEADER_LEN);
- skb->data[0] = NETCONSOLE_VERSION;
-
- put_unaligned(htonl(reply->nr), (u32 *) (skb->data + 1));
- put_unaligned(htonl(reply->code), (u32 *) (skb->data + 5));
- put_unaligned(htonl(reply->info), (u32 *) (skb->data + 9));
-
- memcpy(skb->data + HEADER_LEN, msg, msg_len);
- skb->len += msg_len + HEADER_LEN;
-
- udph = (struct udphdr *) skb_push(skb, sizeof(*udph));
- udph->source = source_port;
- udph->dest = target_port;
- udph->len = htons(udp_len);
- udph->check = 0;
-
- iph = (struct iphdr *)skb_push(skb, sizeof(*iph));
-
- iph->version = 4;
- iph->ihl = 5;
- iph->tos = 0;
- iph->tot_len = htons(ip_len);
- iph->id = 0;
- iph->frag_off = 0;
- iph->ttl = 64;
- iph->protocol = IPPROTO_UDP;
- iph->check = 0;
- iph->saddr = source_ip;
- iph->daddr = target_ip;
- iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
-
- eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
-
- eth->h_proto = htons(ETH_P_IP);
- memcpy(eth->h_source, dev->dev_addr, dev->addr_len);
- memcpy(eth->h_dest, daddr, dev->addr_len);
-
- count=0;
-repeat_poll:
- spin_lock(&dev->xmit_lock);
- dev->xmit_lock_owner = smp_processor_id();
-
- count++;
-
-
- if (netif_queue_stopped(dev)) {
- dev->xmit_lock_owner = -1;
- spin_unlock(&dev->xmit_lock);
-
- dev->poll_controller(dev);
- zap_completion_queue();
-
-
- goto repeat_poll;
- }
-
- dev->hard_start_xmit(skb, dev);
-
- dev->xmit_lock_owner = -1;
- spin_unlock(&dev->xmit_lock);
-}
-
-static unsigned short
-udp_check(struct udphdr *uh, int len, unsigned long saddr, unsigned long daddr,
- unsigned long base)
-{
- return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, base);
-}
-
-static int
-udp_checksum_init(struct sk_buff *skb, struct udphdr *uh,
- unsigned short ulen, u32 saddr, u32 daddr)
-{
- if (uh->check == 0) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- } else if (skb->ip_summed == CHECKSUM_HW) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- if (!udp_check(uh, ulen, saddr, daddr, skb->csum))
- return 0;
- skb->ip_summed = CHECKSUM_NONE;
- }
- if (skb->ip_summed != CHECKSUM_UNNECESSARY)
- skb->csum = csum_tcpudp_nofold(saddr, daddr, ulen,
- IPPROTO_UDP, 0);
- /* Probably, we should checksum udp header (it should be in cache
- * in any case) and data in tiny packets (< rx copybreak).
- */
- return 0;
-}
-
-static __inline__ int
-__udp_checksum_complete(struct sk_buff *skb)
-{
- return (unsigned short)csum_fold(skb_checksum(skb, 0, skb->len,
- skb->csum));
-}
-
-static __inline__
-int udp_checksum_complete(struct sk_buff *skb)
-{
- return skb->ip_summed != CHECKSUM_UNNECESSARY &&
- __udp_checksum_complete(skb);
-}
-
-int new_req = 0;
-static req_t req;
-
-static int
-dump_rx_hook(struct sk_buff *skb)
-{
- int proto;
- struct iphdr *iph;
- struct udphdr *uh;
- __u32 len, saddr, daddr, ulen;
- req_t *__req;
-
- /*
- * First check if were are dumping or doing startup handshake, if
- * not quickly return.
- */
- if (!netdump_in_progress)
- return NET_RX_SUCCESS;
-
- if (skb->dev->type != ARPHRD_ETHER)
- goto out;
-
- proto = ntohs(skb->mac.ethernet->h_proto);
- if (proto != ETH_P_IP)
- goto out;
-
- if (skb->pkt_type == PACKET_OTHERHOST)
- goto out;
-
- if (skb_shared(skb))
- goto out;
-
- /* IP header correctness testing: */
- iph = (struct iphdr *)skb->data;
- if (!pskb_may_pull(skb, sizeof(struct iphdr)))
- goto out;
-
- if (iph->ihl < 5 || iph->version != 4)
- goto out;
-
- if (!pskb_may_pull(skb, iph->ihl*4))
- goto out;
-
- if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
- goto out;
-
- len = ntohs(iph->tot_len);
- if (skb->len < len || len < iph->ihl*4)
- goto out;
-
- saddr = iph->saddr;
- daddr = iph->daddr;
- if (iph->protocol != IPPROTO_UDP)
- goto out;
-
- if (source_ip != daddr)
- goto out;
-
- if (target_ip != saddr)
- goto out;
-
- len -= iph->ihl*4;
- uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
- ulen = ntohs(uh->len);
-
- if (ulen != len || ulen < (sizeof(*uh) + sizeof(*__req)))
- goto out;
-
- if (udp_checksum_init(skb, uh, ulen, saddr, daddr) < 0)
- goto out;
-
- if (udp_checksum_complete(skb))
- goto out;
-
- if (source_port != uh->dest)
- goto out;
-
- if (target_port != uh->source)
- goto out;
-
- __req = (req_t *)(uh + 1);
- if ((ntohl(__req->command) != COMM_GET_MAGIC) &&
- (ntohl(__req->command) != COMM_HELLO) &&
- (ntohl(__req->command) != COMM_START_WRITE_NETDUMP_ACK) &&
- (ntohl(__req->command) != COMM_START_NETDUMP_ACK) &&
- (memcmp(&__req->magic, &dump_magic, sizeof(dump_magic)) != 0))
- goto out;
-
- req.magic = ntohl(__req->magic);
- req.command = ntohl(__req->command);
- req.from = ntohl(__req->from);
- req.to = ntohl(__req->to);
- req.nr = ntohl(__req->nr);
- new_req = 1;
-out:
- return NET_RX_DROP;
-}
-
-static void
-dump_send_mem(struct net_device *dev, req_t *req, const char* buff, size_t len)
-{
- int i;
-
- int nr_chunks = len/1024;
- reply_t reply;
-
- reply.nr = req->nr;
- reply.info = 0;
-
- if ( nr_chunks <= 0)
- nr_chunks = 1;
- for (i = 0; i < nr_chunks; i++) {
- unsigned int offset = i*1024;
- reply.code = REPLY_MEM;
- reply.info = offset;
- dump_send_skb(dev, buff + offset, 1024, &reply);
- }
-}
-
-/*
- * This function waits for the client to acknowledge the receipt
- * of the netdump startup reply, with the possibility of packets
- * getting lost. We resend the startup packet if no ACK is received,
- * after a 1 second delay.
- *
- * (The client can test the success of the handshake via the HELLO
- * command, and send ACKs until we enter netdump mode.)
- */
-static int
-dump_handshake(struct dump_dev *net_dev)
-{
- char tmp[200];
- reply_t reply;
- int i, j;
-
- if (startup_handshake) {
- sprintf(tmp, "NETDUMP start, waiting for start-ACK.\n");
- reply.code = REPLY_START_NETDUMP;
- reply.nr = 0;
- reply.info = 0;
- } else {
- sprintf(tmp, "NETDUMP start, waiting for start-ACK.\n");
- reply.code = REPLY_START_WRITE_NETDUMP;
- reply.nr = net_dev->curr_offset;
- reply.info = net_dev->curr_offset;
- }
-
- /* send 300 handshake packets before declaring failure */
- for (i = 0; i < 300; i++) {
- dump_send_skb(dump_ndev, tmp, strlen(tmp), &reply);
-
- /* wait 1 sec */
- for (j = 0; j < 10000; j++) {
- udelay(100);
- dump_ndev->poll_controller(dump_ndev);
- zap_completion_queue();
- if (new_req)
- break;
- }
-
- /*
- * if there is no new request, try sending the handshaking
- * packet again
- */
- if (!new_req)
- continue;
-
- /*
- * check if the new request is of the expected type,
- * if so, return, else try sending the handshaking
- * packet again
- */
- if (startup_handshake) {
- if (req.command == COMM_HELLO || req.command ==
- COMM_START_NETDUMP_ACK) {
- return 0;
- } else {
- new_req = 0;
- continue;
- }
- } else {
- if (req.command == COMM_SEND_MEM) {
- return 0;
- } else {
- new_req = 0;
- continue;
- }
- }
- }
- return -1;
-}
-
-static ssize_t
-do_netdump(struct dump_dev *net_dev, const char* buff, size_t len)
-{
- reply_t reply;
- char tmp[200];
- ssize_t ret = 0;
- int repeatCounter, counter, total_loop;
-
- netdump_in_progress = 1;
-
- if (dump_handshake(net_dev) < 0) {
- printk("network dump failed due to handshake failure\n");
- goto out;
- }
-
- /*
- * Ideally startup handshake should be done during dump configuration,
- * i.e., in dump_net_open(). This will be done when I figure out
- * the dependency between startup handshake, subsequent write and
- * various commands wrt to net-server.
- */
- if (startup_handshake)
- startup_handshake = 0;
-
- counter = 0;
- repeatCounter = 0;
- total_loop = 0;
- while (1) {
- if (!new_req) {
- dump_ndev->poll_controller(dump_ndev);
- zap_completion_queue();
- }
- if (!new_req) {
- repeatCounter++;
-
- if (repeatCounter > 5) {
- counter++;
- if (counter > 10000) {
- if (total_loop >= 100000) {
- printk("Time OUT LEAVE NOW\n");
- goto out;
- } else {
- total_loop++;
- printk("Try number %d out of "
- "10 before Time Out\n",
- total_loop);
- }
- }
- mdelay(1);
- repeatCounter = 0;
- }
- continue;
- }
- repeatCounter = 0;
- counter = 0;
- total_loop = 0;
- new_req = 0;
- switch (req.command) {
- case COMM_NONE:
- break;
-
- case COMM_SEND_MEM:
- dump_send_mem(dump_ndev, &req, buff, len);
- break;
-
- case COMM_EXIT:
- case COMM_START_WRITE_NETDUMP_ACK:
- ret = len;
- goto out;
-
- case COMM_HELLO:
- sprintf(tmp, "Hello, this is netdump version "
- "0.%02d\n", NETCONSOLE_VERSION);
- reply.code = REPLY_HELLO;
- reply.nr = req.nr;
- reply.info = net_dev->curr_offset;
- dump_send_skb(dump_ndev, tmp, strlen(tmp), &reply);
- break;
-
- case COMM_GET_PAGE_SIZE:
- sprintf(tmp, "PAGE_SIZE: %ld\n", PAGE_SIZE);
- reply.code = REPLY_PAGE_SIZE;
- reply.nr = req.nr;
- reply.info = PAGE_SIZE;
- dump_send_skb(dump_ndev, tmp, strlen(tmp), &reply);
- break;
-
- case COMM_GET_NR_PAGES:
- reply.code = REPLY_NR_PAGES;
- reply.nr = req.nr;
- reply.info = num_physpages;
- reply.info = page_counter;
- sprintf(tmp, "Number of pages: %ld\n", num_physpages);
- dump_send_skb(dump_ndev, tmp, strlen(tmp), &reply);
- break;
-
- case COMM_GET_MAGIC:
- reply.code = REPLY_MAGIC;
- reply.nr = req.nr;
- reply.info = NETCONSOLE_VERSION;
- dump_send_skb(dump_ndev, (char *)&dump_magic,
- sizeof(dump_magic), &reply);
- break;
-
- default:
- reply.code = REPLY_ERROR;
- reply.nr = req.nr;
- reply.info = req.command;
- sprintf(tmp, "Got unknown command code %d!\n",
- req.command);
- dump_send_skb(dump_ndev, tmp, strlen(tmp), &reply);
- break;
- }
- }
-out:
- netdump_in_progress = 0;
- return ret;
-}
-
-static int
-dump_validate_config(void)
-{
- source_ip = dump_in_dev->ifa_list->ifa_local;
- if (!source_ip) {
- printk("network device %s has no local address, "
- "aborting.\n", device_name);
- return -1;
- }
-
-#define IP(x) ((unsigned char *)&source_ip)[x]
- printk("Source %d.%d.%d.%d", IP(0), IP(1), IP(2), IP(3));
-#undef IP
-
- if (!source_port) {
- printk("source_port parameter not specified, aborting.\n");
- return -1;
- }
- printk(":%i\n", source_port);
- source_port = htons(source_port);
-
- if (!target_ip) {
- printk("target_ip parameter not specified, aborting.\n");
- return -1;
- }
-
-#define IP(x) ((unsigned char *)&target_ip)[x]
- printk("Target %d.%d.%d.%d", IP(0), IP(1), IP(2), IP(3));
-#undef IP
-
- if (!target_port) {
- printk("target_port parameter not specified, aborting.\n");
- return -1;
- }
- printk(":%i\n", target_port);
- target_port = htons(target_port);
-
- printk("Target Ethernet Address %02x:%02x:%02x:%02x:%02x:%02x",
- daddr[0], daddr[1], daddr[2], daddr[3], daddr[4], daddr[5]);
-
- if ((daddr[0] & daddr[1] & daddr[2] & daddr[3] & daddr[4] &
- daddr[5]) == 255)
- printk("(Broadcast)");
- printk("\n");
- return 0;
-}
-
-/*
- * Prepares the dump device so we can take a dump later.
- * Validates the netdump configuration parameters.
- *
- * TODO: Network connectivity check should be done here.
- */
-static int
-dump_net_open(struct dump_dev *net_dev, unsigned long arg)
-{
- int retval = 0;
-
- /* get the interface name */
- if (copy_from_user(device_name, (void *)arg, IFNAMSIZ))
- return -EFAULT;
-
- if (!(dump_ndev = dev_get_by_name(device_name))) {
- printk("network device %s does not exist, aborting.\n",
- device_name);
- return -ENODEV;
- }
-
- if (!dump_ndev->poll_controller) {
- printk("network device %s does not implement polling yet, "
- "aborting.\n", device_name);
- retval = -1; /* return proper error */
- goto err1;
- }
-
- if (!(dump_in_dev = in_dev_get(dump_ndev))) {
- printk("network device %s is not an IP protocol device, "
- "aborting.\n", device_name);
- retval = -EINVAL;
- goto err1;
- }
-
- if ((retval = dump_validate_config()) < 0)
- goto err2;
-
- net_dev->curr_offset = 0;
- printk("Network device %s successfully configured for dumping\n",
- device_name);
- return retval;
-err2:
- in_dev_put(dump_in_dev);
-err1:
- dev_put(dump_ndev);
- return retval;
-}
-
-/*
- * Close the dump device and release associated resources
- * Invoked when unconfiguring the dump device.
- */
-static int
-dump_net_release(struct dump_dev *net_dev)
-{
- if (dump_in_dev)
- in_dev_put(dump_in_dev);
- if (dump_ndev)
- dev_put(dump_ndev);
- return 0;
-}
-
-/*
- * Prepare the dump device for use (silence any ongoing activity
- * and quiesce state) when the system crashes.
- */
-static int
-dump_net_silence(struct dump_dev *net_dev)
-{
- netpoll_set_trap(1);
- local_irq_save(flags_global);
- dump_ndev->rx_hook = dump_rx_hook;
- startup_handshake = 1;
- net_dev->curr_offset = 0;
- printk("Dumping to network device %s on CPU %d ...\n", device_name,
- smp_processor_id());
- return 0;
-}
-
-/*
- * Invoked when dumping is done. This is the time to put things back
- * (i.e. undo the effects of dump_block_silence) so the device is
- * available for normal use.
- */
-static int
-dump_net_resume(struct dump_dev *net_dev)
-{
- int indx;
- reply_t reply;
- char tmp[200];
-
- if (!dump_ndev)
- return (0);
-
- sprintf(tmp, "NETDUMP end.\n");
- for( indx = 0; indx < 6; indx++) {
- reply.code = REPLY_END_NETDUMP;
- reply.nr = 0;
- reply.info = 0;
- dump_send_skb(dump_ndev, tmp, strlen(tmp), &reply);
- }
- printk("NETDUMP END!\n");
- local_irq_restore(flags_global);
- netpoll_set_trap(0);
- dump_ndev->rx_hook = NULL;
- startup_handshake = 0;
- return 0;
-}
-
-/*
- * Seek to the specified offset in the dump device.
- * Makes sure this is a valid offset, otherwise returns an error.
- */
-static int
-dump_net_seek(struct dump_dev *net_dev, loff_t off)
-{
- /*
- * For now using DUMP_HEADER_OFFSET as hard coded value,
- * See dump_block_seekin dump_blockdev.c to know how to
- * do this properly.
- */
- net_dev->curr_offset = off;
- return 0;
-}
-
-/*
- *
- */
-static int
-dump_net_write(struct dump_dev *net_dev, void *buf, unsigned long len)
-{
- int cnt, i, off;
- ssize_t ret;
-
- cnt = len/ PAGE_SIZE;
-
- for (i = 0; i < cnt; i++) {
- off = i* PAGE_SIZE;
- ret = do_netdump(net_dev, buf+off, PAGE_SIZE);
- if (ret <= 0)
- return -1;
- net_dev->curr_offset = net_dev->curr_offset + PAGE_SIZE;
- }
- return len;
-}
-
-/*
- * check if the last dump i/o is over and ready for next request
- */
-static int
-dump_net_ready(struct dump_dev *net_dev, void *buf)
-{
- return 0;
-}
-
-/*
- * ioctl function used for configuring network dump
- */
-static int
-dump_net_ioctl(struct dump_dev *net_dev, unsigned int cmd, unsigned long arg)
-{
- switch (cmd) {
- case DIOSTARGETIP:
- target_ip = arg;
- break;
- case DIOSTARGETPORT:
- target_port = (u16)arg;
- break;
- case DIOSSOURCEPORT:
- source_port = (u16)arg;
- break;
- case DIOSETHADDR:
- return copy_from_user(daddr, (void *)arg, 6);
- break;
- case DIOGTARGETIP:
- case DIOGTARGETPORT:
- case DIOGSOURCEPORT:
- case DIOGETHADDR:
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-struct dump_dev_ops dump_netdev_ops = {
- .open = dump_net_open,
- .release = dump_net_release,
- .silence = dump_net_silence,
- .resume = dump_net_resume,
- .seek = dump_net_seek,
- .write = dump_net_write,
- /* .read not implemented */
- .ready = dump_net_ready,
- .ioctl = dump_net_ioctl
-};
-
-static struct dump_dev default_dump_netdev = {
- .type_name = "networkdev",
- .ops = &dump_netdev_ops,
- .curr_offset = 0
-};
-
-static int __init
-dump_netdev_init(void)
-{
- default_dump_netdev.curr_offset = 0;
-
- if (dump_register_device(&default_dump_netdev) < 0) {
- printk("network dump device driver registration failed\n");
- return -1;
- }
- printk("network device driver for LKCD registered\n");
-
- get_random_bytes(&dump_magic, sizeof(dump_magic));
- return 0;
-}
-
-static void __exit
-dump_netdev_cleanup(void)
-{
- dump_unregister_device(&default_dump_netdev);
-}
-
-MODULE_AUTHOR("LKCD Development Team <lkcd-devel@lists.sourceforge.net>");
-MODULE_DESCRIPTION("Network Dump Driver for Linux Kernel Crash Dump (LKCD)");
-MODULE_LICENSE("GPL");
-
-module_init(dump_netdev_init);
-module_exit(dump_netdev_cleanup);
+++ /dev/null
-/*
- * Two-stage soft-boot based dump scheme methods (memory overlay
- * with post soft-boot writeout)
- *
- * Started: Oct 2002 - Suparna Bhattacharya <suparna@in.ibm.com>
- *
- * This approach of saving the dump in memory and writing it
- * out after a softboot without clearing memory is derived from the
- * Mission Critical Linux dump implementation. Credits and a big
- * thanks for letting the lkcd project make use of the excellent
- * piece of work and also for helping with clarifications and
- * tips along the way are due to:
- * Dave Winchell <winchell@mclx.com> (primary author of mcore)
- * and also to
- * Jeff Moyer <moyer@mclx.com>
- * Josh Huber <huber@mclx.com>
- *
- * For those familiar with the mcore implementation, the key
- * differences/extensions here are in allowing entire memory to be
- * saved (in compressed form) through a careful ordering scheme
- * on both the way down as well on the way up after boot, the latter
- * for supporting the LKCD notion of passes in which most critical
- * data is the first to be saved to the dump device. Also the post
- * boot writeout happens from within the kernel rather than driven
- * from userspace.
- *
- * The sequence is orchestrated through the abstraction of "dumpers",
- * one for the first stage which then sets up the dumper for the next
- * stage, providing for a smooth and flexible reuse of the singlestage
- * dump scheme methods and a handle to pass dump device configuration
- * information across the soft boot.
- *
- * Copyright (C) 2002 International Business Machines Corp.
- *
- * This code is released under version 2 of the GNU GPL.
- */
-
-/*
- * Disruptive dumping using the second kernel soft-boot option
- * for issuing dump i/o operates in 2 stages:
- *
- * (1) - Saves the (compressed & formatted) dump in memory using a
- * carefully ordered overlay scheme designed to capture the
- * entire physical memory or selective portions depending on
- * dump config settings,
- * - Registers the stage 2 dumper and
- * - Issues a soft reboot w/o clearing memory.
- *
- * The overlay scheme starts with a small bootstrap free area
- * and follows a reverse ordering of passes wherein it
- * compresses and saves data starting with the least critical
- * areas first, thus freeing up the corresponding pages to
- * serve as destination for subsequent data to be saved, and
- * so on. With a good compression ratio, this makes it feasible
- * to capture an entire physical memory dump without significantly
- * reducing memory available during regular operation.
- *
- * (2) Post soft-reboot, runs through the saved memory dump and
- * writes it out to disk, this time around, taking care to
- * save the more critical data first (i.e. pages which figure
- * in early passes for a regular dump). Finally issues a
- * clean reboot.
- *
- * Since the data was saved in memory after selection/filtering
- * and formatted as per the chosen output dump format, at this
- * stage the filter and format actions are just dummy (or
- * passthrough) actions, except for influence on ordering of
- * passes.
- */
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/highmem.h>
-#include <linux/bootmem.h>
-#include <linux/dump.h>
-#ifdef CONFIG_KEXEC
-#include <linux/delay.h>
-#include <linux/reboot.h>
-#include <linux/kexec.h>
-#endif
-#include "dump_methods.h"
-
-extern struct list_head dumper_list_head;
-extern struct dump_memdev *dump_memdev;
-extern struct dumper dumper_stage2;
-struct dump_config_block *dump_saved_config = NULL;
-extern struct dump_blockdev *dump_blockdev;
-static struct dump_memdev *saved_dump_memdev = NULL;
-static struct dumper *saved_dumper = NULL;
-
-#ifdef CONFIG_KEXEC
-extern int panic_timeout;
-#endif
-
-/* For testing
-extern void dump_display_map(struct dump_memdev *);
-*/
-
-struct dumper *dumper_by_name(char *name)
-{
-#ifdef LATER
- struct dumper *dumper;
- list_for_each_entry(dumper, &dumper_list_head, dumper_list)
- if (!strncmp(dumper->name, name, 32))
- return dumper;
-
- /* not found */
- return NULL;
-#endif
- /* Temporary proof of concept */
- if (!strncmp(dumper_stage2.name, name, 32))
- return &dumper_stage2;
- else
- return NULL;
-}
-
-#ifdef CONFIG_CRASH_DUMP_SOFTBOOT
-extern void dump_early_reserve_map(struct dump_memdev *);
-
-void crashdump_reserve(void)
-{
- extern unsigned long crashdump_addr;
-
- if (crashdump_addr == 0xdeadbeef)
- return;
-
- /* reserve dump config and saved dump pages */
- dump_saved_config = (struct dump_config_block *)crashdump_addr;
- /* magic verification */
- if (dump_saved_config->magic != DUMP_MAGIC_LIVE) {
- printk("Invalid dump magic. Ignoring dump\n");
- dump_saved_config = NULL;
- return;
- }
-
- printk("Dump may be available from previous boot\n");
-
- reserve_bootmem(virt_to_phys((void *)crashdump_addr),
- PAGE_ALIGN(sizeof(struct dump_config_block)));
- dump_early_reserve_map(&dump_saved_config->memdev);
-
-}
-#endif
-
-/*
- * Loads the dump configuration from a memory block saved across soft-boot
- * The ops vectors need fixing up as the corresp. routines may have
- * relocated in the new soft-booted kernel.
- */
-int dump_load_config(struct dump_config_block *config)
-{
- struct dumper *dumper;
- struct dump_data_filter *filter_table, *filter;
- struct dump_dev *dev;
- int i;
-
- if (config->magic != DUMP_MAGIC_LIVE)
- return -ENOENT; /* not a valid config */
-
- /* initialize generic config data */
- memcpy(&dump_config, &config->config, sizeof(dump_config));
-
- /* initialize dumper state */
- if (!(dumper = dumper_by_name(config->dumper.name))) {
- printk("dumper name mismatch\n");
- return -ENOENT; /* dumper mismatch */
- }
-
- /* verify and fixup schema */
- if (strncmp(dumper->scheme->name, config->scheme.name, 32)) {
- printk("dumper scheme mismatch\n");
- return -ENOENT; /* mismatch */
- }
- config->scheme.ops = dumper->scheme->ops;
- config->dumper.scheme = &config->scheme;
-
- /* verify and fixup filter operations */
- filter_table = dumper->filter;
- for (i = 0, filter = config->filter_table;
- ((i < MAX_PASSES) && filter_table[i].selector);
- i++, filter++) {
- if (strncmp(filter_table[i].name, filter->name, 32)) {
- printk("dump filter mismatch\n");
- return -ENOENT; /* filter name mismatch */
- }
- filter->selector = filter_table[i].selector;
- }
- config->dumper.filter = config->filter_table;
-
- /* fixup format */
- if (strncmp(dumper->fmt->name, config->fmt.name, 32)) {
- printk("dump format mismatch\n");
- return -ENOENT; /* mismatch */
- }
- config->fmt.ops = dumper->fmt->ops;
- config->dumper.fmt = &config->fmt;
-
- /* fixup target device */
- dev = (struct dump_dev *)(&config->dev[0]);
- if (dumper->dev == NULL) {
- pr_debug("Vanilla dumper - assume default\n");
- if (dump_dev == NULL)
- return -ENODEV;
- dumper->dev = dump_dev;
- }
-
- if (strncmp(dumper->dev->type_name, dev->type_name, 32)) {
- printk("dump dev type mismatch %s instead of %s\n",
- dev->type_name, dumper->dev->type_name);
- return -ENOENT; /* mismatch */
- }
- dev->ops = dumper->dev->ops;
- config->dumper.dev = dev;
-
- /* fixup memory device containing saved dump pages */
- /* assume statically init'ed dump_memdev */
- config->memdev.ddev.ops = dump_memdev->ddev.ops;
- /* switch to memdev from prev boot */
- saved_dump_memdev = dump_memdev; /* remember current */
- dump_memdev = &config->memdev;
-
- /* Make this the current primary dumper */
- dump_config.dumper = &config->dumper;
-
- return 0;
-}
-
-/* Saves the dump configuration in a memory block for use across a soft-boot */
-int dump_save_config(struct dump_config_block *config)
-{
- printk("saving dump config settings\n");
-
- /* dump config settings */
- memcpy(&config->config, &dump_config, sizeof(dump_config));
-
- /* dumper state */
- memcpy(&config->dumper, dump_config.dumper, sizeof(struct dumper));
- memcpy(&config->scheme, dump_config.dumper->scheme,
- sizeof(struct dump_scheme));
- memcpy(&config->fmt, dump_config.dumper->fmt, sizeof(struct dump_fmt));
- memcpy(&config->dev[0], dump_config.dumper->dev,
- sizeof(struct dump_anydev));
- memcpy(&config->filter_table, dump_config.dumper->filter,
- sizeof(struct dump_data_filter)*MAX_PASSES);
-
- /* handle to saved mem pages */
- memcpy(&config->memdev, dump_memdev, sizeof(struct dump_memdev));
-
- config->magic = DUMP_MAGIC_LIVE;
-
- return 0;
-}
-
-int dump_init_stage2(struct dump_config_block *saved_config)
-{
- int err = 0;
-
- pr_debug("dump_init_stage2\n");
- /* Check if dump from previous boot exists */
- if (saved_config) {
- printk("loading dumper from previous boot \n");
- /* load and configure dumper from previous boot */
- if ((err = dump_load_config(saved_config)))
- return err;
-
- if (!dump_oncpu) {
- if ((err = dump_configure(dump_config.dump_device))) {
- printk("Stage 2 dump configure failed\n");
- return err;
- }
- }
-
- dumper_reset();
- dump_dev = dump_config.dumper->dev;
- /* write out the dump */
- err = dump_generic_execute(NULL, NULL);
-
- dump_saved_config = NULL;
-
- if (!dump_oncpu) {
- dump_unconfigure();
- }
-
- return err;
-
- } else {
- /* no dump to write out */
- printk("no dumper from previous boot \n");
- return 0;
- }
-}
-
-extern void dump_mem_markpages(struct dump_memdev *);
-
-int dump_switchover_stage(void)
-{
- int ret = 0;
-
- /* trigger stage 2 rightaway - in real life would be after soft-boot */
- /* dump_saved_config would be a boot param */
- saved_dump_memdev = dump_memdev;
- saved_dumper = dump_config.dumper;
- ret = dump_init_stage2(dump_saved_config);
- dump_memdev = saved_dump_memdev;
- dump_config.dumper = saved_dumper;
- return ret;
-}
-
-int dump_activate_softboot(void)
-{
- int err = 0;
-#ifdef CONFIG_KEXEC
- int num_cpus_online = 0;
- struct kimage *image;
-#endif
-
- /* temporary - switchover to writeout previously saved dump */
-#ifndef CONFIG_KEXEC
- err = dump_switchover_stage(); /* non-disruptive case */
- if (dump_oncpu)
- dump_config.dumper = &dumper_stage1; /* set things back */
-
- return err;
-#else
-
- dump_silence_level = DUMP_HALT_CPUS;
- /* wait till we become the only cpu */
- /* maybe by checking for online cpus ? */
-
- while((num_cpus_online = num_online_cpus()) > 1);
-
- /* now call into kexec */
-
- image = xchg(&kexec_image, 0);
- if (image) {
- mdelay(panic_timeout*1000);
- machine_kexec(image);
- }
-
-
- /* TBD/Fixme:
- * * should we call reboot notifiers ? inappropriate for panic ?
- * * what about device_shutdown() ?
- * * is explicit bus master disabling needed or can we do that
- * * through driverfs ?
- * */
- return 0;
-#endif
-}
-
-/* --- DUMP SCHEME ROUTINES --- */
-
-static inline int dump_buf_pending(struct dumper *dumper)
-{
- return (dumper->curr_buf - dumper->dump_buf);
-}
-
-/* Invoked during stage 1 of soft-reboot based dumping */
-int dump_overlay_sequencer(void)
-{
- struct dump_data_filter *filter = dump_config.dumper->filter;
- struct dump_data_filter *filter2 = dumper_stage2.filter;
- int pass = 0, err = 0, save = 0;
- int (*action)(unsigned long, unsigned long);
-
- /* Make sure gzip compression is being used */
- if (dump_config.dumper->compress->compress_type != DUMP_COMPRESS_GZIP) {
- printk(" Please set GZIP compression \n");
- return -EINVAL;
- }
-
- /* start filling in dump data right after the header */
- dump_config.dumper->curr_offset =
- PAGE_ALIGN(dump_config.dumper->header_len);
-
- /* Locate the last pass */
- for (;filter->selector; filter++, pass++);
-
- /*
- * Start from the end backwards: overlay involves a reverse
- * ordering of passes, since less critical pages are more
- * likely to be reusable as scratch space once we are through
- * with them.
- */
- for (--pass, --filter; pass >= 0; pass--, filter--)
- {
- /* Assumes passes are exclusive (even across dumpers) */
- /* Requires care when coding the selection functions */
- if ((save = filter->level_mask & dump_config.level))
- action = dump_save_data;
- else
- action = dump_skip_data;
-
- /* Remember the offset where this pass started */
- /* The second stage dumper would use this */
- if (dump_buf_pending(dump_config.dumper) & (PAGE_SIZE - 1)) {
- pr_debug("Starting pass %d with pending data\n", pass);
- pr_debug("filling dummy data to page-align it\n");
- dump_config.dumper->curr_buf = (void *)PAGE_ALIGN(
- (unsigned long)dump_config.dumper->curr_buf);
- }
-
- filter2[pass].start[0] = dump_config.dumper->curr_offset
- + dump_buf_pending(dump_config.dumper);
-
- err = dump_iterator(pass, action, filter);
-
- filter2[pass].end[0] = dump_config.dumper->curr_offset
- + dump_buf_pending(dump_config.dumper);
- filter2[pass].num_mbanks = 1;
-
- if (err < 0) {
- printk("dump_overlay_seq: failure %d in pass %d\n",
- err, pass);
- break;
- }
- printk("\n %d overlay pages %s of %d each in pass %d\n",
- err, save ? "saved" : "skipped", DUMP_PAGE_SIZE, pass);
- }
-
- return err;
-}
-
-/* from dump_memdev.c */
-extern struct page *dump_mem_lookup(struct dump_memdev *dev, unsigned long loc);
-extern struct page *dump_mem_next_page(struct dump_memdev *dev);
-
-static inline struct page *dump_get_saved_page(loff_t loc)
-{
- return (dump_mem_lookup(dump_memdev, loc >> PAGE_SHIFT));
-}
-
-static inline struct page *dump_next_saved_page(void)
-{
- return (dump_mem_next_page(dump_memdev));
-}
-
-/*
- * Iterates over list of saved dump pages. Invoked during second stage of
- * soft boot dumping
- *
- * Observation: If additional selection is desired at this stage then
- * a different iterator could be written which would advance
- * to the next page header everytime instead of blindly picking up
- * the data. In such a case loc would be interpreted differently.
- * At this moment however a blind pass seems sufficient, cleaner and
- * faster.
- */
-int dump_saved_data_iterator(int pass, int (*action)(unsigned long,
- unsigned long), struct dump_data_filter *filter)
-{
- loff_t loc, end;
- struct page *page;
- unsigned long count = 0;
- int i, err = 0;
- unsigned long sz;
-
- for (i = 0; i < filter->num_mbanks; i++) {
- loc = filter->start[i];
- end = filter->end[i];
- printk("pass %d, start off 0x%llx end offset 0x%llx\n", pass,
- loc, end);
-
- /* loc will get treated as logical offset into stage 1 */
- page = dump_get_saved_page(loc);
-
- for (; loc < end; loc += PAGE_SIZE) {
- dump_config.dumper->curr_loc = loc;
- if (!page) {
- printk("no more saved data for pass %d\n",
- pass);
- break;
- }
- sz = (loc + PAGE_SIZE > end) ? end - loc : PAGE_SIZE;
-
- if (page && filter->selector(pass, (unsigned long)page,
- PAGE_SIZE)) {
- pr_debug("mem offset 0x%llx\n", loc);
- if ((err = action((unsigned long)page, sz)))
- break;
- else
- count++;
- /* clear the contents of page */
- /* fixme: consider using KM_DUMP instead */
- clear_highpage(page);
-
- }
- page = dump_next_saved_page();
- }
- }
-
- return err ? err : count;
-}
-
-static inline int dump_overlay_pages_done(struct page *page, int nr)
-{
- int ret=0;
-
- for (; nr ; page++, nr--) {
- if (dump_check_and_free_page(dump_memdev, page))
- ret++;
- }
- return ret;
-}
-
-int dump_overlay_save_data(unsigned long loc, unsigned long len)
-{
- int err = 0;
- struct page *page = (struct page *)loc;
- static unsigned long cnt = 0;
-
- if ((err = dump_generic_save_data(loc, len)))
- return err;
-
- if (dump_overlay_pages_done(page, len >> PAGE_SHIFT)) {
- cnt++;
- if (!(cnt & 0x7f))
- pr_debug("released page 0x%lx\n", page_to_pfn(page));
- }
-
- return err;
-}
-
-
-int dump_overlay_skip_data(unsigned long loc, unsigned long len)
-{
- struct page *page = (struct page *)loc;
-
- dump_overlay_pages_done(page, len >> PAGE_SHIFT);
- return 0;
-}
-
-int dump_overlay_resume(void)
-{
- int err = 0;
-
- /*
- * switch to stage 2 dumper, save dump_config_block
- * and then trigger a soft-boot
- */
- dumper_stage2.header_len = dump_config.dumper->header_len;
- dump_config.dumper = &dumper_stage2;
- if ((err = dump_save_config(dump_saved_config)))
- return err;
-
- dump_dev = dump_config.dumper->dev;
-
-#ifdef CONFIG_KEXEC
- /* If we are doing a disruptive dump, activate softboot now */
- if((panic_timeout > 0) && (!(dump_config.flags & DUMP_FLAGS_NONDISRUPT)))
- err = dump_activate_softboot();
-#endif
-
- return err;
- err = dump_switchover_stage(); /* plugs into soft boot mechanism */
- dump_config.dumper = &dumper_stage1; /* set things back */
- return err;
-}
-
-int dump_overlay_configure(unsigned long devid)
-{
- struct dump_dev *dev;
- struct dump_config_block *saved_config = dump_saved_config;
- int err = 0;
-
- /* If there is a previously saved dump, write it out first */
- if (saved_config) {
- printk("Processing old dump pending writeout\n");
- err = dump_switchover_stage();
- if (err) {
- printk("failed to writeout saved dump\n");
- return err;
- }
- dump_free_mem(saved_config); /* testing only: not after boot */
- }
-
- dev = dumper_stage2.dev = dump_config.dumper->dev;
- /* From here on the intermediate dump target is memory-only */
- dump_dev = dump_config.dumper->dev = &dump_memdev->ddev;
- if ((err = dump_generic_configure(0))) {
- printk("dump generic configure failed: err %d\n", err);
- return err;
- }
- /* temporary */
- dumper_stage2.dump_buf = dump_config.dumper->dump_buf;
-
- /* Sanity check on the actual target dump device */
- if (!dev || (err = dev->ops->open(dev, devid))) {
- return err;
- }
- /* TBD: should we release the target if this is soft-boot only ? */
-
- /* alloc a dump config block area to save across reboot */
- if (!(dump_saved_config = dump_alloc_mem(sizeof(struct
- dump_config_block)))) {
- printk("dump config block alloc failed\n");
- /* undo configure */
- dump_generic_unconfigure();
- return -ENOMEM;
- }
- dump_config.dump_addr = (unsigned long)dump_saved_config;
- printk("Dump config block of size %d set up at 0x%lx\n",
- sizeof(*dump_saved_config), (unsigned long)dump_saved_config);
- return 0;
-}
-
-int dump_overlay_unconfigure(void)
-{
- struct dump_dev *dev = dumper_stage2.dev;
- int err = 0;
-
- pr_debug("dump_overlay_unconfigure\n");
- /* Close the secondary device */
- dev->ops->release(dev);
- pr_debug("released secondary device\n");
-
- err = dump_generic_unconfigure();
- pr_debug("Unconfigured generic portions\n");
- dump_free_mem(dump_saved_config);
- dump_saved_config = NULL;
- pr_debug("Freed saved config block\n");
- dump_dev = dump_config.dumper->dev = dumper_stage2.dev;
-
- printk("Unconfigured overlay dumper\n");
- return err;
-}
-
-int dump_staged_unconfigure(void)
-{
- int err = 0;
- struct dump_config_block *saved_config = dump_saved_config;
- struct dump_dev *dev;
-
- pr_debug("dump_staged_unconfigure\n");
- err = dump_generic_unconfigure();
-
- /* now check if there is a saved dump waiting to be written out */
- if (saved_config) {
- printk("Processing saved dump pending writeout\n");
- if ((err = dump_switchover_stage())) {
- printk("Error in commiting saved dump at 0x%lx\n",
- (unsigned long)saved_config);
- printk("Old dump may hog memory\n");
- } else {
- dump_free_mem(saved_config);
- pr_debug("Freed saved config block\n");
- }
- dump_saved_config = NULL;
- } else {
- dev = &dump_memdev->ddev;
- dev->ops->release(dev);
- }
- printk("Unconfigured second stage dumper\n");
-
- return 0;
-}
-
-/* ----- PASSTHRU FILTER ROUTINE --------- */
-
-/* transparent - passes everything through */
-int dump_passthru_filter(int pass, unsigned long loc, unsigned long sz)
-{
- return 1;
-}
-
-/* ----- PASSTRU FORMAT ROUTINES ---- */
-
-
-int dump_passthru_configure_header(const char *panic_str, const struct pt_regs *regs)
-{
- dump_config.dumper->header_dirty++;
- return 0;
-}
-
-/* Copies bytes of data from page(s) to the specified buffer */
-int dump_copy_pages(void *buf, struct page *page, unsigned long sz)
-{
- unsigned long len = 0, bytes;
- void *addr;
-
- while (len < sz) {
- addr = kmap_atomic(page, KM_DUMP);
- bytes = (sz > len + PAGE_SIZE) ? PAGE_SIZE : sz - len;
- memcpy(buf, addr, bytes);
- kunmap_atomic(addr, KM_DUMP);
- buf += bytes;
- len += bytes;
- page++;
- }
- /* memset(dump_config.dumper->curr_buf, 0x57, len); temporary */
-
- return sz - len;
-}
-
-int dump_passthru_update_header(void)
-{
- long len = dump_config.dumper->header_len;
- struct page *page;
- void *buf = dump_config.dumper->dump_buf;
- int err = 0;
-
- if (!dump_config.dumper->header_dirty)
- return 0;
-
- pr_debug("Copying header of size %ld bytes from memory\n", len);
- if (len > DUMP_BUFFER_SIZE)
- return -E2BIG;
-
- page = dump_mem_lookup(dump_memdev, 0);
- for (; (len > 0) && page; buf += PAGE_SIZE, len -= PAGE_SIZE) {
- if ((err = dump_copy_pages(buf, page, PAGE_SIZE)))
- return err;
- page = dump_mem_next_page(dump_memdev);
- }
- if (len > 0) {
- printk("Incomplete header saved in mem\n");
- return -ENOENT;
- }
-
- if ((err = dump_dev_seek(0))) {
- printk("Unable to seek to dump header offset\n");
- return err;
- }
- err = dump_ll_write(dump_config.dumper->dump_buf,
- buf - dump_config.dumper->dump_buf);
- if (err < dump_config.dumper->header_len)
- return (err < 0) ? err : -ENOSPC;
-
- dump_config.dumper->header_dirty = 0;
- return 0;
-}
-
-static loff_t next_dph_offset = 0;
-
-static int dph_valid(struct __dump_page *dph)
-{
- if ((dph->dp_address & (PAGE_SIZE - 1)) || (dph->dp_flags
- > DUMP_DH_COMPRESSED) || (!dph->dp_flags) ||
- (dph->dp_size > PAGE_SIZE)) {
- printk("dp->address = 0x%llx, dp->size = 0x%x, dp->flag = 0x%x\n",
- dph->dp_address, dph->dp_size, dph->dp_flags);
- return 0;
- }
- return 1;
-}
-
-int dump_verify_lcrash_data(void *buf, unsigned long sz)
-{
- struct __dump_page *dph;
-
- /* sanity check for page headers */
- while (next_dph_offset + sizeof(*dph) < sz) {
- dph = (struct __dump_page *)(buf + next_dph_offset);
- if (!dph_valid(dph)) {
- printk("Invalid page hdr at offset 0x%llx\n",
- next_dph_offset);
- return -EINVAL;
- }
- next_dph_offset += dph->dp_size + sizeof(*dph);
- }
-
- next_dph_offset -= sz;
- return 0;
-}
-
-/*
- * TBD/Later: Consider avoiding the copy by using a scatter/gather
- * vector representation for the dump buffer
- */
-int dump_passthru_add_data(unsigned long loc, unsigned long sz)
-{
- struct page *page = (struct page *)loc;
- void *buf = dump_config.dumper->curr_buf;
- int err = 0;
-
- if ((err = dump_copy_pages(buf, page, sz))) {
- printk("dump_copy_pages failed");
- return err;
- }
-
- if ((err = dump_verify_lcrash_data(buf, sz))) {
- printk("dump_verify_lcrash_data failed\n");
- printk("Invalid data for pfn 0x%lx\n", page_to_pfn(page));
- printk("Page flags 0x%lx\n", page->flags);
- printk("Page count 0x%x\n", atomic_read(&page->count));
- return err;
- }
-
- dump_config.dumper->curr_buf = buf + sz;
-
- return 0;
-}
-
-
-/* Stage 1 dumper: Saves compressed dump in memory and soft-boots system */
-
-/* Scheme to overlay saved data in memory for writeout after a soft-boot */
-struct dump_scheme_ops dump_scheme_overlay_ops = {
- .configure = dump_overlay_configure,
- .unconfigure = dump_overlay_unconfigure,
- .sequencer = dump_overlay_sequencer,
- .iterator = dump_page_iterator,
- .save_data = dump_overlay_save_data,
- .skip_data = dump_overlay_skip_data,
- .write_buffer = dump_generic_write_buffer
-};
-
-struct dump_scheme dump_scheme_overlay = {
- .name = "overlay",
- .ops = &dump_scheme_overlay_ops
-};
-
-
-/* Stage 1 must use a good compression scheme - default to gzip */
-extern struct __dump_compress dump_gzip_compression;
-
-struct dumper dumper_stage1 = {
- .name = "stage1",
- .scheme = &dump_scheme_overlay,
- .fmt = &dump_fmt_lcrash,
- .compress = &dump_none_compression, /* needs to be gzip */
- .filter = dump_filter_table,
- .dev = NULL,
-};
-
-/* Stage 2 dumper: Activated after softboot to write out saved dump to device */
-
-/* Formatter that transfers data as is (transparent) w/o further conversion */
-struct dump_fmt_ops dump_fmt_passthru_ops = {
- .configure_header = dump_passthru_configure_header,
- .update_header = dump_passthru_update_header,
- .save_context = NULL, /* unused */
- .add_data = dump_passthru_add_data,
- .update_end_marker = dump_lcrash_update_end_marker
-};
-
-struct dump_fmt dump_fmt_passthru = {
- .name = "passthru",
- .ops = &dump_fmt_passthru_ops
-};
-
-/* Filter that simply passes along any data within the range (transparent)*/
-/* Note: The start and end ranges in the table are filled in at run-time */
-
-extern int dump_filter_none(int pass, unsigned long loc, unsigned long sz);
-
-struct dump_data_filter dump_passthru_filtertable[MAX_PASSES] = {
-{.name = "passkern", .selector = dump_passthru_filter,
- .level_mask = DUMP_MASK_KERN },
-{.name = "passuser", .selector = dump_passthru_filter,
- .level_mask = DUMP_MASK_USED },
-{.name = "passunused", .selector = dump_passthru_filter,
- .level_mask = DUMP_MASK_UNUSED },
-{.name = "none", .selector = dump_filter_none,
- .level_mask = DUMP_MASK_REST }
-};
-
-
-/* Scheme to handle data staged / preserved across a soft-boot */
-struct dump_scheme_ops dump_scheme_staged_ops = {
- .configure = dump_generic_configure,
- .unconfigure = dump_staged_unconfigure,
- .sequencer = dump_generic_sequencer,
- .iterator = dump_saved_data_iterator,
- .save_data = dump_generic_save_data,
- .skip_data = dump_generic_skip_data,
- .write_buffer = dump_generic_write_buffer
-};
-
-struct dump_scheme dump_scheme_staged = {
- .name = "staged",
- .ops = &dump_scheme_staged_ops
-};
-
-/* The stage 2 dumper comprising all these */
-struct dumper dumper_stage2 = {
- .name = "stage2",
- .scheme = &dump_scheme_staged,
- .fmt = &dump_fmt_passthru,
- .compress = &dump_none_compression,
- .filter = dump_passthru_filtertable,
- .dev = NULL,
-};
-
+++ /dev/null
-/*
- * Architecture specific (ppc64) functions for Linux crash dumps.
- *
- * Created by: Matt Robinson (yakker@sgi.com)
- *
- * Copyright 1999 Silicon Graphics, Inc. All rights reserved.
- *
- * 2.3 kernel modifications by: Matt D. Robinson (yakker@turbolinux.com)
- * Copyright 2000 TurboLinux, Inc. All rights reserved.
- * Copyright 2003, 2004 IBM Corporation
- *
- * This code is released under version 2 of the GNU GPL.
- */
-
-/*
- * The hooks for dumping the kernel virtual memory to disk are in this
- * file. Any time a modification is made to the virtual memory mechanism,
- * these routines must be changed to use the new mechanisms.
- */
-#include <linux/types.h>
-#include <linux/fs.h>
-#include <linux/dump.h>
-#include <linux/mm.h>
-#include <linux/vmalloc.h>
-#include <linux/delay.h>
-#include <linux/syscalls.h>
-#include <linux/ioctl32.h>
-#include <asm/hardirq.h>
-#include "dump_methods.h"
-#include <linux/irq.h>
-#include <asm/machdep.h>
-#include <asm/uaccess.h>
-#include <asm/irq.h>
-#include <asm/page.h>
-#if defined(CONFIG_KDB) && !defined(CONFIG_DUMP_MODULE)
-#include <linux/kdb.h>
-#endif
-
-extern cpumask_t irq_affinity[];
-
-static cpumask_t saved_affinity[NR_IRQS];
-
-static __s32 saved_irq_count; /* saved preempt_count() flags */
-
-static int alloc_dha_stack(void)
-{
- int i;
- void *ptr;
-
- if (dump_header_asm.dha_stack[0])
- return 0;
-
- ptr = (void *)vmalloc(THREAD_SIZE * num_online_cpus());
- if (!ptr) {
- return -ENOMEM;
- }
-
- for (i = 0; i < num_online_cpus(); i++) {
- dump_header_asm.dha_stack[i] =
- (uint64_t)((unsigned long)ptr + (i * THREAD_SIZE));
- }
- return 0;
-}
-
-static int free_dha_stack(void)
-{
- if (dump_header_asm.dha_stack[0]) {
- vfree((void*)dump_header_asm.dha_stack[0]);
- dump_header_asm.dha_stack[0] = 0;
- }
- return 0;
-}
-#ifdef CONFIG_SMP
-static int dump_expect_ipi[NR_CPUS];
-static atomic_t waiting_for_dump_ipi;
-
-extern void stop_this_cpu(void *);
-static int
-dump_ipi_handler(struct pt_regs *regs)
-{
- int cpu = smp_processor_id();
-
- if (!dump_expect_ipi[cpu])
- return 0;
- dump_save_this_cpu(regs);
- atomic_dec(&waiting_for_dump_ipi);
-
- level_changed:
- switch (dump_silence_level) {
- case DUMP_HARD_SPIN_CPUS: /* Spin until dump is complete */
- while (dump_oncpu) {
- barrier(); /* paranoia */
- if (dump_silence_level != DUMP_HARD_SPIN_CPUS)
- goto level_changed;
- cpu_relax(); /* kill time nicely */
- }
- break;
-
- case DUMP_HALT_CPUS: /* Execute halt */
- stop_this_cpu(NULL);
- break;
-
- case DUMP_SOFT_SPIN_CPUS:
- /* Mark the task so it spins in schedule */
- set_tsk_thread_flag(current, TIF_NEED_RESCHED);
- break;
- }
-
- return 1;
-}
-
-/* save registers on other processors
- * If the other cpus don't respond we simply do not get their states.
- */
-void
-__dump_save_other_cpus(void)
-{
- int i, cpu = smp_processor_id();
- int other_cpus = num_online_cpus()-1;
-
- if (other_cpus > 0) {
- atomic_set(&waiting_for_dump_ipi, other_cpus);
- for (i = 0; i < NR_CPUS; i++)
- dump_expect_ipi[i] = (i != cpu && cpu_online(i));
-
- dump_send_ipi(dump_ipi_handler);
- /*
- * may be we dont need to wait for NMI to be processed.
- * just write out the header at the end of dumping, if
- * this IPI is not processed until then, there probably
- * is a problem and we just fail to capture state of
- * other cpus.
- */
- while (atomic_read(&waiting_for_dump_ipi) > 0) {
- cpu_relax();
- }
- dump_send_ipi(NULL); /* clear handler */
- }
-}
-
-/*
- * Restore old irq affinities.
- */
-static void
-__dump_reset_irq_affinity(void)
-{
- int i;
- irq_desc_t *irq_d;
-
- memcpy(irq_affinity, saved_affinity, NR_IRQS * sizeof(unsigned long));
-
- for_each_irq(i) {
- irq_d = get_irq_desc(i);
- if (irq_d->handler == NULL) {
- continue;
- }
- if (irq_d->handler->set_affinity != NULL) {
- irq_d->handler->set_affinity(i, saved_affinity[i]);
- }
- }
-}
-
-/*
- * Routine to save the old irq affinities and change affinities of all irqs to
- * the dumping cpu.
- *
- * NB: Need to be expanded to multiple nodes.
- */
-static void
-__dump_set_irq_affinity(void)
-{
- int i;
- cpumask_t cpu = CPU_MASK_NONE;
- irq_desc_t *irq_d;
-
- cpu_set(smp_processor_id(), cpu);
-
- memcpy(saved_affinity, irq_affinity, NR_IRQS * sizeof(unsigned long));
-
- for_each_irq(i) {
- irq_d = get_irq_desc(i);
- if (irq_d->handler == NULL) {
- continue;
- }
- irq_affinity[i] = cpu;
- if (irq_d->handler->set_affinity != NULL) {
- irq_d->handler->set_affinity(i, irq_affinity[i]);
- }
- }
-}
-#else /* !CONFIG_SMP */
-#define __dump_save_other_cpus() do { } while (0)
-#define __dump_set_irq_affinity() do { } while (0)
-#define __dump_reset_irq_affinity() do { } while (0)
-#endif /* !CONFIG_SMP */
-
-void
-__dump_save_regs(struct pt_regs *dest_regs, const struct pt_regs *regs)
-{
- if (regs) {
- memcpy(dest_regs, regs, sizeof(struct pt_regs));
- }
-}
-
-/*
- * Name: __dump_configure_header()
- * Func: Configure the dump header with all proper values.
- */
-int
-__dump_configure_header(const struct pt_regs *regs)
-{
- return (0);
-}
-
-#if defined(CONFIG_KDB) && !defined(CONFIG_DUMP_MODULE)
-int
-kdb_sysdump(int argc, const char **argv, const char **envp, struct pt_regs *regs)
-{
- kdb_printf("Dumping to disk...\n");
- dump("dump from kdb", regs);
- kdb_printf("Dump Complete\n");
- return 0;
-}
-#endif
-
-static int dw_long(unsigned int fd, unsigned int cmd, unsigned long arg,
- struct file *f)
-{
- mm_segment_t old_fs = get_fs();
- int err;
- unsigned long val;
-
- set_fs (KERNEL_DS);
- err = sys_ioctl(fd, cmd, (unsigned long)&val);
- set_fs (old_fs);
- if (!err && put_user((unsigned int) val, (u32 *)arg))
- return -EFAULT;
- return err;
-}
-
-/*
- * Name: __dump_init()
- * Func: Initialize the dumping routine process. This is in case
- * it's necessary in the future.
- */
-void
-__dump_init(uint64_t local_memory_start)
-{
- int ret;
-
- ret = register_ioctl32_conversion(DIOSDUMPDEV, NULL);
- ret |= register_ioctl32_conversion(DIOGDUMPDEV, NULL);
- ret |= register_ioctl32_conversion(DIOSDUMPLEVEL, NULL);
- ret |= register_ioctl32_conversion(DIOGDUMPLEVEL, dw_long);
- ret |= register_ioctl32_conversion(DIOSDUMPFLAGS, NULL);
- ret |= register_ioctl32_conversion(DIOGDUMPFLAGS, dw_long);
- ret |= register_ioctl32_conversion(DIOSDUMPCOMPRESS, NULL);
- ret |= register_ioctl32_conversion(DIOGDUMPCOMPRESS, dw_long);
- ret |= register_ioctl32_conversion(DIOSTARGETIP, NULL);
- ret |= register_ioctl32_conversion(DIOGTARGETIP, NULL);
- ret |= register_ioctl32_conversion(DIOSTARGETPORT, NULL);
- ret |= register_ioctl32_conversion(DIOGTARGETPORT, NULL);
- ret |= register_ioctl32_conversion(DIOSSOURCEPORT, NULL);
- ret |= register_ioctl32_conversion(DIOGSOURCEPORT, NULL);
- ret |= register_ioctl32_conversion(DIOSETHADDR, NULL);
- ret |= register_ioctl32_conversion(DIOGETHADDR, NULL);
- ret |= register_ioctl32_conversion(DIOGDUMPOKAY, dw_long);
- ret |= register_ioctl32_conversion(DIOSDUMPTAKE, NULL);
- if (ret) {
- printk(KERN_ERR "LKCD: registering ioctl32 translations failed\n");
- }
-
-#if defined(FIXME) && defined(CONFIG_KDB) && !defined(CONFIG_DUMP_MODULE)
- /* This won't currently work because interrupts are off in kdb
- * and the dump process doesn't understand how to recover.
- */
- /* ToDo: add a command to query/set dump configuration */
- kdb_register_repeat("sysdump", kdb_sysdump, "", "use lkcd to dump the system to disk (if configured)", 0, KDB_REPEAT_NONE);
-#endif
-
- /* return */
- return;
-}
-
-/*
- * Name: __dump_open()
- * Func: Open the dump device (architecture specific). This is in
- * case it's necessary in the future.
- */
-void
-__dump_open(void)
-{
- alloc_dha_stack();
-}
-
-
-/*
- * Name: __dump_cleanup()
- * Func: Free any architecture specific data structures. This is called
- * when the dump module is being removed.
- */
-void
-__dump_cleanup(void)
-{
- int ret;
-
- ret = unregister_ioctl32_conversion(DIOSDUMPDEV);
- ret |= unregister_ioctl32_conversion(DIOGDUMPDEV);
- ret |= unregister_ioctl32_conversion(DIOSDUMPLEVEL);
- ret |= unregister_ioctl32_conversion(DIOGDUMPLEVEL);
- ret |= unregister_ioctl32_conversion(DIOSDUMPFLAGS);
- ret |= unregister_ioctl32_conversion(DIOGDUMPFLAGS);
- ret |= unregister_ioctl32_conversion(DIOSDUMPCOMPRESS);
- ret |= unregister_ioctl32_conversion(DIOGDUMPCOMPRESS);
- ret |= unregister_ioctl32_conversion(DIOSTARGETIP);
- ret |= unregister_ioctl32_conversion(DIOGTARGETIP);
- ret |= unregister_ioctl32_conversion(DIOSTARGETPORT);
- ret |= unregister_ioctl32_conversion(DIOGTARGETPORT);
- ret |= unregister_ioctl32_conversion(DIOSSOURCEPORT);
- ret |= unregister_ioctl32_conversion(DIOGSOURCEPORT);
- ret |= unregister_ioctl32_conversion(DIOSETHADDR);
- ret |= unregister_ioctl32_conversion(DIOGETHADDR);
- ret |= unregister_ioctl32_conversion(DIOGDUMPOKAY);
- ret |= unregister_ioctl32_conversion(DIOSDUMPTAKE);
- if (ret) {
- printk(KERN_ERR "LKCD: Unregistering ioctl32 translations failed\n");
- }
- free_dha_stack();
-}
-
-/*
- * Kludge - dump from interrupt context is unreliable (Fixme)
- *
- * We do this so that softirqs initiated for dump i/o
- * get processed and we don't hang while waiting for i/o
- * to complete or in any irq synchronization attempt.
- *
- * This is not quite legal of course, as it has the side
- * effect of making all interrupts & softirqs triggered
- * while dump is in progress complete before currently
- * pending softirqs and the currently executing interrupt
- * code.
- */
-static inline void
-irq_bh_save(void)
-{
- saved_irq_count = irq_count();
- preempt_count() &= ~(HARDIRQ_MASK|SOFTIRQ_MASK);
-}
-
-static inline void
-irq_bh_restore(void)
-{
- preempt_count() |= saved_irq_count;
-}
-
-/*
- * Name: __dump_irq_enable
- * Func: Reset system so interrupts are enabled.
- * This is used for dump methods that require interrupts
- * Eventually, all methods will have interrupts disabled
- * and this code can be removed.
- *
- * Change irq affinities
- * Re-enable interrupts
- */
-int
-__dump_irq_enable(void)
-{
- __dump_set_irq_affinity();
- irq_bh_save();
- local_irq_enable();
- return 0;
-}
-
-/*
- * Name: __dump_irq_restore
- * Func: Resume the system state in an architecture-specific way.
- */
-void
-__dump_irq_restore(void)
-{
- local_irq_disable();
- __dump_reset_irq_affinity();
- irq_bh_restore();
-}
-
-#if 0
-/* Cheap progress hack. It estimates pages to write and
- * assumes all pages will go -- so it may get way off.
- * As the progress is not displayed for other architectures, not used at this
- * moment.
- */
-void
-__dump_progress_add_page(void)
-{
- unsigned long total_pages = nr_free_pages() + nr_inactive_pages + nr_active_pages;
- unsigned int percent = (dump_header.dh_num_dump_pages * 100) / total_pages;
- char buf[30];
-
- if (percent > last_percent && percent <= 100) {
- sprintf(buf, "Dump %3d%% ", percent);
- ppc64_dump_msg(0x2, buf);
- last_percent = percent;
- }
-
-}
-#endif
-
-extern int dump_page_is_ram(unsigned long);
-/*
- * Name: __dump_page_valid()
- * Func: Check if page is valid to dump.
- */
-int
-__dump_page_valid(unsigned long index)
-{
- if (!pfn_valid(index))
- return 0;
-
- return dump_page_is_ram(index);
-}
-
-/*
- * Name: manual_handle_crashdump()
- * Func: Interface for the lkcd dump command. Calls dump_execute()
- */
-int
-manual_handle_crashdump(void)
-{
- struct pt_regs regs;
-
- get_current_regs(®s);
- dump_execute("manual", ®s);
- return 0;
-}
+++ /dev/null
-/*
- * RLE Compression functions for kernel crash dumps.
- *
- * Created by: Matt Robinson (yakker@sourceforge.net)
- * Copyright 2001 Matt D. Robinson. All rights reserved.
- *
- * This code is released under version 2 of the GNU GPL.
- */
-
-/* header files */
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/fs.h>
-#include <linux/file.h>
-#include <linux/init.h>
-#include <linux/dump.h>
-
-/*
- * Name: dump_compress_rle()
- * Func: Compress a DUMP_PAGE_SIZE (hardware) page down to something more
- * reasonable, if possible. This is the same routine we use in IRIX.
- */
-static u16
-dump_compress_rle(const u8 *old, u16 oldsize, u8 *new, u16 newsize)
-{
- u16 ri, wi, count = 0;
- u_char value = 0, cur_byte;
-
- /*
- * If the block should happen to "compress" to larger than the
- * buffer size, allocate a larger one and change cur_buf_size.
- */
-
- wi = ri = 0;
-
- while (ri < oldsize) {
- if (!ri) {
- cur_byte = value = old[ri];
- count = 0;
- } else {
- if (count == 255) {
- if (wi + 3 > oldsize) {
- return oldsize;
- }
- new[wi++] = 0;
- new[wi++] = count;
- new[wi++] = value;
- value = cur_byte = old[ri];
- count = 0;
- } else {
- if ((cur_byte = old[ri]) == value) {
- count++;
- } else {
- if (count > 1) {
- if (wi + 3 > oldsize) {
- return oldsize;
- }
- new[wi++] = 0;
- new[wi++] = count;
- new[wi++] = value;
- } else if (count == 1) {
- if (value == 0) {
- if (wi + 3 > oldsize) {
- return oldsize;
- }
- new[wi++] = 0;
- new[wi++] = 1;
- new[wi++] = 0;
- } else {
- if (wi + 2 > oldsize) {
- return oldsize;
- }
- new[wi++] = value;
- new[wi++] = value;
- }
- } else { /* count == 0 */
- if (value == 0) {
- if (wi + 2 > oldsize) {
- return oldsize;
- }
- new[wi++] = value;
- new[wi++] = value;
- } else {
- if (wi + 1 > oldsize) {
- return oldsize;
- }
- new[wi++] = value;
- }
- } /* if count > 1 */
-
- value = cur_byte;
- count = 0;
-
- } /* if byte == value */
-
- } /* if count == 255 */
-
- } /* if ri == 0 */
- ri++;
-
- }
- if (count > 1) {
- if (wi + 3 > oldsize) {
- return oldsize;
- }
- new[wi++] = 0;
- new[wi++] = count;
- new[wi++] = value;
- } else if (count == 1) {
- if (value == 0) {
- if (wi + 3 > oldsize)
- return oldsize;
- new[wi++] = 0;
- new[wi++] = 1;
- new[wi++] = 0;
- } else {
- if (wi + 2 > oldsize)
- return oldsize;
- new[wi++] = value;
- new[wi++] = value;
- }
- } else { /* count == 0 */
- if (value == 0) {
- if (wi + 2 > oldsize)
- return oldsize;
- new[wi++] = value;
- new[wi++] = value;
- } else {
- if (wi + 1 > oldsize)
- return oldsize;
- new[wi++] = value;
- }
- } /* if count > 1 */
-
- value = cur_byte;
- count = 0;
- return wi;
-}
-
-/* setup the rle compression functionality */
-static struct __dump_compress dump_rle_compression = {
- .compress_type = DUMP_COMPRESS_RLE,
- .compress_func = dump_compress_rle,
- .compress_name = "RLE",
-};
-
-/*
- * Name: dump_compress_rle_init()
- * Func: Initialize rle compression for dumping.
- */
-static int __init
-dump_compress_rle_init(void)
-{
- dump_register_compression(&dump_rle_compression);
- return 0;
-}
-
-/*
- * Name: dump_compress_rle_cleanup()
- * Func: Remove rle compression for dumping.
- */
-static void __exit
-dump_compress_rle_cleanup(void)
-{
- dump_unregister_compression(DUMP_COMPRESS_RLE);
-}
-
-/* module initialization */
-module_init(dump_compress_rle_init);
-module_exit(dump_compress_rle_cleanup);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("LKCD Development Team <lkcd-devel@lists.sourceforge.net>");
-MODULE_DESCRIPTION("RLE compression module for crash dump driver");
+++ /dev/null
-/*
- * Default single stage dump scheme methods
- *
- * Previously a part of dump_base.c
- *
- * Started: Oct 2002 - Suparna Bhattacharya <suparna@in.ibm.com>
- * Split and rewrote LKCD dump scheme to generic dump method
- * interfaces
- * Derived from original code created by
- * Matt Robinson <yakker@sourceforge.net>)
- *
- * Contributions from SGI, IBM, HP, MCL, and others.
- *
- * Copyright (C) 1999 - 2002 Silicon Graphics, Inc. All rights reserved.
- * Copyright (C) 2001 - 2002 Matt D. Robinson. All rights reserved.
- * Copyright (C) 2002 International Business Machines Corp.
- *
- * This code is released under version 2 of the GNU GPL.
- */
-
-/*
- * Implements the default dump scheme, i.e. single-stage gathering and
- * saving of dump data directly to the target device, which operates in
- * a push mode, where the dumping system decides what data it saves
- * taking into account pre-specified dump config options.
- *
- * Aside: The 2-stage dump scheme, where there is a soft-reset between
- * the gathering and saving phases, also reuses some of these
- * default routines (see dump_overlay.c)
- */
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/reboot.h>
-#include <linux/nmi.h>
-#include <linux/dump.h>
-#include "dump_methods.h"
-
-extern int panic_timeout; /* time before reboot */
-
-extern void dump_speedo(int);
-
-/* Default sequencer used during single stage dumping */
-/* Also invoked during stage 2 of soft-boot based dumping */
-int dump_generic_sequencer(void)
-{
- struct dump_data_filter *filter = dump_config.dumper->filter;
- int pass = 0, err = 0, save = 0;
- int (*action)(unsigned long, unsigned long);
-
- /*
- * We want to save the more critical data areas first in
- * case we run out of space, encounter i/o failures, or get
- * interrupted otherwise and have to give up midway
- * So, run through the passes in increasing order
- */
- for (;filter->selector; filter++, pass++)
- {
- /* Assumes passes are exclusive (even across dumpers) */
- /* Requires care when coding the selection functions */
- if ((save = filter->level_mask & dump_config.level))
- action = dump_save_data;
- else
- action = dump_skip_data;
-
- if ((err = dump_iterator(pass, action, filter)) < 0)
- break;
-
- printk("\n %d dump pages %s of %d each in pass %d\n",
- err, save ? "saved" : "skipped", DUMP_PAGE_SIZE, pass);
-
- }
-
- return (err < 0) ? err : 0;
-}
-
-static inline struct page *dump_get_page(loff_t loc)
-{
-
- unsigned long page_index = loc >> PAGE_SHIFT;
-
- /* todo: complete this to account for ia64/discontig mem */
- /* todo: and to check for validity, ram page, no i/o mem etc */
- /* need to use pfn/physaddr equiv of kern_addr_valid */
-
- /* Important:
- * On ARM/XScale system, the physical address starts from
- * PHYS_OFFSET, and it maybe the situation that PHYS_OFFSET != 0.
- * For example on Intel's PXA250, PHYS_OFFSET = 0xa0000000. And the
- * page index starts from PHYS_PFN_OFFSET. When configuring
- * filter, filter->start is assigned to 0 in dump_generic_configure.
- * Here we want to adjust it by adding PHYS_PFN_OFFSET to it!
- */
-#ifdef CONFIG_ARM
- page_index += PHYS_PFN_OFFSET;
-#endif
- if (__dump_page_valid(page_index))
- return pfn_to_page(page_index);
- else
- return NULL;
-
-}
-
-/* Default iterator: for singlestage and stage 1 of soft-boot dumping */
-/* Iterates over range of physical memory pages in DUMP_PAGE_SIZE increments */
-int dump_page_iterator(int pass, int (*action)(unsigned long, unsigned long),
- struct dump_data_filter *filter)
-{
- /* Todo : fix unit, type */
- loff_t loc, start, end;
- int i, count = 0, err = 0;
- struct page *page;
-
- /* Todo: Add membanks code */
- /* TBD: Check if we need to address DUMP_PAGE_SIZE < PAGE_SIZE */
-
- for (i = 0; i < filter->num_mbanks; i++) {
- start = filter->start[i];
- end = filter->end[i];
- for (loc = start; loc < end; loc += DUMP_PAGE_SIZE) {
- dump_config.dumper->curr_loc = loc;
- page = dump_get_page(loc);
- if (page && filter->selector(pass,
- (unsigned long) page, DUMP_PAGE_SIZE)) {
- if ((err = action((unsigned long)page,
- DUMP_PAGE_SIZE))) {
- printk("dump_page_iterator: err %d for "
- "loc 0x%llx, in pass %d\n",
- err, loc, pass);
- return err ? err : count;
- } else
- count++;
- }
- }
- }
-
- return err ? err : count;
-}
-
-/*
- * Base function that saves the selected block of data in the dump
- * Action taken when iterator decides that data needs to be saved
- */
-int dump_generic_save_data(unsigned long loc, unsigned long sz)
-{
- void *buf;
- void *dump_buf = dump_config.dumper->dump_buf;
- int left, bytes, ret;
-
- if ((ret = dump_add_data(loc, sz))) {
- return ret;
- }
- buf = dump_config.dumper->curr_buf;
-
- /* If we've filled up the buffer write it out */
- if ((left = buf - dump_buf) >= DUMP_BUFFER_SIZE) {
- bytes = dump_write_buffer(dump_buf, DUMP_BUFFER_SIZE);
- if (bytes < DUMP_BUFFER_SIZE) {
- printk("dump_write_buffer failed %d\n", bytes);
- return bytes ? -ENOSPC : bytes;
- }
-
- left -= bytes;
-
- /* -- A few chores to do from time to time -- */
- dump_config.dumper->count++;
-
- if (!(dump_config.dumper->count & 0x3f)) {
- /* Update the header every one in a while */
- memset((void *)dump_buf, 'b', DUMP_BUFFER_SIZE);
- if ((ret = dump_update_header()) < 0) {
- /* issue warning */
- return ret;
- }
- printk(".");
-
- touch_nmi_watchdog();
- } else if (!(dump_config.dumper->count & 0x7)) {
- /* Show progress so the user knows we aren't hung */
- dump_speedo(dump_config.dumper->count >> 3);
- }
- /* Todo: Touch/Refresh watchdog */
-
- /* --- Done with periodic chores -- */
-
- /*
- * extra bit of copying to simplify verification
- * in the second kernel boot based scheme
- */
- memcpy(dump_buf - DUMP_PAGE_SIZE, dump_buf +
- DUMP_BUFFER_SIZE - DUMP_PAGE_SIZE, DUMP_PAGE_SIZE);
-
- /* now adjust the leftover bits back to the top of the page */
- /* this case would not arise during stage 2 (passthru) */
- memset(dump_buf, 'z', DUMP_BUFFER_SIZE);
- if (left) {
- memcpy(dump_buf, dump_buf + DUMP_BUFFER_SIZE, left);
- }
- buf -= DUMP_BUFFER_SIZE;
- dump_config.dumper->curr_buf = buf;
- }
-
- return 0;
-}
-
-int dump_generic_skip_data(unsigned long loc, unsigned long sz)
-{
- /* dummy by default */
- return 0;
-}
-
-/*
- * Common low level routine to write a buffer to current dump device
- * Expects checks for space etc to have been taken care of by the caller
- * Operates serially at the moment for simplicity.
- * TBD/Todo: Consider batching for improved throughput
- */
-int dump_ll_write(void *buf, unsigned long len)
-{
- long transferred = 0, last_transfer = 0;
- int ret = 0;
-
- /* make sure device is ready */
- while ((ret = dump_dev_ready(NULL)) == -EAGAIN);
- if (ret < 0) {
- printk("dump_dev_ready failed !err %d\n", ret);
- return ret;
- }
-
- while (len) {
- if ((last_transfer = dump_dev_write(buf, len)) <= 0) {
- ret = last_transfer;
- printk("dump_dev_write failed !err %d\n",
- ret);
- break;
- }
- /* wait till complete */
- while ((ret = dump_dev_ready(buf)) == -EAGAIN)
- cpu_relax();
-
- if (ret < 0) {
- printk("i/o failed !err %d\n", ret);
- break;
- }
-
- len -= last_transfer;
- buf += last_transfer;
- transferred += last_transfer;
- }
- return (ret < 0) ? ret : transferred;
-}
-
-/* default writeout routine for single dump device */
-/* writes out the dump data ensuring enough space is left for the end marker */
-int dump_generic_write_buffer(void *buf, unsigned long len)
-{
- long written = 0;
- int err = 0;
-
- /* check for space */
- if ((err = dump_dev_seek(dump_config.dumper->curr_offset + len +
- 2*DUMP_BUFFER_SIZE)) < 0) {
- printk("dump_write_buffer: insuff space after offset 0x%llx\n",
- dump_config.dumper->curr_offset);
- return err;
- }
- /* alignment check would happen as a side effect of this */
- if ((err = dump_dev_seek(dump_config.dumper->curr_offset)) < 0)
- return err;
-
- written = dump_ll_write(buf, len);
-
- /* all or none */
-
- if (written < len)
- written = written ? -ENOSPC : written;
- else
- dump_config.dumper->curr_offset += len;
-
- return written;
-}
-
-int dump_generic_configure(unsigned long devid)
-{
- struct dump_dev *dev = dump_config.dumper->dev;
- struct dump_data_filter *filter;
- void *buf;
- int ret = 0;
-
- /* Allocate the dump buffer and initialize dumper state */
- /* Assume that we get aligned addresses */
- if (!(buf = dump_alloc_mem(DUMP_BUFFER_SIZE + 3 * DUMP_PAGE_SIZE)))
- return -ENOMEM;
-
- if ((unsigned long)buf & (PAGE_SIZE - 1)) {
- /* sanity check for page aligned address */
- dump_free_mem(buf);
- return -ENOMEM; /* fixme: better error code */
- }
-
- /* Initialize the rest of the fields */
- dump_config.dumper->dump_buf = buf + DUMP_PAGE_SIZE;
- dumper_reset();
-
- /* Open the dump device */
- if (!dev)
- return -ENODEV;
-
- if ((ret = dev->ops->open(dev, devid))) {
- return ret;
- }
-
- /* Initialise the memory ranges in the dump filter */
- for (filter = dump_config.dumper->filter ;filter->selector; filter++) {
- if (!filter->start[0] && !filter->end[0]) {
- pg_data_t *pgdat;
- int i = 0;
- for_each_pgdat(pgdat) {
- filter->start[i] =
- (loff_t)pgdat->node_start_pfn << PAGE_SHIFT;
- filter->end[i] =
- (loff_t)(pgdat->node_start_pfn + pgdat->node_spanned_pages) << PAGE_SHIFT;
- i++;
- }
- filter->num_mbanks = i;
- }
- }
-
- return 0;
-}
-
-int dump_generic_unconfigure(void)
-{
- struct dump_dev *dev = dump_config.dumper->dev;
- void *buf = dump_config.dumper->dump_buf;
- int ret = 0;
-
- pr_debug("Generic unconfigure\n");
- /* Close the dump device */
- if (dev && (ret = dev->ops->release(dev)))
- return ret;
-
- printk("Closed dump device\n");
-
- if (buf)
- dump_free_mem((buf - DUMP_PAGE_SIZE));
-
- dump_config.dumper->curr_buf = dump_config.dumper->dump_buf = NULL;
- pr_debug("Released dump buffer\n");
-
- return 0;
-}
-
-
-/* Set up the default dump scheme */
-
-struct dump_scheme_ops dump_scheme_singlestage_ops = {
- .configure = dump_generic_configure,
- .unconfigure = dump_generic_unconfigure,
- .sequencer = dump_generic_sequencer,
- .iterator = dump_page_iterator,
- .save_data = dump_generic_save_data,
- .skip_data = dump_generic_skip_data,
- .write_buffer = dump_generic_write_buffer,
-};
-
-struct dump_scheme dump_scheme_singlestage = {
- .name = "single-stage",
- .ops = &dump_scheme_singlestage_ops
-};
-
-/* The single stage dumper comprising all these */
-struct dumper dumper_singlestage = {
- .name = "single-stage",
- .scheme = &dump_scheme_singlestage,
- .fmt = &dump_fmt_lcrash,
- .compress = &dump_none_compression,
- .filter = dump_filter_table,
- .dev = NULL,
-};
-
+++ /dev/null
-/*
- * Standard kernel function entry points for Linux crash dumps.
- *
- * Created by: Matt Robinson (yakker@sourceforge.net)
- * Contributions from SGI, IBM, HP, MCL, and others.
- *
- * Copyright (C) 1999 - 2002 Silicon Graphics, Inc. All rights reserved.
- * Copyright (C) 2000 - 2002 TurboLinux, Inc. All rights reserved.
- * Copyright (C) 2001 - 2002 Matt D. Robinson. All rights reserved.
- * Copyright (C) 2002 Free Software Foundation, Inc. All rights reserved.
- *
- * This code is released under version 2 of the GNU GPL.
- */
-
-/*
- * -----------------------------------------------------------------------
- *
- * DUMP HISTORY
- *
- * This dump code goes back to SGI's first attempts at dumping system
- * memory on SGI systems running IRIX. A few developers at SGI needed
- * a way to take this system dump and analyze it, and created 'icrash',
- * or IRIX Crash. The mechanism (the dumps and 'icrash') were used
- * by support people to generate crash reports when a system failure
- * occurred. This was vital for large system configurations that
- * couldn't apply patch after patch after fix just to hope that the
- * problems would go away. So the system memory, along with the crash
- * dump analyzer, allowed support people to quickly figure out what the
- * problem was on the system with the crash dump.
- *
- * In comes Linux. SGI started moving towards the open source community,
- * and upon doing so, SGI wanted to take its support utilities into Linux
- * with the hopes that they would end up the in kernel and user space to
- * be used by SGI's customers buying SGI Linux systems. One of the first
- * few products to be open sourced by SGI was LKCD, or Linux Kernel Crash
- * Dumps. LKCD comprises of a patch to the kernel to enable system
- * dumping, along with 'lcrash', or Linux Crash, to analyze the system
- * memory dump. A few additional system scripts and kernel modifications
- * are also included to make the dump mechanism and dump data easier to
- * process and use.
- *
- * As soon as LKCD was released into the open source community, a number
- * of larger companies started to take advantage of it. Today, there are
- * many community members that contribute to LKCD, and it continues to
- * flourish and grow as an open source project.
- */
-
-/*
- * DUMP TUNABLES
- *
- * This is the list of system tunables (via /proc) that are available
- * for Linux systems. All the read, write, etc., functions are listed
- * here. Currently, there are a few different tunables for dumps:
- *
- * dump_device (used to be dumpdev):
- * The device for dumping the memory pages out to. This
- * may be set to the primary swap partition for disruptive dumps,
- * and must be an unused partition for non-disruptive dumps.
- * Todo: In the case of network dumps, this may be interpreted
- * as the IP address of the netdump server to connect to.
- *
- * dump_compress (used to be dump_compress_pages):
- * This is the flag which indicates which compression mechanism
- * to use. This is a BITMASK, not an index (0,1,2,4,8,16,etc.).
- * This is the current set of values:
- *
- * 0: DUMP_COMPRESS_NONE -- Don't compress any pages.
- * 1: DUMP_COMPRESS_RLE -- This uses RLE compression.
- * 2: DUMP_COMPRESS_GZIP -- This uses GZIP compression.
- *
- * dump_level:
- * The amount of effort the dump module should make to save
- * information for post crash analysis. This value is now
- * a BITMASK value, not an index:
- *
- * 0: Do nothing, no dumping. (DUMP_LEVEL_NONE)
- *
- * 1: Print out the dump information to the dump header, and
- * write it out to the dump_device. (DUMP_LEVEL_HEADER)
- *
- * 2: Write out the dump header and all kernel memory pages.
- * (DUMP_LEVEL_KERN)
- *
- * 4: Write out the dump header and all kernel and user
- * memory pages. (DUMP_LEVEL_USED)
- *
- * 8: Write out the dump header and all conventional/cached
- * memory (RAM) pages in the system (kernel, user, free).
- * (DUMP_LEVEL_ALL_RAM)
- *
- * 16: Write out everything, including non-conventional memory
- * like firmware, proms, I/O registers, uncached memory.
- * (DUMP_LEVEL_ALL)
- *
- * The dump_level will default to 1.
- *
- * dump_flags:
- * These are the flags to use when talking about dumps. There
- * are lots of possibilities. This is a BITMASK value, not an index.
- *
- * -----------------------------------------------------------------------
- */
-
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/reboot.h>
-#include <linux/fs.h>
-#include <linux/dump.h>
-#include "dump_methods.h"
-#include <linux/proc_fs.h>
-#include <linux/module.h>
-#include <linux/utsname.h>
-#include <linux/highmem.h>
-#include <linux/miscdevice.h>
-#include <linux/sysrq.h>
-#include <linux/sysctl.h>
-#include <linux/nmi.h>
-#include <linux/init.h>
-
-#include <asm/hardirq.h>
-#include <asm/uaccess.h>
-
-/*
- * -----------------------------------------------------------------------
- * V A R I A B L E S
- * -----------------------------------------------------------------------
- */
-
-/* Dump tunables */
-struct dump_config dump_config = {
- .level = 0,
- .flags = 0,
- .dump_device = 0,
- .dump_addr = 0,
- .dumper = NULL
-};
-#ifdef CONFIG_ARM
-static _dump_regs_t all_regs;
-#endif
-
-/* Global variables used in dump.h */
-/* degree of system freeze when dumping */
-enum dump_silence_levels dump_silence_level = DUMP_HARD_SPIN_CPUS;
-
-/* Other global fields */
-extern struct __dump_header dump_header;
-struct dump_dev *dump_dev = NULL; /* Active dump device */
-static int dump_compress = 0;
-
-static u16 dump_compress_none(const u8 *old, u16 oldsize, u8 *new, u16 newsize);
-struct __dump_compress dump_none_compression = {
- .compress_type = DUMP_COMPRESS_NONE,
- .compress_func = dump_compress_none,
- .compress_name = "none",
-};
-
-/* our device operations and functions */
-static int dump_ioctl(struct inode *i, struct file *f,
- unsigned int cmd, unsigned long arg);
-
-static struct file_operations dump_fops = {
- .owner = THIS_MODULE,
- .ioctl = dump_ioctl,
-};
-
-static struct miscdevice dump_miscdev = {
- .minor = CRASH_DUMP_MINOR,
- .name = "dump",
- .fops = &dump_fops,
-};
-MODULE_ALIAS_MISCDEV(CRASH_DUMP_MINOR);
-
-/* static variables */
-static int dump_okay = 0; /* can we dump out to disk? */
-static spinlock_t dump_lock = SPIN_LOCK_UNLOCKED;
-
-/* used for dump compressors */
-static struct list_head dump_compress_list = LIST_HEAD_INIT(dump_compress_list);
-
-/* list of registered dump targets */
-static struct list_head dump_target_list = LIST_HEAD_INIT(dump_target_list);
-
-/* lkcd info structure -- this is used by lcrash for basic system data */
-struct __lkcdinfo lkcdinfo = {
- .ptrsz = (sizeof(void *) * 8),
-#if defined(__LITTLE_ENDIAN)
- .byte_order = __LITTLE_ENDIAN,
-#else
- .byte_order = __BIG_ENDIAN,
-#endif
- .page_shift = PAGE_SHIFT,
- .page_size = PAGE_SIZE,
- .page_mask = PAGE_MASK,
- .page_offset = PAGE_OFFSET,
-};
-
-/*
- * -----------------------------------------------------------------------
- * / P R O C T U N A B L E F U N C T I O N S
- * -----------------------------------------------------------------------
- */
-
-static int proc_dump_device(ctl_table *ctl, int write, struct file *f,
- void *buffer, size_t *lenp);
-
-static int proc_doulonghex(ctl_table *ctl, int write, struct file *f,
- void *buffer, size_t *lenp);
-/*
- * sysctl-tuning infrastructure.
- */
-static ctl_table dump_table[] = {
- { .ctl_name = CTL_DUMP_LEVEL,
- .procname = DUMP_LEVEL_NAME,
- .data = &dump_config.level,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_doulonghex, },
-
- { .ctl_name = CTL_DUMP_FLAGS,
- .procname = DUMP_FLAGS_NAME,
- .data = &dump_config.flags,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_doulonghex, },
-
- { .ctl_name = CTL_DUMP_COMPRESS,
- .procname = DUMP_COMPRESS_NAME,
- .data = &dump_compress, /* FIXME */
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec, },
-
- { .ctl_name = CTL_DUMP_DEVICE,
- .procname = DUMP_DEVICE_NAME,
- .mode = 0644,
- .data = &dump_config.dump_device, /* FIXME */
- .maxlen = sizeof(int),
- .proc_handler = proc_dump_device },
-
-#ifdef CONFIG_CRASH_DUMP_MEMDEV
- { .ctl_name = CTL_DUMP_ADDR,
- .procname = DUMP_ADDR_NAME,
- .mode = 0444,
- .data = &dump_config.dump_addr,
- .maxlen = sizeof(unsigned long),
- .proc_handler = proc_doulonghex },
-#endif
-
- { 0, }
-};
-
-static ctl_table dump_root[] = {
- { .ctl_name = KERN_DUMP,
- .procname = "dump",
- .mode = 0555,
- .child = dump_table },
- { 0, }
-};
-
-static ctl_table kernel_root[] = {
- { .ctl_name = CTL_KERN,
- .procname = "kernel",
- .mode = 0555,
- .child = dump_root, },
- { 0, }
-};
-
-static struct ctl_table_header *sysctl_header;
-
-/*
- * -----------------------------------------------------------------------
- * C O M P R E S S I O N F U N C T I O N S
- * -----------------------------------------------------------------------
- */
-
-/*
- * Name: dump_compress_none()
- * Func: Don't do any compression, period.
- */
-static u16
-dump_compress_none(const u8 *old, u16 oldsize, u8 *new, u16 newsize)
-{
- /* just return the old size */
- return oldsize;
-}
-
-
-/*
- * Name: dump_execute()
- * Func: Execute the dumping process. This makes sure all the appropriate
- * fields are updated correctly, and calls dump_execute_memdump(),
- * which does the real work.
- */
-void
-dump_execute(const char *panic_str, const struct pt_regs *regs)
-{
- int state = -1;
- unsigned long flags;
-
- /* make sure we can dump */
- if (!dump_okay) {
- pr_info("LKCD not yet configured, can't take dump now\n");
- return;
- }
-
- /* Exclude multiple dumps at the same time,
- * and disable interrupts, some drivers may re-enable
- * interrupts in with silence()
- *
- * Try and acquire spin lock. If successful, leave preempt
- * and interrupts disabled. See spin_lock_irqsave in spinlock.h
- */
- local_irq_save(flags);
- if (!spin_trylock(&dump_lock)) {
- local_irq_restore(flags);
- pr_info("LKCD dump already in progress\n");
- return;
- }
-
- /* Bring system into the strictest level of quiescing for min drift
- * dump drivers can soften this as required in dev->ops->silence()
- */
- dump_oncpu = smp_processor_id() + 1;
- dump_silence_level = DUMP_HARD_SPIN_CPUS;
-
- state = dump_generic_execute(panic_str, regs);
-
- dump_oncpu = 0;
- spin_unlock_irqrestore(&dump_lock, flags);
-
- if (state < 0) {
- printk("Dump Incomplete or failed!\n");
- } else {
- printk("Dump Complete; %d dump pages saved.\n",
- dump_header.dh_num_dump_pages);
- }
-}
-
-/*
- * Name: dump_register_compression()
- * Func: Register a dump compression mechanism.
- */
-void
-dump_register_compression(struct __dump_compress *item)
-{
- if (item)
- list_add(&(item->list), &dump_compress_list);
-}
-
-/*
- * Name: dump_unregister_compression()
- * Func: Remove a dump compression mechanism, and re-assign the dump
- * compression pointer if necessary.
- */
-void
-dump_unregister_compression(int compression_type)
-{
- struct list_head *tmp;
- struct __dump_compress *dc;
-
- /* let's make sure our list is valid */
- if (compression_type != DUMP_COMPRESS_NONE) {
- list_for_each(tmp, &dump_compress_list) {
- dc = list_entry(tmp, struct __dump_compress, list);
- if (dc->compress_type == compression_type) {
- list_del(&(dc->list));
- break;
- }
- }
- }
-}
-
-/*
- * Name: dump_compress_init()
- * Func: Initialize (or re-initialize) compression scheme.
- */
-static int
-dump_compress_init(int compression_type)
-{
- struct list_head *tmp;
- struct __dump_compress *dc;
-
- /* try to remove the compression item */
- list_for_each(tmp, &dump_compress_list) {
- dc = list_entry(tmp, struct __dump_compress, list);
- if (dc->compress_type == compression_type) {
- dump_config.dumper->compress = dc;
- dump_compress = compression_type;
- pr_debug("Dump Compress %s\n", dc->compress_name);
- return 0;
- }
- }
-
- /*
- * nothing on the list -- return ENODATA to indicate an error
- *
- * NB:
- * EAGAIN: reports "Resource temporarily unavailable" which
- * isn't very enlightening.
- */
- printk("compression_type:%d not found\n", compression_type);
-
- return -ENODATA;
-}
-
-static int
-dumper_setup(unsigned long flags, unsigned long devid)
-{
- int ret = 0;
-
- /* unconfigure old dumper if it exists */
- dump_okay = 0;
- if (dump_config.dumper) {
- pr_debug("Unconfiguring current dumper\n");
- dump_unconfigure();
- }
- /* set up new dumper */
- if (dump_config.flags & DUMP_FLAGS_SOFTBOOT) {
- printk("Configuring softboot based dump \n");
-#ifdef CONFIG_CRASH_DUMP_MEMDEV
- dump_config.dumper = &dumper_stage1;
-#else
- printk("Requires CONFIG_CRASHDUMP_MEMDEV. Can't proceed.\n");
- return -1;
-#endif
- } else {
- dump_config.dumper = &dumper_singlestage;
- }
- dump_config.dumper->dev = dump_dev;
-
- ret = dump_configure(devid);
- if (!ret) {
- dump_okay = 1;
- pr_debug("%s dumper set up for dev 0x%lx\n",
- dump_config.dumper->name, devid);
- dump_config.dump_device = devid;
- } else {
- printk("%s dumper set up failed for dev 0x%lx\n",
- dump_config.dumper->name, devid);
- dump_config.dumper = NULL;
- }
- return ret;
-}
-
-static int
-dump_target_init(int target)
-{
- char type[20];
- struct list_head *tmp;
- struct dump_dev *dev;
-
- switch (target) {
- case DUMP_FLAGS_DISKDUMP:
- strcpy(type, "blockdev"); break;
- case DUMP_FLAGS_NETDUMP:
- strcpy(type, "networkdev"); break;
- default:
- return -1;
- }
-
- /*
- * This is a bit stupid, generating strings from flag
- * and doing strcmp. This is done because 'struct dump_dev'
- * has string 'type_name' and not interger 'type'.
- */
- list_for_each(tmp, &dump_target_list) {
- dev = list_entry(tmp, struct dump_dev, list);
- if (strcmp(type, dev->type_name) == 0) {
- dump_dev = dev;
- return 0;
- }
- }
- return -1;
-}
-
-/*
- * Name: dump_ioctl()
- * Func: Allow all dump tunables through a standard ioctl() mechanism.
- * This is far better than before, where we'd go through /proc,
- * because now this will work for multiple OS and architectures.
- */
-static int
-dump_ioctl(struct inode *i, struct file *f, unsigned int cmd, unsigned long arg)
-{
- /* check capabilities */
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if (!dump_config.dumper && cmd == DIOSDUMPCOMPRESS)
- /* dump device must be configured first */
- return -ENODEV;
-
- /*
- * This is the main mechanism for controlling get/set data
- * for various dump device parameters. The real trick here
- * is setting the dump device (DIOSDUMPDEV). That's what
- * triggers everything else.
- */
- switch (cmd) {
- case DIOSDUMPDEV: /* set dump_device */
- pr_debug("Configuring dump device\n");
- if (!(f->f_flags & O_RDWR))
- return -EPERM;
-
- __dump_open();
- return dumper_setup(dump_config.flags, arg);
-
-
- case DIOGDUMPDEV: /* get dump_device */
- return put_user((long)dump_config.dump_device, (long *)arg);
-
- case DIOSDUMPLEVEL: /* set dump_level */
- if (!(f->f_flags & O_RDWR))
- return -EPERM;
-
- /* make sure we have a positive value */
- if (arg < 0)
- return -EINVAL;
-
- /* Fixme: clean this up */
- dump_config.level = 0;
- switch ((int)arg) {
- case DUMP_LEVEL_ALL:
- case DUMP_LEVEL_ALL_RAM:
- dump_config.level |= DUMP_MASK_UNUSED;
- case DUMP_LEVEL_USED:
- dump_config.level |= DUMP_MASK_USED;
- case DUMP_LEVEL_KERN:
- dump_config.level |= DUMP_MASK_KERN;
- case DUMP_LEVEL_HEADER:
- dump_config.level |= DUMP_MASK_HEADER;
- case DUMP_LEVEL_NONE:
- break;
- default:
- return (-EINVAL);
- }
- pr_debug("Dump Level 0x%lx\n", dump_config.level);
- break;
-
- case DIOGDUMPLEVEL: /* get dump_level */
- /* fixme: handle conversion */
- return put_user((long)dump_config.level, (long *)arg);
-
-
- case DIOSDUMPFLAGS: /* set dump_flags */
- /* check flags */
- if (!(f->f_flags & O_RDWR))
- return -EPERM;
-
- /* make sure we have a positive value */
- if (arg < 0)
- return -EINVAL;
-
- if (dump_target_init(arg & DUMP_FLAGS_TARGETMASK) < 0)
- return -EINVAL; /* return proper error */
-
- dump_config.flags = arg;
-
- pr_debug("Dump Flags 0x%lx\n", dump_config.flags);
- break;
-
- case DIOGDUMPFLAGS: /* get dump_flags */
- return put_user((long)dump_config.flags, (long *)arg);
-
- case DIOSDUMPCOMPRESS: /* set the dump_compress status */
- if (!(f->f_flags & O_RDWR))
- return -EPERM;
-
- return dump_compress_init((int)arg);
-
- case DIOGDUMPCOMPRESS: /* get the dump_compress status */
- return put_user((long)(dump_config.dumper ?
- dump_config.dumper->compress->compress_type : 0),
- (long *)arg);
- case DIOGDUMPOKAY: /* check if dump is configured */
- return put_user((long)dump_okay, (long *)arg);
-
- case DIOSDUMPTAKE: /* Trigger a manual dump */
- /* Do not proceed if lkcd not yet configured */
- if(!dump_okay) {
- printk("LKCD not yet configured. Cannot take manual dump\n");
- return -ENODEV;
- }
-
- /* Take the dump */
- return manual_handle_crashdump();
-
- default:
- /*
- * these are network dump specific ioctls, let the
- * module handle them.
- */
- return dump_dev_ioctl(cmd, arg);
- }
- return 0;
-}
-
-/*
- * Handle special cases for dump_device
- * changing dump device requires doing an opening the device
- */
-static int
-proc_dump_device(ctl_table *ctl, int write, struct file *f,
- void *buffer, size_t *lenp)
-{
- int *valp = ctl->data;
- int oval = *valp;
- int ret = -EPERM;
-
- /* same permission checks as ioctl */
- if (capable(CAP_SYS_ADMIN)) {
- ret = proc_doulonghex(ctl, write, f, buffer, lenp);
- if (ret == 0 && write && *valp != oval) {
- /* need to restore old value to close properly */
- dump_config.dump_device = (dev_t) oval;
- __dump_open();
- ret = dumper_setup(dump_config.flags, (dev_t) *valp);
- }
- }
-
- return ret;
-}
-
-/* All for the want of a proc_do_xxx routine which prints values in hex */
-static int
-proc_doulonghex(ctl_table *ctl, int write, struct file *f,
- void *buffer, size_t *lenp)
-{
-#define TMPBUFLEN 20
- unsigned long *i;
- size_t len, left;
- char buf[TMPBUFLEN];
-
- if (!ctl->data || !ctl->maxlen || !*lenp || (f->f_pos)) {
- *lenp = 0;
- return 0;
- }
-
- i = (unsigned long *) ctl->data;
- left = *lenp;
-
- sprintf(buf, "0x%lx\n", (*i));
- len = strlen(buf);
- if (len > left)
- len = left;
- if(copy_to_user(buffer, buf, len))
- return -EFAULT;
-
- left -= len;
- *lenp -= left;
- f->f_pos += *lenp;
- return 0;
-}
-
-/*
- * -----------------------------------------------------------------------
- * I N I T F U N C T I O N S
- * -----------------------------------------------------------------------
- */
-
-/*
- * These register and unregister routines are exported for modules
- * to register their dump drivers (like block, net etc)
- */
-int
-dump_register_device(struct dump_dev *ddev)
-{
- struct list_head *tmp;
- struct dump_dev *dev;
-
- list_for_each(tmp, &dump_target_list) {
- dev = list_entry(tmp, struct dump_dev, list);
- if (strcmp(ddev->type_name, dev->type_name) == 0) {
- printk("Target type %s already registered\n",
- dev->type_name);
- return -1; /* return proper error */
- }
- }
- list_add(&(ddev->list), &dump_target_list);
-
- return 0;
-}
-
-void
-dump_unregister_device(struct dump_dev *ddev)
-{
- list_del(&(ddev->list));
- if (ddev != dump_dev)
- return;
-
- dump_okay = 0;
-
- if (dump_config.dumper)
- dump_unconfigure();
-
- dump_config.flags &= ~DUMP_FLAGS_TARGETMASK;
- dump_okay = 0;
- dump_dev = NULL;
- dump_config.dumper = NULL;
-}
-
-static int panic_event(struct notifier_block *this, unsigned long event,
- void *ptr)
-{
-#ifdef CONFIG_ARM
- get_current_general_regs(&all_regs);
- get_current_cp14_regs(&all_regs);
- get_current_cp15_regs(&all_regs);
- dump_execute((const char *)ptr, &all_regs);
-#else
- struct pt_regs regs;
-
- get_current_regs(®s);
- dump_execute((const char *)ptr, ®s);
-#endif
- return 0;
-}
-
-extern struct notifier_block *panic_notifier_list;
-static int panic_event(struct notifier_block *, unsigned long, void *);
-static struct notifier_block panic_block = {
- .notifier_call = panic_event,
-};
-
-#ifdef CONFIG_MAGIC_SYSRQ
-/* Sysrq handler */
-static void sysrq_handle_crashdump(int key, struct pt_regs *pt_regs,
- struct tty_struct *tty) {
- dump_execute("sysrq", pt_regs);
-}
-
-static struct sysrq_key_op sysrq_crashdump_op = {
- .handler = sysrq_handle_crashdump,
- .help_msg = "Dump",
- .action_msg = "Starting crash dump",
-};
-#endif
-
-static inline void
-dump_sysrq_register(void)
-{
-#ifdef CONFIG_MAGIC_SYSRQ
- __sysrq_lock_table();
- __sysrq_put_key_op(DUMP_SYSRQ_KEY, &sysrq_crashdump_op);
- __sysrq_unlock_table();
-#endif
-}
-
-static inline void
-dump_sysrq_unregister(void)
-{
-#ifdef CONFIG_MAGIC_SYSRQ
- __sysrq_lock_table();
- if (__sysrq_get_key_op(DUMP_SYSRQ_KEY) == &sysrq_crashdump_op)
- __sysrq_put_key_op(DUMP_SYSRQ_KEY, NULL);
- __sysrq_unlock_table();
-#endif
-}
-
-/*
- * Name: dump_init()
- * Func: Initialize the dump process. This will set up any architecture
- * dependent code. The big key is we need the memory offsets before
- * the page table is initialized, because the base memory offset
- * is changed after paging_init() is called.
- */
-static int __init
-dump_init(void)
-{
- struct sysinfo info;
- int err;
-
- /* try to create our dump device */
- err = misc_register(&dump_miscdev);
- if (err) {
- printk("cannot register dump character device!\n");
- return err;
- }
-
- __dump_init((u64)PAGE_OFFSET);
-
- /* set the dump_compression_list structure up */
- dump_register_compression(&dump_none_compression);
-
- /* grab the total memory size now (not if/when we crash) */
- si_meminfo(&info);
-
- /* set the memory size */
- dump_header.dh_memory_size = (u64)info.totalram;
-
- sysctl_header = register_sysctl_table(kernel_root, 0);
- dump_sysrq_register();
-
- notifier_chain_register(&panic_notifier_list, &panic_block);
- dump_function_ptr = dump_execute;
-
- pr_info("Crash dump driver initialized.\n");
- return 0;
-}
-
-static void __exit
-dump_cleanup(void)
-{
- dump_okay = 0;
-
- if (dump_config.dumper)
- dump_unconfigure();
-
- /* arch-specific cleanup routine */
- __dump_cleanup();
-
- /* ignore errors while unregistering -- since can't do anything */
- unregister_sysctl_table(sysctl_header);
- misc_deregister(&dump_miscdev);
- dump_sysrq_unregister();
- notifier_chain_unregister(&panic_notifier_list, &panic_block);
- dump_function_ptr = NULL;
-}
-
-EXPORT_SYMBOL(dump_register_compression);
-EXPORT_SYMBOL(dump_unregister_compression);
-EXPORT_SYMBOL(dump_register_device);
-EXPORT_SYMBOL(dump_unregister_device);
-EXPORT_SYMBOL(dump_config);
-EXPORT_SYMBOL(dump_silence_level);
-
-EXPORT_SYMBOL(__dump_irq_enable);
-EXPORT_SYMBOL(__dump_irq_restore);
-
-MODULE_AUTHOR("Matt D. Robinson <yakker@sourceforge.net>");
-MODULE_DESCRIPTION("Linux Kernel Crash Dump (LKCD) driver");
-MODULE_LICENSE("GPL");
-
-module_init(dump_init);
-module_exit(dump_cleanup);
static int __init soc_probe(void)
{
struct sbus_bus *sbus;
- struct sbus_dev *sdev = NULL;
+ struct sbus_dev *sdev = 0;
struct soc *s;
int cards = 0;
static int __init socal_probe(void)
{
struct sbus_bus *sbus;
- struct sbus_dev *sdev = NULL;
+ struct sbus_dev *sdev = 0;
struct socal *s;
int cards = 0;
/*
- * Parse the EFI PCDP table to locate the console device.
- *
- * (c) Copyright 2002, 2003, 2004 Hewlett-Packard Development Company, L.P.
- * Khalid Aziz <khalid.aziz@hp.com>
+ * Copyright (C) 2002, 2003, 2004 Hewlett-Packard Co.
+ * Khalid Aziz <khalid_aziz@hp.com>
* Alex Williamson <alex.williamson@hp.com>
* Bjorn Helgaas <bjorn.helgaas@hp.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * Parse the EFI PCDP table to locate the console device.
*/
#include <linux/acpi.h>
/*
+ * Copyright (C) 2002, 2004 Hewlett-Packard Co.
+ * Khalid Aziz <khalid_aziz@hp.com>
+ * Bjorn Helgaas <bjorn.helgaas@hp.com>
+ *
* Definitions for PCDP-defined console devices
*
* v1.0a: http://www.dig64.org/specifications/DIG64_HCDPv10a_01.pdf
* v2.0: http://www.dig64.org/specifications/DIG64_HCDPv20_042804.pdf
- *
- * (c) Copyright 2002, 2004 Hewlett-Packard Development Company, L.P.
- * Khalid Aziz <khalid.aziz@hp.com>
- * Bjorn Helgaas <bjorn.helgaas@hp.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#define PCDP_CONSOLE 0
/* Allocate space for two transmit and two receive buffer
* descriptors in the DP ram.
*/
- data->dp_addr = cpm_dpalloc(sizeof(cbd_t) * 4, 8);
-
+ data->dp_addr = m8xx_cpm_dpram_offset(m8xx_cpm_dpalloc(sizeof(cbd_t)
+ * 4));
+
/* ptr to i2c area */
data->i2c = (i2c8xx_t *)&(((immap_t *)IMAP_ADDR)->im_i2c);
}
config SENSORS_VIA686A
tristate "VIA686A"
- depends on I2C && PCI && EXPERIMENTAL
+ depends on I2C && EXPERIMENTAL
select I2C_SENSOR
select I2C_ISA
help
config BLK_DEV_IDE_PMAC
bool "Builtin PowerMac IDE support"
- depends on PPC_PMAC && IDE=y
+ depends on PPC_PMAC
help
This driver provides support for the built-in IDE controller on
most of the recent Apple Power Macintoshes and PowerBooks.
do_end_request = 1;
} else if (sense_key == ILLEGAL_REQUEST ||
sense_key == DATA_PROTECT) {
+ /*
+ * check if this was a write protected media
+ */
+ if (rq_data_dir(rq) == WRITE) {
+ printk("ide-cd: media marked write protected\n");
+ set_disk_ro(drive->disk, 1);
+ }
+
/* No point in retrying after an illegal
request or data protect error.*/
ide_dump_status (drive, "command error", stat);
* sg request
*/
if (rq->bio) {
- int mask = drive->queue->dma_alignment;
- unsigned long addr = (unsigned long) page_address(bio_page(rq->bio));
-
- info->cmd = rq_data_dir(rq);
+ if (rq->data_len & 3) {
+ printk("%s: block pc not aligned, len=%d\n", drive->name, rq->data_len);
+ cdrom_end_request(drive, 0);
+ return ide_stopped;
+ }
info->dma = drive->using_dma;
-
- /*
- * check if dma is safe
- */
- if ((rq->data_len & mask) || (addr & mask))
- info->dma = 0;
+ info->cmd = rq_data_dir(rq);
}
/* Start sending the command to the drive. */
int nslots;
blk_queue_prep_rq(drive->queue, ide_cdrom_prep_fn);
- blk_queue_dma_alignment(drive->queue, 31);
+ blk_queue_dma_alignment(drive->queue, 3);
drive->queue->unplug_delay = (1 * HZ) / 1000;
if (!drive->queue->unplug_delay)
drive->queue->unplug_delay = 1;
nslots = ide_cdrom_probe_capabilities (drive);
/*
- * set correct block size
+ * set correct block size and read-only for non-ram media
*/
+ set_disk_ro(drive->disk, !CDROM_CONFIG_FLAGS(drive)->ram);
blk_queue_hardsect_size(drive->queue, CD_FRAMESIZE);
#if 0
{
struct block_device *bdev = inode->i_bdev;
ide_drive_t *drive = bdev->bd_disk->private_data;
- int err = generic_ide_ioctl(file, bdev, cmd, arg);
+ int err = generic_ide_ioctl(bdev, cmd, arg);
if (err == -EINVAL) {
struct cdrom_info *info = drive->driver_data;
- err = cdrom_ioctl(file, &info->devinfo, inode, cmd, arg);
+ err = cdrom_ioctl(&info->devinfo, inode, cmd, arg);
}
return err;
}
unsigned int cmd, unsigned long arg)
{
struct block_device *bdev = inode->i_bdev;
- return generic_ide_ioctl(file, bdev, cmd, arg);
+ return generic_ide_ioctl(bdev, cmd, arg);
}
static int idedisk_media_changed(struct gendisk *disk)
ide_drive_t *drive = bdev->bd_disk->private_data;
idefloppy_floppy_t *floppy = drive->driver_data;
void __user *argp = (void __user *)arg;
- int err = generic_ide_ioctl(file, bdev, cmd, arg);
+ int err = generic_ide_ioctl(bdev, cmd, arg);
int prevent = (arg) ? 1 : 0;
idefloppy_pc_t pc;
if (err != -EINVAL)
idetape_tape_t *tape = drive->driver_data;
ssize_t bytes_read,temp, actually_read = 0, rc;
+ if (ppos != &file->f_pos) {
+ /* "A request was outside the capabilities of the device." */
+ return -ENXIO;
+ }
#if IDETAPE_DEBUG_LOG
if (tape->debug_level >= 3)
printk(KERN_INFO "ide-tape: Reached idetape_chrdev_read, count %Zd\n", count);
idetape_tape_t *tape = drive->driver_data;
ssize_t retval, actually_written = 0;
+ if (ppos != &file->f_pos) {
+ /* "A request was outside the capabilities of the device." */
+ return -ENXIO;
+ }
+
/* The drive is write protected. */
if (tape->write_prot)
return -EACCES;
idetape_pc_t pc;
int retval;
- nonseekable_open(inode, filp);
#if IDETAPE_DEBUG_LOG
printk(KERN_INFO "ide-tape: Reached idetape_chrdev_open\n");
#endif /* IDETAPE_DEBUG_LOG */
{
struct block_device *bdev = inode->i_bdev;
ide_drive_t *drive = bdev->bd_disk->private_data;
- int err = generic_ide_ioctl(file, bdev, cmd, arg);
+ int err = generic_ide_ioctl(bdev, cmd, arg);
if (err == -EINVAL)
err = idetape_blkdev_ioctl(drive, cmd, arg);
return err;
return ide_do_drive_cmd(drive, &rq, ide_head_wait);
}
-int generic_ide_ioctl(struct file *file, struct block_device *bdev,
- unsigned int cmd, unsigned long arg)
+int generic_ide_ioctl(struct block_device *bdev, unsigned int cmd,
+ unsigned long arg)
{
ide_drive_t *drive = bdev->bd_disk->private_data;
ide_settings_t *setting;
case CDROMEJECT:
case CDROMCLOSETRAY:
- return scsi_cmd_ioctl(file, bdev->bd_disk, cmd, p);
+ return scsi_cmd_ioctl(bdev->bd_disk, cmd, p);
case HDIO_GET_BUSSTATE:
if (!capable(CAP_SYS_ADMIN))
/*
- * linux/drivers/ide/pci/hpt366.c Version 0.36 April 25, 2003
+ * linux/drivers/ide/pci/hpt366.c Version 0.34 Sept 17, 2002
*
* Copyright (C) 1999-2003 Andre Hedrick <andre@linux-ide.org>
* Portions Copyright (C) 2001 Sun Microsystems, Inc.
- * Portions Copyright (C) 2003 Red Hat Inc
*
* Thanks to HighPoint Technologies for their assistance, and hardware.
* Special Thanks to Jon Burchmore in SanDiego for the deep pockets, his
* Reset the hpt366 on error, reset on dma
* Fix disabling Fast Interrupt hpt366.
* Mike Waychison <crlf@sun.com>
- *
- * Added support for 372N clocking and clock switching. The 372N needs
- * different clocks on read/write. This requires overloading rw_disk and
- * other deeply crazy things. Thanks to <http://www.hoerstreich.de> for
- * keeping me sane.
- * Alan Cox <alan@redhat.com>
- *
*/
class_rev &= 0xff;
switch(dev->device) {
- /* Remap new 372N onto 372 */
- case PCI_DEVICE_ID_TTI_HPT372N:
- class_rev = PCI_DEVICE_ID_TTI_HPT372; break;
case PCI_DEVICE_ID_TTI_HPT374:
class_rev = PCI_DEVICE_ID_TTI_HPT374; break;
case PCI_DEVICE_ID_TTI_HPT371:
return mode;
}
-/*
- * Note for the future; the SATA hpt37x we must set
- * either PIO or UDMA modes 0,4,5
- */
-
static u8 hpt3xx_ratefilter (ide_drive_t *drive, u8 speed)
{
struct pci_dev *dev = HWIF(drive)->pci_dev;
return __ide_dma_end(drive);
}
-/**
- * hpt372n_set_clock - perform clock switching dance
- * @drive: Drive to switch
- * @mode: Switching mode (0x21 for write, 0x23 otherwise)
- *
- * Switch the DPLL clock on the HPT372N devices. This is a
- * right mess.
- */
-
-static void hpt372n_set_clock(ide_drive_t *drive, int mode)
-{
- ide_hwif_t *hwif = HWIF(drive);
-
- /* FIXME: should we check for DMA active and BUG() */
- /* Tristate the bus */
- outb(0x80, hwif->dma_base+0x73);
- outb(0x80, hwif->dma_base+0x77);
-
- /* Switch clock and reset channels */
- outb(mode, hwif->dma_base+0x7B);
- outb(0xC0, hwif->dma_base+0x79);
-
- /* Reset state machines */
- outb(0x37, hwif->dma_base+0x70);
- outb(0x37, hwif->dma_base+0x74);
-
- /* Complete reset */
- outb(0x00, hwif->dma_base+0x79);
-
- /* Reconnect channels to bus */
- outb(0x00, hwif->dma_base+0x73);
- outb(0x00, hwif->dma_base+0x77);
-}
-
-/**
- * hpt372n_rw_disk - wrapper for I/O
- * @drive: drive for command
- * @rq: block request structure
- * @block: block number
- *
- * This is called when a disk I/O is issued to the 372N instead
- * of the default functionality. We need it because of the clock
- * switching
- *
- */
-
-static ide_startstop_t hpt372n_rw_disk(ide_drive_t *drive, struct request *rq, sector_t block)
-{
- int wantclock;
-
- if(rq_data_dir(rq) == READ)
- wantclock = 0x21;
- else
- wantclock = 0x23;
-
- if(HWIF(drive)->config_data != wantclock)
- {
- hpt372n_set_clock(drive, wantclock);
- HWIF(drive)->config_data = wantclock;
- }
- return __ide_do_rw_disk(drive, rq, block);
-}
-
/*
* Since SUN Cobalt is attempting to do this operation, I should disclose
* this has been a long time ago Thu Jul 27 16:40:57 2000 was the patch date
u16 freq;
u32 pll;
u8 reg5bh;
+
+#if 1
u8 reg5ah = 0;
- unsigned long dmabase = pci_resource_start(dev, 4);
- u8 did, rid;
- int is_372n = 0;
-
pci_read_config_byte(dev, 0x5a, ®5ah);
/* interrupt force enable */
pci_write_config_byte(dev, 0x5a, (reg5ah & ~0x10));
-
- if(dmabase)
- {
- did = inb(dmabase + 0x22);
- rid = inb(dmabase + 0x28);
-
- if((did == 4 && rid == 6) || (did == 5 && rid > 1))
- is_372n = 1;
- }
+#endif
/*
* default to pci clock. make sure MA15/16 are set to output
/*
* set up the PLL. we need to adjust it so that it's stable.
* freq = Tpll * 192 / Tpci
- *
- * Todo. For non x86 should probably check the dword is
- * set to 0xABCDExxx indicating the BIOS saved f_CNT
*/
pci_read_config_word(dev, 0x78, &freq);
freq &= 0x1FF;
-
- /*
- * The 372N uses different PCI clock information and has
- * some other complications
- * On PCI33 timing we must clock switch
- * On PCI66 timing we must NOT use the PCI clock
- *
- * Currently we always set up the PLL for the 372N
- */
-
- pci_set_drvdata(dev, NULL);
-
- if(is_372n)
- {
- printk(KERN_INFO "hpt: HPT372N detected, using 372N timing.\n");
- if(freq < 0x55)
- pll = F_LOW_PCI_33;
- else if(freq < 0x70)
- pll = F_LOW_PCI_40;
- else if(freq < 0x7F)
- pll = F_LOW_PCI_50;
+ if (freq < 0xa0) {
+ pll = F_LOW_PCI_33;
+ if (hpt_minimum_revision(dev,8))
+ pci_set_drvdata(dev, (void *) thirty_three_base_hpt374);
+ else if (hpt_minimum_revision(dev,5))
+ pci_set_drvdata(dev, (void *) thirty_three_base_hpt372);
+ else if (hpt_minimum_revision(dev,4))
+ pci_set_drvdata(dev, (void *) thirty_three_base_hpt370a);
else
- pll = F_LOW_PCI_66;
-
- printk(KERN_INFO "FREQ: %d PLL: %d\n", freq, pll);
-
- /* We always use the pll not the PCI clock on 372N */
- }
- else
- {
- if(freq < 0x9C)
- pll = F_LOW_PCI_33;
- else if(freq < 0xb0)
- pll = F_LOW_PCI_40;
- else if(freq <0xc8)
- pll = F_LOW_PCI_50;
+ pci_set_drvdata(dev, (void *) thirty_three_base_hpt370);
+ printk("HPT37X: using 33MHz PCI clock\n");
+ } else if (freq < 0xb0) {
+ pll = F_LOW_PCI_40;
+ } else if (freq < 0xc8) {
+ pll = F_LOW_PCI_50;
+ if (hpt_minimum_revision(dev,8))
+ pci_set_drvdata(dev, NULL);
+ else if (hpt_minimum_revision(dev,5))
+ pci_set_drvdata(dev, (void *) fifty_base_hpt372);
+ else if (hpt_minimum_revision(dev,4))
+ pci_set_drvdata(dev, (void *) fifty_base_hpt370a);
else
- pll = F_LOW_PCI_66;
-
- if (pll == F_LOW_PCI_33) {
- if (hpt_minimum_revision(dev,8))
- pci_set_drvdata(dev, (void *) thirty_three_base_hpt374);
- else if (hpt_minimum_revision(dev,5))
- pci_set_drvdata(dev, (void *) thirty_three_base_hpt372);
- else if (hpt_minimum_revision(dev,4))
- pci_set_drvdata(dev, (void *) thirty_three_base_hpt370a);
- else
- pci_set_drvdata(dev, (void *) thirty_three_base_hpt370);
- printk("HPT37X: using 33MHz PCI clock\n");
- } else if (pll == F_LOW_PCI_40) {
- /* Unsupported */
- } else if (pll == F_LOW_PCI_50) {
- if (hpt_minimum_revision(dev,8))
- pci_set_drvdata(dev, NULL);
- else if (hpt_minimum_revision(dev,5))
- pci_set_drvdata(dev, (void *) fifty_base_hpt372);
- else if (hpt_minimum_revision(dev,4))
- pci_set_drvdata(dev, (void *) fifty_base_hpt370a);
- else
- pci_set_drvdata(dev, (void *) fifty_base_hpt370a);
- printk("HPT37X: using 50MHz PCI clock\n");
- } else {
- if (hpt_minimum_revision(dev,8))
- {
- printk(KERN_ERR "HPT37x: 66MHz timings are not supported.\n");
- }
- else if (hpt_minimum_revision(dev,5))
- pci_set_drvdata(dev, (void *) sixty_six_base_hpt372);
- else if (hpt_minimum_revision(dev,4))
- pci_set_drvdata(dev, (void *) sixty_six_base_hpt370a);
- else
- pci_set_drvdata(dev, (void *) sixty_six_base_hpt370);
- printk("HPT37X: using 66MHz PCI clock\n");
+ pci_set_drvdata(dev, (void *) fifty_base_hpt370a);
+ printk("HPT37X: using 50MHz PCI clock\n");
+ } else {
+ pll = F_LOW_PCI_66;
+ if (hpt_minimum_revision(dev,8))
+ {
+ printk(KERN_ERR "HPT37x: 66MHz timings are not supported.\n");
+ pci_set_drvdata(dev, NULL);
}
+ else if (hpt_minimum_revision(dev,5))
+ pci_set_drvdata(dev, (void *) sixty_six_base_hpt372);
+ else if (hpt_minimum_revision(dev,4))
+ pci_set_drvdata(dev, (void *) sixty_six_base_hpt370a);
+ else
+ pci_set_drvdata(dev, (void *) sixty_six_base_hpt370);
+ printk("HPT37X: using 66MHz PCI clock\n");
}
/*
if (pci_get_drvdata(dev))
goto init_hpt37X_done;
- if (hpt_minimum_revision(dev,8))
- {
- printk(KERN_ERR "HPT374: Only 33MHz PCI timings are supported.\n");
- return -EOPNOTSUPP;
- }
/*
* adjust PLL based upon PCI clock, enable it, and wait for
* stabilization.
{
struct pci_dev *dev = hwif->pci_dev;
u8 ata66 = 0, regmask = (hwif->channel) ? 0x01 : 0x02;
- u8 did, rid;
- unsigned long dmabase = hwif->dma_base;
- int is_372n = 0;
-
- if(dmabase)
- {
- did = inb(dmabase + 0x22);
- rid = inb(dmabase + 0x28);
-
- if((did == 4 && rid == 6) || (did == 5 && rid > 1))
- is_372n = 1;
- }
-
+
hwif->tuneproc = &hpt3xx_tune_drive;
hwif->speedproc = &hpt3xx_tune_chipset;
hwif->quirkproc = &hpt3xx_quirkproc;
hwif->intrproc = &hpt3xx_intrproc;
hwif->maskproc = &hpt3xx_maskproc;
-
- if(is_372n)
- hwif->rw_disk = &hpt372n_rw_disk;
/*
* The HPT37x uses the CBLID pins as outputs for MA15/MA16
u8 pin1 = 0, pin2 = 0;
unsigned int class_rev;
char *chipset_names[] = {"HPT366", "HPT366", "HPT368",
- "HPT370", "HPT370A", "HPT372",
- "HPT372N" };
+ "HPT370", "HPT370A", "HPT372"};
if (PCI_FUNC(dev->devfn) & 1)
return;
pci_read_config_dword(dev, PCI_CLASS_REVISION, &class_rev);
class_rev &= 0xff;
- if(dev->device == PCI_DEVICE_ID_TTI_HPT372N)
- class_rev = 6;
-
- if(class_rev <= 6)
- d->name = chipset_names[class_rev];
+ strcpy(d->name, chipset_names[class_rev]);
switch(class_rev) {
- case 6:
case 5:
case 4:
case 3: ide_setup_pci_device(dev, d);
{ PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT302, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
{ PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT371, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3},
{ PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT374, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4},
- { PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT372N, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5},
{ 0, },
};
MODULE_DEVICE_TABLE(pci, hpt366_pci_tbl);
.channels = 2, /* 4 */
.autodma = AUTODMA,
.bootable = OFF_BOARD,
- },{ /* 5 */
- .name = "HPT372N",
- .init_setup = init_setup_hpt37x,
- .init_chipset = init_chipset_hpt366,
- .init_hwif = init_hwif_hpt366,
- .init_dma = init_dma_hpt366,
- .channels = 2, /* 4 */
- .autodma = AUTODMA,
- .bootable = OFF_BOARD,
}
};
if (!pmif->mediabay) {
ppc_md.feature_call(PMAC_FTR_IDE_RESET, pmif->node, pmif->aapl_bus_id, 1);
ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, pmif->node, pmif->aapl_bus_id, 1);
- msleep(10);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(HZ/100);
ppc_md.feature_call(PMAC_FTR_IDE_RESET, pmif->node, pmif->aapl_bus_id, 0);
- msleep(jiffies_to_msecs(IDE_WAKEUP_DELAY));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(IDE_WAKEUP_DELAY);
}
/* Sanitize drive timings */
/* This is necessary to enable IDE when net-booting */
ppc_md.feature_call(PMAC_FTR_IDE_RESET, np, pmif->aapl_bus_id, 1);
ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, np, pmif->aapl_bus_id, 1);
- msleep(10);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(HZ/100);
ppc_md.feature_call(PMAC_FTR_IDE_RESET, np, pmif->aapl_bus_id, 0);
- msleep(jiffies_to_msecs(IDE_WAKEUP_DELAY));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(IDE_WAKEUP_DELAY);
}
/* Setup MMIO ops */
config IEEE1394
tristate "IEEE 1394 (FireWire) support"
- depends on PCI || BROKEN
help
IEEE 1394 describes a high performance serial bus, which is also
known as FireWire(tm) or i.Link(tm) and is used for connecting all
config IEEE1394_SBP2
tristate "SBP-2 support (Harddisks etc.)"
- depends on IEEE1394 && SCSI && (PCI || BROKEN)
+ depends on IEEE1394 && SCSI
help
This option enables you to use SBP-2 devices connected to your IEEE
1394 bus. SBP-2 devices include harddrives and DVD devices.
static int dv1394_fasync(int fd, struct file *file, int on)
{
/* I just copied this code verbatim from Alan Cox's mouse driver example
- (Documentation/DocBook/) */
+ (linux/Documentation/DocBook/) */
struct video_card *video = file_to_video_card(file);
if (file->f_op->ioctl != dv1394_ioctl)
return -EFAULT;
- if (copy_from_user(&dv32, (void __user *)arg, sizeof(dv32)))
+ if (copy_from_user(&dv32, (void *)arg, sizeof(dv32)))
return -EFAULT;
dv.api_version = dv32.api_version;
dv32.n_clear_frames = dv.n_clear_frames;
dv32.dropped_frames = dv.dropped_frames;
- if (copy_to_user((struct dv1394_status32 __user *)arg, &dv32, sizeof(dv32)))
+ if (copy_to_user((struct dv1394_status32 *)arg, &dv32, sizeof(dv32)))
ret = -EFAULT;
}
return 0;
}
-static inline void purge_partial_datagram(struct list_head *old)
-{
- struct partial_datagram *pd = list_entry(old, struct partial_datagram, list);
- struct list_head *lh, *n;
-
- list_for_each_safe(lh, n, &pd->frag_info) {
- struct fragment_info *fi = list_entry(lh, struct fragment_info, list);
- list_del(lh);
- kfree(fi);
- }
- list_del(old);
- kfree_skb(pd->skb);
- kfree(pd);
-}
/******************************************
* 1394 bus activity functions
return 0;
}
+static inline void purge_partial_datagram(struct list_head *old)
+{
+ struct partial_datagram *pd = list_entry(old, struct partial_datagram, list);
+ struct list_head *lh, *n;
+
+ list_for_each_safe(lh, n, &pd->frag_info) {
+ struct fragment_info *fi = list_entry(lh, struct fragment_info, list);
+ list_del(lh);
+ kfree(fi);
+ }
+ list_del(old);
+ kfree_skb(pd->skb);
+ kfree(pd);
+}
+
static inline int is_datagram_complete(struct list_head *lh, int dg_size)
{
struct partial_datagram *pd = list_entry(lh, struct partial_datagram, list);
#include "raw1394.h"
#include "raw1394-private.h"
-#define int2ptr(x) ((void __user *)(unsigned long)x)
+#if BITS_PER_LONG == 64
+#define int2ptr(x) ((void __user *)x)
#define ptr2int(x) ((u64)(unsigned long)(void __user *)x)
+#else
+#define int2ptr(x) ((void __user *)(u32)x)
+#define ptr2int(x) ((u64)(unsigned long)(void __user *)x)
+#endif
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
#define RAW1394_DEBUG
if (another_host) {
DBGMSG("another hosts entry is valid -> SUCCESS");
if (copy_to_user(int2ptr(req->req.recvb),
- &addr->start,sizeof(u64))) {
+ int2ptr(&addr->start),sizeof(u64))) {
printk(KERN_ERR "raw1394: arm_register failed "
" address-range-entry is invalid -> EFAULT !!!\n");
vfree(addr->addr_space_buffer);
case VIDEO1394_IOC_TALK_QUEUE_BUFFER:
{
struct video1394_wait v;
- unsigned int *psizes = NULL;
+ struct video1394_queue_variable qv;
struct dma_iso_ctx *d;
+ qv.packet_sizes = NULL;
+
if (copy_from_user(&v, argp, sizeof(v)))
return -EFAULT;
}
if (d->flags & VIDEO1394_VARIABLE_PACKET_SIZE) {
+ unsigned int *psizes;
int buf_size = d->nb_cmd * sizeof(unsigned int);
- struct video1394_queue_variable __user *p = argp;
- unsigned int __user *qv;
- if (get_user(qv, &p->packet_sizes))
+ if (copy_from_user(&qv, argp, sizeof(qv)))
return -EFAULT;
psizes = kmalloc(buf_size, GFP_KERNEL);
if (!psizes)
return -ENOMEM;
- if (copy_from_user(psizes, qv, buf_size)) {
+ if (copy_from_user(psizes, qv.packet_sizes, buf_size)) {
kfree(psizes);
return -EFAULT;
}
+
+ qv.packet_sizes = psizes;
}
spin_lock_irqsave(&d->lock,flags);
PRINT(KERN_ERR, ohci->host->id,
"Buffer %d is already used",v.buffer);
spin_unlock_irqrestore(&d->lock,flags);
- if (psizes)
- kfree(psizes);
+ if (qv.packet_sizes)
+ kfree(qv.packet_sizes);
return -EFAULT;
}
if (d->flags & VIDEO1394_VARIABLE_PACKET_SIZE) {
initialize_dma_it_prg_var_packet_queue(
- d, v.buffer, psizes,
+ d, v.buffer, qv.packet_sizes,
ohci);
}
}
}
- if (psizes)
- kfree(psizes);
+ if (qv.packet_sizes)
+ kfree(qv.packet_sizes);
return 0;
static int video1394_wr_wait32(unsigned int fd, unsigned int cmd, unsigned long arg,
struct file *file)
{
- struct video1394_wait32 __user *argp = (void __user *)arg;
struct video1394_wait32 wait32;
struct video1394_wait wait;
mm_segment_t old_fs;
if (file->f_op->ioctl != video1394_ioctl)
return -EFAULT;
- if (copy_from_user(&wait32, argp, sizeof(wait32)))
+ if (copy_from_user(&wait32, (void *)arg, sizeof(wait32)))
return -EFAULT;
wait.channel = wait32.channel;
wait32.filltime.tv_sec = (int)wait.filltime.tv_sec;
wait32.filltime.tv_usec = (int)wait.filltime.tv_usec;
- if (copy_to_user(argp, &wait32, sizeof(wait32)))
+ if (copy_to_user((struct video1394_wait32 *)arg, &wait32, sizeof(wait32)))
ret = -EFAULT;
}
if (file->f_op->ioctl != video1394_ioctl)
return -EFAULT;
- if (copy_from_user(&wait32, (void __user *)arg, sizeof(wait32)))
+ if (copy_from_user(&wait32, (void *)arg, sizeof(wait32)))
return -EFAULT;
wait.channel = wait32.channel;
struct video1394_queue_variable {
unsigned int channel;
unsigned int buffer;
- unsigned int __user * packet_sizes; /* Buffer of size:
+ unsigned int* packet_sizes; /* Buffer of size:
buf_size / packet_size */
};
return count;
}
-#elif defined(__x86_64__)
+#elif __x86_64__
#define GET_TIME(x) rdtscl(x)
#define DELTA(x,y) ((y)-(x))
#define TIME_NAME "TSC"
-#elif defined(__alpha__)
+#elif __alpha__
#define GET_TIME(x) do { x = get_cycles(); } while (0)
#define DELTA(x,y) ((y)-(x))
#define TIME_NAME "PCC"
char name[64];
char phys[32];
char type;
- volatile s8 reset;
- volatile s8 layout;
+ volatile char reset;
+ volatile char layout;
};
/*
if (pc110pad_used++)
return 0;
- pc110pad_interrupt(0,NULL,NULL);
- pc110pad_interrupt(0,NULL,NULL);
- pc110pad_interrupt(0,NULL,NULL);
+ pc110pad_interrupt(0,0,0);
+ pc110pad_interrupt(0,0,0);
+ pc110pad_interrupt(0,0,0);
outb(PC110PAD_ON, pc110pad_io + 2);
pc110pad_count = 0;
outb(PC110PAD_OFF, pc110pad_io + 2);
- if (request_irq(pc110pad_irq, pc110pad_interrupt, 0, "pc110pad", NULL))
+ if (request_irq(pc110pad_irq, pc110pad_interrupt, 0, "pc110pad", 0))
{
release_region(pc110pad_io, 4);
printk(KERN_ERR "pc110pad: Unable to get irq %d.\n", pc110pad_irq);
outb(PC110PAD_OFF, pc110pad_io + 2);
- free_irq(pc110pad_irq, NULL);
+ free_irq(pc110pad_irq, 0);
release_region(pc110pad_io, 4);
}
struct sk_buff *skb;
size_t copied;
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
if (!cdev->ap.applid)
return -ENODEV;
struct sk_buff *skb;
u16 mlen;
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
if (!cdev->ap.applid)
return -ENODEV;
if ((file->private_data = capidev_alloc()) == 0)
return -ENOMEM;
- return nonseekable_open(inode, file);
+ return 0;
}
static int
if ((len = strlen(inf->info_start)) <= count) {
if (copy_to_user(buf, inf->info_start, len))
return -EFAULT;
- *off += len;
+ file->f_pos += len;
return (len);
}
return (0);
(struct divert_info **) filep->private_data = &divert_info_head;
spin_unlock_irqrestore( &divert_info_lock, flags );
/* start_divert(); */
- return nonseekable_open(ino, filep);
+ return (0);
} /* isdn_divert_open */
/*******************/
card = kmalloc(sizeof(*card), GFP_KERNEL);
if (!card)
- return NULL;
+ return 0;
memset(card, 0, sizeof(*card));
cinfo = kmalloc(sizeof(*cinfo) * nr_controllers, GFP_KERNEL);
if (!cinfo) {
kfree(card);
- return NULL;
+ return 0;
}
memset(cinfo, 0, sizeof(*cinfo) * nr_controllers);
err_kfree:
kfree(p);
err:
- return NULL;
+ return 0;
}
void avmcard_dma_free(avmcard_dmainfo *p)
int str_length;
int *str_msg;
+ if (off != &file->f_pos)
+ return -ESPIPE;
+
if (!file->private_data) {
for (;;) {
while (
filep->private_data = NULL;
- return nonseekable_open(ino, filep);
+ return (0);
}
static int maint_close(struct inode *ino, struct file *filep)
if (*off)
return 0;
+ if (off != &file->f_pos)
+ return -ESPIPE;
divas_get_version(tmpbuf);
if (copy_to_user(buf + len, &tmpbuf, strlen(tmpbuf)))
static int divas_open(struct inode *inode, struct file *file)
{
- return nonseekable_open(inode, file);
+ return (0);
}
static int divas_close(struct inode *inode, struct file *file)
-/* $Id: platform.h,v 1.37.4.1 2004/07/28 14:47:21 armin Exp $
+/* $Id: platform.h,v 1.37 2004/03/20 17:44:29 armin Exp $
*
* platform.h
*
*/
static __inline__ void diva_os_sleep(dword mSec)
{
- msleep(mSec);
+ unsigned long timeout = HZ * mSec / 1000 + 1;
+
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(timeout);
}
static __inline__ void diva_os_wait(dword mSec)
{
config HISAX_TELESPCI
bool "Teles PCI"
- depends on PCI && (BROKEN || !(SPARC64 || PPC))
+ depends on PCI
help
This enables HiSax support for the Teles PCI.
See <file:Documentation/isdn/README.HiSax> on how to configure it.
config HISAX_NETJET
bool "NETjet card"
- depends on PCI && (BROKEN || !(SPARC64 || PPC))
+ depends on PCI
help
This enables HiSax support for the NetJet from Traverse
Technologies.
config HISAX_NETJET_U
bool "NETspider U card"
- depends on PCI && (BROKEN || !(SPARC64 || PPC))
+ depends on PCI
help
This enables HiSax support for the Netspider U interface ISDN card
from Traverse Technologies.
config HISAX_HFC_PCI
bool "HFC PCI-Bus cards"
- depends on PCI && (BROKEN || !(SPARC64 || PPC))
+ depends on PCI
help
This enables HiSax support for the HFC-S PCI 2BDS0 based cards.
config HISAX_ENTERNOW_PCI
bool "Formula-n enter:now PCI card"
- depends on PCI && (BROKEN || !(SPARC64 || PPC))
+ depends on PCI
help
This enables HiSax support for the Formula-n enter:now PCI
ISDN card.
config HISAX_FRITZ_PCIPNP
tristate "AVM Fritz!Card PCI/PCIv2/PnP support (EXPERIMENTAL)"
- depends on PCI && EXPERIMENTAL
+ depends on EXPERIMENTAL
help
This enables the driver for the AVM Fritz!Card PCI,
Fritz!Card PCI v2 and Fritz!Card PnP.
return(0);
}
-#ifdef CONFIG_PCI
static struct pci_dev *dev_avm __initdata = NULL;
-#endif
#ifdef __ISAPNP__
static struct pnp_card *pnp_avm_c __initdata = NULL;
#endif
printk(KERN_INFO "FritzPnP: no ISA PnP present\n");
}
#endif
-#ifdef CONFIG_PCI
+#if CONFIG_PCI
if ((dev_avm = pci_find_device(PCI_VENDOR_ID_AVM,
PCI_DEVICE_ID_AVM_A1, dev_avm))) {
cs->irq = dev_avm->irq;
* of the GNU General Public License, incorporated herein by reference.
*
* For changes and modifications please read
- * Documentation/isdn/HiSax.cert
+ * ../../../Documentation/isdn/HiSax.cert
*
* based on the teles driver from Jan den Ouden
*
* of the GNU General Public License, incorporated herein by reference.
*
* For changes and modifications please read
- * Documentation/isdn/HiSax.cert
+ * ../../../Documentation/isdn/HiSax.cert
*
* based on the teles driver from Jan den Ouden
*
}
}
-#ifdef CONFIG_PCI
#include <linux/pci.h>
static struct pci_device_id hisax_pci_tbl[] __initdata = {
};
MODULE_DEVICE_TABLE(pci, hisax_pci_tbl);
-#endif /* CONFIG_PCI */
module_init(HiSax_init);
module_exit(HiSax_exit);
* of the GNU General Public License, incorporated herein by reference.
*
* For changes and modifications please read
- * Documentation/isdn/HiSax.cert
+ * ../../../Documentation/isdn/HiSax.cert
*
* Thanks to Eicon Technology for documents and information
*
* of the GNU General Public License, incorporated herein by reference.
*
* For changes and modifications please read
- * Documentation/isdn/HiSax.cert
+ * ../../../Documentation/isdn/HiSax.cert
*
* Thanks to Elsa GmbH for documents and information
*
byteout(cs->hw.hfcD.addr | 1, reg);
}
ret = bytein(cs->hw.hfcD.addr);
-#ifdef HFC_REG_DEBUG
+#if HFC_REG_DEBUG
if (cs->debug & L1_DEB_HSCX_FIFO && (data != 2))
debugl1(cs, "t3c RD %02x %02x", reg, ret);
#endif
}
if (data)
byteout(cs->hw.hfcD.addr, value);
-#ifdef HFC_REG_DEBUG
+#if HFC_REG_DEBUG
if (cs->debug & L1_DEB_HSCX_FIFO && (data != HFCD_DATA_NODEB))
debugl1(cs, "t3c W%c %02x %02x", data ? 'D' : 'C', reg, value);
#endif
* of the GNU General Public License, incorporated herein by reference.
*
* For changes and modifications please read
- * Documentation/isdn/HiSax.cert
+ * ../../../Documentation/isdn/HiSax.cert
*
*/
* of the GNU General Public License, incorporated herein by reference.
*
* For changes and modifications please read
- * Documentation/isdn/HiSax.cert
+ * ../../../Documentation/isdn/HiSax.cert
*
*/
* of the GNU General Public License, incorporated herein by reference.
*
* For changes and modifications please read
- * Documentation/isdn/HiSax.cert
+ * ../../../Documentation/isdn/HiSax.cert
*
* Thanks to Jan den Ouden
* Fritz Elfert
* of the GNU General Public License, incorporated herein by reference.
*
* For changes and modifications please read
- * Documentation/isdn/HiSax.cert
+ * ../../../Documentation/isdn/HiSax.cert
*
* Thanks to Jan den Ouden
* Fritz Elfert
* of the GNU General Public License, incorporated herein by reference.
*
* For changes and modifications please read
- * Documentation/isdn/HiSax.cert
+ * ../../../Documentation/isdn/HiSax.cert
*
* Thanks to Jan den Ouden
* Fritz Elfert
* of the GNU General Public License, incorporated herein by reference.
*
* For changes and modifications please read
- * Documentation/isdn/HiSax.cert
+ * ../../../Documentation/isdn/HiSax.cert
*
*/
* of the GNU General Public License, incorporated herein by reference.
*
* For changes and modifications please read
- * Documentation/isdn/HiSax.cert
+ * ../../../Documentation/isdn/HiSax.cert
*
* Thanks to Jan den Ouden
* Fritz Elfert
}
bcs->hw.tiger.s_tot += s_cnt;
if (bcs->cs->debug & L1_DEB_HSCX)
- debugl1(bcs->cs,"tiger write_raw: c%d %p-%p %d/%d %d %x", bcs->channel,
- buf, p, s_cnt, cnt,
+ debugl1(bcs->cs,"tiger write_raw: c%d %x-%x %d/%d %d %x", bcs->channel,
+ (u_int)buf, (u_int)p, s_cnt, cnt,
bcs->hw.tiger.sendcnt, bcs->cs->hw.njet.irqstat0);
if (bcs->cs->debug & L1_DEB_HSCX_FIFO)
printframe(bcs->cs, bcs->hw.tiger.sp, s_cnt, "snd");
cs->bcs[1].hw.tiger.s_end = cs->bcs[0].hw.tiger.s_end;
memset(cs->bcs[0].hw.tiger.send, 0xff, NETJET_DMA_TXSIZE * sizeof(unsigned int));
- debugl1(cs, "tiger: send buf %p - %p", cs->bcs[0].hw.tiger.send,
- cs->bcs[0].hw.tiger.send + NETJET_DMA_TXSIZE - 1);
+ debugl1(cs, "tiger: send buf %x - %x", (u_int)cs->bcs[0].hw.tiger.send,
+ (u_int)(cs->bcs[0].hw.tiger.send + NETJET_DMA_TXSIZE - 1));
outl(virt_to_bus(cs->bcs[0].hw.tiger.send),
cs->hw.njet.base + NETJET_DMA_READ_START);
outl(virt_to_bus(cs->bcs[0].hw.tiger.s_irq),
"HiSax: No memory for tiger.rec\n");
return;
}
- debugl1(cs, "tiger: rec buf %p - %p", cs->bcs[0].hw.tiger.rec,
- cs->bcs[0].hw.tiger.rec + NETJET_DMA_RXSIZE - 1);
+ debugl1(cs, "tiger: rec buf %x - %x", (u_int)cs->bcs[0].hw.tiger.rec,
+ (u_int)(cs->bcs[0].hw.tiger.rec + NETJET_DMA_RXSIZE - 1));
cs->bcs[1].hw.tiger.rec = cs->bcs[0].hw.tiger.rec;
memset(cs->bcs[0].hw.tiger.rec, 0xff, NETJET_DMA_RXSIZE * sizeof(unsigned int));
outl(virt_to_bus(cs->bcs[0].hw.tiger.rec),
st5481_usb_device_ctrl_msg(adapter, FFMSK_D, 0xfc, NULL, NULL);
st5481_in_mode(d_in, L1_MODE_HDLC);
-#ifdef LOOPBACK
+#if LOOPBACK
// Turn loopback on (data sent on B and D looped back)
st5481_usb_device_ctrl_msg(cs, LBB, 0x04, NULL, NULL);
#endif
* of the GNU General Public License, incorporated herein by reference.
*
* For changes and modifications please read
- * Documentation/isdn/HiSax.cert
+ * ../../../Documentation/isdn/HiSax.cert
*
* Thanks to Jan den Ouden
* Fritz Elfert
#
config HYSDN
tristate "Hypercope HYSDN cards (Champ, Ergo, Metro) support (module only)"
- depends on m && PROC_FS && PCI && BROKEN_ON_SMP
+ depends on m && PROC_FS && BROKEN_ON_SMP
help
Say Y here if you have one of Hypercope's active PCI ISDN cards
Champ, Ergo and Metro. You will then get a module called hysdn.
}
}
detach_capi_ctr(ctrl);
- ctrl->driverdata = NULL;
+ ctrl->driverdata = 0;
kfree(card->hyctrlinfo);
***********************************************************/
-int hycapi_init(void)
+int hycapi_init()
{
int i;
for(i=0;i<CAPI_MAXAPPL;i++) {
/* write conf file -> boot or send cfg line to card */
/****************************************************/
static ssize_t
-hysdn_conf_write(struct file *file, const char __user *buf, size_t count, loff_t * off)
+hysdn_conf_write(struct file *file, const char *buf, size_t count, loff_t * off)
{
struct conf_writedata *cnf;
int i;
uchar ch, *cp;
+ if (&file->f_pos != off) /* fs error check */
+ return (-ESPIPE);
if (!count)
return (0); /* nothing to handle */
/* read conf file -> output card info data */
/*******************************************/
static ssize_t
-hysdn_conf_read(struct file *file, char __user *buf, size_t count, loff_t * off)
+hysdn_conf_read(struct file *file, char *buf, size_t count, loff_t * off)
{
char *cp;
int i;
+ if (off != &file->f_pos) /* fs error check */
+ return -ESPIPE;
+
if (file->f_mode & FMODE_READ) {
if (!(cp = file->private_data))
return (-EFAULT); /* should never happen */
return (-EPERM); /* no permission this time */
}
unlock_kernel();
- return nonseekable_open(ino, filep);
+ return (0);
} /* hysdn_conf_open */
/***************************/
/* write log file -> set log level bits */
/****************************************/
static ssize_t
-hysdn_log_write(struct file *file, const char __user *buf, size_t count, loff_t * off)
+hysdn_log_write(struct file *file, const char *buf, size_t count, loff_t * off)
{
ulong u = 0;
int found = 0;
long base = 10;
hysdn_card *card = (hysdn_card *) file->private_data;
+ if (&file->f_pos != off) /* fs error check */
+ return (-ESPIPE);
+
if (count > (sizeof(valbuf) - 1))
count = sizeof(valbuf) - 1; /* limit length */
if (copy_from_user(valbuf, buf, count))
/* read log file */
/******************/
static ssize_t
-hysdn_log_read(struct file *file, char __user *buf, size_t count, loff_t * off)
+hysdn_log_read(struct file *file, char *buf, size_t count, loff_t * off)
{
struct log_data *inf;
int len;
if ((len = strlen(inf->log_start)) <= count) {
if (copy_to_user(buf, inf->log_start, len))
return -EFAULT;
- *off += len;
+ file->f_pos += len;
return (len);
}
return (0);
return (-EPERM); /* no permission this time */
}
unlock_kernel();
- return nonseekable_open(ino, filep);
+ return (0);
} /* hysdn_log_open */
/*******************************************************************************/
int retval;
char *p;
+ if (off != &file->f_pos)
+ return -ESPIPE;
+
lock_kernel();
if (minor == ISDN_MINOR_STATUS) {
if (!file->private_data) {
int chidx;
int retval;
+ if (off != &file->f_pos)
+ return -ESPIPE;
+
if (minor == ISDN_MINOR_STATUS)
return -EPERM;
if (!dev->drivers)
}
#endif
out:
- nonseekable_open(ino, filep);
return retval;
}
unsigned long expires = 0;
int tmp = 0;
int period = lp->cisco_keepalive_period;
- s8 debserint = lp->cisco_debserint;
+ char debserint = lp->cisco_debserint;
int rc = 0;
if (lp->p_encap != ISDN_NET_ENCAP_CISCOHDLCK)
* stuff needed to support the Linux X.25 PLP code on top of devices that
* can provide a lab_b service using the concap_proto mechanism.
* This module supports a network interface wich provides lapb_sematics
- * -- as defined in Documentation/networking/x25-iface.txt -- to
+ * -- as defined in ../../Documentation/networking/x25-iface.txt -- to
* the upper layer and assumes that the lower layer provides a reliable
* data link service by means of the concap_device_ops callbacks.
*
}
/* process a frame handed over to us from linux network layer. First byte
- semantics as defined in Documentation/networking/x25-iface.txt
+ semantics as defined in ../../Documentation/networking/x25-iface.txt
*/
int isdn_x25iface_xmit(struct concap_proto *cprot, struct sk_buff *skb)
{
#
config ISDN_DRV_PCBIT
tristate "PCBIT-D support"
- depends on ISDN_I4L && ISA && (BROKEN || !PPC)
+ depends on ISDN_I4L && ISA
help
This enables support for the PCBIT ISDN-card. This card is
manufactured in Portugal by Octal. For running this card,
card->bar0 + TPAM_PAGE_REGISTER);
/* write the value */
- writel(val, card->bar0 + (((unsigned long)addr) & TPAM_PAGE_SIZE));
+ writel(val, card->bar0 + (((u32)addr) & TPAM_PAGE_SIZE));
}
/*
events; also, the PowerBook button device will be enabled so you can
change the screen brightness.
+config MAC_FLOPPY
+ bool "Support for PowerMac floppy"
+ depends on PPC_PMAC && !PPC_PMAC64
+ help
+ If you have a SWIM-3 (Super Woz Integrated Machine 3; from Apple)
+ floppy controller, say Y here. Most commonly found in PowerMacs.
+
config MAC_SERIAL
tristate "Support for PowerMac serial ports (OBSOLETE DRIVER)"
depends on PPC_PMAC && BROKEN
G5 machines.
config ANSLCD
- tristate "Support for ANS LCD display"
+ bool "Support for ANS LCD display"
depends on ADB_CUDA && PPC_PMAC
endmenu
static __inline__ void adb_wait_ms(unsigned int ms)
{
if (current->pid && adb_probe_task_pid &&
- adb_probe_task_pid == current->pid)
- msleep(ms);
- else
+ adb_probe_task_pid == current->pid) {
+ set_task_state(current, TASK_UNINTERRUPTIBLE);
+ schedule_timeout(1 + ms * HZ / 1000);
+ } else
mdelay(ms);
}
write_lock_irq(&adb_handler_lock);
}
ret = 0;
- adb_handler[index].handler = NULL;
+ adb_handler[index].handler = 0;
}
write_unlock_irq(&adb_handler_lock);
up(&adb_handler_sem);
#define FLAG_POWER_FROM_FN 0x00000002
#define FLAG_EMU_FWDEL_DOWN 0x00000004
-static struct adbhid *adbhid[16];
+static struct adbhid *adbhid[16] = { 0 };
static void adbhid_probe(void);
if (adbhid[id]->keycode)
kfree(adbhid[id]->keycode);
kfree(adbhid[id]);
- adbhid[id] = NULL;
+ adbhid[id] = 0;
}
}
static ssize_t __pmac
-anslcd_write( struct file * file, const char __user * buf,
+anslcd_write( struct file * file, const char * buf,
size_t count, loff_t *ppos )
{
- const char __user *p = buf;
+ const char * p = buf;
int i;
#ifdef DEBUG
anslcd_ioctl( struct inode * inode, struct file * file,
unsigned int cmd, unsigned long arg )
{
- char ch, __user *temp;
+ char ch, *temp;
#ifdef DEBUG
printk(KERN_DEBUG "LCD: ioctl(%d,%d)\n",cmd,arg);
anslcd_write_byte_ctrl ( 0x02 );
return 0;
case ANSLCD_SENDCTRL:
- temp = (char __user *) arg;
+ temp = (char *) arg;
__get_user(ch, temp);
for (; ch; temp++) { /* FIXME: This is ugly, but should work, as a \0 byte is not a valid command code */
anslcd_write_byte_ctrl ( ch );
"* Welcome to *" /* Line #2 */
"********************"; /* Line #4 */
-static int __init
+int __init
anslcd_init(void)
{
int a;
return 0;
}
-static void __exit
-anslcd_exit(void)
-{
- misc_deregister(&anslcd_dev);
- iounmap(anslcd_ptr);
-}
+__initcall(anslcd_init);
-module_init(anslcd_init);
-module_exit(anslcd_exit);
req->data[i] = req->data[i+1];
--req->nbytes;
- req->next = NULL;
+ req->next = 0;
req->sent = 0;
req->complete = 0;
req->reply_len = 0;
local_irq_save(flags);
if (in_8(&adb->intr.r) != 0)
- macio_adb_interrupt(0, NULL, NULL);
+ macio_adb_interrupt(0, 0, 0);
local_irq_restore(flags);
}
static void rxdma_start(struct mac_serial * info, int current);
static void rxdma_to_tty(struct mac_serial * info);
+#ifndef MIN
+#define MIN(a,b) ((a) < (b) ? (a) : (b))
+#endif
+
/*
* tmp_buf is used as a temporary buffer by serial_write. We need to
* lock it in case the copy_from_user blocks while swapping in a page,
if (from_user) {
down(&tmp_buf_sem);
while (1) {
- c = min_t(int, count, min(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
- SERIAL_XMIT_SIZE - info->xmit_head));
+ c = MIN(count,
+ MIN(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
+ SERIAL_XMIT_SIZE - info->xmit_head));
if (c <= 0)
break;
break;
}
spin_lock_irqsave(&info->lock, flags);
- c = min_t(int, c, min(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
- SERIAL_XMIT_SIZE - info->xmit_head));
+ c = MIN(c, MIN(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
+ SERIAL_XMIT_SIZE - info->xmit_head));
memcpy(info->xmit_buf + info->xmit_head, tmp_buf, c);
info->xmit_head = ((info->xmit_head + c) &
(SERIAL_XMIT_SIZE-1));
} else {
while (1) {
spin_lock_irqsave(&info->lock, flags);
- c = min_t(int, count, min(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
- SERIAL_XMIT_SIZE - info->xmit_head));
+ c = MIN(count,
+ MIN(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
+ SERIAL_XMIT_SIZE - info->xmit_head));
if (c <= 0) {
spin_unlock_irqrestore(&info->lock, flags);
break;
} else if (char_time == 0)
char_time = 1;
if (timeout)
- char_time = min_t(unsigned long, char_time, timeout);
+ char_time = MIN(char_time, timeout);
while ((read_zsreg(info->zs_channel, 1) & ALL_SNT) == 0) {
current->state = TASK_INTERRUPTIBLE;
schedule_timeout(char_time);
#endif /* CONFIG_BLK_DEV_IDE */
return -ENODEV;
}
-EXPORT_SYMBOL(check_media_bay);
int __pmac check_media_bay_by_base(unsigned long base, int what)
{
/* Force an immediate detect */
set_mb_power(bay, 0);
- msleep(MB_POWER_DELAY);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(MS_TO_HZ(MB_POWER_DELAY));
bay->content_id = MB_NO;
bay->last_value = bay->ops->content(bay);
bay->value_count = MS_TO_HZ(MB_STABLE_DELAY);
bay->state = mb_empty;
do {
- msleep(MB_POLL_DELAY);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(MS_TO_HZ(MB_POLL_DELAY));
media_bay_step(i);
} while((bay->state != mb_empty) &&
(bay->state != mb_up));
bay->sleeping = 1;
set_mb_power(bay, 0);
up(&bay->lock);
- msleep(MB_POLL_DELAY);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(MS_TO_HZ(MB_POLL_DELAY));
mdev->ofdev.dev.power_state = state;
}
return 0;
/* Force MB power to 0 */
down(&bay->lock);
set_mb_power(bay, 0);
- msleep(MB_POWER_DELAY);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(MS_TO_HZ(MB_POWER_DELAY));
if (bay->ops->content(bay) != bay->content_id) {
printk("mediabay%d: content changed during sleep...\n", bay->index);
up(&bay->lock);
bay->cd_retry = 0;
#endif
do {
- msleep(MB_POLL_DELAY);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(MS_TO_HZ(MB_POLL_DELAY));
media_bay_step(bay->index);
} while((bay->state != mb_empty) &&
(bay->state != mb_up));
return nw;
}
-static int start_fcu(void)
-{
- unsigned char buf = 0xff;
- int rc;
-
- rc = fan_write_reg(0xe, &buf, 1);
- if (rc < 0)
- return -EIO;
- rc = fan_write_reg(0x2e, &buf, 1);
- if (rc < 0)
- return -EIO;
- return 0;
-}
-
static int set_rpm_fan(int fan, int rpm)
{
unsigned char buf[2];
down(&driver_lock);
- if (start_fcu() < 0) {
- printk(KERN_ERR "kfand: failed to start FCU\n");
- up(&driver_lock);
- goto out;
- }
-
/* Set the PCI fan once for now */
set_pwm_fan(SLOTS_FAN_PWM_ID, SLOTS_FAN_DEFAULT_PWM);
schedule_timeout(HZ - elapsed);
}
- out:
DBG("main_control_loop ended\n");
ctrl_task = 0;
req->complete = 1;
return -EINVAL;
}
- req->next = NULL;
+ req->next = 0;
req->sent = 0;
req->complete = 0;
req->reply_len = 0;
* disable_irq(), would that work on m68k ? --BenH
*/
local_irq_save(flags);
- cuda_interrupt(0, NULL, NULL);
+ cuda_interrupt(0, 0, 0);
local_irq_restore(flags);
}
}
#endif /* CONFIG_PMAC_PBOOK */
/* Create /proc/pmu */
- proc_pmu_root = proc_mkdir("pmu", NULL);
+ proc_pmu_root = proc_mkdir("pmu", 0);
if (proc_pmu_root) {
int i;
proc_pmu_info = create_proc_read_entry("info", 0, proc_pmu_root,
}
if (pmu_state == idle)
adb_int_pending = 1;
- via_pmu_interrupt(0, NULL, NULL);
+ via_pmu_interrupt(0, 0, 0);
udelay(10);
}
return -EINVAL;
}
- req->next = NULL;
+ req->next = 0;
req->sent = 0;
req->complete = 0;
return;
if (disable_poll)
return;
- via_pmu_interrupt(0, NULL, NULL);
+ via_pmu_interrupt(0, 0, 0);
}
void __openfirmware
/* Kicks ADB read when PMU is suspended */
adb_int_pending = 1;
do {
- via_pmu_interrupt(0, NULL, NULL);
+ via_pmu_interrupt(0, 0, 0);
} while (pmu_suspended && (adb_int_pending || pmu_state != idle
|| req_awaiting_reply));
}
if (!via)
return;
while((pmu_state != idle && pmu_state != locked) || !req->complete)
- via_pmu_interrupt(0, NULL, NULL);
+ via_pmu_interrupt(0, 0, 0);
}
/* This function loops until the PMU is idle and prevents it from
spin_unlock_irqrestore(&pmu_lock, flags);
if (req_awaiting_reply)
adb_int_pending = 1;
- via_pmu_interrupt(0, NULL, NULL);
+ via_pmu_interrupt(0, 0, 0);
spin_lock_irqsave(&pmu_lock, flags);
if (!adb_int_pending && pmu_state == idle && !req_awaiting_reply) {
#ifdef SUSPEND_USES_PMU
printk(KERN_ERR "PMU: extra ADB reply\n");
return;
}
- req_awaiting_reply = NULL;
+ req_awaiting_reply = 0;
if (len <= 2)
req->reply_len = 0;
else {
pmu_irq_stats[1]++;
adb_int_pending = 1;
spin_unlock_irqrestore(&pmu_lock, flags);
- via_pmu_interrupt(0, NULL, NULL);
+ via_pmu_interrupt(0, 0, 0);
return IRQ_HANDLED;
}
return IRQ_NONE;
if (n->list.next == 0)
return -ENOENT;
list_del(&n->list);
- n->list.next = NULL;
+ n->list.next = 0;
return 0;
}
/* Force a poll of ADB interrupts */
adb_int_pending = 1;
- via_pmu_interrupt(0, NULL, NULL);
+ via_pmu_interrupt(0, 0, 0);
/* Restart jiffies & scheduling */
wakeup_decrementer();
lock_kernel();
if (pp != 0) {
- file->private_data = NULL;
+ file->private_data = 0;
spin_lock_irqsave(&all_pvt_lock, flags);
list_del(&pp->list);
spin_unlock_irqrestore(&all_pvt_lock, flags);
u_int cmd, u_long arg)
{
struct pmu_private *pp = filp->private_data;
- __u32 __user *argp = (__u32 __user *)arg;
int error;
switch (cmd) {
sleep_in_progress = 0;
return error;
case PMU_IOC_CAN_SLEEP:
- return put_user((u32)can_sleep, argp);
+ return put_user((u32)can_sleep, (__u32 *)arg);
#ifdef CONFIG_PMAC_BACKLIGHT
/* Backlight should have its own device or go via
error = get_backlight_level();
if (error < 0)
return error;
- return put_user(error, argp);
+ return put_user(error, (__u32 *)arg);
case PMU_IOC_SET_BACKLIGHT:
{
__u32 value;
if (sleep_in_progress)
return -EBUSY;
- error = get_user(value, argp);
+ error = get_user(value, (__u32 *)arg);
if (!error)
error = set_backlight_level(value);
return error;
#endif /* CONFIG_INPUT_ADBHID */
#endif /* CONFIG_PMAC_BACKLIGHT */
case PMU_IOC_GET_MODEL:
- return put_user(pmu_kind, argp);
+ return put_user(pmu_kind, (__u32 *)arg);
case PMU_IOC_HAS_ADB:
- return put_user(pmu_has_adb, argp);
+ return put_user(pmu_has_adb, (__u32 *)arg);
}
return -EINVAL;
}
/*f8*/ {-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},
};
-int pmu_probe(void)
+int pmu_probe()
{
if (macintosh_config->adb_type == MAC_ADB_PB1) {
pmu_kind = PMU_68K_V1;
}
static void
-recv_byte(void)
+recv_byte()
{
char c;
}
static void
-pmu_start(void)
+pmu_start()
{
unsigned long flags;
struct adb_request *req;
}
void
-pmu_poll(void)
+pmu_poll()
{
unsigned long flags;
if (uptodate)
multipath_end_bh_io(mp_bh, uptodate);
- else if ((bio->bi_rw & (1 << BIO_RW_AHEAD)) == 0) {
+ else {
/*
* oops, IO error:
*/
bdevname(rdev->bdev,b),
(unsigned long long)bio->bi_sector);
multipath_reschedule_retry(mp_bh);
- } else
- multipath_end_bh_io(mp_bh, 0);
+ }
rdev_dec_pending(rdev, conf->mddev);
return 0;
}
" to another IO path\n",
bdevname(bio->bi_bdev,b),
(unsigned long long)bio->bi_sector);
- *bio = *(mp_bh->master_bio);
bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev;
- bio->bi_rw |= (1 << BIO_RW_FAILFAST);
- bio->bi_end_io = multipath_end_request;
- bio->bi_private = mp_bh;
generic_make_request(bio);
}
}
*/
#define RAID5_DEBUG 0
#define RAID5_PARANOIA 1
-#if RAID5_PARANOIA && defined(CONFIG_SMP)
+#if RAID5_PARANOIA && CONFIG_SMP
# define CHECK_DEVLOCK() if (!spin_is_locked(&conf->device_lock)) BUG()
#else
# define CHECK_DEVLOCK()
#define RAID6_DEBUG 0 /* Extremely verbose printk */
#define RAID6_PARANOIA 1 /* Check spinlocks */
#define RAID6_DUMPSTATE 0 /* Include stripe cache state in /proc/mdstat */
-#if RAID6_PARANOIA && defined(CONFIG_SMP)
+#if RAID6_PARANOIA && CONFIG_SMP
# define CHECK_DEVLOCK() if (!spin_is_locked(&conf->device_lock)) BUG()
#else
# define CHECK_DEVLOCK()
config DVB_B2C2_SKYSTAR
tristate "Technisat Skystar2 PCI"
- depends on DVB_CORE && PCI
+ depends on DVB_CORE
help
Support for the Skystar2 PCI DVB card by Technisat, which
is equipped with the FlexCopII chipset by B2C2.
neq |= f->maskandnotmode[i] & xor;
}
- if (f->doneq && !neq)
+ if (f->doneq & !neq)
return 0;
return feed->cb.sec (feed->feed.sec.secbuf, feed->feed.sec.seclen,
/* Copy arguments into temp kernel buffer */
switch (_IOC_DIR(cmd)) {
case _IOC_NONE:
- /*
- * For this command, the pointer is actually an integer
- * argument.
- */
- parg = (void *) arg;
+ parg = NULL;
break;
case _IOC_READ: /* some v4l ioctls are marked wrong ... */
case _IOC_WRITE:
#include "dvb_functions.h"
-static inline __u32 iov_crc32( __u32 c, struct kvec *iov, unsigned int cnt )
+static inline __u32 iov_crc32( __u32 c, struct iovec *iov, unsigned int cnt )
{
unsigned int j;
for (j = 0; j < cnt; j++)
/* Check CRC32, we've got it in our skb already. */
unsigned short ulen = htons(priv->ule_sndu_len);
unsigned short utype = htons(priv->ule_sndu_type);
- struct kvec iov[4] = {
+ struct iovec iov[4] = {
{ &ulen, sizeof ulen },
{ &utype, sizeof utype },
{ NULL, 0 },
#include <linux/list.h>
#include <linux/devfs_fs_kernel.h>
-#define DVB_MAJOR 212
+#define DVB_MAJOR 250
#define DVB_DEVICE_VIDEO 0
#define DVB_DEVICE_AUDIO 1
*/
+
+#define __KERNEL_SYSCALLS__
#include <linux/module.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
loff_t filesize;
char *dp;
- fd = sys_open(fn, 0, 0);
+ fd = open(fn, 0, 0);
if (fd == -1) {
printk("%s: unable to open '%s'.\n", __FUNCTION__, fn);
return -EIO;
}
- filesize = sys_lseek(fd, 0L, 2);
+ filesize = lseek(fd, 0L, 2);
if (filesize <= 0 || filesize < SP8870_FIRMWARE_OFFSET + SP8870_FIRMWARE_SIZE) {
printk("%s: firmware filesize to small '%s'\n", __FUNCTION__, fn);
sys_close(fd);
return -EIO;
}
- sys_lseek(fd, SP8870_FIRMWARE_OFFSET, 0);
- if (sys_read(fd, dp, SP8870_FIRMWARE_SIZE) != SP8870_FIRMWARE_SIZE) {
+ lseek(fd, SP8870_FIRMWARE_OFFSET, 0);
+ if (read(fd, dp, SP8870_FIRMWARE_SIZE) != SP8870_FIRMWARE_SIZE) {
printk("%s: failed to read '%s'.\n",__FUNCTION__, fn);
vfree(dp);
sys_close(fd);
next 0x4000 loaded. This may change in future versions.
*/
+#define __KERNEL_SYSCALLS__
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
// Load the firmware
set_fs(get_ds());
- fd = sys_open(sp887x_firmware, 0, 0);
+ fd = open(sp887x_firmware, 0, 0);
if (fd < 0) {
printk(KERN_WARNING "%s: Unable to open firmware %s\n", __FUNCTION__,
sp887x_firmware);
return -EIO;
}
- filesize = sys_lseek(fd, 0L, 2);
+ filesize = lseek(fd, 0L, 2);
if (filesize <= 0) {
printk(KERN_WARNING "%s: Firmware %s is empty\n", __FUNCTION__,
sp887x_firmware);
// read it!
// read the first 16384 bytes from the file
// ignore the first 10 bytes
- sys_lseek(fd, 10, 0);
- if (sys_read(fd, firmware, fw_size) != fw_size) {
+ lseek(fd, 10, 0);
+ if (read(fd, firmware, fw_size) != fw_size) {
printk(KERN_WARNING "%s: Failed to read firmware\n", __FUNCTION__);
vfree(firmware);
sys_close(fd);
*/
+#define __KERNEL_SYSCALLS__
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/fs.h>
+#include <linux/unistd.h>
#include <linux/fcntl.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
// Load the firmware
set_fs(get_ds());
- fd = sys_open(tda1004x_firmware, 0, 0);
+ fd = open(tda1004x_firmware, 0, 0);
if (fd < 0) {
printk("%s: Unable to open firmware %s\n", __FUNCTION__,
tda1004x_firmware);
return -EIO;
}
- filesize = sys_lseek(fd, 0L, 2);
+ filesize = lseek(fd, 0L, 2);
if (filesize <= 0) {
printk("%s: Firmware %s is empty\n", __FUNCTION__,
tda1004x_firmware);
}
// read it!
- sys_lseek(fd, fw_offset, 0);
- if (sys_read(fd, firmware, fw_size) != fw_size) {
+ lseek(fd, fw_offset, 0);
+ if (read(fd, firmware, fw_size) != fw_size) {
printk("%s: Failed to read firmware\n", __FUNCTION__);
vfree(firmware);
sys_close(fd);
config RADIO_MAXIRADIO
tristate "Guillemot MAXI Radio FM 2000 radio"
- depends on VIDEO_DEV && PCI
+ depends on VIDEO_DEV
---help---
Choose Y here if you have this radio card. This card may also be
found as Gemtek PCI FM.
if ((i=aci_rw_cmd(ACI_READ_TUNERSTATION, -1, -1))<0)
return i;
-#ifdef DEBUG
+#if DEBUG
printk("check_sig: 0x%x\n", i);
#endif
if (i & 0x80) {
if ((i=aci_rds_cmd(RDS_RXVALUE, &buf, 1))<0)
return i;
-#ifdef DEBUG
+#if DEBUG
printk("rds-signal: %d\n", buf);
#endif
if (buf > 15) {
unsigned long *freq = arg;
pcm20->freq = *freq;
i=pcm20_setfreq(pcm20, pcm20->freq);
-#ifdef DEBUG
+#if DEBUG
printk("First view (setfreq): 0x%x\n", i);
#endif
return i;
struct saa7134_buf *buf)
{
struct saa7134_buf *next = NULL;
-#ifdef DEBUG_SPINLOCKS
+#if DEBUG_SPINLOCKS
BUG_ON(!spin_is_locked(&dev->slock));
#endif
struct saa7134_dmaqueue *q,
unsigned int state)
{
-#ifdef DEBUG_SPINLOCKS
+#if DEBUG_SPINLOCKS
BUG_ON(!spin_is_locked(&dev->slock));
#endif
dprintk("buffer_finish %p\n",q->curr);
{
struct saa7134_buf *buf,*next = NULL;
-#ifdef DEBUG_SPINLOCKS
+#if DEBUG_SPINLOCKS
BUG_ON(!spin_is_locked(&dev->slock));
#endif
BUG_ON(NULL != q->curr);
enum v4l2_field cap = V4L2_FIELD_ANY;
enum v4l2_field ov = V4L2_FIELD_ANY;
-#ifdef DEBUG_SPINLOCKS
+#if DEBUG_SPINLOCKS
BUG_ON(!spin_is_locked(&dev->slock));
#endif
#include <linux/videodev.h>
#include <linux/spinlock.h>
#include <linux/sem.h>
-#include <linux/seq_file.h>
#include <linux/ctype.h>
#include <asm/io.h>
{NULL, 0, 0, 0},
};
+struct procfs_io {
+ char *buffer;
+ char *end;
+ int neof;
+ int count;
+ int count_current;
+};
+
static void
setparam (struct zoran *zr,
char *name,
}
}
-static int zoran_show(struct seq_file *p, void *v)
+static int
+print_procfs (struct procfs_io *io,
+ const char *fmt,
+ ...)
{
- struct zoran *zr = p->private;
+ va_list args;
int i;
- seq_printf(p, "ZR36067 registers:\n");
- for (i = 0; i < 0x130; i += 16)
- seq_printf(p, "%03X %08X %08X %08X %08X \n", i,
- btread(i), btread(i+4), btread(i+8), btread(i+12));
- return 0;
+ if (io->buffer >= io->end) {
+ io->neof++;
+ return 0;
+ }
+ if (io->count > io->count_current++)
+ return 0;
+ va_start(args, fmt);
+ i = vsprintf(io->buffer, fmt, args);
+ io->buffer += i;
+ va_end(args);
+ return i;
}
-static int zoran_open(struct inode *inode, struct file *file)
+static void
+zoran_procfs_output (struct procfs_io *io,
+ void *data)
{
- struct zoran *data = PDE(inode)->data;
- return single_open(file, zoran_show, data);
+ int i;
+ struct zoran *zr;
+ zr = (struct zoran *) data;
+
+ print_procfs(io, "ZR36067 registers:");
+ for (i = 0; i < 0x130; i += 4) {
+ if (!(i % 16)) {
+ print_procfs(io, "\n%03X", i);
+ };
+ print_procfs(io, " %08X ", btread(i));
+ };
+ print_procfs(io, "\n");
}
-static ssize_t zoran_write(struct file *file, const char __user *buffer,
- size_t count, loff_t *ppos)
+static int
+zoran_read_proc (char *buffer,
+ char **start,
+ off_t offset,
+ int size,
+ int *eof,
+ void *data)
+{
+ struct procfs_io io;
+ int nbytes;
+
+ io.buffer = buffer;
+ io.end = buffer + size - 128; // Just to make it a little bit safer
+ io.count = offset;
+ io.count_current = 0;
+ io.neof = 0;
+ zoran_procfs_output(&io, data);
+ *start = (char *) (io.count_current - io.count);
+ nbytes = (int) (io.buffer - buffer);
+ *eof = !io.neof;
+ return nbytes;
+
+ return 0;
+}
+
+static int
+zoran_write_proc (struct file *file,
+ const char __user *buffer,
+ unsigned long count,
+ void *data)
{
- struct zoran *zr = PDE(file->f_dentry->d_inode)->data;
char *string, *sp;
char *line, *ldelim, *varname, *svar, *tdelim;
+ struct zoran *zr;
if (count > 32768) /* Stupidity filter */
return -EINVAL;
+ zr = (struct zoran *) data;
+
string = sp = vmalloc(count + 1);
if (!string) {
dprintk(1,
return -EFAULT;
}
string[count] = 0;
- dprintk(4, KERN_INFO "%s: write_proc: name=%s count=%zu zr=%p\n",
- ZR_DEVNAME(zr), file->f_dentry->d_name.name, count, zr);
+ dprintk(4, KERN_INFO "%s: write_proc: name=%s count=%lu data=%x\n",
+ ZR_DEVNAME(zr), file->f_dentry->d_name.name, count, (int) data);
ldelim = " \t\n";
tdelim = "=";
line = strpbrk(sp, ldelim);
return count;
}
-
-static struct file_operations zoran_operations = {
- .open = zoran_open,
- .read = seq_read,
- .write = zoran_write,
- .llseek = seq_lseek,
- .release = single_release,
-};
#endif
int
snprintf(name, 7, "zoran%d", zr->id);
if ((zr->zoran_proc = create_proc_entry(name, 0, NULL))) {
+ zr->zoran_proc->read_proc = zoran_read_proc;
+ zr->zoran_proc->write_proc = zoran_write_proc;
zr->zoran_proc->data = zr;
zr->zoran_proc->owner = THIS_MODULE;
- zr->zoran_proc->proc_fops = &zoran_operations;
dprintk(2,
KERN_INFO
"%s: procfs entry /proc/%s allocated. data=%p\n",
char name[8];
snprintf(name, 7, "zoran%d", zr->id);
- if (zr->zoran_proc)
+ if (zr->zoran_proc) {
remove_proc_entry(name, NULL);
+ }
zr->zoran_proc = NULL;
#endif
}
-# $Id: Kconfig,v 1.6 2004/08/09 13:19:42 dwmw2 Exp $
+# $Id: Kconfig,v 1.3 2003/05/28 11:02:23 dwmw2 Exp $
menu "Memory Technology Devices (MTD)"
Determines the verbosity level of the MTD debugging messages.
config MTD_PARTITIONS
- bool "MTD partitioning support"
+ tristate "MTD partitioning support"
depends on MTD
help
If you have a device which needs to divide its flash chip(s) up
SA1100 map driver (CONFIG_MTD_SA1100) has an option for this, for
example.
-config MTD_REDBOOT_PARTS_UNALLOCATED
- bool " Include unallocated flash regions"
- depends on MTD_REDBOOT_PARTS
- help
- If you need to register each unallocated flash region as a MTD
- 'partition', enable this option.
-
-config MTD_REDBOOT_PARTS_READONLY
- bool " Force read-only for RedBoot system images"
- depends on MTD_REDBOOT_PARTS
- help
- If you need to force read-only for 'RedBoot', 'RedBoot Config' and
- 'FIS directory' images, enable this option.
-
config MTD_CMDLINE_PARTS
- bool "Command line partition table parsing"
- depends on MTD_PARTITIONS = "y"
+ tristate "Command line partition table parsing"
+ depends on MTD_PARTITIONS
---help---
Allow generic configuration of the MTD paritition tables via the kernel
command line. Multiple flash resources are supported for hardware where
#
# Makefile for the memory technology device drivers.
#
-# $Id: Makefile.common,v 1.5 2004/08/10 20:51:49 dwmw2 Exp $
+# $Id: Makefile.common,v 1.2 2003/05/23 11:38:29 dwmw2 Exp $
-# Core functionality.
-mtd-y := mtdcore.o
-mtd-$(CONFIG_MTD_PARTITIONS) += mtdpart.o
-obj-$(CONFIG_MTD) += $(mtd-y)
+# *** BIG UGLY NOTE ***
+#
+# The shiny new inter_module_xxx has introduced yet another ugly link
+# order dependency, which I'd previously taken great care to avoid.
+# We now have to ensure that the chip drivers are initialised before the
+# map drivers, and that the doc200[01] drivers are initialised before
+# docprobe.
+#
+# We'll hopefully merge the doc200[01] drivers and docprobe back into
+# a single driver some time soon, but the CFI drivers are going to have
+# to stay like that.
+#
+# Urgh.
+#
+# dwmw2 21/11/0
+# Core functionality.
+obj-$(CONFIG_MTD) += mtdcore.o
obj-$(CONFIG_MTD_CONCAT) += mtdconcat.o
+obj-$(CONFIG_MTD_PARTITIONS) += mtdpart.o
obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o
obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o
obj-$(CONFIG_MTD_AFS_PARTS) += afs.o
This is access code for flashes using ARM's flash partitioning
standards.
- $Id: afs.c,v 1.13 2004/02/27 22:09:59 rmk Exp $
+ $Id: afs.c,v 1.12 2003/06/13 15:31:06 rmk Exp $
======================================================================*/
# drivers/mtd/chips/Kconfig
-# $Id: Kconfig,v 1.9 2004/07/16 15:32:14 dwmw2 Exp $
+# $Id: Kconfig,v 1.3 2003/05/28 15:13:24 dwmw2 Exp $
menu "RAM/ROM/Flash chip drivers"
depends on MTD!=n
arrangements of CFI chips. If unsure, say 'N' and all options
which are supported by the current code will be enabled.
-config MTD_MAP_BANK_WIDTH_1
- bool "Support 8-bit buswidth" if MTD_CFI_GEOMETRY
- default y
+config MTD_CFI_B1
+ bool "Support 8-bit buswidth"
+ depends on MTD_CFI_GEOMETRY
help
If you wish to support CFI devices on a physical bus which is
8 bits wide, say 'Y'.
-config MTD_MAP_BANK_WIDTH_2
- bool "Support 16-bit buswidth" if MTD_CFI_GEOMETRY
- default y
+config MTD_CFI_B2
+ bool "Support 16-bit buswidth"
+ depends on MTD_CFI_GEOMETRY
help
If you wish to support CFI devices on a physical bus which is
16 bits wide, say 'Y'.
-config MTD_MAP_BANK_WIDTH_4
- bool "Support 32-bit buswidth" if MTD_CFI_GEOMETRY
- default y
+config MTD_CFI_B4
+ bool "Support 32-bit buswidth"
+ depends on MTD_CFI_GEOMETRY
help
If you wish to support CFI devices on a physical bus which is
32 bits wide, say 'Y'.
-config MTD_MAP_BANK_WIDTH_8
- bool "Support 64-bit buswidth" if MTD_CFI_GEOMETRY
- default n
+config MTD_CFI_B8
+ bool "Support 64-bit buswidth"
+ depends on MTD_CFI_GEOMETRY
help
If you wish to support CFI devices on a physical bus which is
64 bits wide, say 'Y'.
-config MTD_MAP_BANK_WIDTH_16
- bool "Support 128-bit buswidth" if MTD_CFI_GEOMETRY
- default n
- help
- If you wish to support CFI devices on a physical bus which is
- 128 bits wide, say 'Y'.
-
-config MTD_MAP_BANK_WIDTH_32
- bool "Support 256-bit buswidth" if MTD_CFI_GEOMETRY
- default n
- help
- If you wish to support CFI devices on a physical bus which is
- 256 bits wide, say 'Y'.
-
config MTD_CFI_I1
- bool "Support 1-chip flash interleave" if MTD_CFI_GEOMETRY
- default y
+ bool "Support 1-chip flash interleave" if !MTD_CFI_B1
+ depends on MTD_CFI_GEOMETRY
+ default y if MTD_CFI_B1
help
If your flash chips are not interleaved - i.e. you only have one
flash chip addressed by each bus cycle, then say 'Y'.
config MTD_CFI_I2
- bool "Support 2-chip flash interleave" if MTD_CFI_GEOMETRY
- default y
+ bool "Support 2-chip flash interleave"
+ depends on MTD_CFI_GEOMETRY
help
If your flash chips are interleaved in pairs - i.e. you have two
flash chips addressed by each bus cycle, then say 'Y'.
config MTD_CFI_I4
- bool "Support 4-chip flash interleave" if MTD_CFI_GEOMETRY
- default n
+ bool "Support 4-chip flash interleave"
+ depends on MTD_CFI_GEOMETRY
help
If your flash chips are interleaved in fours - i.e. you have four
flash chips addressed by each bus cycle, then say 'Y'.
config MTD_CFI_I8
- bool "Support 8-chip flash interleave" if MTD_CFI_GEOMETRY
- default n
+ bool "Support 8-chip flash interleave"
+ depends on MTD_CFI_GEOMETRY
help
If your flash chips are interleaved in eights - i.e. you have eight
flash chips addressed by each bus cycle, then say 'Y'.
provides support for one of those command sets, used on chips
including the AMD Am29LV320.
-config MTD_CFI_AMDSTD_RETRY
- int "Retry failed commands (erase/program)"
- depends on MTD_CFI_AMDSTD
- default "0"
- help
- Some chips, when attached to a shared bus, don't properly filter
- bus traffic that is destined to other devices. This broken
- behavior causes erase and program sequences to be aborted when
- the sequences are mixed with traffic for other devices.
-
- SST49LF040 (and related) chips are know to be broken.
-
-config MTD_CFI_AMDSTD_RETRY_MAX
- int "Max retries of failed commands (erase/program)"
- depends on MTD_CFI_AMDSTD_RETRY
- default "0"
- help
- If you have an SST49LF040 (or related chip) then this value should
- be set to at least 1. This can also be adjusted at driver load
- time with the retry_cmd_max module parameter.
-
config MTD_CFI_STAA
tristate "Support for ST (Advanced Architecture) flash chips"
depends on MTD_GEN_PROBE
sets which a CFI-compliant chip may claim to implement. This code
provides support for one of those command sets.
-config MTD_CFI_UTIL
- tristate
- default y if MTD_CFI_INTELEXT=y || MTD_CFI_AMDSTD=y || MTD_CFI_STAA=y
- default m if MTD_CFI_INTELEXT=m || MTD_CFI_AMDSTD=m || MTD_CFI_STAA=m
-
config MTD_RAM
tristate "Support for RAM chips in bus mapping"
depends on MTD
with this driver will return -ENODEV upon access.
config MTD_OBSOLETE_CHIPS
- depends on MTD && BROKEN
bool "Older (theoretically obsoleted now) drivers for non-CFI chips"
help
This option does not enable any code directly, but will allow you to
#
# linux/drivers/chips/Makefile
#
-# $Id: Makefile.common,v 1.4 2004/07/12 16:07:30 dwmw2 Exp $
+# $Id: Makefile.common,v 1.1 2003/05/21 15:00:01 dwmw2 Exp $
# *** BIG UGLY NOTE ***
#
# The removal of get_module_symbol() and replacement with
# inter_module_register() et al has introduced a link order dependency
# here where previously there was none. We now have to ensure that
-# the CFI command set drivers are linked before gen_probe.o
+# the CFI command set drivers are linked before cfi_probe.o
obj-$(CONFIG_MTD) += chipreg.o
obj-$(CONFIG_MTD_AMDSTD) += amd_flash.o
obj-$(CONFIG_MTD_CFI) += cfi_probe.o
-obj-$(CONFIG_MTD_CFI_UTIL) += cfi_util.o
obj-$(CONFIG_MTD_CFI_STAA) += cfi_cmdset_0020.o
obj-$(CONFIG_MTD_CFI_AMDSTD) += cfi_cmdset_0002.o
obj-$(CONFIG_MTD_CFI_INTELEXT) += cfi_cmdset_0001.o
*
* Author: Jonas Holmberg <jonas.holmberg@axis.com>
*
- * $Id: amd_flash.c,v 1.25 2004/08/09 13:19:43 dwmw2 Exp $
+ * $Id: amd_flash.c,v 1.23 2003/06/12 09:24:13 dwmw2 Exp $
*
* Copyright (c) 2001 Axis Communications AB
*
}
instr->state = MTD_ERASE_DONE;
- mtd_erase_callback(instr);
+ if (instr->callback) {
+ instr->callback(instr);
+ }
return 0;
}
*
* (C) 2000 Red Hat. GPL'd
*
- * $Id: cfi_cmdset_0001.c,v 1.154 2004/08/09 13:19:43 dwmw2 Exp $
+ * $Id: cfi_cmdset_0001.c,v 1.126 2003/06/23 07:45:48 dwmw2 Exp $
*
*
* 10/10/2000 Nicolas Pitre <nico@cam.org>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/cfi.h>
-/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
-
// debugging, turns off buffer write mode if set to 1
#define FORCE_WORD_WRITE 0
static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
-//static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
-//static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
+static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
+static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
struct mtd_info *cfi_cmdset_0001(struct map_info *, int);
static struct mtd_info *cfi_intelext_setup (struct map_info *);
-static int cfi_intelext_partition_fixup(struct map_info *, struct cfi_private **);
static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char **mtdbuf);
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
int i;
- printk(" Feature/Command Support: %4.4X\n", extp->FeatureSupport);
- printk(" - Chip Erase: %s\n", extp->FeatureSupport&1?"supported":"unsupported");
- printk(" - Suspend Erase: %s\n", extp->FeatureSupport&2?"supported":"unsupported");
- printk(" - Suspend Program: %s\n", extp->FeatureSupport&4?"supported":"unsupported");
- printk(" - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
- printk(" - Queued Erase: %s\n", extp->FeatureSupport&16?"supported":"unsupported");
- printk(" - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
- printk(" - Protection Bits: %s\n", extp->FeatureSupport&64?"supported":"unsupported");
- printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported");
- printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported");
- printk(" - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
- for (i=10; i<32; i++) {
+ printk(" Feature/Command Support: %4.4X\n", extp->FeatureSupport);
+ printk(" - Chip Erase: %s\n", extp->FeatureSupport&1?"supported":"unsupported");
+ printk(" - Suspend Erase: %s\n", extp->FeatureSupport&2?"supported":"unsupported");
+ printk(" - Suspend Program: %s\n", extp->FeatureSupport&4?"supported":"unsupported");
+ printk(" - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
+ printk(" - Queued Erase: %s\n", extp->FeatureSupport&16?"supported":"unsupported");
+ printk(" - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
+ printk(" - Protection Bits: %s\n", extp->FeatureSupport&64?"supported":"unsupported");
+ printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported");
+ printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported");
+ for (i=9; i<32; i++) {
if (extp->FeatureSupport & (1<<i))
printk(" - Unknown Bit %X: supported\n", i);
}
}
printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
- extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
+ extp->VccOptimal >> 8, extp->VccOptimal & 0xf);
if (extp->VppOptimal)
printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
- extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
-}
-#endif
-
-#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
-/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
-static void fixup_intel_strataflash(struct map_info *map, void* param)
-{
- struct cfi_private *cfi = map->fldrv_priv;
- struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
-
- printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
- "erase on write disabled.\n");
- extp->SuspendCmdSupport &= ~1;
-}
-#endif
-
-static void fixup_st_m28w320ct(struct map_info *map, void* param)
-{
- struct cfi_private *cfi = map->fldrv_priv;
-
- cfi->cfiq->BufWriteTimeoutTyp = 0; /* Not supported */
- cfi->cfiq->BufWriteTimeoutMax = 0; /* Not supported */
+ extp->VppOptimal >> 8, extp->VppOptimal & 0xf);
}
-
-static void fixup_st_m28w320cb(struct map_info *map, void* param)
-{
- struct cfi_private *cfi = map->fldrv_priv;
-
- /* Note this is done after the region info is endian swapped */
- cfi->cfiq->EraseRegionInfo[1] =
- (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
-};
-
-static struct cfi_fixup fixup_table[] = {
-#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
- {
- CFI_MFR_ANY, CFI_ID_ANY,
- fixup_intel_strataflash, NULL
- },
#endif
- {
- 0x0020, /* STMicroelectronics */
- 0x00ba, /* M28W320CT */
- fixup_st_m28w320ct, NULL
- }, {
- 0x0020, /* STMicroelectronics */
- 0x00bb, /* M28W320CB */
- fixup_st_m28w320cb, NULL
- }, {
- 0, 0, NULL, NULL
- }
-};
/* This routine is made available to other mtd code via
* inter_module_register. It must only be accessed through
{
struct cfi_private *cfi = map->fldrv_priv;
int i;
+ __u32 base = cfi->chips[0].start;
if (cfi->cfi_mode == CFI_MODE_CFI) {
/*
*/
__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
struct cfi_pri_intelext *extp;
+ int ofs_factor = cfi->interleave * cfi->device_type;
- extp = (struct cfi_pri_intelext*)cfi_read_pri(map, adr, sizeof(*extp), "Intel/Sharp");
- if (!extp)
+ //printk(" Intel/Sharp Extended Query Table at 0x%4.4X\n", adr);
+ if (!adr)
return NULL;
+
+ /* Switch it into Query Mode */
+ cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
+
+ extp = kmalloc(sizeof(*extp), GFP_KERNEL);
+ if (!extp) {
+ printk(KERN_ERR "Failed to allocate memory\n");
+ return NULL;
+ }
+
+ /* Read in the Extended Query Table */
+ for (i=0; i<sizeof(*extp); i++) {
+ ((unsigned char *)extp)[i] =
+ cfi_read_query(map, (base+((adr+i)*ofs_factor)));
+ }
+
+ if (extp->MajorVersion != '1' ||
+ (extp->MinorVersion < '0' || extp->MinorVersion > '3')) {
+ printk(KERN_WARNING " Unknown IntelExt Extended Query "
+ "version %c.%c.\n", extp->MajorVersion,
+ extp->MinorVersion);
+ kfree(extp);
+ return NULL;
+ }
/* Do some byteswapping if necessary */
extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);
-
- /* Install our own private info structure */
- cfi->cmdset_priv = extp;
-
- cfi_fixup(map, fixup_table);
#ifdef DEBUG_CFI_FEATURES
/* Tell the user about it in lots of lovely detail */
#endif
if(extp->SuspendCmdSupport & 1) {
+//#define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
+#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
+/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
+ printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
+ "erase on write disabled.\n");
+ extp->SuspendCmdSupport &= ~1;
+#else
printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
+#endif
}
+ /* Install our own private info structure */
+ cfi->cmdset_priv = extp;
}
for (i=0; i< cfi->numchips; i++) {
map->fldrv = &cfi_intelext_chipdrv;
+ /* Make sure it's in read mode */
+ cfi_send_gen_cmd(0xff, 0x55, base, map, cfi, cfi->device_type, NULL);
return cfi_intelext_setup(map);
}
printk(KERN_INFO "Using word write method\n" );
mtd->write = cfi_intelext_write_words;
}
-#if 0
mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
-#endif
mtd->sync = cfi_intelext_sync;
mtd->lock = cfi_intelext_lock;
mtd->unlock = cfi_intelext_unlock;
mtd->flags = MTD_CAP_NORFLASH;
map->fldrv = &cfi_intelext_chipdrv;
mtd->name = map->name;
-
- /* This function has the potential to distort the reality
- a bit and therefore should be called last. */
- if (cfi_intelext_partition_fixup(map, &cfi) != 0)
- goto setup_err;
-
__module_get(THIS_MODULE);
return mtd;
kfree(mtd);
}
kfree(cfi->cmdset_priv);
+ kfree(cfi->cfiq);
return NULL;
}
-static int cfi_intelext_partition_fixup(struct map_info *map,
- struct cfi_private **pcfi)
-{
- struct cfi_private *cfi = *pcfi;
- struct cfi_pri_intelext *extp = cfi->cmdset_priv;
-
- /*
- * Probing of multi-partition flash ships.
- *
- * This is extremely crude at the moment and should probably be
- * extracted entirely from the Intel extended query data instead.
- * Right now a L18 flash is assumed if multiple operations is
- * detected.
- *
- * To support multiple partitions when available, we simply arrange
- * for each of them to have their own flchip structure even if they
- * are on the same physical chip. This means completely recreating
- * a new cfi_private structure right here which is a blatent code
- * layering violation, but this is still the least intrusive
- * arrangement at this point. This can be rearranged in the future
- * if someone feels motivated enough. --nico
- */
- if (extp && extp->FeatureSupport & (1 << 9)) {
- struct cfi_private *newcfi;
- struct flchip *chip;
- struct flchip_shared *shared;
- int numparts, partshift, numvirtchips, i, j;
-
- /*
- * The L18 flash memory array is divided
- * into multiple 8-Mbit partitions.
- */
- numparts = 1 << (cfi->cfiq->DevSize - 20);
- partshift = 20 + __ffs(cfi->interleave);
- numvirtchips = cfi->numchips * numparts;
-
- newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
- if (!newcfi)
- return -ENOMEM;
- shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
- if (!shared) {
- kfree(newcfi);
- return -ENOMEM;
- }
- memcpy(newcfi, cfi, sizeof(struct cfi_private));
- newcfi->numchips = numvirtchips;
- newcfi->chipshift = partshift;
-
- chip = &newcfi->chips[0];
- for (i = 0; i < cfi->numchips; i++) {
- shared[i].writing = shared[i].erasing = NULL;
- spin_lock_init(&shared[i].lock);
- for (j = 0; j < numparts; j++) {
- *chip = cfi->chips[i];
- chip->start += j << partshift;
- chip->priv = &shared[i];
- /* those should be reset too since
- they create memory references. */
- init_waitqueue_head(&chip->wq);
- spin_lock_init(&chip->_spinlock);
- chip->mutex = &chip->_spinlock;
- chip++;
- }
- }
-
- printk(KERN_DEBUG "%s: %d sets of %d interleaved chips "
- "--> %d partitions of %#x bytes\n",
- map->name, cfi->numchips, cfi->interleave,
- newcfi->numchips, 1<<newcfi->chipshift);
-
- map->fldrv_priv = newcfi;
- *pcfi = newcfi;
- kfree(cfi);
- }
-
- return 0;
-}
-
/*
* *********** CHIP ACCESS FUNCTIONS ***********
*/
{
DECLARE_WAITQUEUE(wait, current);
struct cfi_private *cfi = map->fldrv_priv;
- map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
+ cfi_word status, status_OK = CMD(0x80);
unsigned long timeo;
- struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
+ struct cfi_pri_intelext *cfip = (struct cfi_pri_intelext *)cfi->cmdset_priv;
resettime:
timeo = jiffies + HZ;
retry:
- if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING)) {
- /*
- * OK. We have possibility for contension on the write/erase
- * operations which are global to the real chip and not per
- * partition. So let's fight it over in the partition which
- * currently has authority on the operation.
- *
- * The rules are as follows:
- *
- * - any write operation must own shared->writing.
- *
- * - any erase operation must own _both_ shared->writing and
- * shared->erasing.
- *
- * - contension arbitration is handled in the owner's context.
- *
- * The 'shared' struct can be read when its lock is taken.
- * However any writes to it can only be made when the current
- * owner's lock is also held.
- */
- struct flchip_shared *shared = chip->priv;
- struct flchip *contender;
- spin_lock(&shared->lock);
- contender = shared->writing;
- if (contender && contender != chip) {
- /*
- * The engine to perform desired operation on this
- * partition is already in use by someone else.
- * Let's fight over it in the context of the chip
- * currently using it. If it is possible to suspend,
- * that other partition will do just that, otherwise
- * it'll happily send us to sleep. In any case, when
- * get_chip returns success we're clear to go ahead.
- */
- int ret = spin_trylock(contender->mutex);
- spin_unlock(&shared->lock);
- if (!ret)
- goto retry;
- spin_unlock(chip->mutex);
- ret = get_chip(map, contender, contender->start, mode);
- spin_lock(chip->mutex);
- if (ret) {
- spin_unlock(contender->mutex);
- return ret;
- }
- timeo = jiffies + HZ;
- spin_lock(&shared->lock);
- }
-
- /* We now own it */
- shared->writing = chip;
- if (mode == FL_ERASING)
- shared->erasing = chip;
- if (contender && contender != chip)
- spin_unlock(contender->mutex);
- spin_unlock(&shared->lock);
- }
-
switch (chip->state) {
case FL_STATUS:
for (;;) {
- status = map_read(map, adr);
- if (map_word_andequal(map, status, status_OK, status_OK))
- break;
-
- /* At this point we're fine with write operations
- in other partitions as they don't conflict. */
- if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
+ status = cfi_read(map, adr);
+ if ((status & status_OK) == status_OK)
break;
if (time_after(jiffies, timeo)) {
- printk(KERN_ERR "Waiting for chip to be ready timed out. Status %lx\n",
- status.x[0]);
+ printk(KERN_ERR "Waiting for chip to be ready timed out. Status %llx\n",
+ (long long)status);
+ spin_unlock(chip->mutex);
return -EIO;
}
spin_unlock(chip->mutex);
/* Erase suspend */
- map_write(map, CMD(0xB0), adr);
+ cfi_write(map, CMD(0xB0), adr);
/* If the flash has finished erasing, then 'erase suspend'
* appears to make some (28F320) flash devices switch to
* 'read' mode. Make sure that we switch to 'read status'
* mode so we get the right data. --rmk
*/
- map_write(map, CMD(0x70), adr);
+ cfi_write(map, CMD(0x70), adr);
chip->oldstate = FL_ERASING;
chip->state = FL_ERASE_SUSPENDING;
chip->erase_suspended = 1;
for (;;) {
- status = map_read(map, adr);
- if (map_word_andequal(map, status, status_OK, status_OK))
+ status = cfi_read(map, adr);
+ if ((status & status_OK) == status_OK)
break;
if (time_after(jiffies, timeo)) {
/* Urgh. Resume and pretend we weren't here. */
- map_write(map, CMD(0xd0), adr);
+ cfi_write(map, CMD(0xd0), adr);
/* Make sure we're in 'read status' mode if it had finished */
- map_write(map, CMD(0x70), adr);
+ cfi_write(map, CMD(0x70), adr);
chip->state = FL_ERASING;
chip->oldstate = FL_READY;
printk(KERN_ERR "Chip not ready after erase "
- "suspended: status = 0x%lx\n", status.x[0]);
+ "suspended: status = 0x%llx\n", status);
return -EIO;
}
{
struct cfi_private *cfi = map->fldrv_priv;
- if (chip->priv) {
- struct flchip_shared *shared = chip->priv;
- spin_lock(&shared->lock);
- if (shared->writing == chip) {
- /* We own the ability to write, but we're done */
- shared->writing = shared->erasing;
- if (shared->writing && shared->writing != chip) {
- /* give back ownership to who we loaned it from */
- struct flchip *loaner = shared->writing;
- spin_lock(loaner->mutex);
- spin_unlock(&shared->lock);
- spin_unlock(chip->mutex);
- put_chip(map, loaner, loaner->start);
- spin_lock(chip->mutex);
- spin_unlock(loaner->mutex);
- } else {
- if (chip->oldstate != FL_ERASING) {
- shared->erasing = NULL;
- if (chip->oldstate != FL_WRITING)
- shared->writing = NULL;
- }
- spin_unlock(&shared->lock);
- }
- }
- }
-
switch(chip->oldstate) {
case FL_ERASING:
chip->state = chip->oldstate;
sending the 0x70 (Read Status) command to an erasing
chip and expecting it to be ignored, that's what we
do. */
- map_write(map, CMD(0xd0), adr);
- map_write(map, CMD(0x70), adr);
+ cfi_write(map, CMD(0xd0), adr);
+ cfi_write(map, CMD(0x70), adr);
chip->oldstate = FL_READY;
chip->state = FL_ERASING;
break;
case FL_READY:
case FL_STATUS:
- case FL_JEDEC_QUERY:
/* We should really make set_vpp() count, rather than doing this */
DISABLE_VPP(map);
break;
adr += chip->start;
/* Ensure cmd read/writes are aligned. */
- cmd_addr = adr & ~(map_bankwidth(map)-1);
+ cmd_addr = adr & ~(CFIDEV_BUSWIDTH-1);
spin_lock(chip->mutex);
if (!ret) {
if (chip->state != FL_POINT && chip->state != FL_READY)
- map_write(map, CMD(0xff), cmd_addr);
+ cfi_write(map, CMD(0xff), cmd_addr);
chip->state = FL_POINT;
chip->ref_point_counter++;
int chipnum;
int ret = 0;
- if (!map->virt || (from + len > mtd->size))
+ if (from + len > mtd->size)
return -EINVAL;
*mtdbuf = (void *)map->virt + from;
+ if(*mtdbuf == NULL)
+ return -EINVAL; /* can not point this region */
*retlen = 0;
/* Now lock the chip(s) to POINT state */
adr += chip->start;
/* Ensure cmd read/writes are aligned. */
- cmd_addr = adr & ~(map_bankwidth(map)-1);
+ cmd_addr = adr & ~(CFIDEV_BUSWIDTH-1);
spin_lock(chip->mutex);
ret = get_chip(map, chip, cmd_addr, FL_READY);
}
if (chip->state != FL_POINT && chip->state != FL_READY) {
- map_write(map, CMD(0xff), cmd_addr);
+ cfi_write(map, CMD(0xff), cmd_addr);
chip->state = FL_READY;
}
}
return ret;
}
-#if 0
+
static int cfi_intelext_read_prot_reg (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf, int base_offst, int reg_sz)
{
struct map_info *map = mtd->priv;
}
if (chip->state != FL_JEDEC_QUERY) {
- map_write(map, CMD(0x90), chip->start);
+ cfi_write(map, CMD(0x90), chip->start);
chip->state = FL_JEDEC_QUERY;
}
return cfi_intelext_read_prot_reg(mtd, from, len, retlen, buf, base_offst, reg_sz);
}
-#endif
-static int do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
+
+static int do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, cfi_word datum)
{
struct cfi_private *cfi = map->fldrv_priv;
- map_word status, status_OK;
+ cfi_word status, status_OK;
unsigned long timeo;
int z, ret=0;
}
ENABLE_VPP(map);
- map_write(map, CMD(0x40), adr);
- map_write(map, datum, adr);
+ cfi_write(map, CMD(0x40), adr);
+ cfi_write(map, datum, adr);
chip->state = FL_WRITING;
spin_unlock(chip->mutex);
- INVALIDATE_CACHED_RANGE(map, adr, map_bankwidth(map));
cfi_udelay(chip->word_write_time);
spin_lock(chip->mutex);
continue;
}
- status = map_read(map, adr);
- if (map_word_andequal(map, status, status_OK, status_OK))
+ status = cfi_read(map, adr);
+ if ((status & status_OK) == status_OK)
break;
/* OK Still waiting */
/* Done and happy. */
chip->state = FL_STATUS;
/* check for lock bit */
- if (map_word_bitsset(map, status, CMD(0x02))) {
+ if (status & CMD(0x02)) {
/* clear status */
- map_write(map, CMD(0x50), adr);
+ cfi_write(map, CMD(0x50), adr);
/* put back into read status register mode */
- map_write(map, CMD(0x70), adr);
+ cfi_write(map, CMD(0x70), adr);
ret = -EROFS;
}
out:
ofs = to - (chipnum << cfi->chipshift);
/* If it's not bus-aligned, do the first byte write */
- if (ofs & (map_bankwidth(map)-1)) {
- unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
+ if (ofs & (CFIDEV_BUSWIDTH-1)) {
+ unsigned long bus_ofs = ofs & ~(CFIDEV_BUSWIDTH-1);
int gap = ofs - bus_ofs;
- int n;
- map_word datum;
-
- n = min_t(int, len, map_bankwidth(map)-gap);
- datum = map_word_ff(map);
- datum = map_word_load_partial(map, datum, buf, gap, n);
+ int i = 0, n = 0;
+ u_char tmp_buf[8];
+ cfi_word datum;
+
+ while (gap--)
+ tmp_buf[i++] = 0xff;
+ while (len && i < CFIDEV_BUSWIDTH)
+ tmp_buf[i++] = buf[n++], len--;
+ while (i < CFIDEV_BUSWIDTH)
+ tmp_buf[i++] = 0xff;
+
+ if (cfi_buswidth_is_2()) {
+ datum = *(__u16*)tmp_buf;
+ } else if (cfi_buswidth_is_4()) {
+ datum = *(__u32*)tmp_buf;
+ } else if (cfi_buswidth_is_8()) {
+ datum = *(__u64*)tmp_buf;
+ } else {
+ return -EINVAL; /* should never happen, but be safe */
+ }
ret = do_write_oneword(map, &cfi->chips[chipnum],
bus_ofs, datum);
if (ret)
return ret;
-
- len -= n;
+
ofs += n;
buf += n;
(*retlen) += n;
}
}
- while(len >= map_bankwidth(map)) {
- map_word datum = map_word_load(map, buf);
+ while(len >= CFIDEV_BUSWIDTH) {
+ cfi_word datum;
+
+ if (cfi_buswidth_is_1()) {
+ datum = *(__u8*)buf;
+ } else if (cfi_buswidth_is_2()) {
+ datum = *(__u16*)buf;
+ } else if (cfi_buswidth_is_4()) {
+ datum = *(__u32*)buf;
+ } else if (cfi_buswidth_is_8()) {
+ datum = *(__u64*)buf;
+ } else {
+ return -EINVAL;
+ }
ret = do_write_oneword(map, &cfi->chips[chipnum],
ofs, datum);
if (ret)
return ret;
- ofs += map_bankwidth(map);
- buf += map_bankwidth(map);
- (*retlen) += map_bankwidth(map);
- len -= map_bankwidth(map);
+ ofs += CFIDEV_BUSWIDTH;
+ buf += CFIDEV_BUSWIDTH;
+ (*retlen) += CFIDEV_BUSWIDTH;
+ len -= CFIDEV_BUSWIDTH;
if (ofs >> cfi->chipshift) {
chipnum ++;
}
}
- if (len & (map_bankwidth(map)-1)) {
- map_word datum;
-
- datum = map_word_ff(map);
- datum = map_word_load_partial(map, datum, buf, 0, len);
+ if (len & (CFIDEV_BUSWIDTH-1)) {
+ int i = 0, n = 0;
+ u_char tmp_buf[8];
+ cfi_word datum;
+
+ while (len--)
+ tmp_buf[i++] = buf[n++];
+ while (i < CFIDEV_BUSWIDTH)
+ tmp_buf[i++] = 0xff;
+
+ if (cfi_buswidth_is_2()) {
+ datum = *(__u16*)tmp_buf;
+ } else if (cfi_buswidth_is_4()) {
+ datum = *(__u32*)tmp_buf;
+ } else if (cfi_buswidth_is_8()) {
+ datum = *(__u64*)tmp_buf;
+ } else {
+ return -EINVAL; /* should never happen, but be safe */
+ }
ret = do_write_oneword(map, &cfi->chips[chipnum],
ofs, datum);
if (ret)
return ret;
- (*retlen) += len;
+ (*retlen) += n;
}
return 0;
unsigned long adr, const u_char *buf, int len)
{
struct cfi_private *cfi = map->fldrv_priv;
- map_word status, status_OK;
+ cfi_word status, status_OK;
unsigned long cmd_adr, timeo;
int wbufsize, z, ret=0, bytes, words;
- wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
+ wbufsize = CFIDEV_INTERLEAVE << cfi->cfiq->MaxBufWriteSize;
adr += chip->start;
cmd_adr = adr & ~(wbufsize-1);
return ret;
}
+ if (chip->state != FL_STATUS)
+ cfi_write(map, CMD(0x70), cmd_adr);
+
+ status = cfi_read(map, cmd_adr);
+
/* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
[...], the device will not accept any more Write to Buffer commands".
So we must check here and reset those bits if they're set. Otherwise
we're just pissing in the wind */
- if (chip->state != FL_STATUS)
- map_write(map, CMD(0x70), cmd_adr);
- status = map_read(map, cmd_adr);
- if (map_word_bitsset(map, status, CMD(0x30))) {
- printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
- map_write(map, CMD(0x50), cmd_adr);
- map_write(map, CMD(0x70), cmd_adr);
+ if (status & CMD(0x30)) {
+ printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %llx). Clearing.\n", status);
+ cfi_write(map, CMD(0x50), cmd_adr);
+ cfi_write(map, CMD(0x70), cmd_adr);
}
-
ENABLE_VPP(map);
chip->state = FL_WRITING_TO_BUFFER;
z = 0;
for (;;) {
- map_write(map, CMD(0xe8), cmd_adr);
+ cfi_write(map, CMD(0xe8), cmd_adr);
- status = map_read(map, cmd_adr);
- if (map_word_andequal(map, status, status_OK, status_OK))
+ status = cfi_read(map, cmd_adr);
+ if ((status & status_OK) == status_OK)
break;
spin_unlock(chip->mutex);
if (++z > 20) {
/* Argh. Not ready for write to buffer */
- map_write(map, CMD(0x70), cmd_adr);
+ cfi_write(map, CMD(0x70), cmd_adr);
chip->state = FL_STATUS;
- printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
- status.x[0], map_read(map, cmd_adr).x[0]);
+ printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %llx, status = %llx\n", (__u64)status, (__u64)cfi_read(map, cmd_adr));
/* Odd. Clear status bits */
- map_write(map, CMD(0x50), cmd_adr);
- map_write(map, CMD(0x70), cmd_adr);
+ cfi_write(map, CMD(0x50), cmd_adr);
+ cfi_write(map, CMD(0x70), cmd_adr);
ret = -EIO;
goto out;
}
}
/* Write length of data to come */
- bytes = len & (map_bankwidth(map)-1);
- words = len / map_bankwidth(map);
- map_write(map, CMD(words - !bytes), cmd_adr );
+ bytes = len & (CFIDEV_BUSWIDTH-1);
+ words = len / CFIDEV_BUSWIDTH;
+ cfi_write(map, CMD(words - !bytes), cmd_adr );
/* Write data */
z = 0;
- while(z < words * map_bankwidth(map)) {
- map_word datum = map_word_load(map, buf);
- map_write(map, datum, adr+z);
-
- z += map_bankwidth(map);
- buf += map_bankwidth(map);
+ while(z < words * CFIDEV_BUSWIDTH) {
+ if (cfi_buswidth_is_1()) {
+ u8 *b = (u8 *)buf;
+
+ map_write8 (map, *b++, adr+z);
+ buf = (const u_char *)b;
+ } else if (cfi_buswidth_is_2()) {
+ u16 *b = (u16 *)buf;
+
+ map_write16 (map, *b++, adr+z);
+ buf = (const u_char *)b;
+ } else if (cfi_buswidth_is_4()) {
+ u32 *b = (u32 *)buf;
+
+ map_write32 (map, *b++, adr+z);
+ buf = (const u_char *)b;
+ } else if (cfi_buswidth_is_8()) {
+ u64 *b = (u64 *)buf;
+
+ map_write64 (map, *b++, adr+z);
+ buf = (const u_char *)b;
+ } else {
+ ret = -EINVAL;
+ goto out;
+ }
+ z += CFIDEV_BUSWIDTH;
}
-
if (bytes) {
- map_word datum;
-
- datum = map_word_ff(map);
- datum = map_word_load_partial(map, datum, buf, 0, bytes);
- map_write(map, datum, adr+z);
+ int i = 0, n = 0;
+ u_char tmp_buf[8], *tmp_p = tmp_buf;
+
+ while (bytes--)
+ tmp_buf[i++] = buf[n++];
+ while (i < CFIDEV_BUSWIDTH)
+ tmp_buf[i++] = 0xff;
+ if (cfi_buswidth_is_2()) {
+ u16 *b = (u16 *)tmp_p;
+
+ map_write16 (map, *b++, adr+z);
+ tmp_p = (u_char *)b;
+ } else if (cfi_buswidth_is_4()) {
+ u32 *b = (u32 *)tmp_p;
+
+ map_write32 (map, *b++, adr+z);
+ tmp_p = (u_char *)b;
+ } else if (cfi_buswidth_is_8()) {
+ u64 *b = (u64 *)tmp_p;
+
+ map_write64 (map, *b++, adr+z);
+ tmp_p = (u_char *)b;
+ } else {
+ ret = -EINVAL;
+ goto out;
+ }
}
-
/* GO GO GO */
- map_write(map, CMD(0xd0), cmd_adr);
+ cfi_write(map, CMD(0xd0), cmd_adr);
chip->state = FL_WRITING;
spin_unlock(chip->mutex);
- INVALIDATE_CACHED_RANGE(map, adr, len);
cfi_udelay(chip->buffer_write_time);
spin_lock(chip->mutex);
continue;
}
- status = map_read(map, cmd_adr);
- if (map_word_andequal(map, status, status_OK, status_OK))
+ status = cfi_read(map, cmd_adr);
+ if ((status & status_OK) == status_OK)
break;
/* OK Still waiting */
chip->state = FL_STATUS;
/* check for lock bit */
- if (map_word_bitsset(map, status, CMD(0x02))) {
+ if (status & CMD(0x02)) {
/* clear status */
- map_write(map, CMD(0x50), cmd_adr);
+ cfi_write(map, CMD(0x50), cmd_adr);
/* put back into read status register mode */
- map_write(map, CMD(0x70), adr);
+ cfi_write(map, CMD(0x70), adr);
ret = -EROFS;
}
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
- int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
+ int wbufsize = CFIDEV_INTERLEAVE << cfi->cfiq->MaxBufWriteSize;
int ret = 0;
int chipnum;
unsigned long ofs;
ofs = to - (chipnum << cfi->chipshift);
/* If it's not bus-aligned, do the first word write */
- if (ofs & (map_bankwidth(map)-1)) {
- size_t local_len = (-ofs)&(map_bankwidth(map)-1);
+ if (ofs & (CFIDEV_BUSWIDTH-1)) {
+ size_t local_len = (-ofs)&(CFIDEV_BUSWIDTH-1);
if (local_len > len)
local_len = len;
ret = cfi_intelext_write_words(mtd, to, local_len,
}
}
+ /* Write buffer is worth it only if more than one word to write... */
while(len) {
/* We must not cross write block boundaries */
int size = wbufsize - (ofs & (wbufsize-1));
}
typedef int (*varsize_frob_t)(struct map_info *map, struct flchip *chip,
- unsigned long adr, int len, void *thunk);
+ unsigned long adr, void *thunk);
static int cfi_intelext_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
loff_t ofs, size_t len, void *thunk)
i=first;
while(len) {
- unsigned long chipmask;
- int size = regions[i].erasesize;
-
- ret = (*frob)(map, &cfi->chips[chipnum], adr, size, thunk);
+ ret = (*frob)(map, &cfi->chips[chipnum], adr, thunk);
if (ret)
return ret;
- adr += size;
- len -= size;
+ adr += regions[i].erasesize;
+ len -= regions[i].erasesize;
- chipmask = (1 << cfi->chipshift) - 1;
- if ((adr & chipmask) == ((regions[i].offset + size * regions[i].numblocks) & chipmask))
+ if (adr % (1<< cfi->chipshift) == ((regions[i].offset + (regions[i].erasesize * regions[i].numblocks)) %( 1<< cfi->chipshift)))
i++;
if (adr >> cfi->chipshift) {
}
-static int do_erase_oneblock(struct map_info *map, struct flchip *chip,
- unsigned long adr, int len, void *thunk)
+static int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, void *thunk)
{
struct cfi_private *cfi = map->fldrv_priv;
- map_word status, status_OK;
+ cfi_word status, status_OK;
unsigned long timeo;
int retries = 3;
DECLARE_WAITQUEUE(wait, current);
ENABLE_VPP(map);
/* Clear the status register first */
- map_write(map, CMD(0x50), adr);
+ cfi_write(map, CMD(0x50), adr);
/* Now erase */
- map_write(map, CMD(0x20), adr);
- map_write(map, CMD(0xD0), adr);
+ cfi_write(map, CMD(0x20), adr);
+ cfi_write(map, CMD(0xD0), adr);
chip->state = FL_ERASING;
chip->erase_suspended = 0;
spin_unlock(chip->mutex);
- INVALIDATE_CACHED_RANGE(map, adr, len);
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout((chip->erase_time*HZ)/(2*1000));
spin_lock(chip->mutex);
chip->erase_suspended = 0;
}
- status = map_read(map, adr);
- if (map_word_andequal(map, status, status_OK, status_OK))
+ status = cfi_read(map, adr);
+ if ((status & status_OK) == status_OK)
break;
/* OK Still waiting */
if (time_after(jiffies, timeo)) {
- map_write(map, CMD(0x70), adr);
+ cfi_write(map, CMD(0x70), adr);
chip->state = FL_STATUS;
- printk(KERN_ERR "waiting for erase at %08lx to complete timed out. Xstatus = %lx, status = %lx.\n",
- adr, status.x[0], map_read(map, adr).x[0]);
+ printk(KERN_ERR "waiting for erase at %08lx to complete timed out. Xstatus = %llx, status = %llx.\n",
+ adr, (__u64)status, (__u64)cfi_read(map, adr));
/* Clear status bits */
- map_write(map, CMD(0x50), adr);
- map_write(map, CMD(0x70), adr);
+ cfi_write(map, CMD(0x50), adr);
+ cfi_write(map, CMD(0x70), adr);
DISABLE_VPP(map);
spin_unlock(chip->mutex);
return -EIO;
ret = 0;
/* We've broken this before. It doesn't hurt to be safe */
- map_write(map, CMD(0x70), adr);
+ cfi_write(map, CMD(0x70), adr);
chip->state = FL_STATUS;
- status = map_read(map, adr);
+ status = cfi_read(map, adr);
/* check for lock bit */
- if (map_word_bitsset(map, status, CMD(0x3a))) {
- unsigned char chipstatus = status.x[0];
- if (!map_word_equal(map, status, CMD(chipstatus))) {
- int i, w;
- for (w=0; w<map_words(map); w++) {
- for (i = 0; i<cfi_interleave(cfi); i++) {
- chipstatus |= status.x[w] >> (cfi->device_type * 8);
- }
+ if (status & CMD(0x3a)) {
+ unsigned char chipstatus = status;
+ if (status != CMD(status & 0xff)) {
+ int i;
+ for (i = 1; i<CFIDEV_INTERLEAVE; i++) {
+ chipstatus |= status >> (cfi->device_type * 8);
}
- printk(KERN_WARNING "Status is not identical for all chips: 0x%lx. Merging to give 0x%02x\n",
- status.x[0], chipstatus);
+ printk(KERN_WARNING "Status is not identical for all chips: 0x%llx. Merging to give 0x%02x\n", (__u64)status, chipstatus);
}
/* Reset the error bits */
- map_write(map, CMD(0x50), adr);
- map_write(map, CMD(0x70), adr);
+ cfi_write(map, CMD(0x50), adr);
+ cfi_write(map, CMD(0x70), adr);
if ((chipstatus & 0x30) == 0x30) {
- printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%x\n", chipstatus);
+ printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%llx\n", (__u64)status);
ret = -EIO;
} else if (chipstatus & 0x02) {
/* Protection bit set */
ret = -EROFS;
} else if (chipstatus & 0x8) {
/* Voltage */
- printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%x\n", chipstatus);
+ printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%llx\n", (__u64)status);
ret = -EIO;
} else if (chipstatus & 0x20) {
if (retries--) {
- printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, chipstatus);
+ printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%llx. Retrying...\n", adr, (__u64)status);
timeo = jiffies + HZ;
chip->state = FL_STATUS;
spin_unlock(chip->mutex);
goto retry;
}
- printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, chipstatus);
+ printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%llx\n", adr, (__u64)status);
ret = -EIO;
}
}
return ret;
instr->state = MTD_ERASE_DONE;
- mtd_erase_callback(instr);
+ if (instr->callback)
+ instr->callback(instr);
return 0;
}
}
#ifdef DEBUG_LOCK_BITS
-static int do_printlockstatus_oneblock(struct map_info *map, struct flchip *chip,
- unsigned long adr, int len, void *thunk)
+static int do_printlockstatus_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, void *thunk)
{
struct cfi_private *cfi = map->fldrv_priv;
int ofs_factor = cfi->interleave * cfi->device_type;
cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
adr, cfi_read_query(map, adr+(2*ofs_factor)));
- chip->state = FL_JEDEC_QUERY;
+ cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
+
return 0;
}
#endif
#define DO_XXLOCK_ONEBLOCK_LOCK ((void *) 1)
#define DO_XXLOCK_ONEBLOCK_UNLOCK ((void *) 2)
-static int do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
- unsigned long adr, int len, void *thunk)
+static int do_xxlock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, void *thunk)
{
struct cfi_private *cfi = map->fldrv_priv;
- map_word status, status_OK;
+ cfi_word status, status_OK;
unsigned long timeo = jiffies + HZ;
int ret;
}
ENABLE_VPP(map);
- map_write(map, CMD(0x60), adr);
+ cfi_write(map, CMD(0x60), adr);
if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
- map_write(map, CMD(0x01), adr);
+ cfi_write(map, CMD(0x01), adr);
chip->state = FL_LOCKING;
} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
- map_write(map, CMD(0xD0), adr);
+ cfi_write(map, CMD(0xD0), adr);
chip->state = FL_UNLOCKING;
} else
BUG();
timeo = jiffies + (HZ*20);
for (;;) {
- status = map_read(map, adr);
- if (map_word_andequal(map, status, status_OK, status_OK))
+ status = cfi_read(map, adr);
+ if ((status & status_OK) == status_OK)
break;
/* OK Still waiting */
if (time_after(jiffies, timeo)) {
- map_write(map, CMD(0x70), adr);
+ cfi_write(map, CMD(0x70), adr);
chip->state = FL_STATUS;
- printk(KERN_ERR "waiting for unlock to complete timed out. Xstatus = %lx, status = %lx.\n",
- status.x[0], map_read(map, adr).x[0]);
+ printk(KERN_ERR "waiting for unlock to complete timed out. Xstatus = %llx, status = %llx.\n", (__u64)status, (__u64)cfi_read(map, adr));
DISABLE_VPP(map);
spin_unlock(chip->mutex);
return -EIO;
ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
#ifdef DEBUG_LOCK_BITS
- printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
- __FUNCTION__, ret);
+ printk(KERN_DEBUG
+ "%s: lock status after, ret=%d\n", __FUNCTION__, ret);
cfi_intelext_varsize_frob(mtd, do_printlockstatus_oneblock,
ofs, len, 0);
#endif
ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
#ifdef DEBUG_LOCK_BITS
- printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
- __FUNCTION__, ret);
+ printk(KERN_DEBUG "%s: lock status after, ret=%d\n", __FUNCTION__, ret);
cfi_intelext_varsize_frob(mtd, do_printlockstatus_oneblock,
ofs, len, 0);
#endif
/* Go to known state. Chip may have been power cycled */
if (chip->state == FL_PM_SUSPENDED) {
- map_write(map, CMD(0xFF), cfi->chips[i].start);
+ cfi_write(map, CMD(0xFF), 0);
chip->state = FL_READY;
wake_up(&chip->wq);
}
struct cfi_private *cfi = map->fldrv_priv;
kfree(cfi->cmdset_priv);
kfree(cfi->cfiq);
- kfree(cfi->chips[0].priv);
kfree(cfi);
kfree(mtd->eraseregions);
}
* AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
*
* Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
- * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
*
* 2_by_8 routines added by Simon Munton
*
- * 4_by_16 work by Carolyn J. Smith
- *
- * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
- *
* This code is GPL
*
- * $Id: cfi_cmdset_0002.c,v 1.106 2004/08/09 14:02:32 dwmw2 Exp $
+ * $Id: cfi_cmdset_0002.c,v 1.74 2003/05/28 12:51:48 dwmw2 Exp $
*
*/
-#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
-#include <linux/mtd/compatmac.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
+#include <linux/mtd/compatmac.h>
#define AMD_BOOTLOC_BUG
-#define FORCE_WORD_WRITE 0
-
-#define MAX_WORD_RETRIES 3
static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
-static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
-static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
+static int cfi_amdstd_write(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
+static int cfi_amdstd_erase_onesize(struct mtd_info *, struct erase_info *);
static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
-static int cfi_amdstd_lock_varsize(struct mtd_info *, loff_t, size_t);
-static int cfi_amdstd_unlock_varsize(struct mtd_info *, loff_t, size_t);
static void cfi_amdstd_sync (struct mtd_info *);
static int cfi_amdstd_suspend (struct mtd_info *);
static void cfi_amdstd_resume (struct mtd_info *);
};
-/* #define DEBUG_LOCK_BITS */
-/* #define DEBUG_CFI_FEATURES */
-
-
-#ifdef DEBUG_CFI_FEATURES
-static void cfi_tell_features(struct cfi_pri_amdstd *extp)
-{
- const char* erase_suspend[3] = {
- "Not supported", "Read only", "Read/write"
- };
- const char* top_bottom[6] = {
- "No WP", "8x8KiB sectors at top & bottom, no WP",
- "Bottom boot", "Top boot",
- "Uniform, Bottom WP", "Uniform, Top WP"
- };
-
- printk(" Silicon revision: %d\n", extp->SiliconRevision >> 1);
- printk(" Address sensitive unlock: %s\n",
- (extp->SiliconRevision & 1) ? "Not required" : "Required");
-
- if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
- printk(" Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
- else
- printk(" Erase Suspend: Unknown value %d\n", extp->EraseSuspend);
-
- if (extp->BlkProt == 0)
- printk(" Block protection: Not supported\n");
- else
- printk(" Block protection: %d sectors per group\n", extp->BlkProt);
-
-
- printk(" Temporary block unprotect: %s\n",
- extp->TmpBlkUnprotect ? "Supported" : "Not supported");
- printk(" Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
- printk(" Number of simultaneous operations: %d\n", extp->SimultaneousOps);
- printk(" Burst mode: %s\n",
- extp->BurstMode ? "Supported" : "Not supported");
- if (extp->PageMode == 0)
- printk(" Page mode: Not supported\n");
- else
- printk(" Page mode: %d word page\n", extp->PageMode << 2);
-
- printk(" Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
- extp->VppMin >> 4, extp->VppMin & 0xf);
- printk(" Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
- extp->VppMax >> 4, extp->VppMax & 0xf);
-
- if (extp->TopBottom < ARRAY_SIZE(top_bottom))
- printk(" Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
- else
- printk(" Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
-}
-#endif
-
-#ifdef AMD_BOOTLOC_BUG
-/* Wheee. Bring me the head of someone at AMD. */
-static void fixup_amd_bootblock(struct map_info *map, void* param)
-{
- struct cfi_private *cfi = map->fldrv_priv;
- struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
- __u8 major = extp->MajorVersion;
- __u8 minor = extp->MinorVersion;
-
- if (((major << 8) | minor) < 0x3131) {
- /* CFI version 1.0 => don't trust bootloc */
- if (cfi->id & 0x80) {
- printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
- extp->TopBottom = 3; /* top boot */
- } else {
- extp->TopBottom = 2; /* bottom boot */
- }
- }
-}
-#endif
-
-static struct cfi_fixup fixup_table[] = {
-#ifdef AMD_BOOTLOC_BUG
- {
- 0x0001, /* AMD */
- CFI_ID_ANY,
- fixup_amd_bootblock, NULL
- },
-#endif
- { 0, 0, NULL, NULL }
-};
-
-
struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
struct cfi_private *cfi = map->fldrv_priv;
unsigned char bootloc;
+ int ofs_factor = cfi->interleave * cfi->device_type;
int i;
+ __u8 major, minor;
+ __u32 base = cfi->chips[0].start;
if (cfi->cfi_mode==CFI_MODE_CFI){
- /*
- * It's a real CFI chip, not one for which the probe
- * routine faked a CFI structure. So we read the feature
- * table from it.
- */
__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
- struct cfi_pri_amdstd *extp;
-
- extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
- if (!extp)
- return NULL;
-
- /* Install our own private info structure */
- cfi->cmdset_priv = extp;
-
- cfi_fixup(map, fixup_table);
-
-#ifdef DEBUG_CFI_FEATURES
- /* Tell the user about it in lots of lovely detail */
- cfi_tell_features(extp);
-#endif
-
- bootloc = extp->TopBottom;
- if ((bootloc != 2) && (bootloc != 3)) {
- printk(KERN_WARNING "%s: CFI does not contain boot "
- "bank location. Assuming top.\n", map->name);
- bootloc = 2;
- }
+ cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
+
+ major = cfi_read_query(map, base + (adr+3)*ofs_factor);
+ minor = cfi_read_query(map, base + (adr+4)*ofs_factor);
+
+ printk(KERN_NOTICE " Amd/Fujitsu Extended Query Table v%c.%c at 0x%4.4X\n",
+ major, minor, adr);
+ cfi_send_gen_cmd(0xf0, 0x55, base, map, cfi, cfi->device_type, NULL);
+
+ cfi_send_gen_cmd(0xaa, 0x555, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, 0x2aa, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x90, 0x555, base, map, cfi, cfi->device_type, NULL);
+ /* FIXME - should have a delay before continuing */
+ cfi->mfr = cfi_read_query(map, base);
+ cfi->id = cfi_read_query(map, base + ofs_factor);
+
+ /* Wheee. Bring me the head of someone at AMD. */
+#ifdef AMD_BOOTLOC_BUG
+ if (((major << 8) | minor) < 0x3131) {
+ /* CFI version 1.0 => don't trust bootloc */
+ if (cfi->id & 0x80) {
+ printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
+ bootloc = 3; /* top boot */
+ } else {
+ bootloc = 2; /* bottom boot */
+ }
+ } else
+#endif
+ {
+ cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
+ bootloc = cfi_read_query(map, base + (adr+15)*ofs_factor);
+ }
if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
printk(KERN_WARNING "%s: Swapping erase regions for broken CFI table.\n", map->name);
}
}
/*
- * These might already be setup (more correctly) by
- * jedec_probe.c - still need it for cfi_probe.c path.
+ * FIXME - These might already be setup (more correctly)
+ * buy jedec_probe.c.
*/
- if ( ! (cfi->addr_unlock1 && cfi->addr_unlock2) ) {
- switch (cfi->device_type) {
- case CFI_DEVICETYPE_X8:
- cfi->addr_unlock1 = 0x555;
- cfi->addr_unlock2 = 0x2aa;
- break;
- case CFI_DEVICETYPE_X16:
- cfi->addr_unlock1 = 0xaaa;
- if (map_bankwidth(map) == cfi_interleave(cfi)) {
- /* X16 chip(s) in X8 mode */
- cfi->addr_unlock2 = 0x555;
- } else {
- cfi->addr_unlock2 = 0x554;
- }
- break;
- case CFI_DEVICETYPE_X32:
- cfi->addr_unlock1 = 0x1554;
- if (map_bankwidth(map) == cfi_interleave(cfi)*2) {
- /* X32 chip(s) in X16 mode */
- cfi->addr_unlock1 = 0xaaa;
- } else {
- cfi->addr_unlock2 = 0xaa8;
- }
- break;
- default:
- printk(KERN_WARNING
- "MTD %s(): Unsupported device type %d\n",
- __func__, cfi->device_type);
- return NULL;
+ switch (cfi->device_type) {
+ case CFI_DEVICETYPE_X8:
+ cfi->addr_unlock1 = 0x555;
+ cfi->addr_unlock2 = 0x2aa;
+ break;
+ case CFI_DEVICETYPE_X16:
+ cfi->addr_unlock1 = 0xaaa;
+ if (map->buswidth == cfi->interleave) {
+ /* X16 chip(s) in X8 mode */
+ cfi->addr_unlock2 = 0x555;
+ } else {
+ cfi->addr_unlock2 = 0x554;
}
+ break;
+ case CFI_DEVICETYPE_X32:
+ cfi->addr_unlock1 = 0x1555;
+ cfi->addr_unlock2 = 0xaaa;
+ break;
+ default:
+ printk(KERN_NOTICE "Eep. Unknown cfi_cmdset_0002 device type %d\n", cfi->device_type);
+ return NULL;
}
-
} /* CFI mode */
for (i=0; i< cfi->numchips; i++) {
map->fldrv = &cfi_amdstd_chipdrv;
+ cfi_send_gen_cmd(0xf0, 0x55, base, map, cfi, cfi->device_type, NULL);
return cfi_amdstd_setup(map);
}
-
static struct mtd_info *cfi_amdstd_setup(struct map_info *map)
{
struct cfi_private *cfi = map->fldrv_priv;
struct mtd_info *mtd;
unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
- unsigned long offset = 0;
- int i,j;
mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
printk(KERN_NOTICE "number of %s chips: %d\n",
- (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
+ (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
if (!mtd) {
- printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
- goto setup_err;
+ printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
+ goto setup_err;
}
memset(mtd, 0, sizeof(*mtd));
mtd->type = MTD_NORFLASH;
/* Also select the correct geometry setup too */
mtd->size = devsize * cfi->numchips;
-
- mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
- mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
- * mtd->numeraseregions, GFP_KERNEL);
- if (!mtd->eraseregions) {
- printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
- goto setup_err;
- }
+
+ if (cfi->cfiq->NumEraseRegions == 1) {
+ /* No need to muck about with multiple erase sizes */
+ mtd->erasesize = ((cfi->cfiq->EraseRegionInfo[0] >> 8) & ~0xff) * cfi->interleave;
+ } else {
+ unsigned long offset = 0;
+ int i,j;
+
+ mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
+ mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info) * mtd->numeraseregions, GFP_KERNEL);
+ if (!mtd->eraseregions) {
+ printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
+ goto setup_err;
+ }
- for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
- unsigned long ernum, ersize;
- ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
- ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
+ for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
+ unsigned long ernum, ersize;
+ ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
+ ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
- if (mtd->erasesize < ersize) {
- mtd->erasesize = ersize;
+ if (mtd->erasesize < ersize) {
+ mtd->erasesize = ersize;
+ }
+ for (j=0; j<cfi->numchips; j++) {
+ mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
+ mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
+ mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
+ }
+ offset += (ersize * ernum);
}
- for (j=0; j<cfi->numchips; j++) {
- mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
- mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
- mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
+ if (offset != devsize) {
+ /* Argh */
+ printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
+ goto setup_err;
}
- offset += (ersize * ernum);
- }
- if (offset != devsize) {
- /* Argh */
- printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
- goto setup_err;
- }
#if 0
- // debug
- for (i=0; i<mtd->numeraseregions;i++){
- printk("%d: offset=0x%x,size=0x%x,blocks=%d\n",
- i,mtd->eraseregions[i].offset,
- mtd->eraseregions[i].erasesize,
- mtd->eraseregions[i].numblocks);
+ // debug
+ for (i=0; i<mtd->numeraseregions;i++){
+ printk("%d: offset=0x%x,size=0x%x,blocks=%d\n",
+ i,mtd->eraseregions[i].offset,
+ mtd->eraseregions[i].erasesize,
+ mtd->eraseregions[i].numblocks);
+ }
+#endif
}
+
+ switch (CFIDEV_BUSWIDTH)
+ {
+ case 1:
+ case 2:
+ case 4:
+#if 1
+ if (mtd->numeraseregions > 1)
+ mtd->erase = cfi_amdstd_erase_varsize;
+ else
#endif
+ if (((cfi->cfiq->EraseRegionInfo[0] & 0xffff) + 1) == 1)
+ mtd->erase = cfi_amdstd_erase_chip;
+ else
+ mtd->erase = cfi_amdstd_erase_onesize;
+ mtd->read = cfi_amdstd_read;
+ mtd->write = cfi_amdstd_write;
+ break;
- if (mtd->numeraseregions == 1
- && ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) + 1) == 1) {
- mtd->erase = cfi_amdstd_erase_chip;
- } else {
- mtd->erase = cfi_amdstd_erase_varsize;
- mtd->lock = cfi_amdstd_lock_varsize;
- mtd->unlock = cfi_amdstd_unlock_varsize;
+ default:
+ printk(KERN_WARNING "Unsupported buswidth\n");
+ goto setup_err;
+ break;
}
-
- if ( cfi->cfiq->BufWriteTimeoutTyp && !FORCE_WORD_WRITE) {
- DEBUG(MTD_DEBUG_LEVEL1, "Using buffer write method\n" );
- mtd->write = cfi_amdstd_write_buffers;
- } else {
- DEBUG(MTD_DEBUG_LEVEL1, "Using word write method\n" );
- mtd->write = cfi_amdstd_write_words;
+ if (cfi->fast_prog) {
+ /* In cfi_amdstd_write() we frob the protection stuff
+ without paying any attention to the state machine.
+ This upsets in-progress erases. So we turn this flag
+ off for now till the code gets fixed. */
+ printk(KERN_NOTICE "cfi_cmdset_0002: Disabling fast programming due to code brokenness.\n");
+ cfi->fast_prog = 0;
}
- mtd->read = cfi_amdstd_read;
- /* FIXME: erase-suspend-program is broken. See
- http://lists.infradead.org/pipermail/linux-mtd/2003-December/009001.html */
- printk(KERN_NOTICE "cfi_cmdset_0002: Disabling erase-suspend-program due to code brokenness.\n");
-
- /* does this chip have a secsi area? */
+ /* does this chip have a secsi area? */
if(cfi->mfr==1){
switch(cfi->id){
return NULL;
}
-/*
- * Return true if the chip is ready.
- *
- * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
- * non-suspended sector) and is indicated by no toggle bits toggling.
- *
- * Note that anything more complicated than checking if no bits are toggling
- * (including checking DQ5 for an error status) is tricky to get working
- * correctly and is therefore not done (particulary with interleaved chips
- * as each chip must be checked independantly of the others).
- */
-static int chip_ready(struct map_info *map, unsigned long addr)
-{
- map_word d, t;
-
- d = map_read(map, addr);
- t = map_read(map, addr);
-
- return map_word_equal(map, d, t);
-}
-
-static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
+static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
DECLARE_WAITQUEUE(wait, current);
- struct cfi_private *cfi = map->fldrv_priv;
- unsigned long timeo;
- struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;
+ unsigned long timeo = jiffies + HZ;
- resettime:
- timeo = jiffies + HZ;
retry:
- switch (chip->state) {
-
- case FL_STATUS:
- for (;;) {
- if (chip_ready(map, adr))
- break;
-
- if (time_after(jiffies, timeo)) {
- printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
- cfi_spin_unlock(chip->mutex);
- return -EIO;
- }
- cfi_spin_unlock(chip->mutex);
- cfi_udelay(1);
- cfi_spin_lock(chip->mutex);
- /* Someone else might have been playing with it. */
- goto retry;
- }
-
- case FL_READY:
- case FL_CFI_QUERY:
- case FL_JEDEC_QUERY:
- return 0;
-
- case FL_ERASING:
- if (mode == FL_WRITING) /* FIXME: Erase-suspend-program appears broken. */
- goto sleep;
-
- if (!(mode == FL_READY || mode == FL_POINT
- || (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))
- || (mode == FL_WRITING && (cfip->EraseSuspend & 0x1))))
- goto sleep;
-
- /* We could check to see if we're trying to access the sector
- * that is currently being erased. However, no user will try
- * anything like that so we just wait for the timeout. */
-
- /* Erase suspend */
- /* It's harmless to issue the Erase-Suspend and Erase-Resume
- * commands when the erase algorithm isn't in progress. */
- map_write(map, CMD(0xB0), chip->in_progress_block_addr);
- chip->oldstate = FL_ERASING;
- chip->state = FL_ERASE_SUSPENDING;
- chip->erase_suspended = 1;
- for (;;) {
- if (chip_ready(map, adr))
- break;
-
- if (time_after(jiffies, timeo)) {
- /* Should have suspended the erase by now.
- * Send an Erase-Resume command as either
- * there was an error (so leave the erase
- * routine to recover from it) or we trying to
- * use the erase-in-progress sector. */
- map_write(map, CMD(0x30), chip->in_progress_block_addr);
- chip->state = FL_ERASING;
- chip->oldstate = FL_READY;
- printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
- return -EIO;
- }
-
- cfi_spin_unlock(chip->mutex);
- cfi_udelay(1);
- cfi_spin_lock(chip->mutex);
- /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
- So we can just loop here. */
- }
- chip->state = FL_READY;
- return 0;
-
- case FL_POINT:
- /* Only if there's no operation suspended... */
- if (mode == FL_READY && chip->oldstate == FL_READY)
- return 0;
+ cfi_spin_lock(chip->mutex);
- default:
- sleep:
+ if (chip->state != FL_READY){
+#if 0
+ printk(KERN_DEBUG "Waiting for chip to read, status = %d\n", chip->state);
+#endif
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
+
cfi_spin_unlock(chip->mutex);
+
schedule();
remove_wait_queue(&chip->wq, &wait);
- cfi_spin_lock(chip->mutex);
- goto resettime;
- }
-}
-
-
-static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
-{
- struct cfi_private *cfi = map->fldrv_priv;
-
- switch(chip->oldstate) {
- case FL_ERASING:
- chip->state = chip->oldstate;
- map_write(map, CMD(0x30), chip->in_progress_block_addr);
- chip->oldstate = FL_READY;
- chip->state = FL_ERASING;
- break;
-
- case FL_READY:
- case FL_STATUS:
- /* We should really make set_vpp() count, rather than doing this */
- DISABLE_VPP(map);
- break;
- default:
- printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
- }
- wake_up(&chip->wq);
-}
-
+#if 0
+ if(signal_pending(current))
+ return -EINTR;
+#endif
+ timeo = jiffies + HZ;
-static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
-{
- unsigned long cmd_addr;
- struct cfi_private *cfi = map->fldrv_priv;
- int ret;
+ goto retry;
+ }
adr += chip->start;
- /* Ensure cmd read/writes are aligned. */
- cmd_addr = adr & ~(map_bankwidth(map)-1);
-
- cfi_spin_lock(chip->mutex);
- ret = get_chip(map, chip, cmd_addr, FL_READY);
- if (ret) {
- cfi_spin_unlock(chip->mutex);
- return ret;
- }
-
- if (chip->state != FL_POINT && chip->state != FL_READY) {
- map_write(map, CMD(0xf0), cmd_addr);
- chip->state = FL_READY;
- }
+ chip->state = FL_READY;
map_copy_from(map, buf, adr, len);
- put_chip(map, chip, cmd_addr);
-
+ wake_up(&chip->wq);
cfi_spin_unlock(chip->mutex);
+
return 0;
}
-
static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
struct map_info *map = mtd->priv;
return ret;
}
-
static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
DECLARE_WAITQUEUE(wait, current);
if (chip->state != FL_READY){
#if 0
- printk(KERN_DEBUG "Waiting for chip to read, status = %d\n", chip->state);
+ printk(KERN_DEBUG "Waiting for chip to read, status = %d\n", chip->state);
#endif
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
-
+
cfi_spin_unlock(chip->mutex);
schedule();
adr += chip->start;
chip->state = FL_READY;
-
- /* should these be CFI_DEVICETYPE_X8 instead of cfi->device_type? */
+
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
map_copy_from(map, buf, adr, len);
- /* should these be CFI_DEVICETYPE_X8 instead of cfi->device_type? */
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
return ret;
}
-
-static int do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
+static int do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, cfi_word datum, int fast)
{
- struct cfi_private *cfi = map->fldrv_priv;
unsigned long timeo = jiffies + HZ;
- /*
- * We use a 1ms + 1 jiffies generic timeout for writes (most devices
- * have a max write time of a few hundreds usec). However, we should
- * use the maximum timeout value given by the chip at probe time
- * instead. Unfortunately, struct flchip does have a field for
- * maximum timeout, only for typical which can be far too short
- * depending of the conditions. The ' + 1' is to avoid having a
- * timeout of 0 jiffies if HZ is smaller than 1000.
- */
- unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
+ unsigned int oldstatus, status, prev_oldstatus, prev_status;
+ unsigned int dq6;
+ struct cfi_private *cfi = map->fldrv_priv;
+ /* We use a 1ms + 1 jiffies generic timeout for writes (most devices have
+ a max write time of a few hundreds usec). However, we should use the
+ maximum timeout value given by the chip at probe time instead.
+ Unfortunately, struct flchip does have a field for maximum timeout,
+ only for typical which can be far too short depending of the conditions.
+ The ' + 1' is to avoid having a timeout of 0 jiffies if HZ is smaller
+ than 1000. Using a static variable allows makes us save the costly
+ divide operation at each word write.*/
+ static unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
+ DECLARE_WAITQUEUE(wait, current);
int ret = 0;
- map_word oldd, curd;
- int retry_cnt = 0;
-
- adr += chip->start;
+ int ta = 0;
+ retry:
cfi_spin_lock(chip->mutex);
- ret = get_chip(map, chip, adr, FL_WRITING);
- if (ret) {
+
+ if (chip->state != FL_READY) {
+#if 0
+ printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", chip->state);
+#endif
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+
cfi_spin_unlock(chip->mutex);
- return ret;
- }
- DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
- __func__, adr, datum.x[0] );
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+#if 0
+ printk(KERN_DEBUG "Wake up to write:\n");
+ if(signal_pending(current))
+ return -EINTR;
+#endif
+ timeo = jiffies + HZ;
- /*
- * Check for a NOP for the case when the datum to write is already
- * present - it saves time and works around buggy chips that corrupt
- * data at other locations when 0xff is written to a location that
- * already contains 0xff.
- */
- oldd = map_read(map, adr);
- if (map_word_equal(map, oldd, datum)) {
- DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): NOP\n",
- __func__);
- goto op_done;
- }
+ goto retry;
+ }
- ENABLE_VPP(map);
- retry:
- /*
- * The CFI_DEVICETYPE_X8 argument is needed even when
- * cfi->device_type != CFI_DEVICETYPE_X8. The addresses for
- * command sequences don't scale even when the device is
- * wider. This is the case for many of the cfi_send_gen_cmd()
- * below. I'm not sure, however, why some use
- * cfi->device_type.
- */
- cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
- cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
- cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
- map_write(map, datum, adr);
chip->state = FL_WRITING;
+ adr += chip->start;
+ DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8llx)\n",
+ __func__, adr, datum );
+
+ ENABLE_VPP(map);
+ if (fast) { /* Unlock bypass */
+ cfi_send_gen_cmd(0xA0, 0, chip->start, map, cfi, cfi->device_type, NULL);
+ }
+ else {
+ cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
+ cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
+ }
+ cfi_write(map, datum, adr);
+
cfi_spin_unlock(chip->mutex);
cfi_udelay(chip->word_write_time);
cfi_spin_lock(chip->mutex);
+ /*
+ * Polling toggle bits instead of reading back many times
+ * This ensures that write operation is really completed,
+ * or tells us why it failed.
+ *
+ * It appears that the polling and decoding of error state might
+ * be simplified. Don't do it unless you really know what you
+ * are doing. You must remember that JESD21-C 3.5.3 states that
+ * the status must be read back an _additional_ two times before
+ * a failure is determined. This is because these devices have
+ * internal state machines that are asynchronous to the external
+ * data bus. During an erase or write the read-back status of the
+ * polling bits might be transitioning internally when the external
+ * read-back occurs. This means that the bits aren't in the final
+ * state and they might appear to report an error as they transition
+ * and are in a weird state. This will produce infrequent errors
+ * that will usually disappear the next time an erase or write
+ * happens (Try tracking those errors down!). To ensure that
+ * the bits are not in transition the location must be read-back
+ * two more times and compared against what was written - BOTH reads
+ * MUST match what was written - don't think this can be simplified
+ * to only the last read matching. If the comparison fails, error
+ * state can then be decoded.
+ *
+ * - Thayne Harbaugh
+ */
+ dq6 = CMD(1<<6);
/* See comment above for timeout value. */
timeo = jiffies + uWriteTimeout;
- for (;;) {
- if (chip->state != FL_WRITING) {
- /* Someone's suspended the write. Sleep */
- DECLARE_WAITQUEUE(wait, current);
+
+ oldstatus = cfi_read(map, adr);
+ status = cfi_read(map, adr);
+ DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): Check 0x%.8x 0x%.8x\n",
+ __func__, oldstatus, status );
- set_current_state(TASK_UNINTERRUPTIBLE);
- add_wait_queue(&chip->wq, &wait);
+ /*
+ * This only checks if dq6 is still toggling and that our
+ * timer hasn't expired. We purposefully ignore the chips
+ * internal timer that will assert dq5 and leave dq6 toggling.
+ * This is done for a variety of reasons:
+ * 1) Not all chips support dq5.
+ * 2) Dealing with asynchronous status bit and data updates
+ * and reading a device two more times creates _messy_
+ * logic when trying to deal with interleaved devices -
+ * some may be changing while others are still busy.
+ * 3) Checking dq5 only helps to optimize an error case that
+ * should at worst be infrequent and at best non-existent.
+ *
+ * If our timeout occurs _then_ we will check dq5 to see
+ * if the device also had an internal timeout.
+ */
+ while( ( ( status ^ oldstatus ) & dq6 )
+ && ! ( ta = time_after(jiffies, timeo) ) ) {
+
+ if (need_resched()) {
cfi_spin_unlock(chip->mutex);
- schedule();
- remove_wait_queue(&chip->wq, &wait);
- timeo = jiffies + (HZ / 2); /* FIXME */
+ yield();
cfi_spin_lock(chip->mutex);
- continue;
- }
+ } else
+ udelay(1);
- /* Test to see if toggling has stopped. */
- oldd = map_read(map, adr);
- curd = map_read(map, adr);
- if (map_word_equal(map, curd, oldd)) {
- /* Do we have the correct value? */
- if (map_word_equal(map, curd, datum)) {
- goto op_done;
- }
- /* Nope something has gone wrong. */
- break;
- }
+ oldstatus = cfi_read( map, adr );
+ status = cfi_read( map, adr );
+ DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): Check 0x%.8x 0x%.8x\n",
+ __func__, oldstatus, status );
+ }
+
+ /*
+ * Something kicked us out of the read-back loop. We'll
+ * check success before checking failure.
+ * Even though dq6 might be true data, it is unknown if
+ * all of the other bits have changed to true data due to
+ * the asynchronous nature of the internal state machine.
+ * We will read two more times and use this to either
+ * verify that the write completed successfully or
+ * that something really went wrong. BOTH reads
+ * must match what was written - this certifies that
+ * bits aren't still changing and that the status
+ * bits erroneously match the datum that was written.
+ */
+ prev_oldstatus = oldstatus;
+ prev_status = status;
+ oldstatus = cfi_read(map, adr);
+ status = cfi_read(map, adr);
+ DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): Check 0x%.8x 0x%.8x\n",
+ __func__, oldstatus, status );
+
+ if ( oldstatus == datum && status == datum ) {
+ /* success - do nothing */
+ goto write_done;
+ }
- if (time_after(jiffies, timeo)) {
- printk(KERN_WARNING "MTD %s(): software timeout\n",
+ if ( ta ) {
+ int dq5mask = ( ( status ^ oldstatus ) & dq6 ) >> 1;
+ if ( status & dq5mask ) {
+ /* dq5 asserted - decode interleave chips */
+ printk( KERN_WARNING
+ "MTD %s(): FLASH internal timeout: 0x%.8x\n",
+ __func__,
+ status & dq5mask );
+ } else {
+ printk( KERN_WARNING
+ "MTD %s(): Software timed out during write.\n",
__func__ );
- break;
}
-
- /* Latency issues. Drop the lock, wait a while and retry */
- cfi_spin_unlock(chip->mutex);
- cfi_udelay(1);
- cfi_spin_lock(chip->mutex);
+ goto write_failed;
}
+ /*
+ * If we get to here then it means that something
+ * is wrong and it's not a timeout. Something
+ * is seriously wacky! Dump some debug info.
+ */
+ printk(KERN_WARNING
+ "MTD %s(): Wacky! Unable to decode failure status\n",
+ __func__ );
+
+ printk(KERN_WARNING
+ "MTD %s(): 0x%.8lx(0x%.8llx): 0x%.8x 0x%.8x 0x%.8x 0x%.8x\n",
+ __func__, adr, datum,
+ prev_oldstatus, prev_status,
+ oldstatus, status);
+
+ write_failed:
+ ret = -EIO;
/* reset on all failures. */
- map_write( map, CMD(0xF0), chip->start );
+ cfi_write( map, CMD(0xF0), chip->start );
/* FIXME - should have reset delay before continuing */
- if (++retry_cnt <= MAX_WORD_RETRIES)
- goto retry;
- ret = -EIO;
- op_done:
+ write_done:
+ DISABLE_VPP(map);
chip->state = FL_READY;
- put_chip(map, chip, adr);
+ wake_up(&chip->wq);
cfi_spin_unlock(chip->mutex);
return ret;
}
-
-static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf)
+static int cfi_amdstd_write (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
int ret = 0;
int chipnum;
unsigned long ofs, chipstart;
- DECLARE_WAITQUEUE(wait, current);
*retlen = 0;
if (!len)
chipstart = cfi->chips[chipnum].start;
/* If it's not bus-aligned, do the first byte write */
- if (ofs & (map_bankwidth(map)-1)) {
- unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
+ if (ofs & (CFIDEV_BUSWIDTH-1)) {
+ unsigned long bus_ofs = ofs & ~(CFIDEV_BUSWIDTH-1);
int i = ofs - bus_ofs;
int n = 0;
- map_word tmp_buf;
+ u_char tmp_buf[8];
+ cfi_word datum;
- retry:
- cfi_spin_lock(cfi->chips[chipnum].mutex);
-
- if (cfi->chips[chipnum].state != FL_READY) {
-#if 0
- printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
-#endif
- set_current_state(TASK_UNINTERRUPTIBLE);
- add_wait_queue(&cfi->chips[chipnum].wq, &wait);
-
- cfi_spin_unlock(cfi->chips[chipnum].mutex);
+ map_copy_from(map, tmp_buf, bus_ofs + cfi->chips[chipnum].start, CFIDEV_BUSWIDTH);
+ while (len && i < CFIDEV_BUSWIDTH)
+ tmp_buf[i++] = buf[n++], len--;
- schedule();
- remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
-#if 0
- if(signal_pending(current))
- return -EINTR;
-#endif
- goto retry;
+ if (cfi_buswidth_is_2()) {
+ datum = *(__u16*)tmp_buf;
+ } else if (cfi_buswidth_is_4()) {
+ datum = *(__u32*)tmp_buf;
+ } else {
+ return -EINVAL; /* should never happen, but be safe */
}
- /* Load 'tmp_buf' with old contents of flash */
- tmp_buf = map_read(map, bus_ofs+chipstart);
-
- cfi_spin_unlock(cfi->chips[chipnum].mutex);
-
- /* Number of bytes to copy from buffer */
- n = min_t(int, len, map_bankwidth(map)-i);
-
- tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
-
ret = do_write_oneword(map, &cfi->chips[chipnum],
- bus_ofs, tmp_buf);
+ bus_ofs, datum, 0);
if (ret)
return ret;
ofs += n;
buf += n;
(*retlen) += n;
- len -= n;
if (ofs >> cfi->chipshift) {
chipnum ++;
}
}
- /* We are now aligned, write as much as possible */
- while(len >= map_bankwidth(map)) {
- map_word datum;
-
- datum = map_word_load(map, buf);
+ if (cfi->fast_prog) {
+ /* Go into unlock bypass mode */
+ cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chipstart, map, cfi, CFI_DEVICETYPE_X8, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chipstart, map, cfi, CFI_DEVICETYPE_X8, NULL);
+ cfi_send_gen_cmd(0x20, cfi->addr_unlock1, chipstart, map, cfi, CFI_DEVICETYPE_X8, NULL);
+ }
+ /* We are now aligned, write as much as possible */
+ while(len >= CFIDEV_BUSWIDTH) {
+ cfi_word datum;
+
+ if (cfi_buswidth_is_1()) {
+ datum = *(__u8*)buf;
+ } else if (cfi_buswidth_is_2()) {
+ datum = *(__u16*)buf;
+ } else if (cfi_buswidth_is_4()) {
+ datum = *(__u32*)buf;
+ } else {
+ return -EINVAL;
+ }
ret = do_write_oneword(map, &cfi->chips[chipnum],
- ofs, datum);
- if (ret)
+ ofs, datum, cfi->fast_prog);
+ if (ret) {
+ if (cfi->fast_prog){
+ /* Get out of unlock bypass mode */
+ cfi_send_gen_cmd(0x90, 0, chipstart, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x00, 0, chipstart, map, cfi, cfi->device_type, NULL);
+ }
return ret;
+ }
- ofs += map_bankwidth(map);
- buf += map_bankwidth(map);
- (*retlen) += map_bankwidth(map);
- len -= map_bankwidth(map);
+ ofs += CFIDEV_BUSWIDTH;
+ buf += CFIDEV_BUSWIDTH;
+ (*retlen) += CFIDEV_BUSWIDTH;
+ len -= CFIDEV_BUSWIDTH;
if (ofs >> cfi->chipshift) {
+ if (cfi->fast_prog){
+ /* Get out of unlock bypass mode */
+ cfi_send_gen_cmd(0x90, 0, chipstart, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x00, 0, chipstart, map, cfi, cfi->device_type, NULL);
+ }
+
chipnum ++;
ofs = 0;
if (chipnum == cfi->numchips)
return 0;
chipstart = cfi->chips[chipnum].start;
+ if (cfi->fast_prog){
+ /* Go into unlock bypass mode for next set of chips */
+ cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chipstart, map, cfi, CFI_DEVICETYPE_X8, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chipstart, map, cfi, CFI_DEVICETYPE_X8, NULL);
+ cfi_send_gen_cmd(0x20, cfi->addr_unlock1, chipstart, map, cfi, CFI_DEVICETYPE_X8, NULL);
+ }
}
}
- /* Write the trailing bytes if any */
- if (len & (map_bankwidth(map)-1)) {
- map_word tmp_buf;
-
- retry1:
- cfi_spin_lock(cfi->chips[chipnum].mutex);
-
- if (cfi->chips[chipnum].state != FL_READY) {
-#if 0
- printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
-#endif
- set_current_state(TASK_UNINTERRUPTIBLE);
- add_wait_queue(&cfi->chips[chipnum].wq, &wait);
-
- cfi_spin_unlock(cfi->chips[chipnum].mutex);
+ if (cfi->fast_prog){
+ /* Get out of unlock bypass mode */
+ cfi_send_gen_cmd(0x90, 0, chipstart, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x00, 0, chipstart, map, cfi, cfi->device_type, NULL);
+ }
- schedule();
- remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
-#if 0
- if(signal_pending(current))
- return -EINTR;
-#endif
- goto retry1;
+ /* Write the trailing bytes if any */
+ if (len & (CFIDEV_BUSWIDTH-1)) {
+ int i = 0, n = 0;
+ u_char tmp_buf[8];
+ cfi_word datum;
+
+ map_copy_from(map, tmp_buf, ofs + cfi->chips[chipnum].start, CFIDEV_BUSWIDTH);
+ while (len--)
+ tmp_buf[i++] = buf[n++];
+
+ if (cfi_buswidth_is_2()) {
+ datum = *(__u16*)tmp_buf;
+ } else if (cfi_buswidth_is_4()) {
+ datum = *(__u32*)tmp_buf;
+ } else {
+ return -EINVAL; /* should never happen, but be safe */
}
- tmp_buf = map_read(map, ofs + chipstart);
-
- cfi_spin_unlock(cfi->chips[chipnum].mutex);
-
- tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
-
ret = do_write_oneword(map, &cfi->chips[chipnum],
- ofs, tmp_buf);
+ ofs, datum, 0);
if (ret)
return ret;
- (*retlen) += len;
+ (*retlen) += n;
}
return 0;
}
-
-/*
- * FIXME: interleaved mode not tested, and probably not supported!
- */
-static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
- unsigned long adr, const u_char *buf, int len)
+static inline int do_erase_chip(struct map_info *map, struct flchip *chip)
{
- struct cfi_private *cfi = map->fldrv_priv;
+ unsigned int oldstatus, status, prev_oldstatus, prev_status;
+ unsigned int dq6;
unsigned long timeo = jiffies + HZ;
- /* see comments in do_write_oneword() regarding uWriteTimeo. */
- static unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
- int ret = -EIO;
- unsigned long cmd_adr;
- int z, words;
- map_word datum;
-
- adr += chip->start;
- cmd_adr = adr;
+ unsigned long int adr;
+ struct cfi_private *cfi = map->fldrv_priv;
+ DECLARE_WAITQUEUE(wait, current);
+ int ret = 0;
+ int ta = 0;
+ cfi_word ones = 0;
+ retry:
cfi_spin_lock(chip->mutex);
- ret = get_chip(map, chip, adr, FL_WRITING);
- if (ret) {
+
+ if (chip->state != FL_READY){
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+
cfi_spin_unlock(chip->mutex);
- return ret;
- }
- datum = map_word_load(map, buf);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+#if 0
+ if(signal_pending(current))
+ return -EINTR;
+#endif
+ timeo = jiffies + HZ;
- DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
- __func__, adr, datum.x[0] );
+ goto retry;
+ }
+ chip->state = FL_ERASING;
+ DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
+ __func__, chip->start );
+
+ /* Handle devices with one erase region, that only implement
+ * the chip erase command.
+ */
ENABLE_VPP(map);
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
- //cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
-
- /* Write Buffer Load */
- map_write(map, CMD(0x25), cmd_adr);
-
- chip->state = FL_WRITING_TO_BUFFER;
-
- /* Write length of data to come */
- words = len / map_bankwidth(map);
- map_write(map, CMD(words - 1), cmd_adr);
- /* Write data */
- z = 0;
- while(z < words * map_bankwidth(map)) {
- datum = map_word_load(map, buf);
- map_write(map, datum, adr + z);
-
- z += map_bankwidth(map);
- buf += map_bankwidth(map);
- }
- z -= map_bankwidth(map);
+ cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
+ cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
+ cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
+ timeo = jiffies + (HZ*20);
+ adr = cfi->addr_unlock1;
- adr += z;
+ /* Wait for the end of programing/erasure by using the toggle method.
+ * As long as there is a programming procedure going on, bit 6
+ * is toggling its state with each consecutive read.
+ * The toggling stops as soon as the procedure is completed.
+ *
+ * If the process has gone on for too long on the chip bit 5 gets set.
+ * After bit5 is set you can kill the operation by sending a reset
+ * command to the chip.
+ */
+ /* see comments in do_write_oneword */
+ dq6 = CMD(1<<6);
- /* Write Buffer Program Confirm: GO GO GO */
- map_write(map, CMD(0x29), cmd_adr);
- chip->state = FL_WRITING;
+ oldstatus = cfi_read(map, adr);
+ status = cfi_read(map, adr);
+ DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): Check 0x%.8x 0x%.8x\n",
+ __func__, oldstatus, status );
- cfi_spin_unlock(chip->mutex);
- cfi_udelay(chip->buffer_write_time);
- cfi_spin_lock(chip->mutex);
+ while( ( ( status ^ oldstatus ) & dq6 )
+ && ! ( ta = time_after(jiffies, timeo) ) ) {
+ int wait_reps;
- timeo = jiffies + uWriteTimeout;
+ /* an initial short sleep */
+ cfi_spin_unlock(chip->mutex);
+ schedule_timeout(HZ/100);
+ cfi_spin_lock(chip->mutex);
- for (;;) {
- if (chip->state != FL_WRITING) {
- /* Someone's suspended the write. Sleep */
- DECLARE_WAITQUEUE(wait, current);
-
+ if (chip->state != FL_ERASING) {
+ /* Someone's suspended the erase. Sleep */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
+
cfi_spin_unlock(chip->mutex);
+ printk("erase suspended. Sleeping\n");
+
schedule();
remove_wait_queue(&chip->wq, &wait);
- timeo = jiffies + (HZ / 2); /* FIXME */
+#if 0
+ if (signal_pending(current))
+ return -EINTR;
+#endif
+ timeo = jiffies + (HZ*2); /* FIXME */
cfi_spin_lock(chip->mutex);
continue;
}
- if (chip_ready(map, adr))
- goto op_done;
-
- if( time_after(jiffies, timeo))
- break;
-
- /* Latency issues. Drop the lock, wait a while and retry */
- cfi_spin_unlock(chip->mutex);
- cfi_udelay(1);
- cfi_spin_lock(chip->mutex);
- }
-
- printk(KERN_WARNING "MTD %s(): software timeout\n",
- __func__ );
-
- /* reset on all failures. */
- map_write( map, CMD(0xF0), chip->start );
- /* FIXME - should have reset delay before continuing */
-
- ret = -EIO;
- op_done:
- chip->state = FL_READY;
- put_chip(map, chip, adr);
- cfi_spin_unlock(chip->mutex);
-
- return ret;
-}
-
-
-static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf)
-{
- struct map_info *map = mtd->priv;
- struct cfi_private *cfi = map->fldrv_priv;
- int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
- int ret = 0;
- int chipnum;
- unsigned long ofs;
-
- *retlen = 0;
- if (!len)
- return 0;
-
- chipnum = to >> cfi->chipshift;
- ofs = to - (chipnum << cfi->chipshift);
+ /* Busy wait for 1/10 of a millisecond */
+ for(wait_reps = 0;
+ (wait_reps < 100)
+ && ( ( status ^ oldstatus ) & dq6 );
+ wait_reps++) {
+
+ /* Latency issues. Drop the lock, wait a while and retry */
+ cfi_spin_unlock(chip->mutex);
- /* If it's not bus-aligned, do the first word write */
- if (ofs & (map_bankwidth(map)-1)) {
- size_t local_len = (-ofs)&(map_bankwidth(map)-1);
- if (local_len > len)
- local_len = len;
- ret = cfi_amdstd_write_words(mtd, to, local_len,
- retlen, buf);
- if (ret)
- return ret;
- ofs += local_len;
- buf += local_len;
- len -= local_len;
+ cfi_udelay(1);
- if (ofs >> cfi->chipshift) {
- chipnum ++;
- ofs = 0;
- if (chipnum == cfi->numchips)
- return 0;
+ cfi_spin_lock(chip->mutex);
+ oldstatus = cfi_read(map, adr);
+ status = cfi_read(map, adr);
+ DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): Check 0x%.8x 0x%.8x\n",
+ __func__, oldstatus, status );
}
+ oldstatus = cfi_read(map, adr);
+ status = cfi_read(map, adr);
+ DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): Check 0x%.8x 0x%.8x\n",
+ __func__, oldstatus, status );
}
- /* Write buffer is worth it only if more than one word to write... */
- while (len >= map_bankwidth(map) * 2) {
- /* We must not cross write block boundaries */
- int size = wbufsize - (ofs & (wbufsize-1));
-
- if (size > len)
- size = len;
- if (size % map_bankwidth(map))
- size -= size % map_bankwidth(map);
-
- ret = do_write_buffer(map, &cfi->chips[chipnum],
- ofs, buf, size);
- if (ret)
- return ret;
-
- ofs += size;
- buf += size;
- (*retlen) += size;
- len -= size;
+ prev_oldstatus = oldstatus;
+ prev_status = status;
+ oldstatus = cfi_read(map, adr);
+ status = cfi_read(map, adr);
+ DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): Check 0x%.8x 0x%.8x\n",
+ __func__, oldstatus, status );
+
+ if ( cfi_buswidth_is_1() ) {
+ ones = (__u8)~0;
+ } else if ( cfi_buswidth_is_2() ) {
+ ones = (__u16)~0;
+ } else if ( cfi_buswidth_is_4() ) {
+ ones = (__u32)~0;
+ } else {
+ printk(KERN_WARNING "Unsupported buswidth\n");
+ goto erase_failed;
+ }
+
+ if ( oldstatus == ones && status == ones ) {
+ /* success - do nothing */
+ goto erase_done;
+ }
- if (ofs >> cfi->chipshift) {
- chipnum ++;
- ofs = 0;
- if (chipnum == cfi->numchips)
- return 0;
+ if ( ta ) {
+ int dq5mask = ( ( status ^ oldstatus ) & dq6 ) >> 1;
+ if ( status & dq5mask ) {
+ /* dq5 asserted - decode interleave chips */
+ printk( KERN_WARNING
+ "MTD %s(): FLASH internal timeout: 0x%.8x\n",
+ __func__,
+ status & dq5mask );
+ } else {
+ printk( KERN_WARNING
+ "MTD %s(): Software timed out during write.\n",
+ __func__ );
}
+ goto erase_failed;
}
- if (len) {
- size_t retlen_dregs = 0;
+ printk(KERN_WARNING
+ "MTD %s(): Wacky! Unable to decode failure status\n",
+ __func__ );
- ret = cfi_amdstd_write_words(mtd, to, len, &retlen_dregs, buf);
+ printk(KERN_WARNING
+ "MTD %s(): 0x%.8lx(0x%.8llx): 0x%.8x 0x%.8x 0x%.8x 0x%.8x\n",
+ __func__, adr, ones,
+ prev_oldstatus, prev_status,
+ oldstatus, status);
- *retlen += retlen_dregs;
- return ret;
- }
+ erase_failed:
+ ret = -EIO;
+ /* reset on all failures. */
+ cfi_write( map, CMD(0xF0), chip->start );
+ /* FIXME - should have reset delay before continuing */
- return 0;
+ erase_done:
+ DISABLE_VPP(map);
+ chip->state = FL_READY;
+ wake_up(&chip->wq);
+ cfi_spin_unlock(chip->mutex);
+ return ret;
}
-/*
- * Handle devices with one erase region, that only implement
- * the chip erase command.
- */
-static inline int do_erase_chip(struct map_info *map, struct flchip *chip)
+static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
- struct cfi_private *cfi = map->fldrv_priv;
+ unsigned int oldstatus, status, prev_oldstatus, prev_status;
+ unsigned int dq6;
unsigned long timeo = jiffies + HZ;
- unsigned long int adr;
+ struct cfi_private *cfi = map->fldrv_priv;
DECLARE_WAITQUEUE(wait, current);
int ret = 0;
+ int ta = 0;
+ cfi_word ones = 0;
- adr = cfi->addr_unlock1;
-
+ retry:
cfi_spin_lock(chip->mutex);
- ret = get_chip(map, chip, adr, FL_WRITING);
- if (ret) {
+
+ if (chip->state != FL_READY){
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+
cfi_spin_unlock(chip->mutex);
- return ret;
- }
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+#if 0
+ if(signal_pending(current))
+ return -EINTR;
+#endif
+ timeo = jiffies + HZ;
+
+ goto retry;
+ }
+
+ chip->state = FL_ERASING;
+
+ adr += chip->start;
DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
- __func__, chip->start );
+ __func__, adr );
ENABLE_VPP(map);
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
- cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
- chip->state = FL_ERASING;
- chip->erase_suspended = 0;
- chip->in_progress_block_addr = adr;
+ cfi_write(map, CMD(0x30), adr);
+
+ timeo = jiffies + (HZ*20);
- cfi_spin_unlock(chip->mutex);
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout((chip->erase_time*HZ)/(2*1000));
- cfi_spin_lock(chip->mutex);
+ /* Wait for the end of programming/erasure by using the toggle method.
+ * As long as there is a programming procedure going on, bit 6
+ * is toggling its state with each consecutive read.
+ * The toggling stops as soon as the procedure is completed.
+ *
+ * If the process has gone on for too long, the chip sets bit 5.
+ * After bit5 is set you can kill the operation by sending a reset
+ * command to the chip.
+ */
+ /* see comments in do_write_oneword */
+ dq6 = CMD(1<<6);
- timeo = jiffies + (HZ*20);
+ oldstatus = cfi_read(map, adr);
+ status = cfi_read(map, adr);
+ DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): Check 0x%.8x 0x%.8x\n",
+ __func__, oldstatus, status );
- for (;;) {
+ while( ( ( status ^ oldstatus ) & dq6 )
+ && ! ( ta = time_after(jiffies, timeo) ) ) {
+ int wait_reps;
+
+ /* an initial short sleep */
+ cfi_spin_unlock(chip->mutex);
+ schedule_timeout(HZ/100);
+ cfi_spin_lock(chip->mutex);
+
if (chip->state != FL_ERASING) {
/* Someone's suspended the erase. Sleep */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
+
cfi_spin_unlock(chip->mutex);
+ printk(KERN_DEBUG "erase suspended. Sleeping\n");
+
schedule();
remove_wait_queue(&chip->wq, &wait);
+#if 0
+ if (signal_pending(current))
+ return -EINTR;
+#endif
+ timeo = jiffies + (HZ*2); /* FIXME */
cfi_spin_lock(chip->mutex);
continue;
}
- if (chip->erase_suspended) {
- /* This erase was suspended and resumed.
- Adjust the timeout */
- timeo = jiffies + (HZ*20); /* FIXME */
- chip->erase_suspended = 0;
+
+ /* Busy wait for 1/10 of a millisecond */
+ for(wait_reps = 0;
+ (wait_reps < 100)
+ && ( ( status ^ oldstatus ) & dq6 );
+ wait_reps++) {
+
+ /* Latency issues. Drop the lock, wait a while and retry */
+ cfi_spin_unlock(chip->mutex);
+
+ cfi_udelay(1);
+
+ cfi_spin_lock(chip->mutex);
+ oldstatus = cfi_read(map, adr);
+ status = cfi_read(map, adr);
+ DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): Check 0x%.8x 0x%.8x\n",
+ __func__, oldstatus, status );
}
+ oldstatus = cfi_read(map, adr);
+ status = cfi_read(map, adr);
+ DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): Check 0x%.8x 0x%.8x\n",
+ __func__, oldstatus, status );
+ }
- if (chip_ready(map, adr))
- goto op_done;
+ prev_oldstatus = oldstatus;
+ prev_status = status;
+ oldstatus = cfi_read(map, adr);
+ status = cfi_read(map, adr);
+ DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): Check 0x%.8x 0x%.8x\n",
+ __func__, oldstatus, status );
+
+ if ( cfi_buswidth_is_1() ) {
+ ones = (__u8)~0;
+ } else if ( cfi_buswidth_is_2() ) {
+ ones = (__u16)~0;
+ } else if ( cfi_buswidth_is_4() ) {
+ ones = (__u32)~0;
+ } else {
+ printk(KERN_WARNING "Unsupported buswidth\n");
+ goto erase_failed;
+ }
- if (time_after(jiffies, timeo))
- break;
+ if ( oldstatus == ones && status == ones ) {
+ /* success - do nothing */
+ goto erase_done;
+ }
- /* Latency issues. Drop the lock, wait a while and retry */
- cfi_spin_unlock(chip->mutex);
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(1);
- cfi_spin_lock(chip->mutex);
+ if ( ta ) {
+ int dq5mask = ( ( status ^ oldstatus ) & dq6 ) >> 1;
+ if ( status & dq5mask ) {
+ /* dq5 asserted - decode interleave chips */
+ printk( KERN_WARNING
+ "MTD %s(): FLASH internal timeout: 0x%.8x\n",
+ __func__,
+ status & dq5mask );
+ } else {
+ printk( KERN_WARNING
+ "MTD %s(): Software timed out during write.\n",
+ __func__ );
+ }
+ goto erase_failed;
}
- printk(KERN_WARNING "MTD %s(): software timeout\n",
+ printk(KERN_WARNING
+ "MTD %s(): Wacky! Unable to decode failure status\n",
__func__ );
+ printk(KERN_WARNING
+ "MTD %s(): 0x%.8lx(0x%.8llx): 0x%.8x 0x%.8x 0x%.8x 0x%.8x\n",
+ __func__, adr, ones,
+ prev_oldstatus, prev_status,
+ oldstatus, status);
+
+ erase_failed:
+ ret = -EIO;
/* reset on all failures. */
- map_write( map, CMD(0xF0), chip->start );
+ cfi_write( map, CMD(0xF0), chip->start );
/* FIXME - should have reset delay before continuing */
- ret = -EIO;
- op_done:
+ erase_done:
+ DISABLE_VPP(map);
chip->state = FL_READY;
- put_chip(map, chip, adr);
+ wake_up(&chip->wq);
cfi_spin_unlock(chip->mutex);
-
return ret;
}
-
-typedef int (*frob_t)(struct map_info *map, struct flchip *chip,
- unsigned long adr, void *thunk);
-
-
-static int cfi_amdstd_varsize_frob(struct mtd_info *mtd, frob_t frob,
- loff_t ofs, size_t len, void *thunk)
+static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
- unsigned long adr;
+ unsigned long adr, len;
int chipnum, ret = 0;
int i, first;
struct mtd_erase_region_info *regions = mtd->eraseregions;
- if (ofs > mtd->size)
+ if (instr->addr > mtd->size)
return -EINVAL;
- if ((len + ofs) > mtd->size)
+ if ((instr->len + instr->addr) > mtd->size)
return -EINVAL;
/* Check that both start and end of the requested erase are
start of the requested erase, and then go back one.
*/
- while (i < mtd->numeraseregions && ofs >= regions[i].offset)
+ while (i < mtd->numeraseregions && instr->addr >= regions[i].offset)
i++;
i--;
effect here.
*/
- if (ofs & (regions[i].erasesize-1))
+ if (instr->addr & (regions[i].erasesize-1))
return -EINVAL;
/* Remember the erase region we start on */
* with the erase region at that address.
*/
- while (i<mtd->numeraseregions && (ofs + len) >= regions[i].offset)
+ while (i<mtd->numeraseregions && (instr->addr + instr->len) >= regions[i].offset)
i++;
/* As before, drop back one to point at the region in which
*/
i--;
- if ((ofs + len) & (regions[i].erasesize-1))
+ if ((instr->addr + instr->len) & (regions[i].erasesize-1))
return -EINVAL;
-
- chipnum = ofs >> cfi->chipshift;
- adr = ofs - (chipnum << cfi->chipshift);
+
+ chipnum = instr->addr >> cfi->chipshift;
+ adr = instr->addr - (chipnum << cfi->chipshift);
+ len = instr->len;
i=first;
- while (len) {
- ret = (*frob)(map, &cfi->chips[chipnum], adr, thunk);
-
+ while(len) {
+ ret = do_erase_oneblock(map, &cfi->chips[chipnum], adr);
+
if (ret)
return ret;
}
}
+ instr->state = MTD_ERASE_DONE;
+ if (instr->callback)
+ instr->callback(instr);
+
return 0;
}
-
-static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, void *thunk)
+static int cfi_amdstd_erase_onesize(struct mtd_info *mtd, struct erase_info *instr)
{
+ struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
- unsigned long timeo = jiffies + HZ;
- DECLARE_WAITQUEUE(wait, current);
- int ret = 0;
-
- adr += chip->start;
+ unsigned long adr, len;
+ int chipnum, ret = 0;
- cfi_spin_lock(chip->mutex);
- ret = get_chip(map, chip, adr, FL_ERASING);
- if (ret) {
- cfi_spin_unlock(chip->mutex);
- return ret;
- }
+ if (instr->addr & (mtd->erasesize - 1))
+ return -EINVAL;
- DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
- __func__, adr );
+ if (instr->len & (mtd->erasesize -1))
+ return -EINVAL;
- ENABLE_VPP(map);
- cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
- cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
- cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
- cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
- cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
- map_write(map, CMD(0x30), adr);
+ if ((instr->len + instr->addr) > mtd->size)
+ return -EINVAL;
- chip->state = FL_ERASING;
- chip->erase_suspended = 0;
- chip->in_progress_block_addr = adr;
-
- cfi_spin_unlock(chip->mutex);
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout((chip->erase_time*HZ)/(2*1000));
- cfi_spin_lock(chip->mutex);
+ chipnum = instr->addr >> cfi->chipshift;
+ adr = instr->addr - (chipnum << cfi->chipshift);
+ len = instr->len;
- timeo = jiffies + (HZ*20);
+ while(len) {
+ ret = do_erase_oneblock(map, &cfi->chips[chipnum], adr);
- for (;;) {
- if (chip->state != FL_ERASING) {
- /* Someone's suspended the erase. Sleep */
- set_current_state(TASK_UNINTERRUPTIBLE);
- add_wait_queue(&chip->wq, &wait);
- cfi_spin_unlock(chip->mutex);
- schedule();
- remove_wait_queue(&chip->wq, &wait);
- cfi_spin_lock(chip->mutex);
- continue;
- }
- if (chip->erase_suspended) {
- /* This erase was suspended and resumed.
- Adjust the timeout */
- timeo = jiffies + (HZ*20); /* FIXME */
- chip->erase_suspended = 0;
- }
+ if (ret)
+ return ret;
- if (chip_ready(map, adr))
- goto op_done;
+ adr += mtd->erasesize;
+ len -= mtd->erasesize;
- if (time_after(jiffies, timeo))
+ if (adr >> cfi->chipshift) {
+ adr = 0;
+ chipnum++;
+
+ if (chipnum >= cfi->numchips)
break;
-
- /* Latency issues. Drop the lock, wait a while and retry */
- cfi_spin_unlock(chip->mutex);
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(1);
- cfi_spin_lock(chip->mutex);
+ }
}
-
- printk(KERN_WARNING "MTD %s(): software timeout\n",
- __func__ );
-
- /* reset on all failures. */
- map_write( map, CMD(0xF0), chip->start );
- /* FIXME - should have reset delay before continuing */
-
- ret = -EIO;
- op_done:
- chip->state = FL_READY;
- put_chip(map, chip, adr);
- cfi_spin_unlock(chip->mutex);
- return ret;
-}
-
-
-int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
-{
- unsigned long ofs, len;
- int ret;
-
- ofs = instr->addr;
- len = instr->len;
-
- ret = cfi_amdstd_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
- if (ret)
- return ret;
-
+
instr->state = MTD_ERASE_DONE;
- mtd_erase_callback(instr);
+ if (instr->callback)
+ instr->callback(instr);
return 0;
}
-
static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
{
struct map_info *map = mtd->priv;
return ret;
instr->state = MTD_ERASE_DONE;
- mtd_erase_callback(instr);
+ if (instr->callback)
+ instr->callback(instr);
return 0;
}
-
static void cfi_amdstd_sync (struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
schedule();
- remove_wait_queue(&chip->wq, &wait);
+ remove_wait_queue(&chip->wq, &wait);
goto retry;
}
/* Unlock the chips again */
if (ret) {
- for (i--; i >=0; i--) {
+ for (i--; i >=0; i--) {
chip = &cfi->chips[i];
cfi_spin_lock(chip->mutex);
return ret;
}
-
static void cfi_amdstd_resume(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
if (chip->state == FL_PM_SUSPENDED) {
chip->state = FL_READY;
- map_write(map, CMD(0xF0), chip->start);
+ cfi_write(map, CMD(0xF0), chip->start);
wake_up(&chip->wq);
}
else
}
}
-
-#ifdef DEBUG_LOCK_BITS
-
-static int do_printlockstatus_oneblock(struct map_info *map,
- struct flchip *chip,
- unsigned long adr,
- void *thunk)
-{
- struct cfi_private *cfi = map->fldrv_priv;
- int ofs_factor = cfi->interleave * cfi->device_type;
-
- cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
- printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
- adr, cfi_read_query(map, adr+(2*ofs_factor)));
- cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
-
- return 0;
-}
-
-
-#define debug_dump_locks(mtd, frob, ofs, len, thunk) \
- cfi_amdstd_varsize_frob((mtd), (frob), (ofs), (len), (thunk))
-
-#else
-
-#define debug_dump_locks(...)
-
-#endif /* DEBUG_LOCK_BITS */
-
-
-struct xxlock_thunk {
- uint8_t val;
- flstate_t state;
-};
-
-
-#define DO_XXLOCK_ONEBLOCK_LOCK ((struct xxlock_thunk){0x01, FL_LOCKING})
-#define DO_XXLOCK_ONEBLOCK_UNLOCK ((struct xxlock_thunk){0x00, FL_UNLOCKING})
-
-
-/*
- * FIXME - this is *very* specific to a particular chip. It likely won't
- * work for all chips that require unlock. It also hasn't been tested
- * with interleaved chips.
- */
-static int do_xxlock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, void *thunk)
-{
- struct cfi_private *cfi = map->fldrv_priv;
- struct xxlock_thunk *xxlt = (struct xxlock_thunk *)thunk;
- int ret;
-
- /*
- * This is easy because these are writes to registers and not writes
- * to flash memory - that means that we don't have to check status
- * and timeout.
- */
-
- adr += chip->start;
- /*
- * lock block registers:
- * - on 64k boundariesand
- * - bit 1 set high
- * - block lock registers are 4MiB lower - overflow subtract (danger)
- */
- adr = ((adr & ~0xffff) | 0x2) + ~0x3fffff;
-
- cfi_spin_lock(chip->mutex);
- ret = get_chip(map, chip, adr, FL_LOCKING);
- if (ret) {
- cfi_spin_unlock(chip->mutex);
- return ret;
- }
-
- chip->state = xxlt->state;
- map_write(map, CMD(xxlt->val), adr);
-
- /* Done and happy. */
- chip->state = FL_READY;
- put_chip(map, chip, adr);
- cfi_spin_unlock(chip->mutex);
- return 0;
-}
-
-
-static int cfi_amdstd_lock_varsize(struct mtd_info *mtd,
- loff_t ofs,
- size_t len)
-{
- int ret;
-
- DEBUG(MTD_DEBUG_LEVEL3,
- "%s: lock status before, ofs=0x%08llx, len=0x%08zX\n",
- __func__, ofs, len);
- debug_dump_locks(mtd, do_printlockstatus_oneblock, ofs, len, 0);
-
- ret = cfi_amdstd_varsize_frob(mtd, do_xxlock_oneblock, ofs, len,
- (void *)&DO_XXLOCK_ONEBLOCK_LOCK);
-
- DEBUG(MTD_DEBUG_LEVEL3,
- "%s: lock status after, ret=%d\n",
- __func__, ret);
-
- debug_dump_locks(mtd, do_printlockstatus_oneblock, ofs, len, 0);
-
- return ret;
-}
-
-
-static int cfi_amdstd_unlock_varsize(struct mtd_info *mtd,
- loff_t ofs,
- size_t len)
-{
- int ret;
-
- DEBUG(MTD_DEBUG_LEVEL3,
- "%s: lock status before, ofs=0x%08llx, len=0x%08zX\n",
- __func__, ofs, len);
- debug_dump_locks(mtd, do_printlockstatus_oneblock, ofs, len, 0);
-
- ret = cfi_amdstd_varsize_frob(mtd, do_xxlock_oneblock, ofs, len,
- (void *)&DO_XXLOCK_ONEBLOCK_UNLOCK);
-
- DEBUG(MTD_DEBUG_LEVEL3,
- "%s: lock status after, ret=%d\n",
- __func__, ret);
- debug_dump_locks(mtd, do_printlockstatus_oneblock, ofs, len, 0);
-
- return ret;
-}
-
-
static void cfi_amdstd_destroy(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
static char im_name[]="cfi_cmdset_0002";
-
int __init cfi_amdstd_init(void)
{
inter_module_register(im_name, THIS_MODULE, &cfi_cmdset_0002);
return 0;
}
-
static void __exit cfi_amdstd_exit(void)
{
inter_module_unregister(im_name);
}
-
module_init(cfi_amdstd_init);
module_exit(cfi_amdstd_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");
+
*
* (C) 2000 Red Hat. GPL'd
*
- * $Id: cfi_cmdset_0020.c,v 1.15 2004/08/09 13:19:43 dwmw2 Exp $
*
* 10/10/2000 Nicolas Pitre <nico@cam.org>
* - completely revamped method functions so they are aware and
{
struct cfi_private *cfi = map->fldrv_priv;
int i;
+ __u32 base = cfi->chips[0].start;
if (cfi->cfi_mode) {
/*
*/
__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
struct cfi_pri_intelext *extp;
+ int ofs_factor = cfi->interleave * cfi->device_type;
- extp = (struct cfi_pri_intelext*)cfi_read_pri(map, adr, sizeof(*extp), "ST Microelectronics");
- if (!extp)
+ printk(" ST Microelectronics Extended Query Table at 0x%4.4X\n", adr);
+ if (!adr)
return NULL;
+ /* Switch it into Query Mode */
+ cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
+
+ extp = kmalloc(sizeof(*extp), GFP_KERNEL);
+ if (!extp) {
+ printk(KERN_ERR "Failed to allocate memory\n");
+ return NULL;
+ }
+
+ /* Read in the Extended Query Table */
+ for (i=0; i<sizeof(*extp); i++) {
+ ((unsigned char *)extp)[i] =
+ cfi_read_query(map, (base+((adr+i)*ofs_factor)));
+ }
+
+ if (extp->MajorVersion != '1' ||
+ (extp->MinorVersion < '0' || extp->MinorVersion > '2')) {
+ printk(KERN_WARNING " Unknown staa Extended Query "
+ "version %c.%c.\n", extp->MajorVersion,
+ extp->MinorVersion);
+ kfree(extp);
+ return NULL;
+ }
+
/* Do some byteswapping if necessary */
extp->FeatureSupport = cfi32_to_cpu(extp->FeatureSupport);
extp->BlkStatusRegMask = cfi32_to_cpu(extp->BlkStatusRegMask);
cfi->chips[i].erase_time = 1024;
}
+ /* Make sure it's in read mode */
+ cfi_send_gen_cmd(0xff, 0x55, base, map, cfi, cfi->device_type, NULL);
return cfi_staa_setup(map);
}
static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
- map_word status, status_OK;
+ __u32 status, status_OK;
unsigned long timeo;
DECLARE_WAITQUEUE(wait, current);
int suspended = 0;
adr += chip->start;
/* Ensure cmd read/writes are aligned. */
- cmd_addr = adr & ~(map_bankwidth(map)-1);
+ cmd_addr = adr & ~(CFIDEV_BUSWIDTH-1);
/* Let's determine this according to the interleave only once */
status_OK = CMD(0x80);
*/
switch (chip->state) {
case FL_ERASING:
- if (!(((struct cfi_pri_intelext *)cfi->cmdset_priv)->FeatureSupport & 2))
+ if (!((struct cfi_pri_intelext *)cfi->cmdset_priv)->FeatureSupport & 2)
goto sleep; /* We don't support erase suspend */
- map_write (map, CMD(0xb0), cmd_addr);
+ cfi_write (map, CMD(0xb0), cmd_addr);
/* If the flash has finished erasing, then 'erase suspend'
* appears to make some (28F320) flash devices switch to
* 'read' mode. Make sure that we switch to 'read status'
* mode so we get the right data. --rmk
*/
- map_write(map, CMD(0x70), cmd_addr);
+ cfi_write(map, CMD(0x70), cmd_addr);
chip->oldstate = FL_ERASING;
chip->state = FL_ERASE_SUSPENDING;
// printk("Erase suspending at 0x%lx\n", cmd_addr);
for (;;) {
- status = map_read(map, cmd_addr);
- if (map_word_andequal(map, status, status_OK, status_OK))
+ status = cfi_read(map, cmd_addr);
+ if ((status & status_OK) == status_OK)
break;
if (time_after(jiffies, timeo)) {
/* Urgh */
- map_write(map, CMD(0xd0), cmd_addr);
+ cfi_write(map, CMD(0xd0), cmd_addr);
/* make sure we're in 'read status' mode */
- map_write(map, CMD(0x70), cmd_addr);
+ cfi_write(map, CMD(0x70), cmd_addr);
chip->state = FL_ERASING;
spin_unlock_bh(chip->mutex);
printk(KERN_ERR "Chip not ready after erase "
- "suspended: status = 0x%lx\n", status.x[0]);
+ "suspended: status = 0x%x\n", status);
return -EIO;
}
}
suspended = 1;
- map_write(map, CMD(0xff), cmd_addr);
+ cfi_write(map, CMD(0xff), cmd_addr);
chip->state = FL_READY;
break;
case FL_CFI_QUERY:
case FL_JEDEC_QUERY:
- map_write(map, CMD(0x70), cmd_addr);
+ cfi_write(map, CMD(0x70), cmd_addr);
chip->state = FL_STATUS;
case FL_STATUS:
- status = map_read(map, cmd_addr);
- if (map_word_andequal(map, status, status_OK, status_OK)) {
- map_write(map, CMD(0xff), cmd_addr);
+ status = cfi_read(map, cmd_addr);
+ if ((status & status_OK) == status_OK) {
+ cfi_write(map, CMD(0xff), cmd_addr);
chip->state = FL_READY;
break;
}
/* Urgh. Chip not yet ready to talk to us. */
if (time_after(jiffies, timeo)) {
spin_unlock_bh(chip->mutex);
- printk(KERN_ERR "waiting for chip to be ready timed out in read. WSM status = %lx\n", status.x[0]);
+ printk(KERN_ERR "waiting for chip to be ready timed out in read. WSM status = %x\n", status);
return -EIO;
}
sending the 0x70 (Read Status) command to an erasing
chip and expecting it to be ignored, that's what we
do. */
- map_write(map, CMD(0xd0), cmd_addr);
- map_write(map, CMD(0x70), cmd_addr);
+ cfi_write(map, CMD(0xd0), cmd_addr);
+ cfi_write(map, CMD(0x70), cmd_addr);
}
wake_up(&chip->wq);
unsigned long adr, const u_char *buf, int len)
{
struct cfi_private *cfi = map->fldrv_priv;
- map_word status, status_OK;
+ __u32 status, status_OK;
unsigned long cmd_adr, timeo;
DECLARE_WAITQUEUE(wait, current);
int wbufsize, z;
/* M58LW064A requires bus alignment for buffer wriets -- saw */
- if (adr & (map_bankwidth(map)-1))
+ if (adr & (CFIDEV_BUSWIDTH-1))
return -EINVAL;
- wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
+ wbufsize = CFIDEV_INTERLEAVE << cfi->cfiq->MaxBufWriteSize;
adr += chip->start;
cmd_adr = adr & ~(wbufsize-1);
case FL_CFI_QUERY:
case FL_JEDEC_QUERY:
- map_write(map, CMD(0x70), cmd_adr);
+ cfi_write(map, CMD(0x70), cmd_adr);
chip->state = FL_STATUS;
#ifdef DEBUG_CFI_FEATURES
- printk("%s: 1 status[%x]\n", __FUNCTION__, map_read(map, cmd_adr));
+ printk("%s: 1 status[%x]\n", __FUNCTION__, cfi_read(map, cmd_adr));
#endif
case FL_STATUS:
- status = map_read(map, cmd_adr);
- if (map_word_andequal(map, status, status_OK, status_OK))
+ status = cfi_read(map, cmd_adr);
+ if ((status & status_OK) == status_OK)
break;
/* Urgh. Chip not yet ready to talk to us. */
if (time_after(jiffies, timeo)) {
spin_unlock_bh(chip->mutex);
- printk(KERN_ERR "waiting for chip to be ready timed out in buffer write Xstatus = %lx, status = %lx\n",
- status.x[0], map_read(map, cmd_adr).x[0]);
+ printk(KERN_ERR "waiting for chip to be ready timed out in buffer write Xstatus = %x, status = %llx\n",
+ status, cfi_read(map, cmd_adr));
return -EIO;
}
}
ENABLE_VPP(map);
- map_write(map, CMD(0xe8), cmd_adr);
+ cfi_write(map, CMD(0xe8), cmd_adr);
chip->state = FL_WRITING_TO_BUFFER;
z = 0;
for (;;) {
- status = map_read(map, cmd_adr);
- if (map_word_andequal(map, status, status_OK, status_OK))
+ status = cfi_read(map, cmd_adr);
+ if ((status & status_OK) == status_OK)
break;
spin_unlock_bh(chip->mutex);
if (++z > 100) {
/* Argh. Not ready for write to buffer */
DISABLE_VPP(map);
- map_write(map, CMD(0x70), cmd_adr);
+ cfi_write(map, CMD(0x70), cmd_adr);
chip->state = FL_STATUS;
spin_unlock_bh(chip->mutex);
- printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %lx\n", status.x[0]);
+ printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %x\n", status);
return -EIO;
}
}
/* Write length of data to come */
- map_write(map, CMD(len/map_bankwidth(map)-1), cmd_adr );
+ cfi_write(map, CMD(len/CFIDEV_BUSWIDTH-1), cmd_adr );
/* Write data */
- for (z = 0; z < len;
- z += map_bankwidth(map), buf += map_bankwidth(map)) {
- map_word d;
- d = map_word_load(map, buf);
- map_write(map, d, adr+z);
+ for (z = 0; z < len; z += CFIDEV_BUSWIDTH) {
+ if (cfi_buswidth_is_1()) {
+ u8 *b = (u8 *)buf;
+
+ map_write8 (map, *b++, adr+z);
+ buf = (const u_char *)b;
+ } else if (cfi_buswidth_is_2()) {
+ u16 *b = (u16 *)buf;
+
+ map_write16 (map, *b++, adr+z);
+ buf = (const u_char *)b;
+ } else if (cfi_buswidth_is_4()) {
+ u32 *b = (u32 *)buf;
+
+ map_write32 (map, *b++, adr+z);
+ buf = (const u_char *)b;
+ } else {
+ DISABLE_VPP(map);
+ return -EINVAL;
+ }
}
/* GO GO GO */
- map_write(map, CMD(0xd0), cmd_adr);
+ cfi_write(map, CMD(0xd0), cmd_adr);
chip->state = FL_WRITING;
spin_unlock_bh(chip->mutex);
continue;
}
- status = map_read(map, cmd_adr);
- if (map_word_andequal(map, status, status_OK, status_OK))
+ status = cfi_read(map, cmd_adr);
+ if ((status & status_OK) == status_OK)
break;
/* OK Still waiting */
if (time_after(jiffies, timeo)) {
/* clear status */
- map_write(map, CMD(0x50), cmd_adr);
+ cfi_write(map, CMD(0x50), cmd_adr);
/* put back into read status register mode */
- map_write(map, CMD(0x70), adr);
+ cfi_write(map, CMD(0x70), adr);
chip->state = FL_STATUS;
DISABLE_VPP(map);
spin_unlock_bh(chip->mutex);
chip->state = FL_STATUS;
/* check for errors: 'lock bit', 'VPP', 'dead cell'/'unerased cell' or 'incorrect cmd' -- saw */
- if (map_word_bitsset(map, status, CMD(0x3a))) {
+ if ((status & CMD(0x02)) || (status & CMD(0x08)) ||
+ (status & CMD(0x10)) || (status & CMD(0x20))) {
#ifdef DEBUG_CFI_FEATURES
- printk("%s: 2 status[%lx]\n", __FUNCTION__, status.x[0]);
+ printk("%s: 2 status[%x]\n", __FUNCTION__, status);
#endif
- /* clear status */
- map_write(map, CMD(0x50), cmd_adr);
- /* put back into read status register mode */
- map_write(map, CMD(0x70), adr);
- wake_up(&chip->wq);
- spin_unlock_bh(chip->mutex);
- return map_word_bitsset(map, status, CMD(0x02)) ? -EROFS : -EIO;
- }
+ /* clear status */
+ cfi_write(map, CMD(0x50), cmd_adr);
+ /* put back into read status register mode */
+ cfi_write(map, CMD(0x70), adr);
+ wake_up(&chip->wq);
+ spin_unlock_bh(chip->mutex);
+ return (status & CMD(0x02)) ? -EROFS : -EIO;
+ }
wake_up(&chip->wq);
spin_unlock_bh(chip->mutex);
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
- int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
+ int wbufsize = CFIDEV_INTERLEAVE << cfi->cfiq->MaxBufWriteSize;
int ret = 0;
int chipnum;
unsigned long ofs;
ofs = to - (chipnum << cfi->chipshift);
#ifdef DEBUG_CFI_FEATURES
- printk("%s: map_bankwidth(map)[%x]\n", __FUNCTION__, map_bankwidth(map));
+ printk("%s: CFIDEV_BUSWIDTH[%x]\n", __FUNCTION__, CFIDEV_BUSWIDTH);
printk("%s: chipnum[%x] wbufsize[%x]\n", __FUNCTION__, chipnum, wbufsize);
printk("%s: ofs[%x] len[%x]\n", __FUNCTION__, ofs, len);
#endif
static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
struct cfi_private *cfi = map->fldrv_priv;
- map_word status, status_OK;
+ __u32 status, status_OK;
unsigned long timeo;
int retries = 3;
DECLARE_WAITQUEUE(wait, current);
case FL_CFI_QUERY:
case FL_JEDEC_QUERY:
case FL_READY:
- map_write(map, CMD(0x70), adr);
+ cfi_write(map, CMD(0x70), adr);
chip->state = FL_STATUS;
case FL_STATUS:
- status = map_read(map, adr);
- if (map_word_andequal(map, status, status_OK, status_OK))
+ status = cfi_read(map, adr);
+ if ((status & status_OK) == status_OK)
break;
/* Urgh. Chip not yet ready to talk to us. */
ENABLE_VPP(map);
/* Clear the status register first */
- map_write(map, CMD(0x50), adr);
+ cfi_write(map, CMD(0x50), adr);
/* Now erase */
- map_write(map, CMD(0x20), adr);
- map_write(map, CMD(0xD0), adr);
+ cfi_write(map, CMD(0x20), adr);
+ cfi_write(map, CMD(0xD0), adr);
chip->state = FL_ERASING;
spin_unlock_bh(chip->mutex);
continue;
}
- status = map_read(map, adr);
- if (map_word_andequal(map, status, status_OK, status_OK))
+ status = cfi_read(map, adr);
+ if ((status & status_OK) == status_OK)
break;
/* OK Still waiting */
if (time_after(jiffies, timeo)) {
- map_write(map, CMD(0x70), adr);
+ cfi_write(map, CMD(0x70), adr);
chip->state = FL_STATUS;
- printk(KERN_ERR "waiting for erase to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
+ printk(KERN_ERR "waiting for erase to complete timed out. Xstatus = %x, status = %llx.\n", status, cfi_read(map, adr));
DISABLE_VPP(map);
spin_unlock_bh(chip->mutex);
return -EIO;
ret = 0;
/* We've broken this before. It doesn't hurt to be safe */
- map_write(map, CMD(0x70), adr);
+ cfi_write(map, CMD(0x70), adr);
chip->state = FL_STATUS;
- status = map_read(map, adr);
+ status = cfi_read(map, adr);
/* check for lock bit */
- if (map_word_bitsset(map, status, CMD(0x3a))) {
- unsigned char chipstatus = status.x[0];
- if (!map_word_equal(map, status, CMD(chipstatus))) {
- int i, w;
- for (w=0; w<map_words(map); w++) {
- for (i = 0; i<cfi_interleave(cfi); i++) {
- chipstatus |= status.x[w] >> (cfi->device_type * 8);
- }
+ if (status & CMD(0x3a)) {
+ unsigned char chipstatus = status;
+ if (status != CMD(status & 0xff)) {
+ int i;
+ for (i = 1; i<CFIDEV_INTERLEAVE; i++) {
+ chipstatus |= status >> (cfi->device_type * 8);
}
- printk(KERN_WARNING "Status is not identical for all chips: 0x%lx. Merging to give 0x%02x\n",
- status.x[0], chipstatus);
+ printk(KERN_WARNING "Status is not identical for all chips: 0x%x. Merging to give 0x%02x\n", status, chipstatus);
}
/* Reset the error bits */
- map_write(map, CMD(0x50), adr);
- map_write(map, CMD(0x70), adr);
+ cfi_write(map, CMD(0x50), adr);
+ cfi_write(map, CMD(0x70), adr);
if ((chipstatus & 0x30) == 0x30) {
- printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%x\n", chipstatus);
+ printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%x\n", status);
ret = -EIO;
} else if (chipstatus & 0x02) {
/* Protection bit set */
ret = -EROFS;
} else if (chipstatus & 0x8) {
/* Voltage */
- printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%x\n", chipstatus);
+ printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%x\n", status);
ret = -EIO;
} else if (chipstatus & 0x20) {
if (retries--) {
- printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, chipstatus);
+ printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, status);
timeo = jiffies + HZ;
chip->state = FL_STATUS;
spin_unlock_bh(chip->mutex);
goto retry;
}
- printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, chipstatus);
+ printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, status);
ret = -EIO;
}
}
}
instr->state = MTD_ERASE_DONE;
- mtd_erase_callback(instr);
+ if (instr->callback)
+ instr->callback(instr);
return 0;
}
static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
struct cfi_private *cfi = map->fldrv_priv;
- map_word status, status_OK;
+ __u32 status, status_OK;
unsigned long timeo = jiffies + HZ;
DECLARE_WAITQUEUE(wait, current);
case FL_CFI_QUERY:
case FL_JEDEC_QUERY:
case FL_READY:
- map_write(map, CMD(0x70), adr);
+ cfi_write(map, CMD(0x70), adr);
chip->state = FL_STATUS;
case FL_STATUS:
- status = map_read(map, adr);
- if (map_word_andequal(map, status, status_OK, status_OK))
+ status = cfi_read(map, adr);
+ if ((status & status_OK) == status_OK)
break;
/* Urgh. Chip not yet ready to talk to us. */
}
ENABLE_VPP(map);
- map_write(map, CMD(0x60), adr);
- map_write(map, CMD(0x01), adr);
+ cfi_write(map, CMD(0x60), adr);
+ cfi_write(map, CMD(0x01), adr);
chip->state = FL_LOCKING;
spin_unlock_bh(chip->mutex);
timeo = jiffies + (HZ*2);
for (;;) {
- status = map_read(map, adr);
- if (map_word_andequal(map, status, status_OK, status_OK))
+ status = cfi_read(map, adr);
+ if ((status & status_OK) == status_OK)
break;
/* OK Still waiting */
if (time_after(jiffies, timeo)) {
- map_write(map, CMD(0x70), adr);
+ cfi_write(map, CMD(0x70), adr);
chip->state = FL_STATUS;
- printk(KERN_ERR "waiting for lock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
+ printk(KERN_ERR "waiting for lock to complete timed out. Xstatus = %x, status = %llx.\n", status, cfi_read(map, adr));
DISABLE_VPP(map);
spin_unlock_bh(chip->mutex);
return -EIO;
static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
struct cfi_private *cfi = map->fldrv_priv;
- map_word status, status_OK;
+ __u32 status, status_OK;
unsigned long timeo = jiffies + HZ;
DECLARE_WAITQUEUE(wait, current);
case FL_CFI_QUERY:
case FL_JEDEC_QUERY:
case FL_READY:
- map_write(map, CMD(0x70), adr);
+ cfi_write(map, CMD(0x70), adr);
chip->state = FL_STATUS;
case FL_STATUS:
- status = map_read(map, adr);
- if (map_word_andequal(map, status, status_OK, status_OK))
+ status = cfi_read(map, adr);
+ if ((status & status_OK) == status_OK)
break;
/* Urgh. Chip not yet ready to talk to us. */
}
ENABLE_VPP(map);
- map_write(map, CMD(0x60), adr);
- map_write(map, CMD(0xD0), adr);
+ cfi_write(map, CMD(0x60), adr);
+ cfi_write(map, CMD(0xD0), adr);
chip->state = FL_UNLOCKING;
spin_unlock_bh(chip->mutex);
timeo = jiffies + (HZ*2);
for (;;) {
- status = map_read(map, adr);
- if (map_word_andequal(map, status, status_OK, status_OK))
+ status = cfi_read(map, adr);
+ if ((status & status_OK) == status_OK)
break;
/* OK Still waiting */
if (time_after(jiffies, timeo)) {
- map_write(map, CMD(0x70), adr);
+ cfi_write(map, CMD(0x70), adr);
chip->state = FL_STATUS;
- printk(KERN_ERR "waiting for unlock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
+ printk(KERN_ERR "waiting for unlock to complete timed out. Xstatus = %x, status = %llx.\n", status, cfi_read(map, adr));
DISABLE_VPP(map);
spin_unlock_bh(chip->mutex);
return -EIO;
/* Go to known state. Chip may have been power cycled */
if (chip->state == FL_PM_SUSPENDED) {
- map_write(map, CMD(0xFF), 0);
+ cfi_write(map, CMD(0xFF), 0);
chip->state = FL_READY;
wake_up(&chip->wq);
}
kfree(cfi);
}
+#if LINUX_VERSION_CODE < 0x20212 && defined(MODULE)
+#define cfi_staa_init init_module
+#define cfi_staa_exit cleanup_module
+#endif
+
static char im_name[]="cfi_cmdset_0020";
int __init cfi_staa_init(void)
/*
Common Flash Interface probe code.
(C) 2000 Red Hat. GPL'd.
- $Id: cfi_probe.c,v 1.77 2004/07/14 08:38:44 dwmw2 Exp $
+ $Id: cfi_probe.c,v 1.71 2003/05/28 12:51:48 dwmw2 Exp $
*/
#include <linux/config.h>
#endif
static int cfi_probe_chip(struct map_info *map, __u32 base,
- unsigned long *chip_map, struct cfi_private *cfi);
+ struct flchip *chips, struct cfi_private *cfi);
static int cfi_chip_setup(struct map_info *map, struct cfi_private *cfi);
struct mtd_info *cfi_probe(struct map_info *map);
in: interleave,type,mode
ret: table index, <0 for error
*/
-static int qry_present(struct map_info *map, __u32 base,
+static inline int qry_present(struct map_info *map, __u32 base,
struct cfi_private *cfi)
{
int osf = cfi->interleave * cfi->device_type; // scale factor
- map_word val;
- map_word qry;
- qry = cfi_build_cmd('Q', map, cfi);
- val = map_read(map, base + osf*0x10);
+ if (cfi_read(map,base+osf*0x10)==cfi_build_cmd('Q',map,cfi) &&
+ cfi_read(map,base+osf*0x11)==cfi_build_cmd('R',map,cfi) &&
+ cfi_read(map,base+osf*0x12)==cfi_build_cmd('Y',map,cfi))
+ return 1; // ok !
- if (!map_word_equal(map, qry, val))
- return 0;
-
- qry = cfi_build_cmd('R', map, cfi);
- val = map_read(map, base + osf*0x11);
-
- if (!map_word_equal(map, qry, val))
- return 0;
-
- qry = cfi_build_cmd('Y', map, cfi);
- val = map_read(map, base + osf*0x12);
-
- if (!map_word_equal(map, qry, val))
- return 0;
-
- return 1; // nothing found
+ return 0; // nothing found
}
static int cfi_probe_chip(struct map_info *map, __u32 base,
- unsigned long *chip_map, struct cfi_private *cfi)
+ struct flchip *chips, struct cfi_private *cfi)
{
int i;
return 0;
}
cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
- cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
if (!qry_present(map,base,cfi))
}
/* Check each previous chip to see if it's an alias */
- for (i=0; i < (base >> cfi->chipshift); i++) {
- unsigned long start;
- if(!test_bit(i, chip_map)) {
- /* Skip location; no valid chip at this address */
- continue;
- }
- start = i << cfi->chipshift;
+ for (i=0; i<cfi->numchips; i++) {
/* This chip should be in read mode if it's one
we've already touched. */
- if (qry_present(map, start, cfi)) {
+ if (qry_present(map,chips[i].start,cfi)) {
/* Eep. This chip also had the QRY marker.
* Is it an alias for the new one? */
- cfi_send_gen_cmd(0xF0, 0, start, map, cfi, cfi->device_type, NULL);
- cfi_send_gen_cmd(0xFF, 0, start, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0xF0, 0, chips[i].start, map, cfi, cfi->device_type, NULL);
/* If the QRY marker goes away, it's an alias */
- if (!qry_present(map, start, cfi)) {
+ if (!qry_present(map, chips[i].start, cfi)) {
printk(KERN_DEBUG "%s: Found an alias at 0x%x for the chip at 0x%lx\n",
- map->name, base, start);
+ map->name, base, chips[i].start);
return 0;
}
/* Yes, it's actually got QRY for data. Most
* too and if it's the same, assume it's an alias. */
/* FIXME: Use other modes to do a proper check */
cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
- cfi_send_gen_cmd(0xFF, 0, start, map, cfi, cfi->device_type, NULL);
if (qry_present(map, base, cfi)) {
printk(KERN_DEBUG "%s: Found an alias at 0x%x for the chip at 0x%lx\n",
- map->name, base, start);
+ map->name, base, chips[i].start);
return 0;
}
}
/* OK, if we got to here, then none of the previous chips appear to
be aliases for the current one. */
- set_bit((base >> cfi->chipshift), chip_map); /* Update chip map */
+ if (cfi->numchips == MAX_CFI_CHIPS) {
+ printk(KERN_WARNING"%s: Too many flash chips detected. Increase MAX_CFI_CHIPS from %d.\n", map->name, MAX_CFI_CHIPS);
+ /* Doesn't matter about resetting it to Read Mode - we're not going to talk to it anyway */
+ return -1;
+ }
+ chips[cfi->numchips].start = base;
+ chips[cfi->numchips].state = FL_READY;
cfi->numchips++;
/* Put it back into Read Mode */
cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
- cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
- printk(KERN_INFO "%s: Found %d x%d devices at 0x%x in %d-bit bank\n",
+ printk(KERN_INFO "%s: Found %d x%d devices at 0x%x in %d-bit mode\n",
map->name, cfi->interleave, cfi->device_type*8, base,
- map->bankwidth*8);
+ map->buswidth*8);
return 1;
}
memset(cfi->cfiq,0,sizeof(struct cfi_ident));
cfi->cfi_mode = CFI_MODE_CFI;
+ cfi->fast_prog=1; /* CFI supports fast programming */
/* Read the CFI info structure */
for (i=0; i<(sizeof(struct cfi_ident) + num_erase_regions * 4); i++) {
(cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1);
#endif
}
-
- /* Note we put the device back into Read Mode BEFORE going into Auto
- * Select Mode, as some devices support nesting of modes, others
- * don't. This way should always work.
- * On cmdset 0001 the writes of 0xaa and 0x55 are not needed, and
- * so should be treated as nops or illegal (and so put the device
- * back into Read Mode, which is a nop in this case).
- */
- cfi_send_gen_cmd(0xf0, 0, base, map, cfi, cfi->device_type, NULL);
- cfi_send_gen_cmd(0xaa, 0x555, base, map, cfi, cfi->device_type, NULL);
- cfi_send_gen_cmd(0x55, 0x2aa, base, map, cfi, cfi->device_type, NULL);
- cfi_send_gen_cmd(0x90, 0x555, base, map, cfi, cfi->device_type, NULL);
- cfi->mfr = cfi_read_query(map, base);
- cfi->id = cfi_read_query(map, base + ofs_factor);
-
/* Put it back into Read Mode */
cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
- /* ... even if it's an Intel chip */
- cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
-
- printk(KERN_INFO "%s: Found %d x%d devices at 0x%x in %d-bit bank\n",
- map->name, cfi->interleave, cfi->device_type*8, base,
- map->bankwidth*8);
return 1;
}
printk("No Alternate Algorithm Table\n");
- printk("Vcc Minimum: %2d.%d V\n", cfip->VccMin >> 4, cfip->VccMin & 0xf);
- printk("Vcc Maximum: %2d.%d V\n", cfip->VccMax >> 4, cfip->VccMax & 0xf);
+ printk("Vcc Minimum: %x.%x V\n", cfip->VccMin >> 4, cfip->VccMin & 0xf);
+ printk("Vcc Maximum: %x.%x V\n", cfip->VccMax >> 4, cfip->VccMax & 0xf);
if (cfip->VppMin) {
- printk("Vpp Minimum: %2d.%d V\n", cfip->VppMin >> 4, cfip->VppMin & 0xf);
- printk("Vpp Maximum: %2d.%d V\n", cfip->VppMax >> 4, cfip->VppMax & 0xf);
+ printk("Vpp Minimum: %x.%x V\n", cfip->VppMin >> 4, cfip->VppMin & 0xf);
+ printk("Vpp Maximum: %x.%x V\n", cfip->VppMax >> 4, cfip->VppMax & 0xf);
}
else
printk("No Vpp line\n");
+++ /dev/null
-/*
- * Common Flash Interface support:
- * Generic utility functions not dependant on command set
- *
- * Copyright (C) 2002 Red Hat
- * Copyright (C) 2003 STMicroelectronics Limited
- *
- * This code is covered by the GPL.
- *
- * $Id: cfi_util.c,v 1.4 2004/07/14 08:38:44 dwmw2 Exp $
- *
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <asm/io.h>
-#include <asm/byteorder.h>
-
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/cfi.h>
-#include <linux/mtd/compatmac.h>
-
-struct cfi_extquery *
-cfi_read_pri(struct map_info *map, __u16 adr, __u16 size, const char* name)
-{
- struct cfi_private *cfi = map->fldrv_priv;
- __u32 base = 0; // cfi->chips[0].start;
- int ofs_factor = cfi->interleave * cfi->device_type;
- int i;
- struct cfi_extquery *extp = NULL;
-
- printk(" %s Extended Query Table at 0x%4.4X\n", name, adr);
- if (!adr)
- goto out;
-
- /* Switch it into Query Mode */
- cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
-
- extp = kmalloc(size, GFP_KERNEL);
- if (!extp) {
- printk(KERN_ERR "Failed to allocate memory\n");
- goto out;
- }
-
- /* Read in the Extended Query Table */
- for (i=0; i<size; i++) {
- ((unsigned char *)extp)[i] =
- cfi_read_query(map, base+((adr+i)*ofs_factor));
- }
-
- if (extp->MajorVersion != '1' ||
- (extp->MinorVersion < '0' || extp->MinorVersion > '3')) {
- printk(KERN_WARNING " Unknown %s Extended Query "
- "version %c.%c.\n", name, extp->MajorVersion,
- extp->MinorVersion);
- kfree(extp);
- extp = NULL;
- goto out;
- }
-
-out:
- /* Make sure it's in read mode */
- cfi_send_gen_cmd(0xf0, 0, base, map, cfi, cfi->device_type, NULL);
- cfi_send_gen_cmd(0xff, 0, base, map, cfi, cfi->device_type, NULL);
-
- return extp;
-}
-
-EXPORT_SYMBOL(cfi_read_pri);
-
-void cfi_fixup(struct map_info *map, struct cfi_fixup* fixups)
-{
- struct cfi_private *cfi = map->fldrv_priv;
- struct cfi_fixup *f;
-
- for (f=fixups; f->fixup; f++) {
- if (((f->mfr == CFI_MFR_ANY) || (f->mfr == cfi->mfr)) &&
- ((f->id == CFI_ID_ANY) || (f->id == cfi->id))) {
- f->fixup(map, f->param);
- }
- }
-}
-
-EXPORT_SYMBOL(cfi_fixup);
-
-MODULE_LICENSE("GPL");
/*
- * $Id: chipreg.c,v 1.16 2003/05/29 09:36:15 dwmw2 Exp $
+ * $Id: chipreg.c,v 1.15 2003/05/21 15:15:05 dwmw2 Exp $
*
* Registration for chip drivers
*
* Routines common to all CFI-type probes.
* (C) 2001-2003 Red Hat, Inc.
* GPL'd
- * $Id: gen_probe.c,v 1.19 2004/07/13 22:33:32 dwmw2 Exp $
+ * $Id: gen_probe.c,v 1.13 2003/06/25 11:50:37 dwmw2 Exp $
*/
#include <linux/kernel.h>
EXPORT_SYMBOL(mtd_do_chip_probe);
-static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chip_probe *cp)
+struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chip_probe *cp)
{
+ unsigned long base=0;
struct cfi_private cfi;
struct cfi_private *retcfi;
- unsigned long *chip_map;
- int i, j, mapsize;
- int max_chips;
+ struct flchip chip[MAX_CFI_CHIPS];
+ int i;
memset(&cfi, 0, sizeof(cfi));
+ memset(&chip[0], 0, sizeof(chip));
/* Call the probetype-specific code with all permutations of
interleave and device type, etc. */
return NULL;
}
#endif
+ chip[0].start = 0;
+ chip[0].state = FL_READY;
cfi.chipshift = cfi.cfiq->DevSize;
- if (cfi_interleave_is_1(&cfi)) {
- ;
- } else if (cfi_interleave_is_2(&cfi)) {
+ switch(cfi.interleave) {
+#ifdef CFIDEV_INTERLEAVE_1
+ case 1:
+ break;
+#endif
+#ifdef CFIDEV_INTERLEAVE_2
+ case 2:
cfi.chipshift++;
- } else if (cfi_interleave_is_4((&cfi))) {
- cfi.chipshift += 2;
- } else if (cfi_interleave_is_8(&cfi)) {
- cfi.chipshift += 3;
- } else {
+ break;
+#endif
+#ifdef CFIDEV_INTERLEAVE_4
+ case 4:
+ cfi.chipshift+=2;
+ break;
+#endif
+ default:
BUG();
}
cfi.numchips = 1;
- /*
- * Allocate memory for bitmap of valid chips.
- * Align bitmap storage size to full byte.
- */
- max_chips = map->size >> cfi.chipshift;
- mapsize = (max_chips / 8) + ((max_chips % 8) ? 1 : 0);
- chip_map = kmalloc(mapsize, GFP_KERNEL);
- if (!chip_map) {
- printk(KERN_WARNING "%s: kmalloc failed for CFI chip map\n", map->name);
- kfree(cfi.cfiq);
- return NULL;
- }
- memset (chip_map, 0, mapsize);
-
- set_bit(0, chip_map); /* Mark first chip valid */
-
/*
* Now probe for other chips, checking sensibly for aliases while
* we're at it. The new_chip probe above should have let the first
* chip in read mode.
+ *
+ * NOTE: Here, we're checking if there is room for another chip
+ * the same size within the mapping. Therefore,
+ * base + chipsize <= map->size is the correct thing to do,
+ * because, base + chipsize would be the _first_ byte of the
+ * next chip, not the one we're currently pondering.
*/
- for (i = 1; i < max_chips; i++) {
- cp->probe_chip(map, i << cfi.chipshift, chip_map, &cfi);
- }
+ for (base = (1<<cfi.chipshift); base + (1<<cfi.chipshift) <= map->size;
+ base += (1<<cfi.chipshift))
+ cp->probe_chip(map, base, &chip[0], &cfi);
/*
* Now allocate the space for the structures we need to return to
if (!retcfi) {
printk(KERN_WARNING "%s: kmalloc failed for CFI private structure\n", map->name);
kfree(cfi.cfiq);
- kfree(chip_map);
return NULL;
}
memcpy(retcfi, &cfi, sizeof(cfi));
- memset(&retcfi->chips[0], 0, sizeof(struct flchip) * cfi.numchips);
-
- for (i = 0, j = 0; (j < cfi.numchips) && (i < max_chips); i++) {
- if(test_bit(i, chip_map)) {
- struct flchip *pchip = &retcfi->chips[j++];
-
- pchip->start = (i << cfi.chipshift);
- pchip->state = FL_READY;
- init_waitqueue_head(&pchip->wq);
- spin_lock_init(&pchip->_spinlock);
- pchip->mutex = &pchip->_spinlock;
- }
+ memcpy(&retcfi->chips[0], chip, sizeof(struct flchip) * cfi.numchips);
+
+ /* Fix up the stuff that breaks when you move it */
+ for (i=0; i< retcfi->numchips; i++) {
+ init_waitqueue_head(&retcfi->chips[i].wq);
+ spin_lock_init(&retcfi->chips[i]._spinlock);
+ retcfi->chips[i].mutex = &retcfi->chips[i]._spinlock;
}
- kfree(chip_map);
return retcfi;
}
static int genprobe_new_chip(struct map_info *map, struct chip_probe *cp,
struct cfi_private *cfi)
{
- int min_chips = (map_bankwidth(map)/4?:1); /* At most 4-bytes wide. */
- int max_chips = map_bankwidth(map); /* And minimum 1 */
- int nr_chips, type;
-
- for (nr_chips = min_chips; nr_chips <= max_chips; nr_chips <<= 1) {
-
- if (!cfi_interleave_supported(nr_chips))
- continue;
-
- cfi->interleave = nr_chips;
-
- for (type = 0; type < 3; type++) {
- cfi->device_type = 1<<type;
-
- if (cp->probe_chip(map, 0, NULL, cfi))
- return 1;
- }
+ switch (map->buswidth) {
+#ifdef CFIDEV_BUSWIDTH_1
+ case CFIDEV_BUSWIDTH_1:
+ cfi->interleave = CFIDEV_INTERLEAVE_1;
+
+ cfi->device_type = CFI_DEVICETYPE_X8;
+ if (cp->probe_chip(map, 0, NULL, cfi))
+ return 1;
+
+ cfi->device_type = CFI_DEVICETYPE_X16;
+ if (cp->probe_chip(map, 0, NULL, cfi))
+ return 1;
+ break;
+#endif /* CFIDEV_BUSWITDH_1 */
+
+#ifdef CFIDEV_BUSWIDTH_2
+ case CFIDEV_BUSWIDTH_2:
+#ifdef CFIDEV_INTERLEAVE_1
+ cfi->interleave = CFIDEV_INTERLEAVE_1;
+
+ cfi->device_type = CFI_DEVICETYPE_X16;
+ if (cp->probe_chip(map, 0, NULL, cfi))
+ return 1;
+#endif /* CFIDEV_INTERLEAVE_1 */
+#ifdef CFIDEV_INTERLEAVE_2
+ cfi->interleave = CFIDEV_INTERLEAVE_2;
+
+ cfi->device_type = CFI_DEVICETYPE_X8;
+ if (cp->probe_chip(map, 0, NULL, cfi))
+ return 1;
+
+ cfi->device_type = CFI_DEVICETYPE_X16;
+ if (cp->probe_chip(map, 0, NULL, cfi))
+ return 1;
+#endif /* CFIDEV_INTERLEAVE_2 */
+ break;
+#endif /* CFIDEV_BUSWIDTH_2 */
+
+#ifdef CFIDEV_BUSWIDTH_4
+ case CFIDEV_BUSWIDTH_4:
+#if defined(CFIDEV_INTERLEAVE_1) && defined(SOMEONE_ACTUALLY_MAKES_THESE)
+ cfi->interleave = CFIDEV_INTERLEAVE_1;
+
+ cfi->device_type = CFI_DEVICETYPE_X32;
+ if (cp->probe_chip(map, 0, NULL, cfi))
+ return 1;
+#endif /* CFIDEV_INTERLEAVE_1 */
+#ifdef CFIDEV_INTERLEAVE_2
+ cfi->interleave = CFIDEV_INTERLEAVE_2;
+
+#ifdef SOMEONE_ACTUALLY_MAKES_THESE
+ cfi->device_type = CFI_DEVICETYPE_X32;
+ if (cp->probe_chip(map, 0, NULL, cfi))
+ return 1;
+#endif
+ cfi->device_type = CFI_DEVICETYPE_X16;
+ if (cp->probe_chip(map, 0, NULL, cfi))
+ return 1;
+
+ cfi->device_type = CFI_DEVICETYPE_X8;
+ if (cp->probe_chip(map, 0, NULL, cfi))
+ return 1;
+#endif /* CFIDEV_INTERLEAVE_2 */
+#ifdef CFIDEV_INTERLEAVE_4
+ cfi->interleave = CFIDEV_INTERLEAVE_4;
+
+#ifdef SOMEONE_ACTUALLY_MAKES_THESE
+ cfi->device_type = CFI_DEVICETYPE_X32;
+ if (cp->probe_chip(map, 0, NULL, cfi))
+ return 1;
+#endif
+ cfi->device_type = CFI_DEVICETYPE_X16;
+ if (cp->probe_chip(map, 0, NULL, cfi))
+ return 1;
+
+ cfi->device_type = CFI_DEVICETYPE_X8;
+ if (cp->probe_chip(map, 0, NULL, cfi))
+ return 1;
+#endif /* CFIDEV_INTERLEAVE_4 */
+ break;
+#endif /* CFIDEV_BUSWIDTH_4 */
+
+#ifdef CFIDEV_BUSWIDTH_8
+ case CFIDEV_BUSWIDTH_8:
+#if defined(CFIDEV_INTERLEAVE_2) && defined(SOMEONE_ACTUALLY_MAKES_THESE)
+ cfi->interleave = CFIDEV_INTERLEAVE_2;
+
+ cfi->device_type = CFI_DEVICETYPE_X32;
+ if (cp->probe_chip(map, 0, NULL, cfi))
+ return 1;
+#endif /* CFIDEV_INTERLEAVE_2 */
+#ifdef CFIDEV_INTERLEAVE_4
+ cfi->interleave = CFIDEV_INTERLEAVE_4;
+
+#ifdef SOMEONE_ACTUALLY_MAKES_THESE
+ cfi->device_type = CFI_DEVICETYPE_X32;
+ if (cp->probe_chip(map, 0, NULL, cfi))
+ return 1;
+#endif
+ cfi->device_type = CFI_DEVICETYPE_X16;
+ if (cp->probe_chip(map, 0, NULL, cfi))
+ return 1;
+#endif /* CFIDEV_INTERLEAVE_4 */
+#ifdef CFIDEV_INTERLEAVE_8
+ cfi->interleave = CFIDEV_INTERLEAVE_8;
+
+ cfi->device_type = CFI_DEVICETYPE_X16;
+ if (cp->probe_chip(map, 0, NULL, cfi))
+ return 1;
+
+ cfi->device_type = CFI_DEVICETYPE_X8;
+ if (cp->probe_chip(map, 0, NULL, cfi))
+ return 1;
+#endif /* CFIDEV_INTERLEAVE_8 */
+ break;
+#endif /* CFIDEV_BUSWIDTH_8 */
+
+ default:
+ printk(KERN_WARNING "genprobe_new_chip called with unsupported buswidth %d\n", map->buswidth);
+ return 0;
}
return 0;
}
+
typedef struct mtd_info *cfi_cmdset_fn_t(struct map_info *, int);
extern cfi_cmdset_fn_t cfi_cmdset_0001;
* not going to guess how to send commands to them, plus I expect they will
* all speak CFI..
*
- * $Id: jedec.c,v 1.21 2004/08/09 13:19:43 dwmw2 Exp $
+ * $Id: jedec.c,v 1.19 2003/05/29 09:25:23 dwmw2 Exp $
*/
#include <linux/init.h>
//printk("done\n");
instr->state = MTD_ERASE_DONE;
- mtd_erase_callback(instr);
+ if (instr->callback)
+ instr->callback(instr);
return 0;
#undef flread
/*
Common Flash Interface probe code.
(C) 2000 Red Hat. GPL'd.
- $Id: jedec_probe.c,v 1.51 2004/07/14 14:44:30 thayne Exp $
+ $Id: jedec_probe.c,v 1.29 2003/05/28 13:57:46 dwmw2 Exp $
See JEDEC (http://www.jedec.org/) standard JESD21C (section 3.5)
for the standard this probe goes back to.
-
- Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
*/
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
/* AMD */
-#define AM29DL800BB 0x22C8
-#define AM29DL800BT 0x224A
-
#define AM29F800BB 0x2258
#define AM29F800BT 0x22D6
-#define AM29LV400BB 0x22BA
-#define AM29LV400BT 0x22B9
#define AM29LV800BB 0x225B
#define AM29LV800BT 0x22DA
#define AM29LV160DT 0x22C4
#define AT49BV32XT 0x00C9
/* Fujitsu */
-#define MBM29F040C 0x00A4
#define MBM29LV650UE 0x22D7
#define MBM29LV320TE 0x22F6
#define MBM29LV320BE 0x22F9
#define MBM29LV160BE 0x2249
#define MBM29LV800BA 0x225B
#define MBM29LV800TA 0x22DA
-#define MBM29LV400TC 0x22B9
-#define MBM29LV400BC 0x22BA
-
/* Intel */
#define I28F004B3T 0x00d4
#define M50FW016 0x002E
/* SST */
-#define SST29EE020 0x0010
-#define SST29LE020 0x0012
#define SST29EE512 0x005d
#define SST29LE512 0x003d
#define SST39LF800 0x2781
#define SST39LF040 0x00D7
#define SST39SF010A 0x00B5
#define SST39SF020A 0x00B6
-#define SST49LF004B 0x0060
-#define SST49LF008A 0x005a
#define SST49LF030A 0x001C
#define SST49LF040A 0x0051
#define SST49LF080A 0x005B
const __u16 dev_id;
const char *name;
const int DevSize;
+ const int InterfaceDesc;
const int NumEraseRegions;
const int CmdSet;
- const __u8 uaddr[4]; /* unlock addrs for 8, 16, 32, 64 */
- const ulong regions[6];
+ const __u8 uaddr[3]; /* unlock addrs for 8, 16, 32 modes */
+ const ulong regions[4];
};
#define ERASEINFO(size,blocks) (size<<8)|(blocks-1)
ERASEINFO(0x08000,1),
ERASEINFO(0x10000,31)
}
- }, {
- .mfr_id = MANUFACTURER_AMD,
- .dev_id = AM29LV400BB,
- .name = "AMD AM29LV400BB",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
- },
- .DevSize = SIZE_512KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
- .regions = {
- ERASEINFO(0x04000,1),
- ERASEINFO(0x02000,2),
- ERASEINFO(0x08000,1),
- ERASEINFO(0x10000,7)
- }
- }, {
- .mfr_id = MANUFACTURER_AMD,
- .dev_id = AM29LV400BT,
- .name = "AMD AM29LV400BT",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
- },
- .DevSize = SIZE_512KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
- .regions = {
- ERASEINFO(0x10000,7),
- ERASEINFO(0x08000,1),
- ERASEINFO(0x02000,2),
- ERASEINFO(0x04000,1)
- }
}, {
.mfr_id = MANUFACTURER_AMD,
.dev_id = AM29LV800BB,
ERASEINFO(0x10000,15),
}
}, {
-/* add DL */
- .mfr_id = MANUFACTURER_AMD,
- .dev_id = AM29DL800BB,
- .name = "AMD AM29DL800BB",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
- },
- .DevSize = SIZE_1MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 6,
- .regions = {
- ERASEINFO(0x04000,1),
- ERASEINFO(0x08000,1),
- ERASEINFO(0x02000,4),
- ERASEINFO(0x08000,1),
- ERASEINFO(0x04000,1),
- ERASEINFO(0x10000,14)
- }
- }, {
- .mfr_id = MANUFACTURER_AMD,
- .dev_id = AM29DL800BT,
- .name = "AMD AM29DL800BT",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
- },
- .DevSize = SIZE_1MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 6,
- .regions = {
- ERASEINFO(0x10000,14),
- ERASEINFO(0x04000,1),
- ERASEINFO(0x08000,1),
- ERASEINFO(0x02000,4),
- ERASEINFO(0x08000,1),
- ERASEINFO(0x04000,1)
- }
- }, {
.mfr_id = MANUFACTURER_AMD,
.dev_id = AM29F800BB,
.name = "AMD AM29F800BB",
ERASEINFO(0x10000,63),
ERASEINFO(0x02000,8)
}
- }, {
- .mfr_id = MANUFACTURER_FUJITSU,
- .dev_id = MBM29F040C,
- .name = "Fujitsu MBM29F040C",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- },
- .DevSize = SIZE_512KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 1,
- .regions = {
- ERASEINFO(0x10000,8)
- }
}, {
.mfr_id = MANUFACTURER_FUJITSU,
.dev_id = MBM29LV650UE,
ERASEINFO(0x02000,2),
ERASEINFO(0x04000,1)
}
- }, {
- .mfr_id = MANUFACTURER_FUJITSU,
- .dev_id = MBM29LV400BC,
- .name = "Fujitsu MBM29LV400BC",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
- },
- .DevSize = SIZE_512KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
- .regions = {
- ERASEINFO(0x04000,1),
- ERASEINFO(0x02000,2),
- ERASEINFO(0x08000,1),
- ERASEINFO(0x10000,7)
- }
- }, {
- .mfr_id = MANUFACTURER_FUJITSU,
- .dev_id = MBM29LV400TC,
- .name = "Fujitsu MBM29LV400TC",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
- },
- .DevSize = SIZE_512KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
- .regions = {
- ERASEINFO(0x10000,7),
- ERASEINFO(0x08000,1),
- ERASEINFO(0x02000,2),
- ERASEINFO(0x04000,1)
- }
}, {
.mfr_id = MANUFACTURER_INTEL,
.dev_id = I28F004B3B,
ERASEINFO(0x01000,32),
}
}, {
- .mfr_id = MANUFACTURER_SST,
- .dev_id = SST29EE020,
- .name = "SST 29EE020",
- .uaddr = {
- [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
- },
- .DevSize = SIZE_256KiB,
- .CmdSet = P_ID_SST_PAGE,
- .NumEraseRegions= 1,
- regions: {ERASEINFO(0x01000,64),
- }
- }, {
- .mfr_id = MANUFACTURER_SST,
- .dev_id = SST29LE020,
- .name = "SST 29LE020",
- .uaddr = {
- [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
- },
- .DevSize = SIZE_256KiB,
- .CmdSet = P_ID_SST_PAGE,
- .NumEraseRegions= 1,
- regions: {ERASEINFO(0x01000,64),
- }
- }, {
.mfr_id = MANUFACTURER_SST,
.dev_id = SST39LF020,
.name = "SST 39LF020",
.regions = {
ERASEINFO(0x01000,64),
}
- }, {
- .mfr_id = MANUFACTURER_SST,
- .dev_id = SST49LF004B,
- .name = "SST 49LF004B",
- .uaddr = {
- [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
- },
- .DevSize = SIZE_512KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 1,
- .regions = {
- ERASEINFO(0x01000,128),
- }
- }, {
- .mfr_id = MANUFACTURER_SST,
- .dev_id = SST49LF008A,
- .name = "SST 49LF008A",
- .uaddr = {
- [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
- },
- .DevSize = SIZE_1MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 1,
- .regions = {
- ERASEINFO(0x01000,256),
- }
}, {
.mfr_id = MANUFACTURER_SST,
.dev_id = SST49LF030A,
static int cfi_jedec_setup(struct cfi_private *p_cfi, int index);
static int jedec_probe_chip(struct map_info *map, __u32 base,
- unsigned long *chip_map, struct cfi_private *cfi);
+ struct flchip *chips, struct cfi_private *cfi);
struct mtd_info *jedec_probe(struct map_info *map);
static inline u32 jedec_read_mfr(struct map_info *map, __u32 base,
struct cfi_private *cfi)
{
- map_word result;
- unsigned long mask;
+ u32 result, mask;
mask = (1 << (cfi->device_type * 8)) -1;
- result = map_read(map, base);
- return result.x[0] & mask;
+ result = cfi_read(map, base);
+ result &= mask;
+ return result;
}
static inline u32 jedec_read_id(struct map_info *map, __u32 base,
struct cfi_private *cfi)
{
int osf;
- map_word result;
- unsigned long mask;
+ u32 result, mask;
osf = cfi->interleave *cfi->device_type;
mask = (1 << (cfi->device_type * 8)) -1;
- result = map_read(map, base + osf);
- return result.x[0] & mask;
+ result = cfi_read(map, base + osf);
+ result &= mask;
+ return result;
}
static inline void jedec_reset(u32 base, struct map_info *map,
struct cfi_private *cfi)
{
/* Reset */
-
- /* after checking the datasheets for SST, MACRONIX and ATMEL
- * (oh and incidentaly the jedec spec - 3.5.3.3) the reset
- * sequence is *supposed* to be 0xaa at 0x5555, 0x55 at
- * 0x2aaa, 0xF0 at 0x5555 this will not affect the AMD chips
- * as they will ignore the writes and dont care what address
- * the F0 is written to */
- if(cfi->addr_unlock1) {
- /*printk("reset unlock called %x %x \n",cfi->addr_unlock1,cfi->addr_unlock2);*/
- cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, CFI_DEVICETYPE_X8, NULL);
- cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, CFI_DEVICETYPE_X8, NULL);
- }
-
- cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
/* Some misdesigned intel chips do not respond for 0xF0 for a reset,
* so ensure we're in read mode. Send both the Intel and the AMD command
* for this. Intel uses 0xff for this, AMD uses 0xff for NOP, so
uaddr = finfo->uaddr[uaddr_idx];
- if (uaddr != MTD_UADDR_NOT_SUPPORTED ) {
- /* ASSERT("The unlock addresses for non-8-bit mode
- are bollocks. We don't really need an array."); */
- uaddr = finfo->uaddr[0];
- }
-
uaddr_done:
return uaddr;
}
static int cfi_jedec_setup(struct cfi_private *p_cfi, int index)
{
int i,num_erase_regions;
- unsigned long mask;
__u8 uaddr;
printk("Found: %s\n",jedec_table[index].name);
p_cfi->id = jedec_table[index].dev_id;
uaddr = finfo_uaddr(&jedec_table[index], p_cfi->device_type);
- if ( uaddr == MTD_UADDR_NOT_SUPPORTED ) {
+ if ( MTD_UADDR_NOT_SUPPORTED ) {
kfree( p_cfi->cfiq );
return 0;
}
-
- /* Mask out address bits which are smaller than the device type */
- mask = ~(p_cfi->device_type-1);
- p_cfi->addr_unlock1 = unlock_addrs[uaddr].addr1 & mask;
- p_cfi->addr_unlock2 = unlock_addrs[uaddr].addr2 & mask;
+ p_cfi->addr_unlock1 = unlock_addrs[uaddr].addr1;
+ p_cfi->addr_unlock2 = unlock_addrs[uaddr].addr2;
return 1; /* ok */
}
int rc = 0; /* failure until all tests pass */
u32 mfr, id;
__u8 uaddr;
- unsigned long mask;
- /*
- * The IDs must match. For X16 and X32 devices operating in
- * a lower width ( X8 or X16 ), the device ID's are usually just
- * the lower byte(s) of the larger device ID for wider mode. If
- * a part is found that doesn't fit this assumption (device id for
- * smaller width mode is completely unrealated to full-width mode)
- * then the jedec_table[] will have to be augmented with the IDs
- * for different widths.
- */
- switch (cfi->device_type) {
- case CFI_DEVICETYPE_X8:
- mfr = (__u8)finfo->mfr_id;
- id = (__u8)finfo->dev_id;
- break;
- case CFI_DEVICETYPE_X16:
- mfr = (__u16)finfo->mfr_id;
- id = (__u16)finfo->dev_id;
- break;
- case CFI_DEVICETYPE_X32:
- mfr = (__u16)finfo->mfr_id;
- id = (__u32)finfo->dev_id;
- break;
- default:
- printk(KERN_WARNING
- "MTD %s(): Unsupported device type %d\n",
- __func__, cfi->device_type);
- goto match_done;
- }
- if ( cfi->mfr != mfr || cfi->id != id ) {
+ /* The ID's must match */
+ if ( cfi->mfr != finfo->mfr_id || cfi->id != finfo->dev_id ) {
goto match_done;
}
DEBUG( MTD_DEBUG_LEVEL3,
"MTD %s(): Check fit 0x%.8x + 0x%.8x = 0x%.8x\n",
__func__, base, 1 << finfo->DevSize, base + (1 << finfo->DevSize) );
- if ( base + cfi->interleave * ( 1 << finfo->DevSize ) > map->size ) {
+ if ( base + ( 1 << finfo->DevSize ) > map->size ) {
DEBUG( MTD_DEBUG_LEVEL3,
"MTD %s(): 0x%.4x 0x%.4x %dKiB doesn't fit\n",
__func__, finfo->mfr_id, finfo->dev_id,
}
uaddr = finfo_uaddr(finfo, cfi->device_type);
- if ( uaddr == MTD_UADDR_NOT_SUPPORTED ) {
+ if ( MTD_UADDR_NOT_SUPPORTED ) {
goto match_done;
}
- mask = ~(cfi->device_type-1);
-
DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): check unlock addrs 0x%.4x 0x%.4x\n",
__func__, cfi->addr_unlock1, cfi->addr_unlock2 );
if ( MTD_UADDR_UNNECESSARY != uaddr && MTD_UADDR_DONT_CARE != uaddr
- && ( (unlock_addrs[uaddr].addr1 & mask) != cfi->addr_unlock1 ||
- (unlock_addrs[uaddr].addr2 & mask) != cfi->addr_unlock2 ) ) {
+ && ( unlock_addrs[uaddr].addr1 != cfi->addr_unlock1
+ || unlock_addrs[uaddr].addr2 != cfi->addr_unlock2 ) ) {
DEBUG( MTD_DEBUG_LEVEL3,
- "MTD %s(): 0x%.4lx 0x%.4lx did not match\n",
+ "MTD %s(): 0x%.4x 0x%.4x did not match\n",
__func__,
- unlock_addrs[uaddr].addr1 & mask,
- unlock_addrs[uaddr].addr2 & mask);
+ unlock_addrs[uaddr].addr1,
+ unlock_addrs[uaddr].addr2 );
goto match_done;
}
static int jedec_probe_chip(struct map_info *map, __u32 base,
- unsigned long *chip_map, struct cfi_private *cfi)
+ struct flchip *chips, struct cfi_private *cfi)
{
int i;
- enum uaddr uaddr_idx = MTD_UADDR_NOT_SUPPORTED;
+ int unlockpass = 0;
- retry:
+ /*
+ * FIXME - eventually replace these unlock address seeds with
+ * information from unlock_addrs[].
+ */
if (!cfi->numchips) {
- unsigned long mask = ~(cfi->device_type-1);
-
- uaddr_idx++;
-
- if (MTD_UADDR_UNNECESSARY == uaddr_idx)
- return 0;
-
- /* Mask out address bits which are smaller than the device type */
- cfi->addr_unlock1 = unlock_addrs[uaddr_idx].addr1 & mask;
- cfi->addr_unlock2 = unlock_addrs[uaddr_idx].addr2 & mask;
+ switch (cfi->device_type) {
+ case CFI_DEVICETYPE_X8:
+ cfi->addr_unlock1 = 0x555;
+ cfi->addr_unlock2 = 0x2aa;
+ break;
+ case CFI_DEVICETYPE_X16:
+ cfi->addr_unlock1 = 0xaaa;
+ if (map->buswidth == cfi->interleave) {
+ /* X16 chip(s) in X8 mode */
+ cfi->addr_unlock2 = 0x555;
+ } else {
+ cfi->addr_unlock2 = 0x554;
+ }
+ break;
+ case CFI_DEVICETYPE_X32:
+ cfi->addr_unlock1 = 0x1555;
+ cfi->addr_unlock2 = 0xaaa;
+ break;
+ default:
+ printk(KERN_NOTICE "Eep. Unknown jedec_probe device type %d\n", cfi->device_type);
+ return 0;
+ }
}
+ retry:
/* Make certain we aren't probing past the end of map */
if (base >= map->size) {
printk(KERN_NOTICE
cfi->mfr = jedec_read_mfr(map, base, cfi);
cfi->id = jedec_read_id(map, base, cfi);
- DEBUG(MTD_DEBUG_LEVEL3,
- "Search for id:(%02x %02x) interleave(%d) type(%d)\n",
+ printk(KERN_INFO "Search for id:(%02x %02x) interleave(%d) type(%d)\n",
cfi->mfr, cfi->id, cfi->interleave, cfi->device_type);
for (i=0; i<sizeof(jedec_table)/sizeof(jedec_table[0]); i++) {
if ( jedec_match( base, map, cfi, &jedec_table[i] ) ) {
goto ok_out;
}
}
- goto retry;
+ switch(unlockpass++) {
+ case 0:
+ cfi->addr_unlock1 |= cfi->addr_unlock1 << 4;
+ cfi->addr_unlock2 |= cfi->addr_unlock2 << 4;
+ goto retry;
+ case 1:
+ cfi->addr_unlock1 = cfi->addr_unlock2 = 0;
+ goto retry;
+ }
+ return 0;
} else {
__u16 mfr;
__u16 id;
}
}
- /* Check each previous chip locations to see if it's an alias */
- for (i=0; i < (base >> cfi->chipshift); i++) {
- unsigned long start;
- if(!test_bit(i, chip_map)) {
- continue; /* Skip location; no valid chip at this address */
- }
- start = i << cfi->chipshift;
- if (jedec_read_mfr(map, start, cfi) == cfi->mfr &&
- jedec_read_id(map, start, cfi) == cfi->id) {
+ /* Check each previous chip to see if it's an alias */
+ for (i=0; i<cfi->numchips; i++) {
+ /* This chip should be in read mode if it's one
+ we've already touched. */
+ if (jedec_read_mfr(map, chips[i].start, cfi) == cfi->mfr &&
+ jedec_read_id(map, chips[i].start, cfi) == cfi->id) {
/* Eep. This chip also looks like it's in autoselect mode.
Is it an alias for the new one? */
- jedec_reset(start, map, cfi);
+ jedec_reset(chips[i].start, map, cfi);
/* If the device IDs go away, it's an alias */
if (jedec_read_mfr(map, base, cfi) != cfi->mfr ||
jedec_read_id(map, base, cfi) != cfi->id) {
printk(KERN_DEBUG "%s: Found an alias at 0x%x for the chip at 0x%lx\n",
- map->name, base, start);
+ map->name, base, chips[i].start);
return 0;
}
if (jedec_read_mfr(map, base, cfi) == cfi->mfr &&
jedec_read_id(map, base, cfi) == cfi->id) {
printk(KERN_DEBUG "%s: Found an alias at 0x%x for the chip at 0x%lx\n",
- map->name, base, start);
+ map->name, base, chips[i].start);
return 0;
}
}
/* OK, if we got to here, then none of the previous chips appear to
be aliases for the current one. */
- set_bit((base >> cfi->chipshift), chip_map); /* Update chip map */
+ if (cfi->numchips == MAX_CFI_CHIPS) {
+ printk(KERN_WARNING"%s: Too many flash chips detected. Increase MAX_CFI_CHIPS from %d.\n", map->name, MAX_CFI_CHIPS);
+ /* Doesn't matter about resetting it to Read Mode - we're not going to talk to it anyway */
+ return -1;
+ }
+ chips[cfi->numchips].start = base;
+ chips[cfi->numchips].state = FL_READY;
cfi->numchips++;
ok_out:
/* Put it back into Read Mode */
jedec_reset(base, map, cfi);
- printk(KERN_INFO "%s: Found %d x%d devices at 0x%x in %d-bit bank\n",
+ printk(KERN_INFO "%s: Found %d x%d devices at 0x%x in %d-bit mode\n",
map->name, cfi->interleave, cfi->device_type*8, base,
- map->bankwidth*8);
+ map->buswidth*8);
return 1;
}
/*
* Common code to handle map devices which are simple RAM
* (C) 2000 Red Hat. GPL'd.
- * $Id: map_ram.c,v 1.20 2004/08/09 13:19:43 dwmw2 Exp $
+ * $Id: map_ram.c,v 1.17 2003/05/28 12:51:49 dwmw2 Exp $
*/
#include <linux/module.h>
/* Yeah, it's inefficient. Who cares? It's faster than a _real_
flash erase. */
struct map_info *map = (struct map_info *)mtd->priv;
- map_word allff;
unsigned long i;
- allff = map_word_ff(map);
+ for (i=0; i<instr->len; i++)
+ map_write8(map, 0xFF, instr->addr + i);
- for (i=0; i<instr->len; i += map_bankwidth(map))
- map_write(map, allff, instr->addr + i);
-
- instr->state = MTD_ERASE_DONE;
-
- mtd_erase_callback(instr);
+ if (instr->callback)
+ instr->callback(instr);
return 0;
}
/*
* Common code to handle map devices which are simple ROM
* (C) 2000 Red Hat. GPL'd.
- * $Id: map_rom.c,v 1.21 2004/07/12 14:06:01 dwmw2 Exp $
+ * $Id: map_rom.c,v 1.20 2003/05/28 12:51:49 dwmw2 Exp $
*/
#include <linux/module.h>
* Copyright 2000,2001 David A. Schleef <ds@schleef.org>
* 2000,2001 Lineo, Inc.
*
- * $Id: sharp.c,v 1.14 2004/08/09 13:19:43 dwmw2 Exp $
+ * $Id: sharp.c,v 1.12 2003/05/28 15:39:52 dwmw2 Exp $
*
* Devices supported:
* LH28F016SCT Symmetrical block flash memory, 2Mx8
}
instr->state = MTD_ERASE_DONE;
- mtd_erase_callback(instr);
+ if(instr->callback)
+ instr->callback(instr);
return 0;
}
/*
- * $Id: cmdlinepart.c,v 1.14 2004/07/12 12:34:23 dwmw2 Exp $
+ * $Id: cmdlinepart.c,v 1.9 2003/05/16 17:08:24 dwmw2 Exp $
*
* Read flash partition table from command line
*
* mtdparts=<mtddef>[;<mtddef]
* <mtddef> := <mtd-id>:<partdef>[,<partdef>]
* <partdef> := <size>[@offset][<name>][ro]
- * <mtd-id> := unique name used in mapping driver/device (mtd->name)
+ * <mtd-id> := unique id used in mapping driver/device
* <size> := standard linux memsize OR "-" to denote all remaining space
* <name> := '(' NAME ')'
*
return register_mtd_parser(&cmdline_parser);
}
+static void __exit cmdline_parser_exit(void)
+{
+ deregister_mtd_parser(&cmdline_parser);
+}
+
module_init(cmdline_parser_init);
+module_exit(cmdline_parser_exit);
+
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Marius Groeger <mag@sysgo.de>");
# drivers/mtd/maps/Kconfig
-# $Id: Kconfig,v 1.12 2004/08/10 13:12:18 dwmw2 Exp $
+# $Id: Kconfig,v 1.4 2003/05/28 15:18:54 dwmw2 Exp $
menu "Self-contained MTD device drivers"
depends on MTD!=n
depends on MTD && PCI
---help---
This provides a MTD device driver for the Ramix PMC551 RAM PCI card
- from Ramix Inc. <http://www.ramix.com/products/memory/pmc551.html>.
- These devices come in memory configurations from 32M - 1G. If you
- have one, you probably want to enable this.
+ from Ramix Inc. <http://www.ramix.com/products/>. These devices come
+ in memory configurations from 32M - 1G. If you have one, you
+ probably want to enable this.
If this driver is compiled as a module you get the ability to select
the size of the aperture window pointing into the devices memory.
config MTD_MS02NV
tristate "DEC MS02-NV NVRAM module support"
- depends on MTD && MACH_DECSTATION
+ depends on CONFIG_MACH_DECSTATION
help
- This is an MTD driver for the DEC's MS02-NV (54-20948-01) battery
- backed-up NVRAM module. The module was originally meant as an NFS
- accelerator. Say Y here if you have a DECstation 5000/2x0 or a
- DECsystem 5900 equipped with such a module.
+ Support for NVRAM module on DECstation.
config MTD_SLRAM
tristate "Uncached system RAM"
you can still use it for storage or swap by using this driver to
present it to the system as a Memory Technology Device.
-config MTD_PHRAM
- tristate "Physical system RAM"
- depends on MTD
- help
- This is a re-implementation of the slram driver above.
-
- Use this driver to access physical memory that the kernel proper
- doesn't have access to, memory beyond the mem=xxx limit, nvram,
- memory on the video card, etc...
-
config MTD_LART
tristate "28F160xx flash driver for LART"
depends on SA1100_LART && MTD
comment "Disk-On-Chip Device Drivers"
config MTD_DOC2000
- tristate "M-Systems Disk-On-Chip 2000 and Millennium (DEPRECATED)"
+ tristate "M-Systems Disk-On-Chip 2000 and Millennium"
depends on MTD
---help---
This provides an MTD device driver for the M-Systems DiskOnChip
emulate a block device by using a kind of file system on the flash
chips.
- NOTE: This driver is deprecated and will probably be removed soon.
- Please try the new DiskOnChip driver under "NAND Flash Device
- Drivers".
-
config MTD_DOC2001
- tristate "M-Systems Disk-On-Chip Millennium-only alternative driver (DEPRECATED)"
+ tristate "M-Systems Disk-On-Chip Millennium-only alternative driver (see help)"
depends on MTD
---help---
This provides an alternative MTD device driver for the M-Systems
emulate a block device by using a kind of file system on the flash
chips.
- NOTE: This driver is deprecated and will probably be removed soon.
- Please try the new DiskOnChip driver under "NAND Flash Device
- Drivers".
-
config MTD_DOC2001PLUS
tristate "M-Systems Disk-On-Chip Millennium Plus"
depends on MTD
to emulate a block device by using a kind of file system on the
flash chips.
- NOTE: This driver will soon be replaced by the new DiskOnChip driver
- under "NAND Flash Device Drivers" (currently that driver does not
- support all Millennium Plus devices).
-
config MTD_DOCPROBE
tristate
- default m if MTD_DOC2001!=y && MTD_DOC2000!=y && MTD_DOC2001PLUS!=y && (MTD_DOC2001=m || MTD_DOC2000=m || MTD_DOC2001PLUS=m)
+ default m if MTD_DOC2001!=y && MTD_DOC2000!=y && MTD_DOC2001PLUS!=y && (MTD_DOC2001=m || MTD_DOC2000=m || MOD_DOC2001PLUS=m)
default y if MTD_DOC2001=y || MTD_DOC2000=y || MTD_DOC2001PLUS=y
help
- This isn't a real config option; it's derived.
-
-config MTD_DOCECC
- tristate
- default m if MTD_DOCPROBE!=y && MTD_NAND_DISKONCHIP!=y && (MTD_DOCPROBE=m || MTD_NAND_DISKONCHIP=m)
- default y if MTD_DOCPROBE=y || MTD_NAND_DISKONCHIP=y
- help
- This isn't a real config option; it's derived.
+ This isn't a real config option, it's derived.
config MTD_DOCPROBE_ADVANCED
bool "Advanced detection options for DiskOnChip"
#
# linux/drivers/devices/Makefile
#
-# $Id: Makefile.common,v 1.6 2004/07/12 16:07:30 dwmw2 Exp $
+# $Id: Makefile.common,v 1.3 2003/05/28 10:54:23 dwmw2 Exp $
# *** BIG UGLY NOTE ***
#
obj-$(CONFIG_MTD_DOC2000) += doc2000.o
obj-$(CONFIG_MTD_DOC2001) += doc2001.o
obj-$(CONFIG_MTD_DOC2001PLUS) += doc2001plus.o
-obj-$(CONFIG_MTD_DOCPROBE) += docprobe.o
-obj-$(CONFIG_MTD_DOCECC) += docecc.o
+obj-$(CONFIG_MTD_DOCPROBE) += docprobe.o docecc.o
obj-$(CONFIG_MTD_SLRAM) += slram.o
-obj-$(CONFIG_MTD_PHRAM) += phram.o
obj-$(CONFIG_MTD_PMC551) += pmc551.o
obj-$(CONFIG_MTD_MS02NV) += ms02-nv.o
obj-$(CONFIG_MTD_MTDRAM) += mtdram.o
/*
- * $Id: blkmtd.c,v 1.23 2004/08/09 14:03:19 dwmw2 Exp $
+ * $Id: blkmtd-25.c,v 1.5 2003/07/16 06:48:27 spse Exp $
*
* blkmtd.c - use a block device as a fake MTD
*
/* Default erase size in K, always make it a multiple of PAGE_SIZE */
#define CONFIG_MTD_BLKDEV_ERASESIZE (128 << 10) /* 128KiB */
-#define VERSION "$Revision: 1.23 $"
+#define VERSION "$Revision: 1.5 $"
/* Info for the block device */
struct blkmtd_dev {
pagenr = to >> PAGE_SHIFT;
offset = to & ~PAGE_MASK;
- DEBUG(2, "blkmtd: write_pages: buf = %p to = %ld len = %zd pagenr = %d offset = %d\n",
+ DEBUG(2, "blkmtd: write_pages: buf = %p to = %ld len = %d pagenr = %d offset = %d\n",
buf, (long)to, len, pagenr, offset);
/* see if we have to do a partial write at the start */
down(&dev->wrbuf_mutex);
- DEBUG(3, "blkmtd: write: start_len = %zd len = %zd end_len = %zd pagecnt = %d\n",
+ DEBUG(3, "blkmtd: write: start_len = %d len = %d end_len = %d pagecnt = %d\n",
start_len, len, end_len, pagecnt);
if(start_len) {
/* do partial start region */
struct page *page;
- DEBUG(3, "blkmtd: write: doing partial start, page = %d len = %zd offset = %d\n",
+ DEBUG(3, "blkmtd: write: doing partial start, page = %d len = %d offset = %d\n",
pagenr, start_len, offset);
BUG_ON(!buf);
page = read_cache_page(dev->blkdev->bd_inode->i_mapping, pagenr, (filler_t *)blkmtd_readpage, dev);
lock_page(page);
if(PageDirty(page)) {
- err("to = %lld start_len = %zd len = %zd end_len = %zd pagenr = %d\n",
+ err("to = %lld start_len = %d len = %d end_len = %d pagenr = %d\n",
to, start_len, len, end_len, pagenr);
BUG();
}
if(end_len) {
/* do the third region */
struct page *page;
- DEBUG(3, "blkmtd: write: doing partial end, page = %d len = %zd\n",
+ DEBUG(3, "blkmtd: write: doing partial end, page = %d len = %d\n",
pagenr, end_len);
BUG_ON(!buf);
page = read_cache_page(dev->blkdev->bd_inode->i_mapping, pagenr, (filler_t *)blkmtd_readpage, dev);
lock_page(page);
if(PageDirty(page)) {
- err("to = %lld start_len = %zd len = %zd end_len = %zd pagenr = %d\n",
+ err("to = %lld start_len = %d len = %d end_len = %d pagenr = %d\n",
to, start_len, len, end_len, pagenr);
BUG();
}
if(bio)
blkmtd_write_out(bio);
- DEBUG(2, "blkmtd: write: end, retlen = %zd, err = %d\n", *retlen, err);
+ DEBUG(2, "blkmtd: write: end, retlen = %d, err = %d\n", *retlen, err);
up(&dev->wrbuf_mutex);
if(retlen)
size_t from;
u_long len;
int err = -EIO;
- size_t retlen;
+ int retlen;
instr->state = MTD_ERASING;
from = instr->addr;
len = instr->len;
/* check erase region has valid start and length */
- DEBUG(2, "blkmtd: erase: dev = `%s' from = 0x%zx len = 0x%lx\n",
+ DEBUG(2, "blkmtd: erase: dev = `%s' from = 0x%x len = 0x%lx\n",
mtd->name+9, from, len);
while(numregions) {
DEBUG(3, "blkmtd: checking erase region = 0x%08X size = 0x%X num = 0x%x\n",
if(!numregions) {
/* Not a valid erase block */
- err("erase: invalid erase request 0x%lX @ 0x%08zX", len, from);
+ err("erase: invalid erase request 0x%lX @ 0x%08X", len, from);
instr->state = MTD_ERASE_FAILED;
err = -EIO;
}
if(instr->state != MTD_ERASE_FAILED) {
/* do the erase */
- DEBUG(3, "Doing erase from = %zd len = %ld\n", from, len);
+ DEBUG(3, "Doing erase from = %d len = %ld\n", from, len);
err = write_pages(dev, NULL, from, len, &retlen);
if(err || retlen != len) {
err("erase failed err = %d", err);
}
DEBUG(3, "blkmtd: erase: checking callback\n");
- mtd_erase_callback(instr);
+ if (instr->callback) {
+ (*(instr->callback))(instr);
+ }
DEBUG(2, "blkmtd: erase: finished (err = %d)\n", err);
return err;
}
int pagenr, pages;
size_t thislen = 0;
- DEBUG(2, "blkmtd: read: dev = `%s' from = %lld len = %zd buf = %p\n",
- mtd->name+9, from, len, buf);
+ DEBUG(2, "blkmtd: read: dev = `%s' from = %ld len = %d buf = %p\n",
+ mtd->name+9, (long int)from, len, buf);
if(from > mtd->size)
return -EINVAL;
readerr:
if(retlen)
*retlen = thislen;
- DEBUG(2, "blkmtd: end read: retlen = %zd, err = %d\n", thislen, err);
+ DEBUG(2, "blkmtd: end read: retlen = %d, err = %d\n", thislen, err);
return err;
}
if(!len)
return 0;
- DEBUG(2, "blkmtd: write: dev = `%s' to = %lld len = %zd buf = %p\n",
- mtd->name+9, to, len, buf);
+ DEBUG(2, "blkmtd: write: dev = `%s' to = %ld len = %d buf = %p\n",
+ mtd->name+9, (long int)to, len, buf);
if(to >= mtd->size) {
return -ENOSPC;
{
struct mtd_erase_region_info *info = NULL;
- DEBUG(2, "calc_erase_regions, es = %zd size = %zd regions = %d\n",
+ DEBUG(2, "calc_erase_regions, es = %d size = %d regions = %d\n",
erase_size, total_size, *regions);
/* Make any user specified erasesize be a power of 2
and at least PAGE_SIZE */
break;
}
} while(!(*regions));
- DEBUG(2, "calc_erase_regions done, es = %zd size = %zd regions = %d\n",
+ DEBUG(2, "calc_erase_regions done, es = %d size = %d regions = %d\n",
erase_size, total_size, *regions);
return info;
}
* (c) 1999 Machine Vision Holdings, Inc.
* (c) 1999, 2000 David Woodhouse <dwmw2@infradead.org>
*
- * $Id: doc2000.c,v 1.62 2004/08/09 14:04:02 dwmw2 Exp $
+ * $Id: doc2000.c,v 1.53 2003/06/11 09:45:19 dwmw2 Exp $
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/types.h>
-#include <linux/bitops.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/doc2000.h>
#define DOC_SUPPORT_2000
-#define DOC_SUPPORT_2000TSOP
#define DOC_SUPPORT_MILLENNIUM
#ifdef DOC_SUPPORT_2000
#define DoC_is_2000(doc) (0)
#endif
-#if defined(DOC_SUPPORT_2000TSOP) || defined(DOC_SUPPORT_MILLENNIUM)
+#ifdef DOC_SUPPORT_MILLENNIUM
#define DoC_is_Millennium(doc) (doc->ChipID == DOC_ChipID_DocMil)
#else
#define DoC_is_Millennium(doc) (0)
static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf);
static int doc_read_ecc(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf, u_char *eccbuf, struct nand_oobinfo *oobsel);
+ size_t *retlen, u_char *buf, u_char *eccbuf,
+ struct nand_oobinfo *unused);
static int doc_write_ecc(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf, u_char *eccbuf, struct nand_oobinfo *oobsel);
-static int doc_writev_ecc(struct mtd_info *mtd, const struct kvec *vecs,
- unsigned long count, loff_t to, size_t *retlen,
- u_char *eccbuf, struct nand_oobinfo *oobsel);
+ size_t *retlen, const u_char *buf, u_char *eccbuf,
+ struct nand_oobinfo *unused);
static int doc_read_oob(struct mtd_info *mtd, loff_t ofs, size_t len,
size_t *retlen, u_char *buf);
static int doc_write_oob(struct mtd_info *mtd, loff_t ofs, size_t len,
/* Out-of-line routine to wait for chip response */
while (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) {
- /* issue 2 read from NOP register after reading from CDSNControl register
- see Software Requirement 11.4 item 2. */
- DoC_Delay(doc, 2);
-
if (time_after(jiffies, timeo)) {
DEBUG(MTD_DEBUG_LEVEL2, "_DoC_WaitReady timed out.\n");
return -EIO;
/* Send the command */
WriteDOC_(command, docptr, doc->ioreg);
- if (DoC_is_Millennium(doc))
- WriteDOC(command, docptr, WritePipeTerm);
/* Lower the CLE line */
WriteDOC(xtraflags | CDSN_CTRL_CE, docptr, CDSNControl);
}
}
- if (DoC_is_Millennium(doc))
- WriteDOC(ofs & 0xff, docptr, WritePipeTerm);
-
DoC_Delay(doc, 2); /* Needed for some slow flash chips. mf. */
/* FIXME: The SlowIO's for millennium could be replaced by
/* Read the manufacturer and device id codes from the device */
- if (DoC_is_Millennium(doc)) {
- DoC_Delay(doc, 2);
- dummy = ReadDOC(doc->virtadr, ReadPipeInit);
- mfr = ReadDOC(doc->virtadr, LastDataRead);
+ /* CDSN Slow IO register see Software Requirement 11.4 item 5. */
+ dummy = ReadDOC(doc->virtadr, CDSNSlowIO);
+ DoC_Delay(doc, 2);
+ mfr = ReadDOC_(doc->virtadr, doc->ioreg);
- DoC_Delay(doc, 2);
- dummy = ReadDOC(doc->virtadr, ReadPipeInit);
- id = ReadDOC(doc->virtadr, LastDataRead);
- } else {
- /* CDSN Slow IO register see Software Req 11.4 item 5. */
- dummy = ReadDOC(doc->virtadr, CDSNSlowIO);
- DoC_Delay(doc, 2);
- mfr = ReadDOC_(doc->virtadr, doc->ioreg);
-
- /* CDSN Slow IO register see Software Req 11.4 item 5. */
- dummy = ReadDOC(doc->virtadr, CDSNSlowIO);
- DoC_Delay(doc, 2);
- id = ReadDOC_(doc->virtadr, doc->ioreg);
- }
+ /* CDSN Slow IO register see Software Requirement 11.4 item 5. */
+ dummy = ReadDOC(doc->virtadr, CDSNSlowIO);
+ DoC_Delay(doc, 2);
+ id = ReadDOC_(doc->virtadr, doc->ioreg);
/* No response - return failure */
if (mfr == 0xff || mfr == 0)
if (!doc->mfr) {
doc->mfr = mfr;
doc->id = id;
- doc->chipshift =
- ffs((nand_flash_ids[i].chipsize << 20)) - 1;
- doc->page256 = (nand_flash_ids[i].pagesize == 256) ? 1 : 0;
- doc->pageadrlen = doc->chipshift > 25 ? 3 : 2;
+ doc->chipshift =
+ nand_flash_ids[i].chipshift;
+ doc->page256 = nand_flash_ids[i].page256;
+ doc->pageadrlen =
+ nand_flash_ids[i].chipshift > 25 ? 3 : 2;
doc->erasesize =
nand_flash_ids[i].erasesize;
return 1;
/* DoC_ScanChips: Find all NAND chips present in a DiskOnChip, and identify them */
-static void DoC_ScanChips(struct DiskOnChip *this, int maxchips)
+static void DoC_ScanChips(struct DiskOnChip *this)
{
int floor, chip;
int numchips[MAX_FLOORS];
+ int maxchips = MAX_CHIPS;
int ret = 1;
this->numchips = 0;
this->mfr = 0;
this->id = 0;
+ if (DoC_is_Millennium(this))
+ maxchips = MAX_CHIPS_MIL;
+
/* For each floor, find the number of valid chips it contains */
for (floor = 0; floor < MAX_FLOORS; floor++) {
ret = 1;
{
struct DiskOnChip *this = (struct DiskOnChip *) mtd->priv;
struct DiskOnChip *old = NULL;
- int maxchips;
/* We must avoid being called twice for the same device. */
switch (this->ChipID) {
- case DOC_ChipID_Doc2kTSOP:
- mtd->name = "DiskOnChip 2000 TSOP";
- this->ioreg = DoC_Mil_CDSN_IO;
- /* Pretend it's a Millennium */
- this->ChipID = DOC_ChipID_DocMil;
- maxchips = MAX_CHIPS;
- break;
case DOC_ChipID_Doc2k:
mtd->name = "DiskOnChip 2000";
this->ioreg = DoC_2k_CDSN_IO;
- maxchips = MAX_CHIPS;
break;
case DOC_ChipID_DocMil:
mtd->name = "DiskOnChip Millennium";
this->ioreg = DoC_Mil_CDSN_IO;
- maxchips = MAX_CHIPS_MIL;
break;
- default:
- printk("Unknown ChipID 0x%02x\n", this->ChipID);
- kfree(mtd);
- iounmap((void *) this->virtadr);
- return;
}
printk(KERN_NOTICE "%s found at address 0x%lX\n", mtd->name,
mtd->write = doc_write;
mtd->read_ecc = doc_read_ecc;
mtd->write_ecc = doc_write_ecc;
- mtd->writev_ecc = doc_writev_ecc;
mtd->read_oob = doc_read_oob;
mtd->write_oob = doc_write_oob;
mtd->sync = NULL;
init_MUTEX(&this->lock);
/* Ident all the chips present. */
- DoC_ScanChips(this, maxchips);
+ DoC_ScanChips(this);
if (!this->totlen) {
kfree(mtd);
}
static int doc_read_ecc(struct mtd_info *mtd, loff_t from, size_t len,
- size_t * retlen, u_char * buf, u_char * eccbuf, struct nand_oobinfo *oobsel)
+ size_t * retlen, u_char * buf, u_char * eccbuf,
+ struct nand_oobinfo *unused)
{
struct DiskOnChip *this = (struct DiskOnChip *) mtd->priv;
unsigned long docptr;
unsigned char syndrome[6];
volatile char dummy;
int i, len256 = 0, ret=0;
- size_t left = len;
docptr = this->virtadr;
down(&this->lock);
- *retlen = 0;
- while (left) {
- len = left;
+ /* Don't allow a single read to cross a 512-byte block boundary */
+ if (from + len > ((from | 0x1ff) + 1))
+ len = ((from | 0x1ff) + 1) - from;
- /* Don't allow a single read to cross a 512-byte block boundary */
- if (from + len > ((from | 0x1ff) + 1))
- len = ((from | 0x1ff) + 1) - from;
+ /* The ECC will not be calculated correctly if less than 512 is read */
+ if (len != 0x200 && eccbuf)
+ printk(KERN_WARNING
+ "ECC needs a full sector read (adr: %lx size %lx)\n",
+ (long) from, (long) len);
- /* The ECC will not be calculated correctly if less than 512 is read */
- if (len != 0x200 && eccbuf)
- printk(KERN_WARNING
- "ECC needs a full sector read (adr: %lx size %lx)\n",
- (long) from, (long) len);
+ /* printk("DoC_Read (adr: %lx size %lx)\n", (long) from, (long) len); */
- /* printk("DoC_Read (adr: %lx size %lx)\n", (long) from, (long) len); */
+ /* Find the chip which is to be used and select it */
+ mychip = &this->chips[from >> (this->chipshift)];
- /* Find the chip which is to be used and select it */
- mychip = &this->chips[from >> (this->chipshift)];
+ if (this->curfloor != mychip->floor) {
+ DoC_SelectFloor(this, mychip->floor);
+ DoC_SelectChip(this, mychip->chip);
+ } else if (this->curchip != mychip->chip) {
+ DoC_SelectChip(this, mychip->chip);
+ }
- if (this->curfloor != mychip->floor) {
- DoC_SelectFloor(this, mychip->floor);
- DoC_SelectChip(this, mychip->chip);
- } else if (this->curchip != mychip->chip) {
- DoC_SelectChip(this, mychip->chip);
- }
+ this->curfloor = mychip->floor;
+ this->curchip = mychip->chip;
- this->curfloor = mychip->floor;
- this->curchip = mychip->chip;
+ DoC_Command(this,
+ (!this->page256
+ && (from & 0x100)) ? NAND_CMD_READ1 : NAND_CMD_READ0,
+ CDSN_CTRL_WP);
+ DoC_Address(this, ADDR_COLUMN_PAGE, from, CDSN_CTRL_WP,
+ CDSN_CTRL_ECC_IO);
+
+ if (eccbuf) {
+ /* Prime the ECC engine */
+ WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
+ WriteDOC(DOC_ECC_EN, docptr, ECCConf);
+ } else {
+ /* disable the ECC engine */
+ WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
+ WriteDOC(DOC_ECC_DIS, docptr, ECCConf);
+ }
- DoC_Command(this,
- (!this->page256
- && (from & 0x100)) ? NAND_CMD_READ1 : NAND_CMD_READ0,
- CDSN_CTRL_WP);
- DoC_Address(this, ADDR_COLUMN_PAGE, from, CDSN_CTRL_WP,
- CDSN_CTRL_ECC_IO);
+ /* treat crossing 256-byte sector for 2M x 8bits devices */
+ if (this->page256 && from + len > (from | 0xff) + 1) {
+ len256 = (from | 0xff) + 1 - from;
+ DoC_ReadBuf(this, buf, len256);
- if (eccbuf) {
- /* Prime the ECC engine */
- WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
- WriteDOC(DOC_ECC_EN, docptr, ECCConf);
- } else {
- /* disable the ECC engine */
- WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
- WriteDOC(DOC_ECC_DIS, docptr, ECCConf);
- }
+ DoC_Command(this, NAND_CMD_READ0, CDSN_CTRL_WP);
+ DoC_Address(this, ADDR_COLUMN_PAGE, from + len256,
+ CDSN_CTRL_WP, CDSN_CTRL_ECC_IO);
+ }
- /* treat crossing 256-byte sector for 2M x 8bits devices */
- if (this->page256 && from + len > (from | 0xff) + 1) {
- len256 = (from | 0xff) + 1 - from;
- DoC_ReadBuf(this, buf, len256);
+ DoC_ReadBuf(this, &buf[len256], len - len256);
- DoC_Command(this, NAND_CMD_READ0, CDSN_CTRL_WP);
- DoC_Address(this, ADDR_COLUMN_PAGE, from + len256,
- CDSN_CTRL_WP, CDSN_CTRL_ECC_IO);
- }
+ /* Let the caller know we completed it */
+ *retlen = len;
- DoC_ReadBuf(this, &buf[len256], len - len256);
-
- /* Let the caller know we completed it */
- *retlen += len;
-
- if (eccbuf) {
- /* Read the ECC data through the DiskOnChip ECC logic */
- /* Note: this will work even with 2M x 8bit devices as */
- /* they have 8 bytes of OOB per 256 page. mf. */
- DoC_ReadBuf(this, eccbuf, 6);
-
- /* Flush the pipeline */
- if (DoC_is_Millennium(this)) {
- dummy = ReadDOC(docptr, ECCConf);
- dummy = ReadDOC(docptr, ECCConf);
- i = ReadDOC(docptr, ECCConf);
- } else {
- dummy = ReadDOC(docptr, 2k_ECCStatus);
- dummy = ReadDOC(docptr, 2k_ECCStatus);
- i = ReadDOC(docptr, 2k_ECCStatus);
- }
+ if (eccbuf) {
+ /* Read the ECC data through the DiskOnChip ECC logic */
+ /* Note: this will work even with 2M x 8bit devices as */
+ /* they have 8 bytes of OOB per 256 page. mf. */
+ DoC_ReadBuf(this, eccbuf, 6);
- /* Check the ECC Status */
- if (i & 0x80) {
- int nb_errors;
- /* There was an ECC error */
+ /* Flush the pipeline */
+ if (DoC_is_Millennium(this)) {
+ dummy = ReadDOC(docptr, ECCConf);
+ dummy = ReadDOC(docptr, ECCConf);
+ i = ReadDOC(docptr, ECCConf);
+ } else {
+ dummy = ReadDOC(docptr, 2k_ECCStatus);
+ dummy = ReadDOC(docptr, 2k_ECCStatus);
+ i = ReadDOC(docptr, 2k_ECCStatus);
+ }
+
+ /* Check the ECC Status */
+ if (i & 0x80) {
+ int nb_errors;
+ /* There was an ECC error */
#ifdef ECC_DEBUG
- printk(KERN_ERR "DiskOnChip ECC Error: Read at %lx\n", (long)from);
+ printk(KERN_ERR "DiskOnChip ECC Error: Read at %lx\n", (long)from);
#endif
- /* Read the ECC syndrom through the DiskOnChip ECC logic.
- These syndrome will be all ZERO when there is no error */
- for (i = 0; i < 6; i++) {
- syndrome[i] =
- ReadDOC(docptr, ECCSyndrome0 + i);
- }
- nb_errors = doc_decode_ecc(buf, syndrome);
+ /* Read the ECC syndrom through the DiskOnChip ECC logic.
+ These syndrome will be all ZERO when there is no error */
+ for (i = 0; i < 6; i++) {
+ syndrome[i] =
+ ReadDOC(docptr, ECCSyndrome0 + i);
+ }
+ nb_errors = doc_decode_ecc(buf, syndrome);
#ifdef ECC_DEBUG
- printk(KERN_ERR "Errors corrected: %x\n", nb_errors);
+ printk(KERN_ERR "Errors corrected: %x\n", nb_errors);
#endif
- if (nb_errors < 0) {
- /* We return error, but have actually done the read. Not that
- this can be told to user-space, via sys_read(), but at least
- MTD-aware stuff can know about it by checking *retlen */
- ret = -EIO;
- }
- }
+ if (nb_errors < 0) {
+ /* We return error, but have actually done the read. Not that
+ this can be told to user-space, via sys_read(), but at least
+ MTD-aware stuff can know about it by checking *retlen */
+ ret = -EIO;
+ }
+ }
#ifdef PSYCHO_DEBUG
- printk(KERN_DEBUG "ECC DATA at %lxB: %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n",
- (long)from, eccbuf[0], eccbuf[1], eccbuf[2],
- eccbuf[3], eccbuf[4], eccbuf[5]);
+ printk(KERN_DEBUG "ECC DATA at %lxB: %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n",
+ (long)from, eccbuf[0], eccbuf[1], eccbuf[2],
+ eccbuf[3], eccbuf[4], eccbuf[5]);
#endif
- /* disable the ECC engine */
- WriteDOC(DOC_ECC_DIS, docptr , ECCConf);
- }
-
- /* according to 11.4.1, we need to wait for the busy line
- * drop if we read to the end of the page. */
- if(0 == ((from + len) & 0x1ff))
- {
- DoC_WaitReady(this);
- }
+ /* disable the ECC engine */
+ WriteDOC(DOC_ECC_DIS, docptr , ECCConf);
+ }
- from += len;
- left -= len;
- buf += len;
+ /* according to 11.4.1, we need to wait for the busy line
+ * drop if we read to the end of the page. */
+ if(0 == ((from + *retlen) & 0x1ff))
+ {
+ DoC_WaitReady(this);
}
up(&this->lock);
static int doc_write_ecc(struct mtd_info *mtd, loff_t to, size_t len,
size_t * retlen, const u_char * buf,
- u_char * eccbuf, struct nand_oobinfo *oobsel)
+ u_char * eccbuf,
+ struct nand_oobinfo *unused)
{
struct DiskOnChip *this = (struct DiskOnChip *) mtd->priv;
int di; /* Yes, DI is a hangover from when I was disassembling the binary driver */
volatile char dummy;
int len256 = 0;
struct Nand *mychip;
- size_t left = len;
- int status;
docptr = this->virtadr;
down(&this->lock);
- *retlen = 0;
- while (left) {
- len = left;
-
- /* Don't allow a single write to cross a 512-byte block boundary */
- if (to + len > ((to | 0x1ff) + 1))
- len = ((to | 0x1ff) + 1) - to;
+ /* Don't allow a single write to cross a 512-byte block boundary */
+ if (to + len > ((to | 0x1ff) + 1))
+ len = ((to | 0x1ff) + 1) - to;
- /* The ECC will not be calculated correctly if less than 512 is written */
-/* DBB-
- if (len != 0x200 && eccbuf)
- printk(KERN_WARNING
- "ECC needs a full sector write (adr: %lx size %lx)\n",
- (long) to, (long) len);
- -DBB */
-
- /* printk("DoC_Write (adr: %lx size %lx)\n", (long) to, (long) len); */
+ /* The ECC will not be calculated correctly if less than 512 is written */
+ if (len != 0x200 && eccbuf)
+ printk(KERN_WARNING
+ "ECC needs a full sector write (adr: %lx size %lx)\n",
+ (long) to, (long) len);
- /* Find the chip which is to be used and select it */
- mychip = &this->chips[to >> (this->chipshift)];
+ /* printk("DoC_Write (adr: %lx size %lx)\n", (long) to, (long) len); */
- if (this->curfloor != mychip->floor) {
- DoC_SelectFloor(this, mychip->floor);
- DoC_SelectChip(this, mychip->chip);
- } else if (this->curchip != mychip->chip) {
- DoC_SelectChip(this, mychip->chip);
- }
-
- this->curfloor = mychip->floor;
- this->curchip = mychip->chip;
-
- /* Set device to main plane of flash */
- DoC_Command(this, NAND_CMD_RESET, CDSN_CTRL_WP);
- DoC_Command(this,
- (!this->page256
- && (to & 0x100)) ? NAND_CMD_READ1 : NAND_CMD_READ0,
- CDSN_CTRL_WP);
-
- DoC_Command(this, NAND_CMD_SEQIN, 0);
- DoC_Address(this, ADDR_COLUMN_PAGE, to, 0, CDSN_CTRL_ECC_IO);
-
- if (eccbuf) {
- /* Prime the ECC engine */
- WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
- WriteDOC(DOC_ECC_EN | DOC_ECC_RW, docptr, ECCConf);
- } else {
- /* disable the ECC engine */
- WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
- WriteDOC(DOC_ECC_DIS, docptr, ECCConf);
- }
-
- /* treat crossing 256-byte sector for 2M x 8bits devices */
- if (this->page256 && to + len > (to | 0xff) + 1) {
- len256 = (to | 0xff) + 1 - to;
- DoC_WriteBuf(this, buf, len256);
-
- DoC_Command(this, NAND_CMD_PAGEPROG, 0);
-
- DoC_Command(this, NAND_CMD_STATUS, CDSN_CTRL_WP);
- /* There's an implicit DoC_WaitReady() in DoC_Command */
-
- dummy = ReadDOC(docptr, CDSNSlowIO);
- DoC_Delay(this, 2);
-
- if (ReadDOC_(docptr, this->ioreg) & 1) {
- printk(KERN_ERR "Error programming flash\n");
- /* Error in programming */
- *retlen = 0;
- up(&this->lock);
- return -EIO;
- }
-
- DoC_Command(this, NAND_CMD_SEQIN, 0);
- DoC_Address(this, ADDR_COLUMN_PAGE, to + len256, 0,
- CDSN_CTRL_ECC_IO);
- }
-
- DoC_WriteBuf(this, &buf[len256], len - len256);
+ /* Find the chip which is to be used and select it */
+ mychip = &this->chips[to >> (this->chipshift)];
- if (eccbuf) {
- WriteDOC(CDSN_CTRL_ECC_IO | CDSN_CTRL_CE, docptr,
- CDSNControl);
+ if (this->curfloor != mychip->floor) {
+ DoC_SelectFloor(this, mychip->floor);
+ DoC_SelectChip(this, mychip->chip);
+ } else if (this->curchip != mychip->chip) {
+ DoC_SelectChip(this, mychip->chip);
+ }
- if (DoC_is_Millennium(this)) {
- WriteDOC(0, docptr, NOP);
- WriteDOC(0, docptr, NOP);
- WriteDOC(0, docptr, NOP);
- } else {
- WriteDOC_(0, docptr, this->ioreg);
- WriteDOC_(0, docptr, this->ioreg);
- WriteDOC_(0, docptr, this->ioreg);
- }
+ this->curfloor = mychip->floor;
+ this->curchip = mychip->chip;
- WriteDOC(CDSN_CTRL_ECC_IO | CDSN_CTRL_FLASH_IO | CDSN_CTRL_CE, docptr,
- CDSNControl);
+ /* Set device to main plane of flash */
+ DoC_Command(this, NAND_CMD_RESET, CDSN_CTRL_WP);
+ DoC_Command(this,
+ (!this->page256
+ && (to & 0x100)) ? NAND_CMD_READ1 : NAND_CMD_READ0,
+ CDSN_CTRL_WP);
- /* Read the ECC data through the DiskOnChip ECC logic */
- for (di = 0; di < 6; di++) {
- eccbuf[di] = ReadDOC(docptr, ECCSyndrome0 + di);
- }
+ DoC_Command(this, NAND_CMD_SEQIN, 0);
+ DoC_Address(this, ADDR_COLUMN_PAGE, to, 0, CDSN_CTRL_ECC_IO);
- /* Reset the ECC engine */
- WriteDOC(DOC_ECC_DIS, docptr, ECCConf);
+ if (eccbuf) {
+ /* Prime the ECC engine */
+ WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
+ WriteDOC(DOC_ECC_EN | DOC_ECC_RW, docptr, ECCConf);
+ } else {
+ /* disable the ECC engine */
+ WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
+ WriteDOC(DOC_ECC_DIS, docptr, ECCConf);
+ }
-#ifdef PSYCHO_DEBUG
- printk
- ("OOB data at %lx is %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n",
- (long) to, eccbuf[0], eccbuf[1], eccbuf[2], eccbuf[3],
- eccbuf[4], eccbuf[5]);
-#endif
- }
+ /* treat crossing 256-byte sector for 2M x 8bits devices */
+ if (this->page256 && to + len > (to | 0xff) + 1) {
+ len256 = (to | 0xff) + 1 - to;
+ DoC_WriteBuf(this, buf, len256);
DoC_Command(this, NAND_CMD_PAGEPROG, 0);
DoC_Command(this, NAND_CMD_STATUS, CDSN_CTRL_WP);
/* There's an implicit DoC_WaitReady() in DoC_Command */
- if (DoC_is_Millennium(this)) {
- ReadDOC(docptr, ReadPipeInit);
- status = ReadDOC(docptr, LastDataRead);
- } else {
- dummy = ReadDOC(docptr, CDSNSlowIO);
- DoC_Delay(this, 2);
- status = ReadDOC_(docptr, this->ioreg);
- }
+ dummy = ReadDOC(docptr, CDSNSlowIO);
+ DoC_Delay(this, 2);
- if (status & 1) {
+ if (ReadDOC_(docptr, this->ioreg) & 1) {
printk(KERN_ERR "Error programming flash\n");
/* Error in programming */
*retlen = 0;
return -EIO;
}
- /* Let the caller know we completed it */
- *retlen += len;
-
- if (eccbuf) {
- unsigned char x[8];
- size_t dummy;
- int ret;
-
- /* Write the ECC data to flash */
- for (di=0; di<6; di++)
- x[di] = eccbuf[di];
-
- x[6]=0x55;
- x[7]=0x55;
-
- ret = doc_write_oob_nolock(mtd, to, 8, &dummy, x);
- if (ret) {
- up(&this->lock);
- return ret;
- }
- }
-
- to += len;
- left -= len;
- buf += len;
+ DoC_Command(this, NAND_CMD_SEQIN, 0);
+ DoC_Address(this, ADDR_COLUMN_PAGE, to + len256, 0,
+ CDSN_CTRL_ECC_IO);
}
- up(&this->lock);
- return 0;
-}
-
-static int doc_writev_ecc(struct mtd_info *mtd, const struct kvec *vecs,
- unsigned long count, loff_t to, size_t *retlen,
- u_char *eccbuf, struct nand_oobinfo *oobsel)
-{
- static char static_buf[512];
- static DECLARE_MUTEX(writev_buf_sem);
+ DoC_WriteBuf(this, &buf[len256], len - len256);
- size_t totretlen = 0;
- size_t thisvecofs = 0;
- int ret= 0;
+ if (eccbuf) {
+ WriteDOC(CDSN_CTRL_ECC_IO | CDSN_CTRL_CE, docptr,
+ CDSNControl);
- down(&writev_buf_sem);
+ if (DoC_is_Millennium(this)) {
+ WriteDOC(0, docptr, NOP);
+ WriteDOC(0, docptr, NOP);
+ WriteDOC(0, docptr, NOP);
+ } else {
+ WriteDOC_(0, docptr, this->ioreg);
+ WriteDOC_(0, docptr, this->ioreg);
+ WriteDOC_(0, docptr, this->ioreg);
+ }
- while(count) {
- size_t thislen, thisretlen;
- unsigned char *buf;
+ /* Read the ECC data through the DiskOnChip ECC logic */
+ for (di = 0; di < 6; di++) {
+ eccbuf[di] = ReadDOC(docptr, ECCSyndrome0 + di);
+ }
- buf = vecs->iov_base + thisvecofs;
- thislen = vecs->iov_len - thisvecofs;
+ /* Reset the ECC engine */
+ WriteDOC(DOC_ECC_DIS, docptr, ECCConf);
+#ifdef PSYCHO_DEBUG
+ printk
+ ("OOB data at %lx is %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n",
+ (long) to, eccbuf[0], eccbuf[1], eccbuf[2], eccbuf[3],
+ eccbuf[4], eccbuf[5]);
+#endif
+ }
- if (thislen >= 512) {
- thislen = thislen & ~(512-1);
- thisvecofs += thislen;
- } else {
- /* Not enough to fill a page. Copy into buf */
- memcpy(static_buf, buf, thislen);
- buf = &static_buf[thislen];
-
- while(count && thislen < 512) {
- vecs++;
- count--;
- thisvecofs = min((512-thislen), vecs->iov_len);
- memcpy(buf, vecs->iov_base, thisvecofs);
- thislen += thisvecofs;
- buf += thisvecofs;
- }
- buf = static_buf;
- }
- if (count && thisvecofs == vecs->iov_len) {
- thisvecofs = 0;
- vecs++;
- count--;
- }
- ret = doc_write_ecc(mtd, to, thislen, &thisretlen, buf, eccbuf, oobsel);
+ DoC_Command(this, NAND_CMD_PAGEPROG, 0);
- totretlen += thisretlen;
+ DoC_Command(this, NAND_CMD_STATUS, CDSN_CTRL_WP);
+ /* There's an implicit DoC_WaitReady() in DoC_Command */
- if (ret || thisretlen != thislen)
- break;
+ dummy = ReadDOC(docptr, CDSNSlowIO);
+ DoC_Delay(this, 2);
- to += thislen;
- }
+ if (ReadDOC_(docptr, this->ioreg) & 1) {
+ printk(KERN_ERR "Error programming flash\n");
+ /* Error in programming */
+ *retlen = 0;
+ up(&this->lock);
+ return -EIO;
+ }
- up(&writev_buf_sem);
- *retlen = totretlen;
- return ret;
+ /* Let the caller know we completed it */
+ *retlen = len;
+
+ if (eccbuf) {
+ unsigned char x[8];
+ size_t dummy;
+ int ret;
+
+ /* Write the ECC data to flash */
+ for (di=0; di<6; di++)
+ x[di] = eccbuf[di];
+
+ x[6]=0x55;
+ x[7]=0x55;
+
+ ret = doc_write_oob_nolock(mtd, to, 8, &dummy, x);
+ up(&this->lock);
+ return ret;
+ }
+ up(&this->lock);
+ return 0;
}
-
static int doc_read_oob(struct mtd_info *mtd, loff_t ofs, size_t len,
size_t * retlen, u_char * buf)
{
unsigned long docptr = this->virtadr;
struct Nand *mychip = &this->chips[ofs >> this->chipshift];
volatile int dummy;
- int status;
// printk("doc_write_oob(%lx, %d): %2.2X %2.2X %2.2X %2.2X ... %2.2X %2.2X .. %2.2X %2.2X\n",(long)ofs, len,
// buf[0], buf[1], buf[2], buf[3], buf[8], buf[9], buf[14],buf[15]);
DoC_Command(this, NAND_CMD_STATUS, 0);
/* DoC_WaitReady() is implicit in DoC_Command */
- if (DoC_is_Millennium(this)) {
- ReadDOC(docptr, ReadPipeInit);
- status = ReadDOC(docptr, LastDataRead);
- } else {
- dummy = ReadDOC(docptr, CDSNSlowIO);
- DoC_Delay(this, 2);
- status = ReadDOC_(docptr, this->ioreg);
- }
+ dummy = ReadDOC(docptr, CDSNSlowIO);
+ DoC_Delay(this, 2);
- if (status & 1) {
+ if (ReadDOC_(docptr, this->ioreg) & 1) {
printk(KERN_ERR "Error programming oob data\n");
/* There was an error */
*retlen = 0;
DoC_Command(this, NAND_CMD_STATUS, 0);
/* DoC_WaitReady() is implicit in DoC_Command */
- if (DoC_is_Millennium(this)) {
- ReadDOC(docptr, ReadPipeInit);
- status = ReadDOC(docptr, LastDataRead);
- } else {
- dummy = ReadDOC(docptr, CDSNSlowIO);
- DoC_Delay(this, 2);
- status = ReadDOC_(docptr, this->ioreg);
- }
+ dummy = ReadDOC(docptr, CDSNSlowIO);
+ DoC_Delay(this, 2);
- if (status & 1) {
+ if (ReadDOC_(docptr, this->ioreg) & 1) {
printk(KERN_ERR "Error programming oob data\n");
/* There was an error */
*retlen = 0;
volatile int dummy;
unsigned long docptr;
struct Nand *mychip;
- int status;
down(&this->lock);
DoC_Command(this, NAND_CMD_STATUS, CDSN_CTRL_WP);
- if (DoC_is_Millennium(this)) {
- ReadDOC(docptr, ReadPipeInit);
- status = ReadDOC(docptr, LastDataRead);
- } else {
- dummy = ReadDOC(docptr, CDSNSlowIO);
- DoC_Delay(this, 2);
- status = ReadDOC_(docptr, this->ioreg);
- }
-
- if (status & 1) {
+ dummy = ReadDOC(docptr, CDSNSlowIO);
+ DoC_Delay(this, 2);
+
+ if (ReadDOC_(docptr, this->ioreg) & 1) {
printk(KERN_ERR "Error erasing at 0x%x\n", ofs);
/* There was an error */
instr->state = MTD_ERASE_FAILED;
instr->state = MTD_ERASE_DONE;
callback:
- mtd_erase_callback(instr);
+ if (instr->callback)
+ instr->callback(instr);
up(&this->lock);
return 0;
* (c) 1999 Machine Vision Holdings, Inc.
* (c) 1999, 2000 David Woodhouse <dwmw2@infradead.org>
*
- * $Id: doc2001.c,v 1.44 2004/08/09 14:04:24 dwmw2 Exp $
+ * $Id: doc2001.c,v 1.41 2003/06/11 09:45:19 dwmw2 Exp $
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/types.h>
-#include <linux/bitops.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
size_t *retlen, const u_char *buf);
static int doc_read_ecc(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf, u_char *eccbuf,
- struct nand_oobinfo *oobsel);
+ struct nand_oobinfo *unused);
static int doc_write_ecc(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf, u_char *eccbuf,
- struct nand_oobinfo *oobsel);
+ struct nand_oobinfo *unused);
+
static int doc_read_oob(struct mtd_info *mtd, loff_t ofs, size_t len,
size_t *retlen, u_char *buf);
static int doc_write_oob(struct mtd_info *mtd, loff_t ofs, size_t len,
mfr, id, nand_manuf_ids[j].name, nand_flash_ids[i].name);
doc->mfr = mfr;
doc->id = id;
- doc->chipshift = ffs((nand_flash_ids[i].chipsize << 20)) - 1;
+ doc->chipshift = nand_flash_ids[i].chipshift;
break;
}
}
static int doc_read_ecc (struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf, u_char *eccbuf,
- struct nand_oobinfo *oobsel)
+ struct nand_oobinfo *unused)
{
int i, ret;
volatile char dummy;
static int doc_write_ecc (struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf, u_char *eccbuf,
- struct nand_oobinfo *oobsel)
+ struct nand_oobinfo *unused)
{
int i,ret = 0;
volatile char dummy;
instr->state = MTD_ERASE_DONE;
dummy = ReadDOC(docptr, LastDataRead);
- mtd_erase_callback(instr);
+ if (instr->callback)
+ instr->callback(instr);
return 0;
}
* (c) 1999 Machine Vision Holdings, Inc.
* (c) 1999, 2000 David Woodhouse <dwmw2@infradead.org>
*
- * $Id: doc2001plus.c,v 1.9 2004/08/09 13:19:44 dwmw2 Exp $
- *
- * Released under GPL
+ * $Id: doc2001plus.c,v 1.5 2003/06/11 09:45:19 dwmw2 Exp $
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/types.h>
-#include <linux/bitops.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
* | Data 0 | ECC 0 |Flags0 |Flags1 | Data 1 |ECC 1 | OOB 1 + 2 |
* +-----------+-------+-------+-------+--------------+---------+-----------+
*/
-/* FIXME: This lives in INFTL not here. Other users of flash devices
- may not want it */
static unsigned int DoC_GetDataOffset(struct mtd_info *mtd, loff_t *from)
{
- struct DiskOnChip *this = (struct DiskOnChip *)mtd->priv;
-
- if (this->interleave) {
- unsigned int ofs = *from & 0x3ff;
- unsigned int cmd;
-
- if (ofs < 512) {
- cmd = NAND_CMD_READ0;
- ofs &= 0x1ff;
- } else if (ofs < 1014) {
- cmd = NAND_CMD_READ1;
- ofs = (ofs & 0x1ff) + 10;
- } else {
- cmd = NAND_CMD_READOOB;
- ofs = ofs - 1014;
- }
+ unsigned int ofs = *from & 0x3ff;
+ unsigned int cmd;
- *from = (*from & ~0x3ff) | ofs;
- return cmd;
+ if (ofs < 512) {
+ cmd = NAND_CMD_READ0;
+ ofs &= 0x1ff;
+ } else if (ofs < 1014) {
+ cmd = NAND_CMD_READ1;
+ ofs = (ofs & 0x1ff) + 10;
} else {
- /* No interleave */
- if ((*from) & 0x100)
- return NAND_CMD_READ1;
- return NAND_CMD_READ0;
+ cmd = NAND_CMD_READOOB;
+ ofs = ofs - 1014;
}
+
+ *from = (*from & ~0x3ff) | ofs;
+ return cmd;
}
static unsigned int DoC_GetECCOffset(struct mtd_info *mtd, loff_t *from)
dummy = ReadDOC(docptr, Mplus_ReadPipeInit);
mfr = ReadDOC(docptr, Mil_CDSN_IO);
- if (doc->interleave)
- dummy = ReadDOC(docptr, Mil_CDSN_IO); /* 2 way interleave */
+ dummy = ReadDOC(docptr, Mil_CDSN_IO); /* 2 way interleave */
id = ReadDOC(docptr, Mil_CDSN_IO);
- if (doc->interleave)
- dummy = ReadDOC(docptr, Mil_CDSN_IO); /* 2 way interleave */
+ dummy = ReadDOC(docptr, Mil_CDSN_IO); /* 2 way interleave */
dummy = ReadDOC(docptr, Mplus_LastDataRead);
dummy = ReadDOC(docptr, Mplus_LastDataRead);
nand_manuf_ids[j].name, nand_flash_ids[i].name);
doc->mfr = mfr;
doc->id = id;
- doc->chipshift = ffs((nand_flash_ids[i].chipsize << 20)) - 1;
+ doc->interleave = 0;
+ if (doc->ChipID == DOC_ChipID_DocMilPlus32)
+ doc->interleave = 1;
+ doc->chipshift = nand_flash_ids[i].chipshift;
doc->erasesize = nand_flash_ids[i].erasesize << doc->interleave;
break;
}
this->mfr = 0;
this->id = 0;
- /* Work out the intended interleave setting */
- this->interleave = 0;
- if (this->ChipID == DOC_ChipID_DocMilPlus32)
- this->interleave = 1;
-
- /* Check the ASIC agrees */
- if ( (this->interleave << 2) !=
- (ReadDOC(this->virtadr, Mplus_Configuration) & 4)) {
- u_char conf = ReadDOC(this->virtadr, Mplus_Configuration);
- printk(KERN_NOTICE "Setting DiskOnChip Millennium Plus interleave to %s\n",
- this->interleave?"on (16-bit)":"off (8-bit)");
- conf ^= 4;
- WriteDOC(this->virtadr, conf, Mplus_Configuration);
- }
-
/* For each floor, find the number of valid chips it contains */
for (floor = 0,ret = 1; floor < MAX_FLOORS_MPLUS; floor++) {
numchips[floor] = 0;
return -EINVAL;
/* Determine position of OOB flags, before or after data */
- before = (this->interleave && (to & 0x200));
+ before = to & 0x200;
DoC_CheckASIC(docptr);
/* Figure out which region we are accessing... */
fofs = ofs;
base = ofs & 0xf;
- if (!this->interleave) {
- DoC_Command(docptr, NAND_CMD_READOOB, 0);
- size = 16 - base;
- } else if (base < 6) {
+ if (base < 6) {
DoC_Command(docptr, DoC_GetECCOffset(mtd, &fofs), 0);
size = 6 - base;
} else if (base < 8) {
/* Figure out which region we are accessing... */
fofs = ofs;
base = ofs & 0x0f;
- if (!this->interleave) {
- WriteDOC(NAND_CMD_READOOB, docptr, Mplus_FlashCmd);
- size = 16 - base;
- } else if (base < 6) {
+ if (base < 6) {
WriteDOC(DoC_GetECCOffset(mtd, &fofs), docptr, Mplus_FlashCmd);
size = 6 - base;
} else if (base < 8) {
/* Disable flash internally */
WriteDOC(0, docptr, Mplus_FlashSelect);
- mtd_erase_callback(instr);
+ if (instr->callback)
+ instr->callback(instr);
return 0;
}
/* (C) 1999 Machine Vision Holdings, Inc. */
/* (C) 1999-2003 David Woodhouse <dwmw2@infradead.org> */
-/* $Id: docprobe.c,v 1.41 2003/12/03 10:19:57 dwmw2 Exp $ */
+/* $Id: docprobe.c,v 1.36 2003/05/23 11:29:34 dwmw2 Exp $ */
window, DOCControl);
#endif /* !DOC_PASSIVE_PROBE */
- /* We need to read the ChipID register four times. For some
- newer DiskOnChip 2000 units, the first three reads will
- return the DiskOnChip Millennium ident. Don't ask. */
ChipID = ReadDOC(window, ChipID);
switch (ChipID) {
break;
case DOC_ChipID_DocMil:
- /* Check for the new 2000 with Millennium ASIC */
- ReadDOC(window, ChipID);
- ReadDOC(window, ChipID);
- if (ReadDOC(window, ChipID) != DOC_ChipID_DocMil)
- ChipID = DOC_ChipID_Doc2kTSOP;
-
/* Check the TOGGLE bit in the ECC register */
tmp = ReadDOC(window, ECCConf) & DOC_TOGGLE_BIT;
tmpb = ReadDOC(window, ECCConf) & DOC_TOGGLE_BIT;
tmpc = ReadDOC(window, Mplus_Toggle) & DOC_TOGGLE_BIT;
if (tmp != tmpb && tmp == tmpc)
return ChipID;
+ break;
default:
break;
}
default:
-#ifdef CONFIG_MTD_DOCPROBE_55AA
- printk(KERN_DEBUG "Possible DiskOnChip with unknown ChipID %2.2X found at 0x%lx\n",
+#ifndef CONFIG_MTD_DOCPROBE_55AA
+ printk(KERN_WARNING "Possible DiskOnChip with unknown ChipID %2.2X found at 0x%lx\n",
ChipID, physadr);
#endif
#ifndef DOC_PASSIVE_PROBE
return;
if ((ChipID = doccheck(docptr, physadr))) {
- if (ChipID == DOC_ChipID_Doc2kTSOP) {
- /* Remove this at your own peril. The hardware driver works but nothing prevents you from erasing bad blocks */
- printk(KERN_NOTICE "Refusing to drive DiskOnChip 2000 TSOP until Bad Block Table is correctly supported by INFTL\n");
- iounmap((void *)docptr);
- return;
- }
docfound = 1;
mtd = kmalloc(sizeof(struct DiskOnChip) + sizeof(struct mtd_info), GFP_KERNEL);
sprintf(namebuf, "with ChipID %2.2X", ChipID);
switch(ChipID) {
- case DOC_ChipID_Doc2kTSOP:
- name="2000 TSOP";
- im_funcname = "DoC2k_init";
- im_modname = "doc2000";
- break;
-
case DOC_ChipID_Doc2k:
name="2000";
im_funcname = "DoC2k_init";
/*
* MTD driver for the 28F160F3 Flash Memory (non-CFI) on LART.
*
- * $Id: lart.c,v 1.7 2004/08/09 13:19:44 dwmw2 Exp $
+ * $Id: lart.c,v 1.5 2003/05/20 21:03:07 dwmw2 Exp $
*
* Author: Abraham vd Merwe <abraham@2d3d.co.za>
*
}
instr->state = MTD_ERASE_DONE;
- mtd_erase_callback(instr);
+ if (instr->callback) instr->callback (instr);
return (0);
}
/*
- * Copyright (c) 2001 Maciej W. Rozycki
+ * Copyright (c) 2001 Maciej W. Rozycki
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
*
- * $Id: ms02-nv.c,v 1.7 2004/07/29 14:16:45 macro Exp $
+ * $Id: ms02-nv.c,v 1.4 2003/05/20 21:03:07 dwmw2 Exp $
*/
#include <linux/init.h>
static char version[] __initdata =
- "ms02-nv.c: v.1.0.0 13 Aug 2001 Maciej W. Rozycki.\n";
+ "ms02-nv.c: v.1.0.0 13 Aug 2001 Maciej W. Rozycki.\n";
-MODULE_AUTHOR("Maciej W. Rozycki <macro@linux-mips.org>");
+MODULE_AUTHOR("Maciej W. Rozycki <macro@ds2.pg.gda.pl>");
MODULE_DESCRIPTION("DEC MS02-NV NVRAM module driver");
MODULE_LICENSE("GPL");
/*
* Addresses we probe for an MS02-NV at. Modules may be located
- * at any 8MiB boundary within a 0MiB up to 112MiB range or at any 32MiB
- * boundary within a 0MiB up to 448MiB range. We don't support a module
- * at 0MiB, though.
+ * at any 8MB boundary within a 0MB up to 112MB range or at any 32MB
+ * boundary within a 0MB up to 448MB range. We don't support a module
+ * at 0MB, though.
*/
static ulong ms02nv_addrs[] __initdata = {
0x07000000, 0x06800000, 0x06000000, 0x05800000, 0x05000000,
int ret = -ENODEV;
- /* The module decodes 8MiB of address space. */
+ /* The module decodes 8MB of address space. */
mod_res = kmalloc(sizeof(*mod_res), GFP_KERNEL);
if (!mod_res)
return -ENOMEM;
goto err_out_csr_res;
}
- printk(KERN_INFO "mtd%d: %s at 0x%08lx, size %uMiB.\n",
+ printk(KERN_INFO "mtd%d: %s at 0x%08lx, size %uMB.\n",
mtd->index, ms02nv_name, addr, size >> 20);
mp->next = root_ms02nv_mtd;
switch (mips_machtype) {
case MACH_DS5000_200:
- csr = (volatile u32 *)KN02_CSR_BASE;
+ csr = (volatile u32 *)KN02_CSR_ADDR;
if (*csr & KN02_CSR_BNK32M)
stride = 2;
break;
case MACH_DS5000_2X0:
- case MACH_DS5900:
+ case MACH_DS5000:
csr = (volatile u32 *)KN03_MCR_BASE;
if (*csr & KN03_MCR_BNK32M)
stride = 2;
/*
- * Copyright (c) 2001, 2003 Maciej W. Rozycki
+ * Copyright (c) 2001 Maciej W. Rozycki
*
- * DEC MS02-NV (54-20948-01) battery backed-up NVRAM module for
- * DECstation/DECsystem 5000/2x0 and DECsystem 5900 and 5900/260
- * systems.
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- * $Id: ms02-nv.h,v 1.3 2003/08/19 09:25:36 dwmw2 Exp $
+ * $Id: ms02-nv.h,v 1.1 2002/09/13 13:46:55 dwmw2 Exp $
*/
#include <linux/ioport.h>
#include <linux/mtd/mtd.h>
-/*
- * Addresses are decoded as follows:
- *
- * 0x000000 - 0x3fffff SRAM
- * 0x400000 - 0x7fffff CSR
- *
- * Within the SRAM area the following ranges are forced by the system
- * firmware:
- *
- * 0x000000 - 0x0003ff diagnostic area, destroyed upon a reboot
- * 0x000400 - ENDofRAM storage area, available to operating systems
- *
- * but we can't really use the available area right from 0x000400 as
- * the first word is used by the firmware as a status flag passed
- * from an operating system. If anything but the valid data magic
- * ID value is found, the firmware considers the SRAM clean, i.e.
- * containing no valid data, and disables the battery resulting in
- * data being erased as soon as power is switched off. So the choice
- * for the start address of the user-available is 0x001000 which is
- * nicely page aligned. The area between 0x000404 and 0x000fff may
- * be used by the driver for own needs.
- *
- * The diagnostic area defines two status words to be read by an
- * operating system, a magic ID to distinguish a MS02-NV board from
- * anything else and a status information providing results of tests
- * as well as the size of SRAM available, which can be 1MiB or 2MiB
- * (that's what the firmware handles; no idea if 2MiB modules ever
- * existed).
- *
- * The firmware only handles the MS02-NV board if installed in the
- * last (15th) slot, so for any other location the status information
- * stored in the SRAM cannot be relied upon. But from the hardware
- * point of view there is no problem using up to 14 such boards in a
- * system -- only the 1st slot needs to be filled with a DRAM module.
- * The MS02-NV board is ECC-protected, like other MS02 memory boards.
- *
- * The state of the battery as provided by the CSR is reflected on
- * the two onboard LEDs. When facing the battery side of the board,
- * with the LEDs at the top left and the battery at the bottom right
- * (i.e. looking from the back side of the system box), their meaning
- * is as follows (the system has to be powered on):
- *
- * left LED battery disable status: lit = enabled
- * right LED battery condition status: lit = OK
- */
-
/* MS02-NV iomem register offsets. */
#define MS02NV_CSR 0x400000 /* control & status register */
-/* MS02-NV CSR status bits. */
-#define MS02NV_CSR_BATT_OK 0x01 /* battery OK */
-#define MS02NV_CSR_BATT_OFF 0x02 /* battery disabled */
-
-
/* MS02-NV memory offsets. */
#define MS02NV_DIAG 0x0003f8 /* diagnostic status */
#define MS02NV_MAGIC 0x0003fc /* MS02-NV magic ID */
-#define MS02NV_VALID 0x000400 /* valid data magic ID */
-#define MS02NV_RAM 0x001000 /* user-exposed RAM start */
+#define MS02NV_RAM 0x000400 /* general-purpose RAM start */
-/* MS02-NV diagnostic status bits. */
-#define MS02NV_DIAG_TEST 0x01 /* SRAM test done (?) */
-#define MS02NV_DIAG_RO 0x02 /* SRAM r/o test done */
-#define MS02NV_DIAG_RW 0x04 /* SRAM r/w test done */
-#define MS02NV_DIAG_FAIL 0x08 /* SRAM test failed */
-#define MS02NV_DIAG_SIZE_MASK 0xf0 /* SRAM size mask */
-#define MS02NV_DIAG_SIZE_SHIFT 0x10 /* SRAM size shift (left) */
+/* MS02-NV diagnostic status constants. */
+#define MS02NV_DIAG_SIZE_MASK 0xf0 /* RAM size mask */
+#define MS02NV_DIAG_SIZE_SHIFT 0x10 /* RAM size shift (left) */
/* MS02-NV general constants. */
#define MS02NV_ID 0x03021966 /* MS02-NV magic ID value */
-#define MS02NV_VALID_ID 0xbd100248 /* valid data magic ID value */
#define MS02NV_SLOT_SIZE 0x800000 /* size of the address space
decoded by the module */
-
typedef volatile u32 ms02nv_uint;
struct ms02nv_private {
/*
* mtdram - a test mtd device
- * $Id: mtdram.c,v 1.33 2004/08/09 13:19:44 dwmw2 Exp $
+ * $Id: mtdram.c,v 1.32 2003/05/21 15:15:07 dwmw2 Exp $
* Author: Alexander Larsson <alex@cendio.se>
*
* Copyright (c) 1999 Alexander Larsson <alex@cendio.se>
memset((char *)mtd->priv + instr->addr, 0xff, instr->len);
instr->state = MTD_ERASE_DONE;
- mtd_erase_callback(instr);
+ if (instr->callback)
+ (*(instr->callback))(instr);
return 0;
}
+++ /dev/null
-/**
- *
- * $Id: phram.c,v 1.2 2004/08/09 13:19:44 dwmw2 Exp $
- *
- * Copyright (c) Jochen Schaeuble <psionic@psionic.de>
- * 07/2003 rewritten by Joern Engel <joern@wh.fh-wedel.de>
- *
- * DISCLAIMER: This driver makes use of Rusty's excellent module code,
- * so it will not work for 2.4 without changes and it wont work for 2.4
- * as a module without major changes. Oh well!
- *
- * Usage:
- *
- * one commend line parameter per device, each in the form:
- * phram=<name>,<start>,<len>
- * <name> may be up to 63 characters.
- * <start> and <len> can be octal, decimal or hexadecimal. If followed
- * by "k", "M" or "G", the numbers will be interpreted as kilo, mega or
- * gigabytes.
- *
- */
-
-#include <asm/io.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/mtd/mtd.h>
-
-#define ERROR(fmt, args...) printk(KERN_ERR "phram: " fmt , ## args)
-
-struct phram_mtd_list {
- struct list_head list;
- struct mtd_info *mtdinfo;
-};
-
-static LIST_HEAD(phram_list);
-
-
-
-int phram_erase(struct mtd_info *mtd, struct erase_info *instr)
-{
- u_char *start = (u_char *)mtd->priv;
-
- if (instr->addr + instr->len > mtd->size)
- return -EINVAL;
-
- memset(start + instr->addr, 0xff, instr->len);
-
- /* This'll catch a few races. Free the thing before returning :)
- * I don't feel at all ashamed. This kind of thing is possible anyway
- * with flash, but unlikely.
- */
-
- instr->state = MTD_ERASE_DONE;
-
- mtd_erase_callback(instr);
-
- return 0;
-}
-
-int phram_point(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char **mtdbuf)
-{
- u_char *start = (u_char *)mtd->priv;
-
- if (from + len > mtd->size)
- return -EINVAL;
-
- *mtdbuf = start + from;
- *retlen = len;
- return 0;
-}
-
-void phram_unpoint(struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
-{
-}
-
-int phram_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf)
-{
- u_char *start = (u_char *)mtd->priv;
-
- if (from + len > mtd->size)
- return -EINVAL;
-
- memcpy(buf, start + from, len);
-
- *retlen = len;
- return 0;
-}
-
-int phram_write(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf)
-{
- u_char *start = (u_char *)mtd->priv;
-
- if (to + len > mtd->size)
- return -EINVAL;
-
- memcpy(start + to, buf, len);
-
- *retlen = len;
- return 0;
-}
-
-
-
-static void unregister_devices(void)
-{
- struct phram_mtd_list *this;
-
- list_for_each_entry(this, &phram_list, list) {
- del_mtd_device(this->mtdinfo);
- iounmap(this->mtdinfo->priv);
- kfree(this->mtdinfo);
- kfree(this);
- }
-}
-
-static int register_device(char *name, unsigned long start, unsigned long len)
-{
- struct phram_mtd_list *new;
- int ret = -ENOMEM;
-
- new = kmalloc(sizeof(*new), GFP_KERNEL);
- if (!new)
- goto out0;
-
- new->mtdinfo = kmalloc(sizeof(struct mtd_info), GFP_KERNEL);
- if (!new->mtdinfo)
- goto out1;
-
- memset(new->mtdinfo, 0, sizeof(struct mtd_info));
-
- ret = -EIO;
- new->mtdinfo->priv = ioremap(start, len);
- if (!new->mtdinfo->priv) {
- ERROR("ioremap failed\n");
- goto out2;
- }
-
-
- new->mtdinfo->name = name;
- new->mtdinfo->size = len;
- new->mtdinfo->flags = MTD_CAP_RAM | MTD_ERASEABLE | MTD_VOLATILE;
- new->mtdinfo->erase = phram_erase;
- new->mtdinfo->point = phram_point;
- new->mtdinfo->unpoint = phram_unpoint;
- new->mtdinfo->read = phram_read;
- new->mtdinfo->write = phram_write;
- new->mtdinfo->owner = THIS_MODULE;
- new->mtdinfo->type = MTD_RAM;
- new->mtdinfo->erasesize = 0x0;
-
- ret = -EAGAIN;
- if (add_mtd_device(new->mtdinfo)) {
- ERROR("Failed to register new device\n");
- goto out3;
- }
-
- list_add_tail(&new->list, &phram_list);
- return 0;
-
-out3:
- iounmap(new->mtdinfo->priv);
-out2:
- kfree(new->mtdinfo);
-out1:
- kfree(new);
-out0:
- return ret;
-}
-
-static int ustrtoul(const char *cp, char **endp, unsigned int base)
-{
- unsigned long result = simple_strtoul(cp, endp, base);
-
- switch (**endp) {
- case 'G':
- result *= 1024;
- case 'M':
- result *= 1024;
- case 'k':
- result *= 1024;
- endp++;
- }
- return result;
-}
-
-static int parse_num32(uint32_t *num32, const char *token)
-{
- char *endp;
- unsigned long n;
-
- n = ustrtoul(token, &endp, 0);
- if (*endp)
- return -EINVAL;
-
- *num32 = n;
- return 0;
-}
-
-static int parse_name(char **pname, const char *token)
-{
- size_t len;
- char *name;
-
- len = strlen(token) + 1;
- if (len > 64)
- return -ENOSPC;
-
- name = kmalloc(len, GFP_KERNEL);
- if (!name)
- return -ENOMEM;
-
- strcpy(name, token);
-
- *pname = name;
- return 0;
-}
-
-#define parse_err(fmt, args...) do { \
- ERROR(fmt , ## args); \
- return 0; \
-} while (0)
-
-static int phram_setup(const char *val, struct kernel_param *kp)
-{
- char buf[64+12+12], *str = buf;
- char *token[3];
- char *name;
- uint32_t start;
- uint32_t len;
- int i, ret;
-
- if (strnlen(val, sizeof(str)) >= sizeof(str))
- parse_err("parameter too long\n");
-
- strcpy(str, val);
-
- for (i=0; i<3; i++)
- token[i] = strsep(&str, ",");
-
- if (str)
- parse_err("too many arguments\n");
-
- if (!token[2])
- parse_err("not enough arguments\n");
-
- ret = parse_name(&name, token[0]);
- if (ret == -ENOMEM)
- parse_err("out of memory\n");
- if (ret == -ENOSPC)
- parse_err("name too long\n");
- if (ret)
- return 0;
-
- ret = parse_num32(&start, token[1]);
- if (ret)
- parse_err("illegal start address\n");
-
- ret = parse_num32(&len, token[2]);
- if (ret)
- parse_err("illegal device length\n");
-
- register_device(name, start, len);
-
- return 0;
-}
-
-module_param_call(phram, phram_setup, NULL, NULL, 000);
-MODULE_PARM_DESC(phram, "Memory region to map. \"map=<name>,<start><length>\"");
-
-/*
- * Just for compatibility with slram, this is horrible and should go someday.
- */
-static int __init slram_setup(const char *val, struct kernel_param *kp)
-{
- char buf[256], *str = buf;
-
- if (!val || !val[0])
- parse_err("no arguments to \"slram=\"\n");
-
- if (strnlen(val, sizeof(str)) >= sizeof(str))
- parse_err("parameter too long\n");
-
- strcpy(str, val);
-
- while (str) {
- char *token[3];
- char *name;
- uint32_t start;
- uint32_t len;
- int i, ret;
-
- for (i=0; i<3; i++) {
- token[i] = strsep(&str, ",");
- if (token[i])
- continue;
- parse_err("wrong number of arguments to \"slram=\"\n");
- }
-
- /* name */
- ret = parse_name(&name, token[0]);
- if (ret == -ENOMEM)
- parse_err("of memory\n");
- if (ret == -ENOSPC)
- parse_err("too long\n");
- if (ret)
- return 1;
-
- /* start */
- ret = parse_num32(&start, token[1]);
- if (ret)
- parse_err("illegal start address\n");
-
- /* len */
- if (token[2][0] == '+')
- ret = parse_num32(&len, token[2] + 1);
- else
- ret = parse_num32(&len, token[2]);
-
- if (ret)
- parse_err("illegal device length\n");
-
- if (token[2][0] != '+') {
- if (len < start)
- parse_err("end < start\n");
- len -= start;
- }
-
- register_device(name, start, len);
- }
- return 1;
-}
-
-module_param_call(slram, slram_setup, NULL, NULL, 000);
-MODULE_PARM_DESC(slram, "List of memory regions to map. \"map=<name>,<start><length/end>\"");
-
-
-int __init init_phram(void)
-{
- printk(KERN_ERR "phram loaded\n");
- return 0;
-}
-
-static void __exit cleanup_phram(void)
-{
- unregister_devices();
-}
-
-module_init(init_phram);
-module_exit(cleanup_phram);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Jörn Engel <joern@wh.fh-wedel.de>");
-MODULE_DESCRIPTION("MTD driver for physical RAM");
/*
- * $Id: pmc551.c,v 1.28 2004/08/09 13:19:44 dwmw2 Exp $
+ * $Id: pmc551.c,v 1.24 2003/05/20 21:03:08 dwmw2 Exp $
*
* PMC551 PCI Mezzanine Ram Device
*
#include <linux/mtd/pmc551.h>
#include <linux/mtd/compatmac.h>
+#if LINUX_VERSION_CODE > 0x20300
+#define PCI_BASE_ADDRESS(dev) (dev->resource[0].start)
+#else
+#define PCI_BASE_ADDRESS(dev) (dev->base_address[0])
+#endif
+
static struct mtd_info *pmc551list;
static int pmc551_erase (struct mtd_info *mtd, struct erase_info *instr)
printk(KERN_DEBUG "pmc551_erase() done\n");
#endif
- mtd_erase_callback(instr);
+ if (instr->callback) {
+ (*(instr->callback))(instr);
+ }
return 0;
}
(size<1024)?size:(size<1048576)?size>>10:size>>20,
(size<1024)?'B':(size<1048576)?'K':'M',
size, ((dcmd&(0x1<<3)) == 0)?"non-":"",
- (dev->resource[0].start)&PCI_BASE_ADDRESS_MEM_MASK );
+ PCI_BASE_ADDRESS(dev)&PCI_BASE_ADDRESS_MEM_MASK );
/*
* Check to see the state of the memory
}
printk(KERN_NOTICE "pmc551: Found PCI V370PDC at 0x%lX\n",
- PCI_Device->resource[0].start);
+ PCI_BASE_ADDRESS(PCI_Device));
/*
* The PMC551 device acts VERY weird if you don't init it
printk(KERN_NOTICE "pmc551: Using specified aperture size %dM\n", asize>>20);
priv->asize = asize;
}
- priv->start = ioremap(((PCI_Device->resource[0].start)
+ priv->start = ioremap((PCI_BASE_ADDRESS(PCI_Device)
& PCI_BASE_ADDRESS_MEM_MASK),
priv->asize);
/*======================================================================
- $Id: slram.c,v 1.31 2004/08/09 13:19:44 dwmw2 Exp $
+ $Id: slram.c,v 1.30 2003/05/20 21:03:08 dwmw2 Exp $
This driver provides a method to access memory not used by the kernel
itself (i.e. if the kernel commandline mem=xxx is used). To actually
instr->state = MTD_ERASE_DONE;
- mtd_erase_callback(instr);
+ if (instr->callback) {
+ (*(instr->callback))(instr);
+ }
+ else {
+ kfree(instr);
+ }
return(0);
}
/* This version ported to the Linux-MTD system by dwmw2@infradead.org
- * $Id: ftl.c,v 1.53 2004/08/09 13:55:43 dwmw2 Exp $
+ * $Id: ftl.c,v 1.51 2003/06/23 12:00:08 dwmw2 Exp $
*
* Fixes: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
* - fixes some leaks on failure in build_maps and ftl_notify_add, cleanups
{
erase_unit_header_t header;
loff_t offset, max_offset;
- size_t ret;
- int err;
+ int ret;
part->header.FormattedSize = 0;
max_offset = (0x100000<part->mbd.mtd->size)?0x100000:part->mbd.mtd->size;
/* Search first megabyte for a valid FTL header */
(offset + sizeof(header)) < max_offset;
offset += part->mbd.mtd->erasesize ? : 0x2000) {
- err = part->mbd.mtd->read(part->mbd.mtd, offset, sizeof(header), &ret,
+ ret = part->mbd.mtd->read(part->mbd.mtd, offset, sizeof(header), &ret,
(unsigned char *)&header);
- if (err)
- return err;
+ if (ret)
+ return ret;
if (strcmp(header.DataOrgTuple+3, "FTL100") == 0) break;
}
if (ret) {
printk(KERN_NOTICE "ftl_cs: block write failed!\n");
printk(KERN_NOTICE "ftl_cs: log_addr = 0x%x, virt_addr"
- " = 0x%x, Offset = 0x%zx\n", log_addr, virt_addr,
+ " = 0x%x, Offset = 0x%x\n", log_addr, virt_addr,
offset);
return -EIO;
}
int init_ftl(void)
{
- DEBUG(0, "$Id: ftl.c,v 1.53 2004/08/09 13:55:43 dwmw2 Exp $\n");
+ DEBUG(0, "$Id: ftl.c,v 1.51 2003/06/23 12:00:08 dwmw2 Exp $\n");
return register_mtd_blktrans(&ftl_tr);
}
* (c) 1999 Machine Vision Holdings, Inc.
* Author: David Woodhouse <dwmw2@infradead.org>
*
- * $Id: inftlcore.c,v 1.17 2004/08/09 13:56:48 dwmw2 Exp $
+ * $Id: inftlcore.c,v 1.14 2003/06/26 08:28:26 dwmw2 Exp $
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
struct INFTLrecord *inftl;
unsigned long temp;
- if (mtd->type != MTD_NANDFLASH)
- return;
- /* OK, this is moderately ugly. But probably safe. Alternatives? */
- if (memcmp(mtd->name, "DiskOnChip", 10))
+ if (mtd->ecctype != MTD_ECC_RS_DiskOnChip)
return;
- if (!mtd->block_isbad) {
- printk(KERN_ERR
-"INFTL no longer supports the old DiskOnChip drivers loaded via docprobe.\n"
-"Please use the new diskonchip driver under the NAND subsystem.\n");
- return;
- }
-
DEBUG(MTD_DEBUG_LEVEL3, "INFTL: add_mtd for %s\n", mtd->name);
inftl = kmalloc(sizeof(*inftl), GFP_KERNEL);
inftl->mbd.devnum = -1;
inftl->mbd.blksize = 512;
inftl->mbd.tr = tr;
- memcpy(&inftl->oobinfo, &mtd->oobinfo, sizeof(struct nand_oobinfo));
- inftl->oobinfo.useecc = MTD_NANDECC_PLACEONLY;
if (INFTL_mount(inftl) < 0) {
printk(KERN_WARNING "INFTL: could not mount device\n");
u16 pot = inftl->LastFreeEUN;
int silly = inftl->nb_blocks;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_findfreeblock(inftl=%p,"
- "desperate=%d)\n", inftl, desperate);
+ DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_findfreeblock(inftl=0x%x,"
+ "desperate=%d)\n", (int)inftl, desperate);
/*
* Normally, we force a fold to happen before we run out of free
struct inftl_oob oob;
size_t retlen;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
- "pending=%d)\n", inftl, thisVUC, pendingblock);
+ DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=0x%x,thisVUC=%d,"
+ "pending=%d)\n", (int)inftl, thisVUC, pendingblock);
memset(BlockMap, 0xff, sizeof(BlockMap));
memset(BlockDeleted, 0, sizeof(BlockDeleted));
if (BlockMap[block] == BLOCK_NIL)
continue;
- ret = MTD_READ(inftl->mbd.mtd, (inftl->EraseSize *
+ ret = MTD_READECC(inftl->mbd.mtd, (inftl->EraseSize *
BlockMap[block]) + (block * SECTORSIZE), SECTORSIZE,
- &retlen, movebuf);
+ &retlen, movebuf, (char *)&oob, NULL);
if (ret < 0) {
- ret = MTD_READ(inftl->mbd.mtd, (inftl->EraseSize *
+ ret = MTD_READECC(inftl->mbd.mtd, (inftl->EraseSize *
BlockMap[block]) + (block * SECTORSIZE),
- SECTORSIZE, &retlen, movebuf);
+ SECTORSIZE, &retlen, movebuf, (char *)&oob,
+ NULL);
if (ret != -EIO)
DEBUG(MTD_DEBUG_LEVEL1, "INFTL: error went "
"away on retry?\n");
}
- memset(&oob, 0xff, sizeof(struct inftl_oob));
- oob.b.Status = oob.b.Status1 = SECTOR_USED;
MTD_WRITEECC(inftl->mbd.mtd, (inftl->EraseSize * targetEUN) +
(block * SECTORSIZE), SECTORSIZE, &retlen,
- movebuf, (char *)&oob, &inftl->oobinfo);
+ movebuf, (char *)&oob, NULL);
}
/*
if (INFTL_formatblock(inftl, thisEUN) < 0) {
/*
* Could not erase : mark block as reserved.
+ * FixMe: Update Bad Unit Table on disk.
*/
inftl->PUtable[thisEUN] = BLOCK_RESERVED;
} else {
u16 ChainLength = 0, thislen;
u16 chain, EUN;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_makefreeblock(inftl=%p,"
- "pending=%d)\n", inftl, pendingblock);
+ DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_makefreeblock(inftl=0x%x,"
+ "pending=%d)\n", (int)inftl, pendingblock);
for (chain = 0; chain < inftl->nb_blocks; chain++) {
EUN = inftl->VUtable[chain];
size_t retlen;
int silly, silly2 = 3;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_findwriteunit(inftl=%p,"
- "block=%d)\n", inftl, block);
+ DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_findwriteunit(inftl=0x%x,"
+ "block=%d)\n", (int)inftl, block);
do {
/*
struct inftl_bci bci;
size_t retlen;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_trydeletechain(inftl=%p,"
- "thisVUC=%d)\n", inftl, thisVUC);
+ DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_trydeletechain(inftl=0x%x,"
+ "thisVUC=%d)\n", (int)inftl, thisVUC);
memset(BlockUsed, 0, sizeof(BlockUsed));
memset(BlockDeleted, 0, sizeof(BlockDeleted));
if (INFTL_formatblock(inftl, thisEUN) < 0) {
/*
* Could not erase : mark block as reserved.
+ * FixMe: Update Bad Unit Table on medium.
*/
inftl->PUtable[thisEUN] = BLOCK_RESERVED;
} else {
size_t retlen;
struct inftl_bci bci;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_deleteblock(inftl=%p,"
- "block=%d)\n", inftl, block);
+ DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_deleteblock(inftl=0x%x,"
+ "block=%d)\n", (int)inftl, block);
while (thisEUN < inftl->nb_blocks) {
if (MTD_READOOB(inftl->mbd.mtd, (thisEUN * inftl->EraseSize) +
unsigned int writeEUN;
unsigned long blockofs = (block * SECTORSIZE) & (inftl->EraseSize - 1);
size_t retlen;
- struct inftl_oob oob;
+ u8 eccbuf[6];
char *p, *pend;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: inftl_writeblock(inftl=%p,block=%ld,"
- "buffer=%p)\n", inftl, block, buffer);
+ DEBUG(MTD_DEBUG_LEVEL3, "INFTL: inftl_writeblock(inftl=0x%x,block=%ld,"
+ "buffer=0x%x)\n", (int)inftl, block, (int)buffer);
/* Is block all zero? */
pend = buffer + SECTORSIZE;
return 1;
}
- memset(&oob, 0xff, sizeof(struct inftl_oob));
- oob.b.Status = oob.b.Status1 = SECTOR_USED;
MTD_WRITEECC(inftl->mbd.mtd, (writeEUN * inftl->EraseSize) +
blockofs, SECTORSIZE, &retlen, (char *)buffer,
- (char *)&oob, &inftl->oobinfo);
+ (char *)eccbuf, NULL);
/*
- * need to write SECTOR_USED flags since they are not written
+ * No need to write SECTOR_USED flags since they are written
* in mtd_writeecc
*/
} else {
struct inftl_bci bci;
size_t retlen;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: inftl_readblock(inftl=%p,block=%ld,"
- "buffer=%p)\n", inftl, block, buffer);
+ DEBUG(MTD_DEBUG_LEVEL3, "INFTL: inftl_readblock(inftl=0x%x,block=%ld,"
+ "buffer=0x%x)\n", (int)inftl, block, (int)buffer);
while (thisEUN < inftl->nb_blocks) {
if (MTD_READOOB(inftl->mbd.mtd, (thisEUN * inftl->EraseSize) +
} else {
size_t retlen;
loff_t ptr = (thisEUN * inftl->EraseSize) + blockofs;
- if (MTD_READ(inftl->mbd.mtd, ptr, SECTORSIZE, &retlen,
- buffer))
+ u_char eccbuf[6];
+ if (MTD_READECC(inftl->mbd.mtd, ptr, SECTORSIZE, &retlen,
+ buffer, eccbuf, NULL))
return -EIO;
}
return 0;
int __init init_inftl(void)
{
- printk(KERN_INFO "INFTL: inftlcore.c $Revision: 1.17 $, "
+ printk(KERN_INFO "INFTL: inftlcore.c $Revision: 1.14 $, "
"inftlmount.c %s\n", inftlmountrev);
return register_mtd_blktrans(&inftl_tr);
* Author: Fabrice Bellard (fabrice.bellard@netgem.com)
* Copyright (C) 2000 Netgem S.A.
*
- * $Id: inftlmount.c,v 1.14 2004/08/09 13:57:42 dwmw2 Exp $
+ * $Id: inftlmount.c,v 1.11 2003/06/23 07:39:21 dwmw2 Exp $
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
#include <linux/mtd/inftl.h>
#include <linux/mtd/compatmac.h>
-char inftlmountrev[]="$Revision: 1.14 $";
+char inftlmountrev[]="$Revision: 1.11 $";
/*
* find_boot_record: Find the INFTL Media Header and its Spare copy which
{
struct inftl_unittail h1;
//struct inftl_oob oob;
- unsigned int i, block;
+ unsigned int i, block, boot_record_count = 0;
u8 buf[SECTORSIZE];
struct INFTLMediaHeader *mh = &inftl->MediaHdr;
struct INFTLPartition *ip;
- size_t retlen;
+ int retlen;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
+ DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=0x%x)\n",
+ (int)inftl);
/*
* Assume logical EraseSize == physical erasesize for starting the
inftl->nb_blocks = inftl->mbd.mtd->size / inftl->EraseSize;
inftl->MediaUnit = BLOCK_NIL;
+ inftl->SpareMediaUnit = BLOCK_NIL;
/* Search for a valid boot record */
for (block = 0; block < inftl->nb_blocks; block++) {
* Check for BNAND header first. Then whinge if it's found
* but later checks fail.
*/
- ret = MTD_READ(inftl->mbd.mtd, block * inftl->EraseSize,
- SECTORSIZE, &retlen, buf);
- /* We ignore ret in case the ECC of the MediaHeader is invalid
- (which is apparently acceptable) */
- if (retlen != SECTORSIZE) {
+ if ((ret = MTD_READ(inftl->mbd.mtd, block * inftl->EraseSize,
+ SECTORSIZE, &retlen, buf))) {
static int warncount = 5;
if (warncount) {
continue;
}
+ if (boot_record_count) {
+ /*
+ * We've already processed one. So we just check if
+ * this one is the same as the first one we found.
+ */
+ if (memcmp(mh, buf, sizeof(struct INFTLMediaHeader))) {
+ printk(KERN_WARNING "INFTL: Media Headers at "
+ "0x%x and 0x%x disagree.\n",
+ inftl->MediaUnit * inftl->EraseSize,
+ block * inftl->EraseSize);
+ return -1;
+ }
+ if (boot_record_count == 1)
+ inftl->SpareMediaUnit = block;
+
+ /*
+ * Mark this boot record (INFTL MediaHeader) block as
+ * reserved.
+ */
+ inftl->PUtable[block] = BLOCK_RESERVED;
+
+ boot_record_count++;
+ continue;
+ }
/*
* This is the first we've seen.
* Copy the media header structure into place.
*/
memcpy(mh, buf, sizeof(struct INFTLMediaHeader));
-
- /* Read the spare media header at offset 4096 */
- MTD_READ(inftl->mbd.mtd, block * inftl->EraseSize + 4096,
- SECTORSIZE, &retlen, buf);
- if (retlen != SECTORSIZE) {
- printk(KERN_WARNING "INFTL: Unable to read spare "
- "Media Header\n");
- return -1;
- }
- /* Check if this one is the same as the first one we found. */
- if (memcmp(mh, buf, sizeof(struct INFTLMediaHeader))) {
- printk(KERN_WARNING "INFTL: Primary and spare Media "
- "Headers disagree.\n");
- return -1;
- }
-
mh->NoOfBootImageBlocks = le32_to_cpu(mh->NoOfBootImageBlocks);
mh->NoOfBinaryPartitions = le32_to_cpu(mh->NoOfBinaryPartitions);
mh->NoOfBDTLPartitions = le32_to_cpu(mh->NoOfBDTLPartitions);
"UnitSizeFactor 0x%02x is experimental\n",
mh->BlockMultiplierBits);
inftl->EraseSize = inftl->mbd.mtd->erasesize <<
- mh->BlockMultiplierBits;
+ (0xff - mh->BlockMultiplierBits);
inftl->nb_blocks = inftl->mbd.mtd->size / inftl->EraseSize;
- block >>= mh->BlockMultiplierBits;
}
/* Scan the partitions */
inftl->PUtable = kmalloc(inftl->nb_blocks * sizeof(u16), GFP_KERNEL);
if (!inftl->PUtable) {
printk(KERN_WARNING "INFTL: allocation of PUtable "
- "failed (%zd bytes)\n",
+ "failed (%d bytes)\n",
inftl->nb_blocks * sizeof(u16));
return -ENOMEM;
}
if (!inftl->VUtable) {
kfree(inftl->PUtable);
printk(KERN_WARNING "INFTL: allocation of VUtable "
- "failed (%zd bytes)\n",
+ "failed (%d bytes)\n",
inftl->nb_blocks * sizeof(u16));
return -ENOMEM;
}
/* Mark this boot record (NFTL MediaHeader) block as reserved */
inftl->PUtable[block] = BLOCK_RESERVED;
+#if 0
/* Read Bad Erase Unit Table and modify PUtable[] accordingly */
for (i = 0; i < inftl->nb_blocks; i++) {
- int physblock;
- /* If any of the physical eraseblocks are bad, don't
- use the unit. */
- for (physblock = 0; physblock < inftl->EraseSize; physblock += inftl->mbd.mtd->erasesize) {
- if (inftl->mbd.mtd->block_isbad(inftl->mbd.mtd, i * inftl->EraseSize + physblock))
- inftl->PUtable[i] = BLOCK_RESERVED;
+ if ((i & (SECTORSIZE - 1)) == 0) {
+ /* read one sector for every SECTORSIZE of blocks */
+ if ((ret = MTD_READECC(inftl->mbd.mtd,
+ block * inftl->EraseSize + i + SECTORSIZE,
+ SECTORSIZE, &retlen, buf,
+ (char *)&oob, NULL)) < 0) {
+ printk(KERN_WARNING "INFTL: read of "
+ "bad sector table failed "
+ "(err %d)\n", ret);
+ kfree(inftl->VUtable);
+ kfree(inftl->PUtable);
+ return -1;
+ }
}
+ /* Mark the Bad Erase Unit as RESERVED in PUtable */
+ if (buf[i & (SECTORSIZE - 1)] != 0xff)
+ inftl->PUtable[i] = BLOCK_RESERVED;
}
+#endif
inftl->MediaUnit = block;
- return 0;
+ boot_record_count++;
}
-
- /* Not found. */
- return -1;
+
+ return boot_record_count ? 0 : -1;
}
static int memcmpb(void *a, int c, int n)
static int check_free_sectors(struct INFTLrecord *inftl, unsigned int address,
int len, int check_oob)
{
- u8 buf[SECTORSIZE + inftl->mbd.mtd->oobsize];
- size_t retlen;
- int i;
+ int i, retlen;
+ u8 buf[SECTORSIZE];
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: check_free_sectors(inftl=%p,"
- "address=0x%x,len=%d,check_oob=%d)\n", inftl,
+ DEBUG(MTD_DEBUG_LEVEL3, "INFTL: check_free_sectors(inftl=0x%x,"
+ "address=0x%x,len=%d,check_oob=%d)\n", (int)inftl,
address, len, check_oob);
for (i = 0; i < len; i += SECTORSIZE) {
- if (MTD_READECC(inftl->mbd.mtd, address, SECTORSIZE, &retlen, buf, &buf[SECTORSIZE], &inftl->oobinfo) < 0)
+ /*
+ * We want to read the sector without ECC check here since a
+ * free sector does not have ECC syndrome on it yet.
+ */
+ if (MTD_READ(inftl->mbd.mtd, address, SECTORSIZE, &retlen, buf) < 0)
return -1;
if (memcmpb(buf, 0xff, SECTORSIZE) != 0)
return -1;
if (check_oob) {
- if (memcmpb(buf + SECTORSIZE, 0xff, inftl->mbd.mtd->oobsize) != 0)
+ if (MTD_READOOB(inftl->mbd.mtd, address,
+ inftl->mbd.mtd->oobsize, &retlen, buf) < 0)
+ return -1;
+ if (memcmpb(buf, 0xff, inftl->mbd.mtd->oobsize) != 0)
return -1;
}
address += SECTORSIZE;
* Return: 0 when succeed, -1 on error.
*
* ToDo: 1. Is it neceressary to check_free_sector after erasing ??
+ * 2. UnitSizeFactor != 0xFF
*/
int INFTL_formatblock(struct INFTLrecord *inftl, int block)
{
- size_t retlen;
+ int retlen;
struct inftl_unittail uci;
struct erase_info *instr = &inftl->instr;
- int physblock;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_formatblock(inftl=%p,"
- "block=%d)\n", inftl, block);
+ DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_formatblock(inftl=0x%x,"
+ "block=%d)\n", (int)inftl, block);
memset(instr, 0, sizeof(struct erase_info));
- /* FIXME: Shouldn't we be setting the 'discarded' flag to zero
- _first_? */
-
/* Use async erase interface, test return code */
instr->addr = block * inftl->EraseSize;
- instr->len = inftl->mbd.mtd->erasesize;
- /* Erase one physical eraseblock at a time, even though the NAND api
- allows us to group them. This way we if we have a failure, we can
- mark only the failed block in the bbt. */
- for (physblock = 0; physblock < inftl->EraseSize; physblock += instr->len, instr->addr += instr->len) {
- MTD_ERASE(inftl->mbd.mtd, instr);
-
- if (instr->state == MTD_ERASE_FAILED) {
- printk(KERN_WARNING "INFTL: error while formatting block %d\n",
- block);
- goto fail;
- }
+ instr->len = inftl->EraseSize;
+ MTD_ERASE(inftl->mbd.mtd, instr);
+ if (instr->state == MTD_ERASE_FAILED) {
/*
- * Check the "freeness" of Erase Unit before updating metadata.
- * FixMe: is this check really necessary? Since we have check the
- * return code after the erase operation.
- */
- if (check_free_sectors(inftl, instr->addr, instr->len, 1) != 0)
- goto fail;
+ * Could not format, FixMe: We should update the BadUnitTable
+ * both in memory and on disk.
+ */
+ printk(KERN_WARNING "INFTL: error while formatting block %d\n",
+ block);
+ return -1;
}
+ /*
+ * Check the "freeness" of Erase Unit before updating metadata.
+ * FixMe: is this check really necessary? Since we have check the
+ * return code after the erase operation.
+ */
+ if (check_free_sectors(inftl, instr->addr, inftl->EraseSize, 1) != 0)
+ return -1;
+
uci.EraseMark = cpu_to_le16(ERASE_MARK);
uci.EraseMark1 = cpu_to_le16(ERASE_MARK);
uci.Reserved[0] = 0;
uci.Reserved[1] = 0;
uci.Reserved[2] = 0;
uci.Reserved[3] = 0;
- instr->addr = block * inftl->EraseSize + SECTORSIZE * 2;
- if (MTD_WRITEOOB(inftl->mbd.mtd, instr->addr +
+ if (MTD_WRITEOOB(inftl->mbd.mtd, block * inftl->EraseSize + SECTORSIZE * 2 +
8, 8, &retlen, (char *)&uci) < 0)
- goto fail;
+ return -1;
return 0;
-fail:
- /* could not format, update the bad block table (caller is responsible
- for setting the PUtable to BLOCK_RESERVED on failure) */
- inftl->mbd.mtd->block_markbad(inftl->mbd.mtd, instr->addr);
- return -1;
}
/*
if (INFTL_formatblock(inftl, block) < 0) {
/*
* Cannot format !!!! Mark it as Bad Unit,
+ * FixMe: update the BadUnitTable on disk.
*/
inftl->PUtable[block] = BLOCK_RESERVED;
} else {
int chain_length, do_format_chain;
struct inftl_unithead1 h0;
struct inftl_unittail h1;
- size_t retlen;
- int i;
+ int i, retlen;
u8 *ANACtable, ANAC;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_mount(inftl=%p)\n", s);
+ DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_mount(inftl=0x%x)\n", (int)s);
/* Search for INFTL MediaHeader and Spare INFTL Media Header */
if (find_boot_record(s) < 0) {
# drivers/mtd/maps/Kconfig
-# $Id: Kconfig,v 1.30 2004/07/21 00:16:14 jwboyer Exp $
+# $Id: Kconfig,v 1.12 2003/06/23 07:38:11 dwmw2 Exp $
menu "Mapping drivers for chip access"
depends on MTD!=n
command set driver code to communicate with flash chips which
are mapped physically into the CPU's memory. You will need to
configure the physical address and size of the flash chips on
- your particular board as well as the bus width, either statically
- with config options or at run-time.
+ your particular board as well as the bus width.
config MTD_PHYSMAP_START
hex "Physical start address of flash mapping"
are mapped on your particular target board. Refer to the
memory map which should hopefully be in the documentation for
your board.
- Ignore this option if you use run-time physmap configuration
- (i.e., run-time calling physmap_configure()).
config MTD_PHYSMAP_LEN
hex "Physical length of flash mapping"
than the total amount of flash present. Refer to the memory
map which should hopefully be in the documentation for your
board.
- Ignore this option if you use run-time physmap configuration
- (i.e., run-time calling physmap_configure()).
-config MTD_PHYSMAP_BANKWIDTH
- int "Bank width in octets"
+config MTD_PHYSMAP_BUSWIDTH
+ int "Bus width in octets"
depends on MTD_PHYSMAP
default "2"
help
in octets. For example, if you have a data bus width of 32
bits, you would set the bus width octect value to 4. This is
used internally by the CFI drivers.
- Ignore this option if you use run-time physmap configuration
- (i.e., run-time calling physmap_configure()).
config MTD_SUN_UFLASH
tristate "Sun Microsystems userflash support"
BE VERY CAREFUL.
-config MTD_ICHXROM
- tristate "BIOS flash chip on Intel Controller Hub 2/3/4/5"
+config MTD_ICH2ROM
+ tristate "BIOS flash chip on Intel Hub Controller 2"
depends on X86 && MTD_JEDECPROBE && MTD_COMPLEX_MAPPINGS
help
- Support for treating the BIOS flash chip on ICHX motherboards
+ Support for treating the BIOS flash chip on ICH2 motherboards
as an MTD device - with this you can reprogram your BIOS.
BE VERY CAREFUL.
config MTD_LASAT
tristate "Flash chips on LASAT board"
- depends on LASAT
+ depends on LASAT && MTD_CFI
help
Support for the flash chips on the Lasat 100 and 200 boards.
You can say 'Y' to both this and 'MTD_PB1XXX_BOOT' above, to use
both banks.
-config MTD_PB1550
- tristate "Flash devices on Alchemy PB1550 board"
- depends on MIPS && MIPS_PB1550
- help
- Flash memory access on Alchemy Pb1550 board
-
-config MTD_PB1550_BOOT
- bool "PB1550 boot flash device"
- depends on MTD_PB1550
- help
- Use the first of the two 64MiB flash banks on Pb1550 board.
- You can say 'Y' to both this and 'MTD_PB1550_USER' below, to use
- both banks.
-
-config MTD_PB1550_USER
- bool "PB1550 user flash device"
- depends on MTD_PB1550
- default y if MTD_PB1550_BOOT = n
- help
- Use the second of the two 64MiB flash banks on Pb1550 board.
- You can say 'Y' to both this and 'MTD_PB1550_BOOT' above, to use
- both banks.
-
-config MTD_DB1550
- tristate "Flash devices on Alchemy DB1550 board"
- depends on MIPS && MIPS_DB1550
- help
- Flash memory access on Alchemy Db1550 board
-
-config MTD_DB1550_BOOT
- bool "DB1550 boot flash device"
- depends on MTD_DB1550
- help
- Use the first of the two 64MiB flash banks on Db1550 board.
- You can say 'Y' to both this and 'MTD_DB1550_USER' below, to use
- both banks.
-
-config MTD_DB1550_USER
- bool "DB1550 user flash device"
- depends on MTD_DB1550
- default y if MTD_DB1550_BOOT = n
- help
- Use the second of the two 64MiB flash banks on Db1550 board.
- You can say 'Y' to both this and 'MTD_DB1550_BOOT' above, to use
- both banks.
-
config MTD_DILNETPC
tristate "CFI Flash device mapped on DIL/Net PC"
depends on X86 && MTD_CONCAT && MTD_PARTITIONS && MTD_CFI_INTELEXT
BE VERY CAREFUL.
-config MTD_SBC8240
- tristate "Flash device on SBC8240"
- depends on PPC32 && MTD_JEDECPROBE && 6xx && 8260
- help
- Flash access on the SBC8240 board from Wind River. See
- <http://www.windriver.com/products/sbc8240/>
-
config MTD_TQM8XXL
tristate "CFI Flash device mapped on TQM8XXL"
depends on MTD_CFI && PPC32 && 8xx && TQM8xxL
config MTD_DBOX2
tristate "CFI Flash device mapped on D-Box2"
- depends on PPC32 && 8xx && DBOX2 && MTD_CFI_INTELSTD && MTD_CFI_INTELEXT && MTD_CFI_AMDSTD
+ depends on PPC32 && 8xx && MTD_CFI_INTELSTD && MTD_CFI_INTELEXT && MTD_CFI_AMDSTD
help
This enables access routines for the flash chips on the Nokia/Sagem
D-Box 2 board. If you have one of these boards and would like to use
PhotoMax Digital Picture Frame.
If you have such a device, say 'Y'.
-config MTD_NOR_TOTO
- tristate "NOR Flash device on TOTO board"
- depends on ARM && ARCH_OMAP && OMAP_TOTO
- help
- This enables access to the NOR flash on the Texas Instruments
- TOTO board.
-
config MTD_H720X
tristate "Hynix evaluation board mappings"
depends on ARM && MTD_CFI && ( ARCH_H7201 || ARCH_H7202 )
This enables access to the flash chips on the Hynix evaluation boards.
If you have such a board, say 'Y'.
-config MTD_MPC1211
- tristate "CFI Flash device mapped on Interface MPC-1211"
- depends on SUPERH && SH_MPC1211 && MTD_CFI
- help
- This enables access to the flash chips on the Interface MPC-1211(CTP/PCI/MPC-SH02).
- If you have such a board, say 'Y'.
-
# This needs CFI or JEDEC, depending on the cards found.
config MTD_PCI
tristate "PCI MTD driver"
config MTD_WRSBC8260
tristate "Map driver for WindRiver PowerQUICC II MPC82xx board"
- depends on (SBC82xx || SBC8560)
- select MTD_PARTITIONS
- select MTD_MAP_BANK_WIDTH_4
- select MTD_MAP_BANK_WIDTH_1
- select MTD_CFI_I1
- select MTD_CFI_I4
+ depends on MTD_PARTITIONS && SBC82xx
help
Map driver for WindRiver PowerQUICC II MPC82xx board. Drives
all three flash regions on CS0, CS1 and CS6 if they are configured
correctly by the boot loader.
-config MTD_DMV182
- tristate "Map driver for Dy-4 SVME/DMV-182 board."
- depends on DMV182
- select MTD_PARTITIONS
- select MTD_MAP_BANK_WIDTH_32
- select MTD_CFI_I8
- select MTD_CFI_AMDSTD
- help
- Map driver for Dy-4 SVME/DMV-182 board.
-
endmenu
#
# linux/drivers/maps/Makefile
#
-# $Id: Makefile.common,v 1.14 2004/07/12 16:07:31 dwmw2 Exp $
+# $Id: Makefile.common,v 1.2 2003/05/28 10:48:41 dwmw2 Exp $
ifeq ($(CONFIG_MTD_COMPLEX_MAPPINGS),y)
obj-$(CONFIG_MTD) += map_funcs.o
obj-$(CONFIG_MTD_IQ80310) += iq80310.o
obj-$(CONFIG_MTD_L440GX) += l440gx.o
obj-$(CONFIG_MTD_AMD76XROM) += amd76xrom.o
-obj-$(CONFIG_MTD_ICHXROM) += ichxrom.o
+obj-$(CONFIG_MTD_ICH2ROM) += ich2rom.o
obj-$(CONFIG_MTD_TSUNAMI) += tsunami_flash.o
obj-$(CONFIG_MTD_LUBBOCK) += lubbock-flash.o
obj-$(CONFIG_MTD_MBX860) += mbx860.o
obj-$(CONFIG_MTD_SOLUTIONENGINE)+= solutionengine.o
obj-$(CONFIG_MTD_PCI) += pci.o
obj-$(CONFIG_MTD_PB1XXX) += pb1xxx-flash.o
-obj-$(CONFIG_MTD_DB1X00) += db1x00-flash.o
-obj-$(CONFIG_MTD_PB1550) += pb1550-flash.o
-obj-$(CONFIG_MTD_DB1550) += db1550-flash.o
obj-$(CONFIG_MTD_LASAT) += lasat.o
obj-$(CONFIG_MTD_AUTCPU12) += autcpu12-nvram.o
obj-$(CONFIG_MTD_EDB7312) += edb7312.o
obj-$(CONFIG_MTD_BEECH) += beech-mtd.o
obj-$(CONFIG_MTD_ARCTIC) += arctic-mtd.o
obj-$(CONFIG_MTD_H720X) += h720x-flash.o
-obj-$(CONFIG_MTD_SBC8240) += sbc8240.o
-obj-$(CONFIG_MTD_NOR_TOTO) += omap-toto-flash.o
-obj-$(CONFIG_MTD_MPC1211) += mpc1211.o
obj-$(CONFIG_MTD_IXP4XX) += ixp4xx.o
obj-$(CONFIG_MTD_WRSBC8260) += wr_sbc82xx_flash.o
-obj-$(CONFIG_MTD_DMV182) += dmv182.o
* amd76xrom.c
*
* Normal mappings of chips in physical memory
- * $Id: amd76xrom.c,v 1.12 2004/07/14 14:44:31 thayne Exp $
+ * $Id: amd76xrom.c,v 1.8 2003/05/28 15:44:28 dwmw2 Exp $
*/
#include <linux/module.h>
#include <linux/pci_ids.h>
-#define xstr(s) str(s)
-#define str(s) #s
-#define MOD_NAME xstr(KBUILD_BASENAME)
-
-#define MTD_DEV_NAME_LENGTH 16
-
struct amd76xrom_map_info {
struct map_info map;
struct mtd_info *mtd;
unsigned long window_addr;
u32 window_start, window_size;
struct pci_dev *pdev;
- struct resource window_rsrc;
- struct resource rom_rsrc;
- char mtd_name[MTD_DEV_NAME_LENGTH];
};
static struct amd76xrom_map_info amd76xrom_map = {
.map = {
- .name = MOD_NAME,
+ .name = "AMD76X rom",
.size = 0,
- .bankwidth = 1,
- }
- /* remaining fields of structure are initialized to 0 */
+ .buswidth = 1,
+ },
+ .mtd = NULL,
+ .window_addr = 0,
};
-
-static void amd76xrom_cleanup(struct amd76xrom_map_info *info)
-{
- u8 byte;
-
- /* Disable writes through the rom window */
- pci_read_config_byte(info->pdev, 0x40, &byte);
- pci_write_config_byte(info->pdev, 0x40, byte & ~1);
-
- if (info->mtd) {
- del_mtd_device(info->mtd);
- map_destroy(info->mtd);
- info->mtd = NULL;
- info->map.virt = 0;
- }
- if (info->rom_rsrc.parent)
- release_resource(&info->rom_rsrc);
- if (info->window_rsrc.parent)
- release_resource(&info->window_rsrc);
-
- if (info->window_addr) {
- iounmap((void *)(info->window_addr));
- info->window_addr = 0;
- }
-}
-
-
static int __devinit amd76xrom_init_one (struct pci_dev *pdev,
const struct pci_device_id *ent)
{
u8 segen_bits;
};
static struct rom_window rom_window[] = {
- /*
- * Need the 5MiB window for chips that have block lock/unlock
- * registers located below 4MiB window.
- */
{ 0xffb00000, 5*1024*1024, (1<<7) | (1<<6), },
{ 0xffc00000, 4*1024*1024, (1<<7), },
{ 0xffff0000, 64*1024, 0 },
int i;
u32 rom_size;
- info->pdev = pdev;
window = &rom_window[0];
- while (window->size) {
- /*
- * Try to reserve the window mem region. If this fails then
- * it is likely due to a fragment of the window being
- * "reseved" by the BIOS. In the case that the
- * request_mem_region() fails then once the rom size is
- * discovered we will try to reserve the unreserved fragment.
- */
- info->window_rsrc.name = MOD_NAME;
- info->window_rsrc.start = window->start;
- info->window_rsrc.end = window->start + window->size - 1;
- info->window_rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
- if (request_resource(&iomem_resource, &info->window_rsrc)) {
- info->window_rsrc.parent = NULL;
- printk(KERN_ERR MOD_NAME
- " %s(): Unable to register resource"
- " 0x%.08lx-0x%.08lx - kernel bug?\n",
- __func__,
- info->window_rsrc.start, info->window_rsrc.end);
+ /* disabled because it fights with BIOS reserved regions */
+#define REQUEST_MEM_REGION 0
+#if REQUEST_MEM_REGION
+ while(window->size) {
+ if (request_mem_region(window->start, window->size, "amd76xrom")) {
+ break;
}
+ window++;
+ }
+ if (!window->size) {
+ printk(KERN_ERR "amd76xrom: cannot reserve rom window\n");
+ goto err_out_none;
+ }
+#endif /* REQUEST_MEM_REGION */
- /* Enable the selected rom window */
- pci_read_config_byte(pdev, 0x43, &byte);
- pci_write_config_byte(pdev, 0x43, byte | window->segen_bits);
+ /* Enable the selected rom window */
+ pci_read_config_byte(pdev, 0x43, &byte);
+ pci_write_config_byte(pdev, 0x43, byte | window->segen_bits);
- /* Enable writes through the rom window */
- pci_read_config_byte(pdev, 0x40, &byte);
- pci_write_config_byte(pdev, 0x40, byte | 1);
+ /* Enable writes through the rom window */
+ pci_read_config_byte(pdev, 0x40, &byte);
+ pci_write_config_byte(pdev, 0x40, byte | 1);
- /* FIXME handle registers 0x80 - 0x8C the bios region locks */
+ /* FIXME handle registers 0x80 - 0x8C the bios region locks */
- printk(KERN_NOTICE MOD_NAME " window : %x at %x\n",
- window->size, window->start);
- /* For write accesses caches are useless */
- info->window_addr =
- (unsigned long)ioremap_nocache(window->start,
- window->size);
+ printk(KERN_NOTICE "amd76xrom window : %x at %x\n",
+ window->size, window->start);
+ /* For write accesses caches are useless */
+ info->window_addr = (unsigned long)ioremap_nocache(window->start, window->size);
- if (!info->window_addr) {
- printk(KERN_ERR "Failed to ioremap\n");
+ if (!info->window_addr) {
+ printk(KERN_ERR "Failed to ioremap\n");
+ goto err_out_free_mmio_region;
+ }
+ info->mtd = NULL;
+ for(i = 0; (rom_size = rom_probe_sizes[i]); i++) {
+ char **chip_type;
+ if (rom_size > window->size) {
continue;
}
-
- info->mtd = NULL;
-
- for(i = 0; (rom_size = rom_probe_sizes[i]); i++) {
- char **chip_type;
- if (rom_size > window->size) {
- continue;
- }
- info->map.phys = window->start + window->size - rom_size;
- info->map.virt =
- info->window_addr + window->size - rom_size;
- info->map.size = rom_size;
- simple_map_init(&info->map);
- chip_type = rom_probe_types;
- for(; !info->mtd && *chip_type; chip_type++) {
- info->mtd = do_map_probe(*chip_type, &amd76xrom_map.map);
- }
- if (info->mtd) goto found_mtd;
+ info->map.phys = window->start + window->size - rom_size;
+ info->map.virt =
+ info->window_addr + window->size - rom_size;
+ info->map.size = rom_size;
+ simple_map_init(&info->map);
+ chip_type = rom_probe_types;
+ for(; !info->mtd && *chip_type; chip_type++) {
+ info->mtd = do_map_probe(*chip_type, &amd76xrom_map.map);
+ }
+ if (info->mtd) {
+ break;
}
- iounmap((void *)(info->window_addr));
- info->window_addr = 0;
-
- /* Disable writes through the rom window */
- pci_read_config_byte(pdev, 0x40, &byte);
- pci_write_config_byte(pdev, 0x40, byte & ~1);
-
- window++;
}
- goto failed;
-
- found_mtd:
- printk(KERN_NOTICE MOD_NAME " chip at offset: 0x%x\n",
+ if (!info->mtd) {
+ goto err_out_iounmap;
+ }
+ printk(KERN_NOTICE "amd76xrom chip at offset: 0x%x\n",
window->size - rom_size);
-
+
info->mtd->owner = THIS_MODULE;
-
- if (!info->window_rsrc.parent) {
- /* failed to reserve entire window - try fragments */
- info->window_rsrc.name = MOD_NAME;
- info->window_rsrc.start = window->start;
- info->window_rsrc.end = window->start + window->size - rom_size - 1;
- info->window_rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
- if (request_resource(&iomem_resource, &info->window_rsrc)) {
- printk(KERN_ERR MOD_NAME
- ": cannot reserve window resource fragment\n");
-#if 0
- /*
- * The BIOS e820 usually reserves this so it isn't
- * usually an error.
- */
- goto failed;
-#endif
- }
- }
-
add_mtd_device(info->mtd);
info->window_start = window->start;
info->window_size = window->size;
-
- if (info->window_rsrc.parent) {
- /*
- * Registering the MTD device in iomem may not be possible
- * if there is a BIOS "reserved" and BUSY range. If this
- * fails then continue anyway.
- */
- snprintf(info->mtd_name, MTD_DEV_NAME_LENGTH,
- "mtd%d", info->mtd->index);
-
- info->rom_rsrc.name = info->mtd_name;
- info->rom_rsrc.start = window->start + window->size - rom_size;
- info->rom_rsrc.end = window->start + window->size - 1;
- info->rom_rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
- if (request_resource(&info->window_rsrc, &info->rom_rsrc)) {
- printk(KERN_ERR MOD_NAME
- ": cannot reserve MTD resource\n");
- info->rom_rsrc.parent = NULL;
- }
- }
-
return 0;
- failed:
- amd76xrom_cleanup(info);
+err_out_iounmap:
+ iounmap((void *)(info->window_addr));
+err_out_free_mmio_region:
+#if REQUEST_MEM_REGION
+ release_mem_region(window->start, window->size);
+err_out_none:
+#endif /* REQUEST_MEM_REGION */
return -ENODEV;
}
static void __devexit amd76xrom_remove_one (struct pci_dev *pdev)
{
struct amd76xrom_map_info *info = &amd76xrom_map;
+ u8 byte;
+
+ del_mtd_device(info->mtd);
+ map_destroy(info->mtd);
+ info->mtd = NULL;
+ info->map.virt = 0;
+
+ iounmap((void *)(info->window_addr));
+ info->window_addr = 0;
+
+ /* Disable writes through the rom window */
+ pci_read_config_byte(pdev, 0x40, &byte);
+ pci_write_config_byte(pdev, 0x40, byte & ~1);
- amd76xrom_cleanup(info);
+#if REQUEST_MEM_REGION
+ release_mem_region(info->window_start, info->window_size);
+#endif /* REQUEST_MEM_REGION */
}
static struct pci_device_id amd76xrom_pci_tbl[] = {
#if 0
static struct pci_driver amd76xrom_driver = {
- .name = MOD_NAME,
+ .name = "amd76xrom",
.id_table = amd76xrom_pci_tbl,
.probe = amd76xrom_init_one,
.remove = amd76xrom_remove_one,
/*
- * $Id: arctic-mtd.c,v 1.11 2004/07/12 21:59:43 dwmw2 Exp $
+ * $Id: arctic-mtd.c,v 1.10 2003/06/02 16:37:59 trini Exp $
*
* drivers/mtd/maps/arctic-mtd.c MTD mappings and partition tables for
* IBM 405LP Arctic boards.
static struct map_info arctic_mtd_map = {
.name = NAME,
.size = SIZE,
- .bankwidth = BUSWIDTH,
+ .buswidth = BUSWIDTH,
.phys = PADDR,
};
* NV-RAM memory access on autcpu12
* (C) 2002 Thomas Gleixner (gleixner@autronix.de)
*
- * $Id: autcpu12-nvram.c,v 1.6 2004/07/12 21:59:43 dwmw2 Exp $
+ * $Id: autcpu12-nvram.c,v 1.5 2003/05/21 12:45:18 dwmw2 Exp $
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
struct map_info autcpu12_sram_map = {
.name = "SRAM",
.size = 32768,
- .bankwidth = 4,
+ .buswidth = 4,
.phys = 0x12000000,
};
/*
- * $Id: beech-mtd.c,v 1.8 2004/07/12 21:59:43 dwmw2 Exp $
+ * $Id: beech-mtd.c,v 1.7 2003/05/21 12:45:18 dwmw2 Exp $
*
* drivers/mtd/maps/beech-mtd.c MTD mappings and partition tables for
* IBM 405LP Beech boards.
static struct map_info beech_mtd_map = {
.name = NAME,
.size = SIZE,
- .bankwidth = BUSWIDTH,
+ .buswidth = BUSWIDTH,
.phys = PADDR
};
/*
* Flash on Cirrus CDB89712
*
- * $Id: cdb89712.c,v 1.8 2004/07/12 21:59:43 dwmw2 Exp $
+ * $Id: cdb89712.c,v 1.7 2003/05/21 12:45:18 dwmw2 Exp $
*/
#include <linux/module.h>
struct map_info cdb89712_flash_map = {
.name = "flash",
.size = FLASH_SIZE,
- .bankwidth = FLASH_WIDTH,
+ .buswidth = FLASH_WIDTH,
.phys = FLASH_START,
};
struct map_info cdb89712_sram_map = {
.name = "SRAM",
.size = SRAM_SIZE,
- .bankwidth = SRAM_WIDTH,
+ .buswidth = SRAM_WIDTH,
.phys = SRAM_START,
};
struct map_info cdb89712_bootrom_map = {
.name = "BootROM",
.size = BOOTROM_SIZE,
- .bankwidth = BOOTROM_WIDTH,
+ .buswidth = BOOTROM_WIDTH,
.phys = BOOTROM_START,
};
*
* (C) 2000 Nicolas Pitre <nico@cam.org>
*
- * $Id: ceiva.c,v 1.10 2004/07/12 21:59:43 dwmw2 Exp $
+ * $Id: ceiva.c,v 1.8 2003/05/21 12:45:18 dwmw2 Exp $
*/
#include <linux/config.h>
}
clps[i].map->virt = (unsigned long)clps[i].vbase;
- clps[i].map->bankwidth = clps[i].width;
+ clps[i].map->buswidth = clps[i].width;
clps[i].map->size = clps[i].size;
simple_map_init(&clps[i].map);
/*
 * Copyright © 2001 Flaga hf. Medical Devices, Kári Davíðsson <kd@flaga.is>
*
- * $Id: cfi_flagadm.c,v 1.12 2004/07/12 21:59:43 dwmw2 Exp $
+ * $Id: cfi_flagadm.c,v 1.11 2003/05/21 12:45:18 dwmw2 Exp $
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
struct map_info flagadm_map = {
.name = "FlagaDM flash device",
.size = FLASH_SIZE,
- .bankwidth = 2,
+ .buswidth = 2,
};
struct mtd_partition flagadm_parts[] = {
/*
- * $Id: cstm_mips_ixx.c,v 1.10 2004/07/12 21:59:43 dwmw2 Exp $
+ * $Id: cstm_mips_ixx.c,v 1.9 2003/05/21 12:45:18 dwmw2 Exp $
*
* Mapping of a custom board with both AMD CFI and JEDEC flash in partitions.
* Config with both CFI and JEDEC device support.
char *name;
unsigned long window_addr;
unsigned long window_size;
- int bankwidth;
+ int buswidth;
int num_partitions;
};
"big flash", // name
0x08000000, // window_addr
0x02000000, // window_size
- 4, // bankwidth
+ 4, // buswidth
1, // num_partitions
}
"MTD flash", // name
CONFIG_MTD_CSTM_MIPS_IXX_START, // window_addr
CONFIG_MTD_CSTM_MIPS_IXX_LEN, // window_size
- CONFIG_MTD_CSTM_MIPS_IXX_BUSWIDTH, // bankwidth
+ CONFIG_MTD_CSTM_MIPS_IXX_BUSWIDTH, // buswidth
1, // num_partitions
},
}
cstm_mips_ixx_map[i].name = cstm_mips_ixx_board_desc[i].name;
cstm_mips_ixx_map[i].size = cstm_mips_ixx_board_desc[i].window_size;
- cstm_mips_ixx_map[i].bankwidth = cstm_mips_ixx_board_desc[i].bankwidth;
+ cstm_mips_ixx_map[i].buswidth = cstm_mips_ixx_board_desc[i].buswidth;
#if defined(CONFIG_MIPS_ITE8172) || defined(CONFIG_MIPS_IVR)
cstm_mips_ixx_map[i].set_vpp = cstm_mips_ixx_set_vpp;
#endif
+++ /dev/null
-/*
- * Flash memory access on Alchemy Db1550 board
- *
- * $Id: db1550-flash.c,v 1.3 2004/07/14 17:45:40 dwmw2 Exp $
- *
- * (C) 2004 Embedded Edge, LLC, based on db1550-flash.c:
- * (C) 2003 Pete Popov <pete_popov@yahoo.com>
- *
- */
-
-#include <linux/config.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-
-#include <asm/io.h>
-#include <asm/au1000.h>
-
-#ifdef DEBUG_RW
-#define DBG(x...) printk(x)
-#else
-#define DBG(x...)
-#endif
-
-static unsigned long window_addr;
-static unsigned long window_size;
-
-
-static struct map_info db1550_map = {
- .name = "Db1550 flash",
-};
-
-static unsigned char flash_bankwidth = 4;
-
-/*
- * Support only 64MB NOR Flash parts
- */
-
-#if defined(CONFIG_MTD_DB1550_BOOT) && defined(CONFIG_MTD_DB1550_USER)
-#define DB1550_BOTH_BANKS
-#elif defined(CONFIG_MTD_DB1550_BOOT) && !defined(CONFIG_MTD_DB1550_USER)
-#define DB1550_BOOT_ONLY
-#elif !defined(CONFIG_MTD_DB1550_BOOT) && defined(CONFIG_MTD_DB1550_USER)
-#define DB1550_USER_ONLY
-#endif
-
-#ifdef DB1550_BOTH_BANKS
-/* both banks will be used. Combine the first bank and the first
- * part of the second bank together into a single jffs/jffs2
- * partition.
- */
-static struct mtd_partition db1550_partitions[] = {
- /* assume boot[2:0]:swap is '0000' or '1000', which translates to:
- * 1C00 0000 1FFF FFFF CE0 64MB Boot NOR Flash
- * 1800 0000 1BFF FFFF CE0 64MB Param NOR Flash
- */
- {
- .name = "User FS",
- .size = (0x1FC00000 - 0x18000000),
- .offset = 0x0000000
- },{
- .name = "yamon",
- .size = 0x0100000,
- .offset = MTDPART_OFS_APPEND,
- .mask_flags = MTD_WRITEABLE
- },{
- .name = "raw kernel",
- .size = (0x300000 - 0x40000), /* last 256KB is yamon env */
- .offset = MTDPART_OFS_APPEND,
- }
-};
-#elif defined(DB1550_BOOT_ONLY)
-static struct mtd_partition db1550_partitions[] = {
- /* assume boot[2:0]:swap is '0000' or '1000', which translates to:
- * 1C00 0000 1FFF FFFF CE0 64MB Boot NOR Flash
- */
- {
- .name = "User FS",
- .size = 0x03c00000,
- .offset = 0x0000000
- },{
- .name = "yamon",
- .size = 0x0100000,
- .offset = MTDPART_OFS_APPEND,
- .mask_flags = MTD_WRITEABLE
- },{
- .name = "raw kernel",
- .size = (0x300000-0x40000), /* last 256KB is yamon env */
- .offset = MTDPART_OFS_APPEND,
- }
-};
-#elif defined(DB1550_USER_ONLY)
-static struct mtd_partition db1550_partitions[] = {
- /* assume boot[2:0]:swap is '0000' or '1000', which translates to:
- * 1800 0000 1BFF FFFF CE0 64MB Param NOR Flash
- */
- {
- .name = "User FS",
- .size = (0x4000000 - 0x200000), /* reserve 2MB for raw kernel */
- .offset = 0x0000000
- },{
- .name = "raw kernel",
- .size = MTDPART_SIZ_FULL,
- .offset = MTDPART_OFS_APPEND,
- }
-};
-#else
-#error MTD_DB1550 define combo error /* should never happen */
-#endif
-
-#define NB_OF(x) (sizeof(x)/sizeof(x[0]))
-
-static struct mtd_info *mymtd;
-
-/*
- * Probe the flash density and setup window address and size
- * based on user CONFIG options. There are times when we don't
- * want the MTD driver to be probing the boot or user flash,
- * so having the option to enable only one bank is important.
- */
-int setup_flash_params(void)
-{
-#if defined(DB1550_BOTH_BANKS)
- window_addr = 0x18000000;
- window_size = 0x8000000;
-#elif defined(DB1550_BOOT_ONLY)
- window_addr = 0x1C000000;
- window_size = 0x4000000;
-#else /* USER ONLY */
- window_addr = 0x1E000000;
- window_size = 0x4000000;
-#endif
- return 0;
-}
-
-int __init db1550_mtd_init(void)
-{
- struct mtd_partition *parts;
- int nb_parts = 0;
-
- /* Default flash bankwidth */
- db1550_map.bankwidth = flash_bankwidth;
-
- if (setup_flash_params())
- return -ENXIO;
-
- /*
- * Static partition definition selection
- */
- parts = db1550_partitions;
- nb_parts = NB_OF(db1550_partitions);
- db1550_map.size = window_size;
-
- /*
- * Now let's probe for the actual flash. Do it here since
- * specific machine settings might have been set above.
- */
- printk(KERN_NOTICE "Pb1550 flash: probing %d-bit flash bus\n",
- db1550_map.bankwidth*8);
- db1550_map.virt =
- (unsigned long)ioremap(window_addr, window_size);
- mymtd = do_map_probe("cfi_probe", &db1550_map);
- if (!mymtd) return -ENXIO;
- mymtd->owner = THIS_MODULE;
-
- add_mtd_partitions(mymtd, parts, nb_parts);
- return 0;
-}
-
-static void __exit db1550_mtd_cleanup(void)
-{
- if (mymtd) {
- del_mtd_partitions(mymtd);
- map_destroy(mymtd);
- }
-}
-
-module_init(db1550_mtd_init);
-module_exit(db1550_mtd_cleanup);
-
-MODULE_AUTHOR("Embedded Edge, LLC");
-MODULE_DESCRIPTION("Db1550 mtd map driver");
-MODULE_LICENSE("GPL");
+++ /dev/null
-/*
- * Flash memory access on Alchemy Db1xxx boards
- *
- * $Id: db1x00-flash.c,v 1.3 2004/07/14 17:45:40 dwmw2 Exp $
- *
- * (C) 2003 Pete Popov <ppopov@pacbell.net>
- *
- */
-
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-
-#include <asm/io.h>
-#include <asm/au1000.h>
-#include <asm/db1x00.h>
-
-#ifdef DEBUG_RW
-#define DBG(x...) printk(x)
-#else
-#define DBG(x...)
-#endif
-
-static unsigned long window_addr;
-static unsigned long window_size;
-static unsigned long flash_size;
-
-static BCSR * const bcsr = (BCSR *)0xAE000000;
-static unsigned char flash_bankwidth = 4;
-
-/*
- * The Db1x boards support different flash densities. We setup
- * the mtd_partition structures below for default of 64Mbit
- * flash densities, and override the partitions sizes, if
- * necessary, after we check the board status register.
- */
-
-#ifdef DB1X00_BOTH_BANKS
-/* both banks will be used. Combine the first bank and the first
- * part of the second bank together into a single jffs/jffs2
- * partition.
- */
-static struct mtd_partition db1x00_partitions[] = {
- {
- .name = "User FS",
- .size = 0x1c00000,
- .offset = 0x0000000
- },{
- .name = "yamon",
- .size = 0x0100000,
- .offset = MTDPART_OFS_APPEND,
- .mask_flags = MTD_WRITEABLE
- },{
- .name = "raw kernel",
- .size = (0x300000-0x40000), /* last 256KB is env */
- .offset = MTDPART_OFS_APPEND,
- }
-};
-#elif defined(DB1X00_BOOT_ONLY)
-static struct mtd_partition db1x00_partitions[] = {
- {
- .name = "User FS",
- .size = 0x00c00000,
- .offset = 0x0000000
- },{
- .name = "yamon",
- .size = 0x0100000,
- .offset = MTDPART_OFS_APPEND,
- .mask_flags = MTD_WRITEABLE
- },{
- .name = "raw kernel",
- .size = (0x300000-0x40000), /* last 256KB is env */
- .offset = MTDPART_OFS_APPEND,
- }
-};
-#elif defined(DB1X00_USER_ONLY)
-static struct mtd_partition db1x00_partitions[] = {
- {
- .name = "User FS",
- .size = 0x0e00000,
- .offset = 0x0000000
- },{
- .name = "raw kernel",
- .size = MTDPART_SIZ_FULL,
- .offset = MTDPART_OFS_APPEND,
- }
-};
-#else
-#error MTD_DB1X00 define combo error /* should never happen */
-#endif
-#define NB_OF(x) (sizeof(x)/sizeof(x[0]))
-
-#define NAME "Db1x00 Linux Flash"
-
-static struct map_info db1xxx_mtd_map = {
- .name = NAME,
-};
-
-static struct mtd_partition *parsed_parts;
-static struct mtd_info *db1xxx_mtd;
-
-/*
- * Probe the flash density and setup window address and size
- * based on user CONFIG options. There are times when we don't
- * want the MTD driver to be probing the boot or user flash,
- * so having the option to enable only one bank is important.
- */
-int setup_flash_params(void)
-{
- switch ((bcsr->status >> 14) & 0x3) {
- case 0: /* 64Mbit devices */
- flash_size = 0x800000; /* 8MB per part */
-#if defined(DB1X00_BOTH_BANKS)
- window_addr = 0x1E000000;
- window_size = 0x2000000;
-#elif defined(DB1X00_BOOT_ONLY)
- window_addr = 0x1F000000;
- window_size = 0x1000000;
-#else /* USER ONLY */
- window_addr = 0x1E000000;
- window_size = 0x1000000;
-#endif
- break;
- case 1:
- /* 128 Mbit devices */
- flash_size = 0x1000000; /* 16MB per part */
-#if defined(DB1X00_BOTH_BANKS)
- window_addr = 0x1C000000;
- window_size = 0x4000000;
- /* USERFS from 0x1C00 0000 to 0x1FC0 0000 */
- db1x00_partitions[0].size = 0x3C00000;
-#elif defined(DB1X00_BOOT_ONLY)
- window_addr = 0x1E000000;
- window_size = 0x2000000;
- /* USERFS from 0x1E00 0000 to 0x1FC0 0000 */
- db1x00_partitions[0].size = 0x1C00000;
-#else /* USER ONLY */
- window_addr = 0x1C000000;
- window_size = 0x2000000;
- /* USERFS from 0x1C00 0000 to 0x1DE00000 */
- db1x00_partitions[0].size = 0x1DE0000;
-#endif
- break;
- case 2:
- /* 256 Mbit devices */
- flash_size = 0x4000000; /* 64MB per part */
-#if defined(DB1X00_BOTH_BANKS)
- return 1;
-#elif defined(DB1X00_BOOT_ONLY)
- /* Boot ROM flash bank only; no user bank */
- window_addr = 0x1C000000;
- window_size = 0x4000000;
- /* USERFS from 0x1C00 0000 to 0x1FC00000 */
- db1x00_partitions[0].size = 0x3C00000;
-#else /* USER ONLY */
- return 1;
-#endif
- break;
- default:
- return 1;
- }
- db1xxx_mtd_map.size = window_size;
- db1xxx_mtd_map.bankwidth = flash_bankwidth;
- db1xxx_mtd_map.phys = window_addr;
- db1xxx_mtd_map.bankwidth = flash_bankwidth;
- return 0;
-}
-
-int __init db1x00_mtd_init(void)
-{
- struct mtd_partition *parts;
- int nb_parts = 0;
-
- if (setup_flash_params())
- return -ENXIO;
-
- /*
- * Static partition definition selection
- */
- parts = db1x00_partitions;
- nb_parts = NB_OF(db1x00_partitions);
-
- /*
- * Now let's probe for the actual flash. Do it here since
- * specific machine settings might have been set above.
- */
- printk(KERN_NOTICE "Db1xxx flash: probing %d-bit flash bus\n",
- db1xxx_mtd_map.bankwidth*8);
- db1xxx_mtd_map.virt = (unsigned long)ioremap(window_addr, window_size);
- db1xxx_mtd = do_map_probe("cfi_probe", &db1xxx_mtd_map);
- if (!db1xxx_mtd) return -ENXIO;
- db1xxx_mtd->owner = THIS_MODULE;
-
- add_mtd_partitions(db1xxx_mtd, parts, nb_parts);
- return 0;
-}
-
-static void __exit db1x00_mtd_cleanup(void)
-{
- if (db1xxx_mtd) {
- del_mtd_partitions(db1xxx_mtd);
- map_destroy(db1xxx_mtd);
- if (parsed_parts)
- kfree(parsed_parts);
- }
-}
-
-module_init(db1x00_mtd_init);
-module_exit(db1x00_mtd_cleanup);
-
-MODULE_AUTHOR("Pete Popov");
-MODULE_DESCRIPTION("Db1x00 mtd map driver");
-MODULE_LICENSE("GPL");
/*
- * $Id: dbox2-flash.c,v 1.11 2004/07/12 21:59:43 dwmw2 Exp $
+ * $Id: dbox2-flash.c,v 1.9 2003/05/21 12:45:18 dwmw2 Exp $
*
* D-Box 2 flash driver
*/
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
#include <linux/config.h>
-#include <linux/errno.h>
/* partition_info gives details on the logical partitions that the split the
* single flash device into. If the size if zero we use up to the end of the
.mask_flags = MTD_WRITEABLE
},
{
- .name = "FLFS (U-Boot)",
+ .name = "flfs (ppcboot)",
.size = 128 * 1024,
.offset = MTDPART_OFS_APPEND,
.mask_flags = 0
},
{
- .name = "Root (SquashFS)",
+ .name = "root (cramfs)",
.size = 7040 * 1024,
.offset = MTDPART_OFS_APPEND,
.mask_flags = 0
},
{
- .name = "var (JFFS2)",
+ .name = "var (jffs2)",
.size = 896 * 1024,
.offset = MTDPART_OFS_APPEND,
.mask_flags = 0
},
{
- .name = "Flash without bootloader",
+ .name = "flash without bootloader",
.size = MTDPART_SIZ_FULL,
.offset = 128 * 1024,
.mask_flags = 0
},
{
- .name = "Complete Flash",
+ .name = "complete flash",
.size = MTDPART_SIZ_FULL,
.offset = 0,
.mask_flags = MTD_WRITEABLE
struct map_info dbox2_flash_map = {
.name = "D-Box 2 flash memory",
.size = WINDOW_SIZE,
- .bankwidth = 4,
+ .buswidth = 4,
.phys = WINDOW_ADDR,
};
mymtd = do_map_probe("cfi_probe", &dbox2_flash_map);
if (!mymtd) {
// Probe for single Intel 28F640
- dbox2_flash_map.bankwidth = 2;
+ dbox2_flash_map.buswidth = 2;
mymtd = do_map_probe("cfi_probe", &dbox2_flash_map);
}
*
* This code is GPL
*
- * $Id: dc21285.c,v 1.20 2004/07/12 22:38:29 dwmw2 Exp $
+ * $Id: dc21285.c,v 1.15 2003/05/21 12:45:18 dwmw2 Exp $
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/delay.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <asm/io.h>
#include <asm/hardware/dec21285.h>
-#include <asm/mach-types.h>
-static struct mtd_info *dc21285_mtd;
+static struct mtd_info *mymtd;
-#ifdef CONFIG_ARCH_NETWINDER
-/*
- * This is really ugly, but it seams to be the only
- * realiable way to do it, as the cpld state machine
- * is unpredictible. So we have a 25us penalty per
- * write access.
- */
-static void nw_en_write(void) {
- extern spinlock_t gpio_lock;
- unsigned long flags;
-
- /*
- * we want to write a bit pattern XXX1 to Xilinx to enable
- * the write gate, which will be open for about the next 2ms.
- */
- spin_lock_irqsave(&gpio_lock, flags);
- cpld_modify(1, 1);
- spin_unlock_irqrestore(&gpio_lock, flags);
-
- /*
- * let the ISA bus to catch on...
- */
- udelay(25);
-}
-#else
-#define nw_en_write() do { } while (0)
-#endif
-
-static map_word dc21285_read8(struct map_info *map, unsigned long ofs)
+__u8 dc21285_read8(struct map_info *map, unsigned long ofs)
{
- return *(uint8_t*)(map->map_priv_1 + ofs);
+ return *(__u8*)(map->map_priv_1 + ofs);
}
-static map_word dc21285_read16(struct map_info *map, unsigned long ofs)
+__u16 dc21285_read16(struct map_info *map, unsigned long ofs)
{
- return *(uint16_t*)(map->map_priv_1 + ofs);
+ return *(__u16*)(map->map_priv_1 + ofs);
}
-static map_word dc21285_read32(struct map_info *map, unsigned long ofs)
+__u32 dc21285_read32(struct map_info *map, unsigned long ofs)
{
- return *(uint32_t*)(map->map_priv_1 + ofs);
+ return *(__u32*)(map->map_priv_1 + ofs);
}
-static void dc21285_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
+void dc21285_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
{
memcpy(to, (void*)(map->map_priv_1 + from), len);
}
-static void dc21285_write(struct map_info *map, map_word d, unsigned long adr)
+void dc21285_write8(struct map_info *map, __u8 d, unsigned long adr)
{
- if (machine_is_netwinder())
- nw_en_write();
*CSR_ROMWRITEREG = adr & 3;
adr &= ~3;
- *(uint8_t*)(map->map_priv_1 + adr) = d.x[0];
+ *(__u8*)(map->map_priv_1 + adr) = d;
}
-static void dc21285_write16(struct map_info *map, map_word d, unsigned long adr)
+void dc21285_write16(struct map_info *map, __u16 d, unsigned long adr)
{
- if (machine_is_netwinder())
- nw_en_write();
*CSR_ROMWRITEREG = adr & 3;
adr &= ~3;
- *(uint16_t*)(map->map_priv_1 + adr) = d.x[0];
+ *(__u16*)(map->map_priv_1 + adr) = d;
}
-static void dc21285_write32(struct map_info *map, map_word d, unsigned long adr)
+void dc21285_write32(struct map_info *map, __u32 d, unsigned long adr)
{
- if (machine_is_netwinder())
- nw_en_write();
- *(uint32_t*)(map->map_priv_1 + adr) = d.x[0];
+ *(__u32*)(map->map_priv_1 + adr) = d;
}
-static void dc21285_copy_to_32(struct map_info *map, unsigned long to, const void *from, ssize_t len)
+void dc21285_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
{
- while (len > 0) {
- uint32_t d = *((uint32_t*)from)++;
- dc21285_write32(map, d, to);
- to += 4;
- len -= 4;
- }
-}
-
-static void dc21285_copy_to_16(struct map_info *map, unsigned long to, const void *from, ssize_t len)
-{
- while (len > 0) {
- uint16_t d = *((uint16_t*)from)++;
- dc21285_write16(map, d, to);
- to += 2;
- len -= 2;
+ switch (map->buswidth) {
+ case 4:
+ while (len > 0) {
+ __u32 d = *((__u32*)from)++;
+ dc21285_write32(map, d, to);
+ to += 4;
+ len -= 4;
+ }
+ break;
+ case 2:
+ while (len > 0) {
+ __u16 d = *((__u16*)from)++;
+ dc21285_write16(map, d, to);
+ to += 2;
+ len -= 2;
+ }
+ break;
+ case 1:
+ while (len > 0) {
+ __u8 d = *((__u8*)from)++;
+ dc21285_write8(map, d, to);
+ to++;
+ len--;
+ }
+ break;
}
}
-static void dc21285_copy_to_8(struct map_info *map, unsigned long to, const void *from, ssize_t len)
-{
- uint8_t d = *((uint8_t*)from)++;
- dc21285_write8(map, d, to);
- to++;
- len--;
-}
-
-static struct map_info dc21285_map = {
+struct map_info dc21285_map = {
.name = "DC21285 flash",
.phys = NO_XIP,
.size = 16*1024*1024,
+ .read8 = dc21285_read8,
+ .read16 = dc21285_read16,
+ .read32 = dc21285_read32,
.copy_from = dc21285_copy_from,
+ .write8 = dc21285_write8,
+ .write16 = dc21285_write16,
+ .write32 = dc21285_write32,
+ .copy_to = dc21285_copy_to
};
static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
#endif
-static int __init init_dc21285(void)
+int __init init_dc21285(void)
{
-#ifdef CONFIG_MTD_PARTITIONS
- int nrparts;
-#endif
-
- /* Determine bankwidth */
+ /*
+ * Flash timing is determined with bits 19-16 of the
+ * CSR_SA110_CNTL. The value is the number of wait cycles, or
+ * 0 for 16 cycles (the default). Cycles are 20 ns.
+ * Here we use 7 for 140 ns flash chips.
+ */
+ /* access time */
+ *CSR_SA110_CNTL = ((*CSR_SA110_CNTL & ~0x000f0000) | (7 << 16));
+ /* burst time */
+ *CSR_SA110_CNTL = ((*CSR_SA110_CNTL & ~0x00f00000) | (7 << 20));
+ /* tristate time */
+ *CSR_SA110_CNTL = ((*CSR_SA110_CNTL & ~0x0f000000) | (7 << 24));
+
+ /* Determine buswidth */
switch (*CSR_SA110_CNTL & (3<<14)) {
case SA110_CNTL_ROMWIDTH_8:
- dc21285_map.bankwidth = 1;
- dc21285_map.read = dc21285_read8;
- dc21285_map.write = dc21285_write8;
- dc21285_map.copy_to = dc21285_copy_to_8;
+ dc21285_map.buswidth = 1;
break;
case SA110_CNTL_ROMWIDTH_16:
- dc21285_map.bankwidth = 2;
- dc21285_map.read = dc21285_read16;
- dc21285_map.write = dc21285_write16;
- dc21285_map.copy_to = dc21285_copy_to_16;
+ dc21285_map.buswidth = 2;
break;
case SA110_CNTL_ROMWIDTH_32:
- dc21285_map.bankwidth = 4;
+ dc21285_map.buswidth = 4;
break;
- dc21285_map.read = dc21285_read32;
- dc21285_map.write = dc21285_write32;
- dc21285_map.copy_to = dc21285_copy_to_32;
default:
- printk (KERN_ERR "DC21285 flash: undefined bankwidth\n");
+ printk (KERN_ERR "DC21285 flash: undefined buswidth\n");
return -ENXIO;
}
- printk (KERN_NOTICE "DC21285 flash support (%d-bit bankwidth)\n",
- dc21285_map.bankwidth*8);
+ printk (KERN_NOTICE "DC21285 flash support (%d-bit buswidth)\n",
+ dc21285_map.buswidth*8);
/* Let's map the flash area */
dc21285_map.map_priv_1 = (unsigned long)ioremap(DC21285_FLASH, 16*1024*1024);
return -EIO;
}
- if (machine_is_ebsa285()) {
- dc21285_mtd = do_map_probe("cfi_probe", &dc21285_map);
- } else {
- dc21285_mtd = do_map_probe("jedec_probe", &dc21285_map);
- }
+ mymtd = do_map_probe("cfi_probe", &dc21285_map);
+ if (mymtd) {
+ int nrparts = 0;
- if (!dc21285_mtd) {
- iounmap((void *)dc21285_map.map_priv_1);
- return -ENXIO;
- }
-
- dc21285_mtd->owner = THIS_MODULE;
+ mymtd->owner = THIS_MODULE;
+
+ /* partition fixup */
#ifdef CONFIG_MTD_PARTITIONS
- nrparts = parse_mtd_partitions(dc21285_mtd, probes, &dc21285_parts, (void *)0);
- if (nrparts > 0)
- add_mtd_partitions(dc21285_mtd, dc21285_parts, nrparts);
- else
-#endif
- add_mtd_device(dc21285_mtd);
-
- if(machine_is_ebsa285()) {
- /*
- * Flash timing is determined with bits 19-16 of the
- * CSR_SA110_CNTL. The value is the number of wait cycles, or
- * 0 for 16 cycles (the default). Cycles are 20 ns.
- * Here we use 7 for 140 ns flash chips.
- */
- /* access time */
- *CSR_SA110_CNTL = ((*CSR_SA110_CNTL & ~0x000f0000) | (7 << 16));
- /* burst time */
- *CSR_SA110_CNTL = ((*CSR_SA110_CNTL & ~0x00f00000) | (7 << 20));
- /* tristate time */
- *CSR_SA110_CNTL = ((*CSR_SA110_CNTL & ~0x0f000000) | (7 << 24));
+ nrparts = parse_mtd_partitions(mymtd, probes, &dc21285_parts, (void *)0);
+ if (nrparts > 0) {
+ add_mtd_partitions(mymtd, dc21285_parts, nrparts);
+ return 0;
+ }
+#endif
+ add_mtd_device(mymtd);
+ return 0;
}
-
- return 0;
+
+ iounmap((void *)dc21285_map.map_priv_1);
+ return -ENXIO;
}
static void __exit cleanup_dc21285(void)
{
#ifdef CONFIG_MTD_PARTITIONS
if (dc21285_parts) {
- del_mtd_partitions(dc21285_mtd);
+ del_mtd_partitions(mymtd);
kfree(dc21285_parts);
} else
#endif
- del_mtd_device(dc21285_mtd);
+ del_mtd_device(mymtd);
- map_destroy(dc21285_mtd);
+ map_destroy(mymtd);
iounmap((void *)dc21285_map.map_priv_1);
}
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*
- * $Id: dilnetpc.c,v 1.13 2004/07/12 21:59:44 dwmw2 Exp $
+ * $Id: dilnetpc.c,v 1.12 2003/05/21 12:45:18 dwmw2 Exp $
*
* The DIL/Net PC is a tiny embedded PC board made by SSV Embedded Systems
* featuring the AMD Elan SC410 processor. There are two variants of this
static struct map_info dnpc_map = {
.name = "ADNP Flash Bank",
.size = ADNP_WINDOW_SIZE,
- .bankwidth = 1,
+ .buswidth = 1,
.set_vpp = adnp_set_vpp,
.phys = WINDOW_ADDR
};
+++ /dev/null
-
-/*
- * drivers/mtd/maps/svme182.c
- *
- * Flash map driver for the Dy4 SVME182 board
- *
- * $Id: dmv182.c,v 1.3 2004/07/14 17:45:40 dwmw2 Exp $
- *
- * Copyright 2003-2004, TimeSys Corporation
- *
- * Based on the SVME181 flash map, by Tom Nelson, Dot4, Inc. for TimeSys Corp.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <asm/io.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-#include <linux/errno.h>
-
-/*
- * This driver currently handles only the 16MiB user flash bank 1 on the
- * board. It does not provide access to bank 0 (contains the Dy4 FFW), bank 2
- * (VxWorks boot), or the optional 48MiB expansion flash.
- *
- * scott.wood@timesys.com: On the newer boards with 128MiB flash, it
- * now supports the first 96MiB (the boot flash bank containing FFW
- * is excluded). The VxWorks loader is in partition 1.
- */
-
-#define FLASH_BASE_ADDR 0xf0000000
-#define FLASH_BANK_SIZE (128*1024*1024)
-
-MODULE_AUTHOR("Scott Wood, TimeSys Corporation <scott.wood@timesys.com>");
-MODULE_DESCRIPTION("User-programmable flash device on the Dy4 SVME182 board");
-MODULE_LICENSE("GPL");
-
-static struct map_info svme182_map = {
- .name = "Dy4 SVME182",
- .bankwidth = 32,
- .size = 128 * 1024 * 1024
-};
-
-#define BOOTIMAGE_PART_SIZE ((6*1024*1024)-RESERVED_PART_SIZE)
-
-// Allow 6MiB for the kernel
-#define NEW_BOOTIMAGE_PART_SIZE (6 * 1024 * 1024)
-// Allow 1MiB for the bootloader
-#define NEW_BOOTLOADER_PART_SIZE (1024 * 1024)
-// Use the remaining 9MiB at the end of flash for the RFS
-#define NEW_RFS_PART_SIZE (0x01000000 - NEW_BOOTLOADER_PART_SIZE - \
- NEW_BOOTIMAGE_PART_SIZE)
-
-static struct mtd_partition svme182_partitions[] = {
- // The Lower PABS is only 128KiB, but the partition code doesn't
- // like partitions that don't end on the largest erase block
- // size of the device, even if all of the erase blocks in the
- // partition are small ones. The hardware should prevent
- // writes to the actual PABS areas.
- {
- name: "Lower PABS and CPU 0 bootloader or kernel",
- size: 6*1024*1024,
- offset: 0,
- },
- {
- name: "Root Filesystem",
- size: 10*1024*1024,
- offset: MTDPART_OFS_NXTBLK
- },
- {
- name: "CPU1 Bootloader",
- size: 1024*1024,
- offset: MTDPART_OFS_NXTBLK,
- },
- {
- name: "Extra",
- size: 110*1024*1024,
- offset: MTDPART_OFS_NXTBLK
- },
- {
- name: "Foundation Firmware and Upper PABS",
- size: 1024*1024,
- offset: MTDPART_OFS_NXTBLK,
- mask_flags: MTD_WRITEABLE // read-only
- }
-};
-
-static struct mtd_info *this_mtd;
-
-static int __init init_svme182(void)
-{
- struct mtd_partition *partitions;
- int num_parts = sizeof(svme182_partitions) / sizeof(struct mtd_partition);
-
- partitions = svme182_partitions;
-
- svme182_map.virt =
- (unsigned long)ioremap(FLASH_BASE_ADDR, svme182_map.size);
-
- if (svme182_map.virt == 0) {
- printk("Failed to ioremap FLASH memory area.\n");
- return -EIO;
- }
-
- simple_map_init(&svme182_map);
-
- this_mtd = do_map_probe("cfi_probe", &svme182_map);
- if (!this_mtd)
- {
- iounmap((void *)svme182_map.virt);
- return -ENXIO;
- }
-
- printk(KERN_NOTICE "SVME182 flash device: %dMiB at 0x%08x\n",
- this_mtd->size >> 20, FLASH_BASE_ADDR);
-
- this_mtd->owner = THIS_MODULE;
- add_mtd_partitions(this_mtd, partitions, num_parts);
-
- return 0;
-}
-
-static void __exit cleanup_svme182(void)
-{
- if (this_mtd)
- {
- del_mtd_partitions(this_mtd);
- map_destroy(this_mtd);
- }
-
- if (svme182_map.virt)
- {
- iounmap((void *)svme182_map.virt);
- svme182_map.virt = 0;
- }
-
- return;
-}
-
-module_init(init_svme182);
-module_exit(cleanup_svme182);
/*
- * $Id: ebony.c,v 1.10 2004/07/12 21:59:44 dwmw2 Exp $
+ * $Id: ebony.c,v 1.8 2003/06/23 11:48:18 dwmw2 Exp $
*
* Mapping for Ebony user flash
*
#include <linux/mtd/partitions.h>
#include <linux/config.h>
#include <asm/io.h>
-#include <asm/ibm44x.h>
+#include <asm/ibm440.h>
#include <platforms/ebony.h>
static struct mtd_info *flash;
static struct map_info ebony_small_map = {
.name = "Ebony small flash",
.size = EBONY_SMALL_FLASH_SIZE,
- .bankwidth = 1,
+ .buswidth = 1,
};
static struct map_info ebony_large_map = {
.name = "Ebony large flash",
.size = EBONY_LARGE_FLASH_SIZE,
- .bankwidth = 1,
+ .buswidth = 1,
};
static struct mtd_partition ebony_small_partitions[] = {
return -ENOMEM;
fpga0_reg = readb(fpga0_adr);
- iounmap(fpga0_adr);
+ iounmap64(fpga0_adr);
if (EBONY_BOOT_SMALL_FLASH(fpga0_reg) &&
!EBONY_FLASH_SEL(fpga0_reg))
/*
- * $Id: edb7312.c,v 1.11 2004/07/14 09:52:55 dwmw2 Exp $
+ * $Id: edb7312.c,v 1.9 2003/06/23 11:48:18 dwmw2 Exp $
*
* Handle mapping of the NOR flash on Cogent EDB7312 boards
*
#define BUSWIDTH 2
#define FLASH_BLOCKSIZE_MAIN 0x20000
#define FLASH_NUMBLOCKS_MAIN 128
-/* can be "cfi_probe", "jedec_probe", "map_rom", NULL }; */
-#define PROBETYPES { "cfi_probe", NULL }
+/* can be "cfi_probe", "jedec_probe", "map_rom", 0 }; */
+#define PROBETYPES { "cfi_probe", 0 }
#define MSG_PREFIX "EDB7312-NOR:" /* prefix for our printk()'s */
#define MTDID "edb7312-nor" /* for mtdparts= partitioning */
struct map_info edb7312nor_map = {
.name = "NOR flash on EDB7312",
.size = WINDOW_SIZE,
- .bankwidth = BUSWIDTH,
+ .buswidth = BUSWIDTH,
.phys = WINDOW_ADDR,
};
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
- $Id: elan-104nc.c,v 1.21 2004/07/12 22:38:29 dwmw2 Exp $
+ $Id: elan-104nc.c,v 1.18 2003/06/23 07:37:02 dwmw2 Exp $
The ELAN-104NC has up to 8 Mibyte of Intel StrataFlash (28F320/28F640) in x16
mode. This drivers uses the CFI probe and Intel Extended Command Set drivers.
}
-static map_word elan_104nc_read16(struct map_info *map, unsigned long ofs)
+static __u8 elan_104nc_read8(struct map_info *map, unsigned long ofs)
{
- map_word ret;
+ __u8 ret;
spin_lock(&elan_104nc_spin);
elan_104nc_page(map, ofs);
- ret.x[0] = readw(iomapadr + (ofs & WINDOW_MASK));
+ ret = readb(iomapadr + (ofs & WINDOW_MASK));
+ spin_unlock(&elan_104nc_spin);
+ return ret;
+}
+
+static __u16 elan_104nc_read16(struct map_info *map, unsigned long ofs)
+{
+ __u16 ret;
+ spin_lock(&elan_104nc_spin);
+ elan_104nc_page(map, ofs);
+ ret = readw(iomapadr + (ofs & WINDOW_MASK));
+ spin_unlock(&elan_104nc_spin);
+ return ret;
+}
+
+static __u32 elan_104nc_read32(struct map_info *map, unsigned long ofs)
+{
+ __u32 ret;
+ spin_lock(&elan_104nc_spin);
+ elan_104nc_page(map, ofs);
+ ret = readl(iomapadr + (ofs & WINDOW_MASK));
spin_unlock(&elan_104nc_spin);
return ret;
}
static void elan_104nc_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
{
- while (len) {
+ while(len) {
unsigned long thislen = len;
if (len > (WINDOW_LENGTH - (from & WINDOW_MASK)))
thislen = WINDOW_LENGTH-(from & WINDOW_MASK);
}
}
-static void elan_104nc_write16(struct map_info *map, map_word d, unsigned long adr)
+static void elan_104nc_write8(struct map_info *map, __u8 d, unsigned long adr)
+{
+ spin_lock(&elan_104nc_spin);
+ elan_104nc_page(map, adr);
+ writeb(d, iomapadr + (adr & WINDOW_MASK));
+ spin_unlock(&elan_104nc_spin);
+}
+
+static void elan_104nc_write16(struct map_info *map, __u16 d, unsigned long adr)
+{
+ spin_lock(&elan_104nc_spin);
+ elan_104nc_page(map, adr);
+ writew(d, iomapadr + (adr & WINDOW_MASK));
+ spin_unlock(&elan_104nc_spin);
+}
+
+static void elan_104nc_write32(struct map_info *map, __u32 d, unsigned long adr)
{
spin_lock(&elan_104nc_spin);
elan_104nc_page(map, adr);
- writew(d.x[0], iomapadr + (adr & WINDOW_MASK));
+ writel(d, iomapadr + (adr & WINDOW_MASK));
spin_unlock(&elan_104nc_spin);
}
.size = 8*1024*1024, /* this must be set to a maximum possible amount
of flash so the cfi probe routines find all
the chips */
- .bankwidth = 2,
- .read = elan_104nc_read16,
+ .buswidth = 2,
+ .read8 = elan_104nc_read8,
+ .read16 = elan_104nc_read16,
+ .read32 = elan_104nc_read32,
.copy_from = elan_104nc_copy_from,
- .write = elan_104nc_write16,
+ .write8 = elan_104nc_write8,
+ .write16 = elan_104nc_write16,
+ .write32 = elan_104nc_write32,
.copy_to = elan_104nc_copy_to
};
* Copyright (C) 2001 Altera Corporation
* Copyright (C) 2001 Red Hat, Inc.
*
- * $Id: epxa10db-flash.c,v 1.11 2004/07/12 21:59:44 dwmw2 Exp $
+ * $Id: epxa10db-flash.c,v 1.10 2003/05/21 12:45:18 dwmw2 Exp $
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
static struct map_info epxa_map = {
.name = "EPXA flash",
.size = FLASH_SIZE,
- .bankwidth = 2,
+ .buswidth = 2,
.phys = FLASH_START,
};
/* fortunet.c memory map
*
- * $Id: fortunet.c,v 1.7 2004/07/12 21:59:44 dwmw2 Exp $
+ * $Id: fortunet.c,v 1.6 2003/05/21 12:45:18 dwmw2 Exp $
*/
#include <linux/module.h>
struct map_region
{
int window_addr_physical;
- int altbankwidth;
+ int altbuswidth;
struct map_info map_info;
struct mtd_info *mymtd;
struct mtd_partition parts[MAX_NUM_PARTITIONS];
struct map_info default_map = {
.size = DEF_WINDOW_SIZE,
- .bankwidth = 4,
+ .buswidth = 4,
};
static char * __init get_string_option(char *dest,int dest_size,char *sor)
if(params[0]<1)
{
printk(MTD_FORTUNET_PK "Bad parameters for MTD Region "
- " name,region-number[,base,size,bankwidth,altbankwidth]\n");
+ " name,region-number[,base,size,buswidth,altbuswidth]\n");
return 1;
}
if((params[1]<0)||(params[1]>=MAX_NUM_REGIONS))
&default_map,sizeof(map_regions[params[1]].map_info));
map_regions_set[params[1]] = 1;
map_regions[params[1]].window_addr_physical = DEF_WINDOW_ADDR_PHY;
- map_regions[params[1]].altbankwidth = 2;
+ map_regions[params[1]].altbuswidth = 2;
map_regions[params[1]].mymtd = NULL;
map_regions[params[1]].map_info.name = map_regions[params[1]].map_name;
strcpy(map_regions[params[1]].map_info.name,string);
}
if(params[0]>3)
{
- map_regions[params[1]].map_info.bankwidth = params[4];
+ map_regions[params[1]].map_info.buswidth = params[4];
}
if(params[0]>4)
{
- map_regions[params[1]].altbankwidth = params[5];
+ map_regions[params[1]].altbuswidth = params[5];
}
return 1;
}
sizeof(map_regions[ix].map_info));
map_regions_set[ix] = 1;
map_regions[ix].window_addr_physical = DEF_WINDOW_ADDR_PHY;
- map_regions[ix].altbankwidth = 2;
+ map_regions[ix].altbuswidth = 2;
map_regions[ix].mymtd = NULL;
map_regions[ix].map_info.name = map_regions[ix].map_name;
strcpy(map_regions[ix].map_info.name,"FORTUNET");
map_regions[ix].mymtd = do_map_probe("cfi_probe",
&map_regions[ix].map_info);
if((!map_regions[ix].mymtd)&&(
- map_regions[ix].altbankwidth!=map_regions[ix].map_info.bankwidth))
+ map_regions[ix].altbuswidth!=map_regions[ix].map_info.buswidth))
{
- printk(KERN_NOTICE MTD_FORTUNET_PK "Trying alternate bankwidth "
+ printk(KERN_NOTICE MTD_FORTUNET_PK "Trying alternate buswidth "
"for %s flash.\n",
map_regions[ix].map_info.name);
- map_regions[ix].map_info.bankwidth =
- map_regions[ix].altbankwidth;
+ map_regions[ix].map_info.buswidth =
+ map_regions[ix].altbuswidth;
map_regions[ix].mymtd = do_map_probe("cfi_probe",
&map_regions[ix].map_info);
}
* Flash memory access on Hynix GMS30C7201/HMS30C7202 based
* evaluation boards
*
- * $Id: h720x-flash.c,v 1.9 2004/07/14 17:45:40 dwmw2 Exp $
- *
* (C) 2002 Jungjun Kim <jungjun.kim@hynix.com>
* 2003 Thomas Gleixner <tglx@linutronix.de>
- */
+*/
#include <linux/config.h>
#include <linux/module.h>
static struct map_info h720x_map = {
.name = "H720X",
- .bankwidth = 4,
+ .buswidth = 4,
.size = FLASH_SIZE,
.phys = FLASH_PHYS,
};
simple_map_init(&h720x_map);
- // Probe for flash bankwidth 4
+ // Probe for flash buswidth 4
printk (KERN_INFO "H720x-MTD probing 32bit FLASH\n");
mymtd = do_map_probe("cfi_probe", &h720x_map);
if (!mymtd) {
printk (KERN_INFO "H720x-MTD probing 16bit FLASH\n");
- // Probe for bankwidth 2
- h720x_map.bankwidth = 2;
+ // Probe for buswidth 2
+ h720x_map.buswidth = 2;
mymtd = do_map_probe("cfi_probe", &h720x_map);
}
+++ /dev/null
-/*
- * ichxrom.c
- *
- * Normal mappings of chips in physical memory
- * $Id: ichxrom.c,v 1.8 2004/07/16 17:43:11 dwmw2 Exp $
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <asm/io.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/config.h>
-#include <linux/pci.h>
-#include <linux/pci_ids.h>
-#include <linux/mtd/cfi.h>
-
-#define xstr(s) str(s)
-#define str(s) #s
-#define MOD_NAME xstr(KBUILD_BASENAME)
-
-#define MTD_DEV_NAME_LENGTH 16
-
-#define RESERVE_MEM_REGION 0
-
-
-#define MANUFACTURER_INTEL 0x0089
-#define I82802AB 0x00ad
-#define I82802AC 0x00ac
-
-#define ICHX_FWH_REGION_START 0xFF000000UL
-#define ICHX_FWH_REGION_SIZE 0x01000000UL
-#define BIOS_CNTL 0x4e
-#define FWH_DEC_EN1 0xE3
-#define FWH_DEC_EN2 0xF0
-#define FWH_SEL1 0xE8
-#define FWH_SEL2 0xEE
-
-struct ichxrom_map_info {
- struct map_info map;
- struct mtd_info *mtd;
- unsigned long window_addr;
- struct pci_dev *pdev;
- struct resource window_rsrc;
- struct resource rom_rsrc;
- char mtd_name[MTD_DEV_NAME_LENGTH];
-};
-
-static inline unsigned long addr(struct map_info *map, unsigned long ofs)
-{
- unsigned long offset;
- offset = ((8*1024*1024) - map->size) + ofs;
- if (offset >= (4*1024*1024)) {
- offset += 0x400000;
- }
- return map->map_priv_1 + 0x400000 + offset;
-}
-
-static inline unsigned long dbg_addr(struct map_info *map, unsigned long addr)
-{
- return addr - map->map_priv_1 + ICHX_FWH_REGION_START;
-}
-
-static map_word ichxrom_read(struct map_info *map, unsigned long ofs)
-{
- map_word val;
- int i;
- switch(map->bankwidth) {
- case 1: val.x[0] = __raw_readb(addr(map, ofs)); break;
- case 2: val.x[0] = __raw_readw(addr(map, ofs)); break;
- case 4: val.x[0] = __raw_readl(addr(map, ofs)); break;
-#if BITS_PER_LONG >= 64
- case 8: val.x[0] = __raw_readq(addr(map, ofs)); break;
-#endif
- default: val.x[0] = 0; break;
- }
- for(i = 1; i < map_words(map); i++) {
- val.x[i] = 0;
- }
- return val;
-}
-
-static void ichxrom_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
-{
- memcpy_fromio(to, addr(map, from), len);
-}
-
-static void ichxrom_write(struct map_info *map, map_word d, unsigned long ofs)
-{
- switch(map->bankwidth) {
- case 1: __raw_writeb(d.x[0], addr(map,ofs)); break;
- case 2: __raw_writew(d.x[0], addr(map,ofs)); break;
- case 4: __raw_writel(d.x[0], addr(map,ofs)); break;
-#if BITS_PER_LONG >= 64
- case 8: __raw_writeq(d.x[0], addr(map,ofs)); break;
-#endif
- }
- mb();
-}
-
-static void ichxrom_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
-{
- memcpy_toio(addr(map, to), from, len);
-}
-
-static struct ichxrom_map_info ichxrom_map = {
- .map = {
- .name = MOD_NAME,
- .phys = NO_XIP,
- .size = 0,
- .bankwidth = 1,
- .read = ichxrom_read,
- .copy_from = ichxrom_copy_from,
- .write = ichxrom_write,
- .copy_to = ichxrom_copy_to,
- /* Firmware hubs only use vpp when being programmed
- * in a factory setting. So in-place programming
- * needs to use a different method.
- */
- },
- /* remaining fields of structure are initialized to 0 */
-};
-
-enum fwh_lock_state {
- FWH_DENY_WRITE = 1,
- FWH_IMMUTABLE = 2,
- FWH_DENY_READ = 4,
-};
-
-static void ichxrom_cleanup(struct ichxrom_map_info *info)
-{
- u16 word;
-
- /* Disable writes through the rom window */
- pci_read_config_word(info->pdev, BIOS_CNTL, &word);
- pci_write_config_word(info->pdev, BIOS_CNTL, word & ~1);
-
- if (info->mtd) {
- del_mtd_device(info->mtd);
- map_destroy(info->mtd);
- info->mtd = NULL;
- info->map.virt = 0;
- }
- if (info->rom_rsrc.parent)
- release_resource(&info->rom_rsrc);
- if (info->window_rsrc.parent)
- release_resource(&info->window_rsrc);
-
- if (info->window_addr) {
- iounmap((void *)(info->window_addr));
- info->window_addr = 0;
- }
-}
-
-
-static int ichxrom_set_lock_state(struct mtd_info *mtd, loff_t ofs, size_t len,
- enum fwh_lock_state state)
-{
- struct map_info *map = mtd->priv;
- unsigned long start = ofs;
- unsigned long end = start + len -1;
-
- /* FIXME do I need to guard against concurrency here? */
- /* round down to 64K boundaries */
- start = start & ~0xFFFF;
- end = end & ~0xFFFF;
- while (start <= end) {
- unsigned long ctrl_addr;
- ctrl_addr = addr(map, start) - 0x400000 + 2;
- writeb(state, ctrl_addr);
- start = start + 0x10000;
- }
- return 0;
-}
-
-static int ichxrom_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
-{
- return ichxrom_set_lock_state(mtd, ofs, len, FWH_DENY_WRITE);
-}
-
-static int ichxrom_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
-{
- return ichxrom_set_lock_state(mtd, ofs, len, 0);
-}
-
-static int __devinit ichxrom_init_one (struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- u16 word;
- struct ichxrom_map_info *info = &ichxrom_map;
- unsigned long map_size;
- static char *probes[] = { "cfi_probe", "jedec_probe" };
- struct cfi_private *cfi;
-
- /* For now I just handle the ichx and I assume there
- * are not a lot of resources up at the top of the address
- * space. It is possible to handle other devices in the
- * top 16MB but it is very painful. Also since
- * you can only really attach a FWH to an ICHX there
- * a number of simplifications you can make.
- *
- * Also you can page firmware hubs if an 8MB window isn't enough
- * but don't currently handle that case either.
- */
-
- info->pdev = pdev;
-
- /*
- * Try to reserve the window mem region. If this fails then
- * it is likely due to the window being "reseved" by the BIOS.
- */
- info->window_rsrc.name = MOD_NAME;
- info->window_rsrc.start = ICHX_FWH_REGION_START;
- info->window_rsrc.end = ICHX_FWH_REGION_START + ICHX_FWH_REGION_SIZE - 1;
- info->window_rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
- if (request_resource(&iomem_resource, &info->window_rsrc)) {
- info->window_rsrc.parent = NULL;
- printk(KERN_ERR MOD_NAME
- " %s(): Unable to register resource"
- " 0x%.08lx-0x%.08lx - kernel bug?\n",
- __func__,
- info->window_rsrc.start, info->window_rsrc.end);
- }
-
- /* Enable writes through the rom window */
- pci_read_config_word(pdev, BIOS_CNTL, &word);
- if (!(word & 1) && (word & (1<<1))) {
- /* The BIOS will generate an error if I enable
- * this device, so don't even try.
- */
- printk(KERN_ERR MOD_NAME ": firmware access control, I can't enable writes\n");
- goto failed;
- }
- pci_write_config_word(pdev, BIOS_CNTL, word | 1);
-
-
- /* Map the firmware hub into my address space. */
- /* Does this use too much virtual address space? */
- info->window_addr = (unsigned long)ioremap(
- ICHX_FWH_REGION_START, ICHX_FWH_REGION_SIZE);
- if (!info->window_addr) {
- printk(KERN_ERR "Failed to ioremap\n");
- goto failed;
- }
-
- /* For now assume the firmware has setup all relevant firmware
- * windows. We don't have enough information to handle this case
- * intelligently.
- */
-
- /* FIXME select the firmware hub and enable a window to it. */
-
- info->mtd = NULL;
- info->map.map_priv_1 = info->window_addr;
-
- /* Loop through the possible bankwidths */
- for(ichxrom_map.map.bankwidth = 4; ichxrom_map.map.bankwidth; ichxrom_map.map.bankwidth >>= 1) {
- map_size = ICHX_FWH_REGION_SIZE;
- while(!info->mtd && (map_size > 0)) {
- int i;
- info->map.size = map_size;
- for(i = 0; i < sizeof(probes)/sizeof(char *); i++) {
- info->mtd = do_map_probe(probes[i], &ichxrom_map.map);
- if (info->mtd)
- break;
- }
- map_size -= 512*1024;
- }
- if (info->mtd)
- break;
- }
- if (!info->mtd) {
- goto failed;
- }
- cfi = ichxrom_map.map.fldrv_priv;
- if ((cfi->mfr == MANUFACTURER_INTEL) && (
- (cfi->id == I82802AB) ||
- (cfi->id == I82802AC)))
- {
- /* If it is a firmware hub put in the special lock
- * and unlock routines.
- */
- info->mtd->lock = ichxrom_lock;
- info->mtd->unlock = ichxrom_unlock;
- }
- if (info->mtd->size > info->map.size) {
- printk(KERN_WARNING MOD_NAME " rom(%u) larger than window(%lu). fixing...\n",
- info->mtd->size, info->map.size);
- info->mtd->size = info->map.size;
- }
-
- info->mtd->owner = THIS_MODULE;
- add_mtd_device(info->mtd);
-
- if (info->window_rsrc.parent) {
- /*
- * Registering the MTD device in iomem may not be possible
- * if there is a BIOS "reserved" and BUSY range. If this
- * fails then continue anyway.
- */
- snprintf(info->mtd_name, MTD_DEV_NAME_LENGTH,
- "mtd%d", info->mtd->index);
-
- info->rom_rsrc.name = info->mtd_name;
- info->rom_rsrc.start = ICHX_FWH_REGION_START
- + ICHX_FWH_REGION_SIZE - map_size;
- info->rom_rsrc.end = ICHX_FWH_REGION_START
- + ICHX_FWH_REGION_SIZE;
- info->rom_rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
- if (request_resource(&info->window_rsrc, &info->rom_rsrc)) {
- printk(KERN_ERR MOD_NAME
- ": cannot reserve MTD resource\n");
- info->rom_rsrc.parent = NULL;
- }
- }
-
- return 0;
-
- failed:
- ichxrom_cleanup(info);
- return -ENODEV;
-}
-
-
-static void __devexit ichxrom_remove_one (struct pci_dev *pdev)
-{
- struct ichxrom_map_info *info = &ichxrom_map;
- u16 word;
-
- del_mtd_device(info->mtd);
- map_destroy(info->mtd);
- info->mtd = NULL;
- info->map.map_priv_1 = 0;
-
- iounmap((void *)(info->window_addr));
- info->window_addr = 0;
-
- /* Disable writes through the rom window */
- pci_read_config_word(pdev, BIOS_CNTL, &word);
- pci_write_config_word(pdev, BIOS_CNTL, word & ~1);
-
-#if RESERVE_MEM_REGION
- release_mem_region(ICHX_FWH_REGION_START, ICHX_FWH_REGION_SIZE);
-#endif
-}
-
-static struct pci_device_id ichxrom_pci_tbl[] __devinitdata = {
- { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0,
- PCI_ANY_ID, PCI_ANY_ID, },
- { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0,
- PCI_ANY_ID, PCI_ANY_ID, },
- { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0,
- PCI_ANY_ID, PCI_ANY_ID, },
- { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0,
- PCI_ANY_ID, PCI_ANY_ID, },
- { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1,
- PCI_ANY_ID, PCI_ANY_ID, },
- { 0, },
-};
-
-MODULE_DEVICE_TABLE(pci, ichxrom_pci_tbl);
-
-#if 0
-static struct pci_driver ichxrom_driver = {
- .name = MOD_NAME,
- .id_table = ichxrom_pci_tbl,
- .probe = ichxrom_init_one,
- .remove = ichxrom_remove_one,
-};
-#endif
-
-static struct pci_dev *mydev;
-int __init init_ichxrom(void)
-{
- struct pci_dev *pdev;
- struct pci_device_id *id;
-
- pdev = NULL;
- for (id = ichxrom_pci_tbl; id->vendor; id++) {
- pdev = pci_find_device(id->vendor, id->device, NULL);
- if (pdev) {
- break;
- }
- }
- if (pdev) {
- mydev = pdev;
- return ichxrom_init_one(pdev, &ichxrom_pci_tbl[0]);
- }
- return -ENXIO;
-#if 0
- return pci_module_init(&ichxrom_driver);
-#endif
-}
-
-static void __exit cleanup_ichxrom(void)
-{
- ichxrom_remove_one(mydev);
-}
-
-module_init(init_ichxrom);
-module_exit(cleanup_ichxrom);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Eric Biederman <ebiederman@lnxi.com>");
-MODULE_DESCRIPTION("MTD map driver for BIOS chips on the ICHX southbridge");
/*
- * $Id: impa7.c,v 1.11 2004/07/14 09:52:55 dwmw2 Exp $
+ * $Id: impa7.c,v 1.9 2003/06/23 11:47:43 dwmw2 Exp $
*
* Handle mapping of the NOR flash on implementa A7 boards
*
#define NUM_FLASHBANKS 2
#define BUSWIDTH 4
-/* can be { "cfi_probe", "jedec_probe", "map_rom", NULL } */
-#define PROBETYPES { "jedec_probe", NULL }
+/* can be { "cfi_probe", "jedec_probe", "map_rom", 0 }; */
+#define PROBETYPES { "jedec_probe", 0 }
#define MSG_PREFIX "impA7:" /* prefix for our printk()'s */
#define MTDID "impa7-%d" /* for mtdparts= partitioning */
-static struct mtd_info *impa7_mtd[NUM_FLASHBANKS];
+static struct mtd_info *impa7_mtd[NUM_FLASHBANKS] = { 0 };
static struct map_info impa7_map[NUM_FLASHBANKS] = {
{
.name = "impA7 NOR Flash Bank #0",
.size = WINDOW_SIZE0,
- .bankwidth = BUSWIDTH,
+ .buswidth = BUSWIDTH,
},
{
.name = "impA7 NOR Flash Bank #1",
.size = WINDOW_SIZE1,
- .bankwidth = BUSWIDTH,
+ .buswidth = BUSWIDTH,
},
};
+++ /dev/null
-/*======================================================================
-
- drivers/mtd/maps/armflash.c: ARM Flash Layout/Partitioning
-
- Copyright (C) 2000 ARM Limited
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
- This is access code for flashes using ARM's flash partitioning
- standards.
-
- $Id: integrator-flash-v24.c,v 1.13 2004/07/12 21:59:44 dwmw2 Exp $
-
-======================================================================*/
-
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/ioport.h>
-#include <linux/init.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-
-#include <asm/hardware.h>
-#include <asm/io.h>
-#include <asm/system.h>
-
-// board specific stuff - sorry, it should be in arch/arm/mach-*.
-#ifdef CONFIG_ARCH_INTEGRATOR
-
-#define FLASH_BASE INTEGRATOR_FLASH_BASE
-#define FLASH_SIZE INTEGRATOR_FLASH_SIZE
-
-#define FLASH_PART_SIZE 0x400000
-
-#define SC_CTRLC (IO_ADDRESS(INTEGRATOR_SC_BASE) + INTEGRATOR_SC_CTRLC_OFFSET)
-#define SC_CTRLS (IO_ADDRESS(INTEGRATOR_SC_BASE) + INTEGRATOR_SC_CTRLS_OFFSET)
-#define EBI_CSR1 (IO_ADDRESS(INTEGRATOR_EBI_BASE) + INTEGRATOR_EBI_CSR1_OFFSET)
-#define EBI_LOCK (IO_ADDRESS(INTEGRATOR_EBI_BASE) + INTEGRATOR_EBI_LOCK_OFFSET)
-
-/*
- * Initialise the flash access systems:
- * - Disable VPP
- * - Assert WP
- * - Set write enable bit in EBI reg
- */
-static void armflash_flash_init(void)
-{
- unsigned int tmp;
-
- __raw_writel(INTEGRATOR_SC_CTRL_nFLVPPEN | INTEGRATOR_SC_CTRL_nFLWP, SC_CTRLC);
-
- tmp = __raw_readl(EBI_CSR1) | INTEGRATOR_EBI_WRITE_ENABLE;
- __raw_writel(tmp, EBI_CSR1);
-
- if (!(__raw_readl(EBI_CSR1) & INTEGRATOR_EBI_WRITE_ENABLE)) {
- __raw_writel(0xa05f, EBI_LOCK);
- __raw_writel(tmp, EBI_CSR1);
- __raw_writel(0, EBI_LOCK);
- }
-}
-
-/*
- * Shutdown the flash access systems:
- * - Disable VPP
- * - Assert WP
- * - Clear write enable bit in EBI reg
- */
-static void armflash_flash_exit(void)
-{
- unsigned int tmp;
-
- __raw_writel(INTEGRATOR_SC_CTRL_nFLVPPEN | INTEGRATOR_SC_CTRL_nFLWP, SC_CTRLC);
-
- /*
- * Clear the write enable bit in system controller EBI register.
- */
- tmp = __raw_readl(EBI_CSR1) & ~INTEGRATOR_EBI_WRITE_ENABLE;
- __raw_writel(tmp, EBI_CSR1);
-
- if (__raw_readl(EBI_CSR1) & INTEGRATOR_EBI_WRITE_ENABLE) {
- __raw_writel(0xa05f, EBI_LOCK);
- __raw_writel(tmp, EBI_CSR1);
- __raw_writel(0, EBI_LOCK);
- }
-}
-
-static void armflash_flash_wp(int on)
-{
- unsigned int reg;
-
- if (on)
- reg = SC_CTRLC;
- else
- reg = SC_CTRLS;
-
- __raw_writel(INTEGRATOR_SC_CTRL_nFLWP, reg);
-}
-
-static void armflash_set_vpp(struct map_info *map, int on)
-{
- unsigned int reg;
-
- if (on)
- reg = SC_CTRLS;
- else
- reg = SC_CTRLC;
-
- __raw_writel(INTEGRATOR_SC_CTRL_nFLVPPEN, reg);
-}
-#endif
-
-#ifdef CONFIG_ARCH_P720T
-
-#define FLASH_BASE (0x04000000)
-#define FLASH_SIZE (64*1024*1024)
-
-#define FLASH_PART_SIZE (4*1024*1024)
-#define FLASH_BLOCK_SIZE (128*1024)
-
-static void armflash_flash_init(void)
-{
-}
-
-static void armflash_flash_exit(void)
-{
-}
-
-static void armflash_flash_wp(int on)
-{
-}
-
-static void armflash_set_vpp(struct map_info *map, int on)
-{
-}
-#endif
-
-
-static struct map_info armflash_map =
-{
- .name = "AFS",
- .set_vpp = armflash_set_vpp,
- .phys = FLASH_BASE,
-};
-
-static struct mtd_info *mtd;
-static struct mtd_partition *parts;
-static const char *probes[] = { "RedBoot", "afs", NULL };
-
-static int __init armflash_cfi_init(void *base, u_int size)
-{
- int ret;
-
- armflash_flash_init();
- armflash_flash_wp(1);
-
- /*
- * look for CFI based flash parts fitted to this board
- */
- armflash_map.size = size;
- armflash_map.bankwidth = 4;
- armflash_map.virt = (unsigned long) base;
-
- simple_map_init(&armflash_map);
-
- /*
- * Also, the CFI layer automatically works out what size
- * of chips we have, and does the necessary identification
- * for us automatically.
- */
- mtd = do_map_probe("cfi_probe", &armflash_map);
- if (!mtd)
- return -ENXIO;
-
- mtd->owner = THIS_MODULE;
-
- ret = parse_mtd_partitions(mtd, probes, &parts, (void *)0);
- if (ret > 0) {
- ret = add_mtd_partitions(mtd, parts, ret);
- if (ret)
- printk(KERN_ERR "mtd partition registration "
- "failed: %d\n", ret);
- }
-
- /*
- * If we got an error, free all resources.
- */
- if (ret < 0) {
- del_mtd_partitions(mtd);
- map_destroy(mtd);
- }
-
- return ret;
-}
-
-static void armflash_cfi_exit(void)
-{
- if (mtd) {
- del_mtd_partitions(mtd);
- map_destroy(mtd);
- }
- if (parts)
- kfree(parts);
-}
-
-static int __init armflash_init(void)
-{
- int err = -EBUSY;
- void *base;
-
- if (request_mem_region(FLASH_BASE, FLASH_SIZE, "flash") == NULL)
- goto out;
-
- base = ioremap(FLASH_BASE, FLASH_SIZE);
- err = -ENOMEM;
- if (base == NULL)
- goto release;
-
- err = armflash_cfi_init(base, FLASH_SIZE);
- if (err) {
- iounmap(base);
-release:
- release_mem_region(FLASH_BASE, FLASH_SIZE);
- }
-out:
- return err;
-}
-
-static void __exit armflash_exit(void)
-{
- armflash_cfi_exit();
- iounmap((void *)armflash_map.virt);
- release_mem_region(FLASH_BASE, FLASH_SIZE);
- armflash_flash_exit();
-}
-
-module_init(armflash_init);
-module_exit(armflash_exit);
-
-MODULE_AUTHOR("ARM Ltd");
-MODULE_DESCRIPTION("ARM Integrator CFI map driver");
-MODULE_LICENSE("GPL");
This is access code for flashes using ARM's flash partitioning
standards.
- $Id: integrator-flash.c,v 1.16 2004/07/12 21:59:44 dwmw2 Exp $
+ $Id: integrator-flash.c,v 1.15 2004/02/27 22:37:39 rmk Exp $
======================================================================*/
* look for CFI based flash parts fitted to this board
*/
info->map.size = size;
- info->map.bankwidth = plat->width;
+ info->map.buswidth = plat->width;
info->map.phys = res->start;
info->map.virt = (unsigned long) base;
info->map.name = dev->dev.bus_id;
/*
- * $Id: iq80310.c,v 1.18 2004/07/12 21:59:44 dwmw2 Exp $
+ * $Id: iq80310.c,v 1.17 2003/06/23 11:48:18 dwmw2 Exp $
*
* Mapping for the Intel XScale IQ80310 evaluation board
*
static struct map_info iq80310_map = {
.name = "IQ80310 flash",
.size = WINDOW_SIZE,
- .bankwidth = BUSWIDTH,
+ .buswidth = BUSWIDTH,
.phys = WINDOW_ADDR
};
/*
- * $Id: ixp4xx.c,v 1.3 2004/07/12 22:38:29 dwmw2 Exp $
+ * $Id: ixp4xx.c,v 1.1 2004/05/13 22:21:26 dsaxena Exp $
*
* drivers/mtd/maps/ixp4xx.c
*
#define BYTE1(h) ((h) & 0xFF)
#endif
-static map_word ixp4xx_read16(struct map_info *map, unsigned long ofs)
+static __u16
+ixp4xx_read16(struct map_info *map, unsigned long ofs)
{
- map_word val;
- val.x[0] = *(__u16 *) (map->map_priv_1 + ofs);
- return val;
+ return *(__u16 *) (map->map_priv_1 + ofs);
}
/*
* when attached to a 16-bit wide device (such as the 28F128J3A),
* so we can't just memcpy_fromio().
*/
-static void ixp4xx_copy_from(struct map_info *map, void *to,
- unsigned long from, ssize_t len)
+static void
+ixp4xx_copy_from(struct map_info *map, void *to,
+ unsigned long from, ssize_t len)
{
int i;
u8 *dest = (u8 *) to;
dest[len - 1] = BYTE0(src[i]);
}
-static void ixp4xx_write16(struct map_info *map, map_word d, unsigned long adr)
+static void
+ixp4xx_write16(struct map_info *map, __u16 d, unsigned long adr)
{
- *(__u16 *) (map->map_priv_1 + adr) = d.x[0];
+ *(__u16 *) (map->map_priv_1 + adr) = d;
}
struct ixp4xx_flash_info {
static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
-static int ixp4xx_flash_remove(struct device *_dev)
+static int
+ixp4xx_flash_remove(struct device *_dev)
{
struct platform_device *dev = to_platform_device(_dev);
struct flash_platform_data *plat = dev->dev.platform_data;
* any board use 8-bit access, we'll fixup the driver to
* handle that.
*/
- info->map.bankwidth = 2;
+ info->map.buswidth = 2;
info->map.name = dev->dev.bus_id;
- info->map.read = ixp4xx_read16,
- info->map.write = ixp4xx_write16,
+ info->map.read16 = ixp4xx_read16,
+ info->map.write16 = ixp4xx_write16,
info->map.copy_from = ixp4xx_copy_from,
info->res = request_mem_region(dev->resource->start,
/*
- * $Id: l440gx.c,v 1.13 2004/07/12 21:59:44 dwmw2 Exp $
+ * $Id: l440gx.c,v 1.12 2003/05/21 12:45:19 dwmw2 Exp $
*
* BIOS Flash chip on Intel 440GX board.
*
struct map_info l440gx_map = {
.name = "L440GX BIOS",
.size = WINDOW_SIZE,
- .bankwidth = BUSWIDTH,
+ .buswidth = BUSWIDTH,
.phys = WINDOW_ADDR,
#if 0
/* FIXME verify that this is the
/*
- * Flash device on Lasat 100 and 200 boards
+ * Flash device on lasat 100 and 200 boards
*
- * (C) 2002 Brian Murphy <brian@murphy.dk>
+ * Presumably (C) 2002 Brian Murphy <brian@murphy.dk> or whoever he
+ * works for.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
- * $Id: lasat.c,v 1.7 2004/07/12 21:59:44 dwmw2 Exp $
+ * $Id: lasat.c,v 1.5 2003/05/21 12:45:19 dwmw2 Exp $
*
*/
#include <linux/mtd/partitions.h>
#include <linux/config.h>
#include <asm/lasat/lasat.h>
+#include <asm/lasat/lasat_mtd.h>
-static struct mtd_info *lasat_mtd;
+static struct mtd_info *mymtd;
+
+static struct map_info sp_map = {
+ .name = "SP flash",
+ .buswidth = 4,
+};
static struct mtd_partition partition_info[LASAT_MTD_LAST];
static char *lasat_mtd_partnames[] = {"Bootloader", "Service", "Normal", "Filesystem", "Config"};
-static void lasat_set_vpp(struct map_info *map, int vpp)
-{
- if (vpp)
- *lasat_misc->flash_wp_reg |= 1 << lasat_misc->flash_wp_bit;
- else
- *lasat_misc->flash_wp_reg &= ~(1 << lasat_misc->flash_wp_bit);
-}
-
-static struct map_info lasat_map = {
- .name = "LASAT flash",
- .bankwidth = 4,
- .set_vpp = lasat_set_vpp
-};
-
-static int __init init_lasat(void)
+static int __init init_sp(void)
{
int i;
- /* since we use AMD chips and set_vpp is not implimented
- * for these (yet) we still have to permanently enable flash write */
- printk(KERN_NOTICE "Unprotecting flash\n");
- ENABLE_VPP((&lasat_map));
+ /* this does not play well with the old flash code which
+ * protects and uprotects the flash when necessary */
+ /* FIXME: Implement set_vpp() */
+ printk(KERN_NOTICE "Unprotecting flash\n");
+ *lasat_misc->flash_wp_reg |= 1 << lasat_misc->flash_wp_bit;
+
+ sp_map.virt = lasat_flash_partition_start(LASAT_MTD_BOOTLOADER);
+ sp_map.phys = virt_to_phys(sp_map.virt);
+ sp_map.size = lasat_board_info.li_flash_size;
- lasat_map.phys = lasat_flash_partition_start(LASAT_MTD_BOOTLOADER);
- lasat_map.virt = (unsigned long)ioremap_nocache(
- lasat_map.phys, lasat_board_info.li_flash_size);
- lasat_map.size = lasat_board_info.li_flash_size;
+ simple_map_init(&sp_map);
- simple_map_init(&lasat_map);
+ printk(KERN_NOTICE "sp flash device: %lx at %lx\n",
+ sp_map.size, sp_map.phys);
for (i=0; i < LASAT_MTD_LAST; i++)
partition_info[i].name = lasat_mtd_partnames[i];
- lasat_mtd = do_map_probe("cfi_probe", &lasat_map);
-
- if (!lasat_mtd)
- lasat_mtd = do_map_probe("jedec_probe", &lasat_map);
-
- if (lasat_mtd) {
+ mymtd = do_map_probe("cfi_probe", &sp_map);
+ if (mymtd) {
u32 size, offset = 0;
- lasat_mtd->owner = THIS_MODULE;
+ mymtd->owner = THIS_MODULE;
for (i=0; i < LASAT_MTD_LAST; i++) {
size = lasat_flash_partition_size(i);
offset += size;
}
- add_mtd_partitions( lasat_mtd, partition_info, LASAT_MTD_LAST );
+ add_mtd_partitions( mymtd, partition_info, LASAT_MTD_LAST );
return 0;
}
return -ENXIO;
}
-static void __exit cleanup_lasat(void)
+static void __exit cleanup_sp(void)
{
- if (lasat_mtd) {
- del_mtd_partitions(lasat_mtd);
- map_destroy(lasat_mtd);
+ if (mymtd) {
+ del_mtd_partitions(mymtd);
+ map_destroy(mymtd);
}
- if (lasat_map.virt) {
- lasat_map.virt = 0;
+ if (sp_map.virt) {
+ sp_map.virt = 0;
}
}
-module_init(init_lasat);
-module_exit(cleanup_lasat);
+module_init(init_sp);
+module_exit(cleanup_sp);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Brian Murphy <brian@murphy.dk>");
/*
- * $Id: lubbock-flash.c,v 1.15 2004/07/12 21:59:44 dwmw2 Exp $
+ * $Id: lubbock-flash.c,v 1.9 2003/06/23 11:48:18 dwmw2 Exp $
*
* Map driver for the Lubbock developer platform.
*
#define WINDOW_SIZE 64*1024*1024
-static void lubbock_map_inval_cache(struct map_info *map, unsigned long from, ssize_t len)
-{
- consistent_sync((char *)map->cached + from, len, DMA_FROM_DEVICE);
-}
-
static struct map_info lubbock_maps[2] = { {
.size = WINDOW_SIZE,
.phys = 0x00000000,
- .inval_cache = lubbock_map_inval_cache,
}, {
.size = WINDOW_SIZE,
.phys = 0x04000000,
- .inval_cache = lubbock_map_inval_cache,
} };
static struct mtd_partition lubbock_partitions[] = {
int flashboot = (LUB_CONF_SWITCHES & 1);
int ret = 0, i;
- lubbock_maps[0].bankwidth = lubbock_maps[1].bankwidth =
+ lubbock_maps[0].buswidth = lubbock_maps[1].buswidth =
(BOOT_DEF & 1) ? 2 : 4;
/* Compensate for the nROMBT switch which swaps the flash banks */
ret = -ENOMEM;
continue;
}
- lubbock_maps[i].cached = __ioremap(lubbock_maps[i].phys,
- WINDOW_SIZE,
- L_PTE_CACHEABLE, 1);
- if (!lubbock_maps[i].cached)
- printk(KERN_WARNING "Failed to ioremap cached %s\n", lubbock_maps[i].name);
simple_map_init(&lubbock_maps[i]);
- printk(KERN_NOTICE "Probing %s at physical address 0x%08lx (%d-bit bankwidth)\n",
+ printk(KERN_NOTICE "Probing %s at physical address 0x%08lx (%d-bit buswidth)\n",
lubbock_maps[i].name, lubbock_maps[i].phys,
- lubbock_maps[i].bankwidth * 8);
+ lubbock_maps[i].buswidth * 8);
mymtds[i] = do_map_probe("cfi_probe", &lubbock_maps[i]);
if (!mymtds[i]) {
iounmap((void *)lubbock_maps[i].virt);
- if (lubbock_maps[i].cached)
- iounmap(lubbock_maps[i].cached);
if (!ret)
ret = -EIO;
continue;
map_destroy(mymtds[i]);
iounmap((void *)lubbock_maps[i].virt);
- if (lubbock_maps[i].cached)
- iounmap(lubbock_maps[i].cached);
if (parsed_parts[i])
kfree(parsed_parts[i]);
/*
- * $Id: map_funcs.c,v 1.9 2004/07/13 22:33:15 dwmw2 Exp $
+ * $Id: map_funcs.c,v 1.2 2003/05/21 15:15:07 dwmw2 Exp $
*
* Out-of-line map I/O functions for simple maps when CONFIG_COMPLEX_MAPPINGS
* is enabled.
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <asm/io.h>
#include <linux/mtd/map.h>
+#include <linux/mtd/cfi.h>
-static map_word simple_map_read(struct map_info *map, unsigned long ofs)
+static u8 simple_map_read8(struct map_info *map, unsigned long ofs)
{
- return inline_map_read(map, ofs);
+ return __raw_readb(map->virt + ofs);
}
-static void simple_map_write(struct map_info *map, const map_word datum, unsigned long ofs)
+static u16 simple_map_read16(struct map_info *map, unsigned long ofs)
{
- inline_map_write(map, datum, ofs);
+ return __raw_readw(map->virt + ofs);
+}
+
+static u32 simple_map_read32(struct map_info *map, unsigned long ofs)
+{
+ return __raw_readl(map->virt + ofs);
+}
+
+static u64 simple_map_read64(struct map_info *map, unsigned long ofs)
+{
+#ifndef CONFIG_MTD_CFI_B8 /* 64-bit mappings */
+ BUG();
+ return 0;
+#else
+ return __raw_readll(map->virt + ofs);
+#endif
+}
+
+static void simple_map_write8(struct map_info *map, u8 datum, unsigned long ofs)
+{
+ __raw_writeb(datum, map->virt + ofs);
+ mb();
+}
+
+static void simple_map_write16(struct map_info *map, u16 datum, unsigned long ofs)
+{
+ __raw_writew(datum, map->virt + ofs);
+ mb();
+}
+
+static void simple_map_write32(struct map_info *map, u32 datum, unsigned long ofs)
+{
+ __raw_writel(datum, map->virt + ofs);
+ mb();
+}
+
+static void simple_map_write64(struct map_info *map, u64 datum, unsigned long ofs)
+{
+#ifndef CONFIG_MTD_CFI_B8 /* 64-bit mappings */
+ BUG();
+#else
+ __raw_writell(datum, map->virt + ofs);
+ mb();
+#endif /* CFI_B8 */
}
static void simple_map_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
{
- inline_map_copy_from(map, to, from, len);
+ memcpy_fromio(to, map->virt + from, len);
}
static void simple_map_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
{
- inline_map_copy_to(map, to, from, len);
+ memcpy_toio(map->virt + to, from, len);
}
void simple_map_init(struct map_info *map)
{
- BUG_ON(!map_bankwidth_supported(map->bankwidth));
-
- map->read = simple_map_read;
- map->write = simple_map_write;
+ map->read8 = simple_map_read8;
+ map->read16 = simple_map_read16;
+ map->read32 = simple_map_read32;
+ map->read64 = simple_map_read64;
+ map->write8 = simple_map_write8;
+ map->write16 = simple_map_write16;
+ map->write32 = simple_map_write32;
+ map->write64 = simple_map_write64;
map->copy_from = simple_map_copy_from;
map->copy_to = simple_map_copy_to;
}
/*
- * $Id: mbx860.c,v 1.6 2004/07/12 21:59:44 dwmw2 Exp $
+ * $Id: mbx860.c,v 1.5 2003/05/21 12:45:19 dwmw2 Exp $
*
* Handle mapping of the flash on MBX860 boards
*
.name = "MBX flash",
.size = WINDOW_SIZE,
.phys = WINDOW_ADDR,
- .bankwidth = 4,
+ .buswidth = 4,
};
int __init init_mbx(void)
+++ /dev/null
-/*
- * Flash on MPC-1211
- *
- * $Id: mpc1211.c,v 1.3 2004/07/14 17:45:40 dwmw2 Exp $
- *
- * (C) 2002 Interface, Saito.K & Jeanne
- *
- * GPL'd
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <asm/io.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-#include <linux/config.h>
-
-static struct mtd_info *flash_mtd;
-static struct mtd_partition *parsed_parts;
-
-struct map_info mpc1211_flash_map = {
- .name = "MPC-1211 FLASH",
- .size = 0x80000,
- .bankwidth = 1,
-};
-
-static struct mtd_partition mpc1211_partitions[] = {
- {
- .name = "IPL & ETH-BOOT",
- .offset = 0x00000000,
- .size = 0x10000,
- },
- {
- .name = "Flash FS",
- .offset = 0x00010000,
- .size = MTDPART_SIZ_FULL,
- }
-};
-
-static int __init init_mpc1211_maps(void)
-{
- int nr_parts;
-
- mpc1211_flash_map.phys = 0;
- mpc1211_flash_map.virt = P2SEGADDR(0);
-
- simple_map_init(&mpc1211_flash_map);
-
- printk(KERN_NOTICE "Probing for flash chips at 0x00000000:\n");
- flash_mtd = do_map_probe("jedec_probe", &mpc1211_flash_map);
- if (!flash_mtd) {
- printk(KERN_NOTICE "Flash chips not detected at either possible location.\n");
- return -ENXIO;
- }
- printk(KERN_NOTICE "MPC-1211: Flash at 0x%08lx\n", mpc1211_flash_map.virt & 0x1fffffff);
- flash_mtd->module = THIS_MODULE;
-
- parsed_parts = mpc1211_partitions;
- nr_parts = ARRAY_SIZE(mpc1211_partitions);
-
- add_mtd_partitions(flash_mtd, parsed_parts, nr_parts);
- return 0;
-}
-
-static void __exit cleanup_mpc1211_maps(void)
-{
- if (parsed_parts)
- del_mtd_partitions(flash_mtd);
- else
- del_mtd_device(flash_mtd);
- map_destroy(flash_mtd);
-}
-
-module_init(init_mpc1211_maps);
-module_exit(cleanup_mpc1211_maps);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Saito.K & Jeanne <ksaito@interface.co.jp>");
-MODULE_DESCRIPTION("MTD map driver for MPC-1211 boards. Interface");
* Copyright (C) 2001 Mark Langsdorf (mark.langsdorf@amd.com)
* based on sc520cdp.c by Sysgo Real-Time Solutions GmbH
*
- * $Id: netsc520.c,v 1.10 2004/07/12 21:59:44 dwmw2 Exp $
+ * $Id: netsc520.c,v 1.9 2003/05/21 12:45:19 dwmw2 Exp $
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
static struct map_info netsc520_map = {
.name = "netsc520 Flash Bank",
.size = WINDOW_SIZE,
- .bankwidth = 4,
+ .buswidth = 4,
.phys = WINDOW_ADDR,
};
* (C) Copyright 2000-2001, Greg Ungerer (gerg@snapgear.com)
* (C) Copyright 2001-2002, SnapGear (www.snapgear.com)
*
- * $Id: nettel.c,v 1.5 2004/07/12 21:59:44 dwmw2 Exp $
+ * $Id: nettel.c,v 1.4 2003/05/20 20:59:30 dwmw2 Exp $
*/
/****************************************************************************/
static struct map_info nettel_intel_map = {
.name = "SnapGear Intel",
.size = 0,
- .bankwidth = INTEL_BUSWIDTH,
+ .buswidth = INTEL_BUSWIDTH,
};
static struct mtd_partition nettel_intel_partitions[] = {
static struct map_info nettel_amd_map = {
.name = "SnapGear AMD",
.size = AMD_WINDOW_MAXSIZE,
- .bankwidth = AMD_BUSWIDTH,
+ .buswidth = AMD_BUSWIDTH,
};
static struct mtd_partition nettel_amd_partitions[] = {
/*
- * $Id: ocelot.c,v 1.13 2004/07/12 21:59:44 dwmw2 Exp $
+ * $Id: ocelot.c,v 1.12 2003/05/21 12:45:19 dwmw2 Exp $
*
* Flash on Momenco Ocelot
*/
struct map_info ocelot_flash_map = {
.name = "Ocelot boot flash",
.size = FLASH_WINDOW_SIZE,
- .bankwidth = FLASH_BUSWIDTH,
+ .buswidth = FLASH_BUSWIDTH,
.phys = FLASH_WINDOW_ADDR,
};
struct map_info ocelot_nvram_map = {
.name = "Ocelot NVRAM",
.size = NVRAM_WINDOW_SIZE,
- .bankwidth = NVRAM_BUSWIDTH,
+ .buswidth = NVRAM_BUSWIDTH,
.phys = NVRAM_WINDOW_ADDR,
};
-// $Id: octagon-5066.c,v 1.26 2004/07/12 22:38:29 dwmw2 Exp $
+// $Id: octagon-5066.c,v 1.24 2003/05/21 15:15:07 dwmw2 Exp $
/* ######################################################################
Octagon 5066 MTD Driver.
}
-static map_word oct5066_read8(struct map_info *map, unsigned long ofs)
+static __u8 oct5066_read8(struct map_info *map, unsigned long ofs)
{
- map_word ret;
+ __u8 ret;
spin_lock(&oct5066_spin);
oct5066_page(map, ofs);
- ret.x[0] = readb(iomapadr + (ofs & WINDOW_MASK));
+ ret = readb(iomapadr + (ofs & WINDOW_MASK));
+ spin_unlock(&oct5066_spin);
+ return ret;
+}
+
+static __u16 oct5066_read16(struct map_info *map, unsigned long ofs)
+{
+ __u16 ret;
+ spin_lock(&oct5066_spin);
+ oct5066_page(map, ofs);
+ ret = readw(iomapadr + (ofs & WINDOW_MASK));
+ spin_unlock(&oct5066_spin);
+ return ret;
+}
+
+static __u32 oct5066_read32(struct map_info *map, unsigned long ofs)
+{
+ __u32 ret;
+ spin_lock(&oct5066_spin);
+ oct5066_page(map, ofs);
+ ret = readl(iomapadr + (ofs & WINDOW_MASK));
spin_unlock(&oct5066_spin);
return ret;
}
}
}
-static void oct5066_write8(struct map_info *map, map_word d, unsigned long adr)
+static void oct5066_write8(struct map_info *map, __u8 d, unsigned long adr)
+{
+ spin_lock(&oct5066_spin);
+ oct5066_page(map, adr);
+ writeb(d, iomapadr + (adr & WINDOW_MASK));
+ spin_unlock(&oct5066_spin);
+}
+
+static void oct5066_write16(struct map_info *map, __u16 d, unsigned long adr)
+{
+ spin_lock(&oct5066_spin);
+ oct5066_page(map, adr);
+ writew(d, iomapadr + (adr & WINDOW_MASK));
+ spin_unlock(&oct5066_spin);
+}
+
+static void oct5066_write32(struct map_info *map, __u32 d, unsigned long adr)
{
spin_lock(&oct5066_spin);
oct5066_page(map, adr);
- writeb(d.x[0], iomapadr + (adr & WINDOW_MASK));
+ writel(d, iomapadr + (adr & WINDOW_MASK));
spin_unlock(&oct5066_spin);
}
.name = "Octagon 5066 Socket",
.phys = NO_XIP,
.size = 512 * 1024,
- .bankwidth = 1,
- .read = oct5066_read8,
+ .buswidth = 1,
+ .read8 = oct5066_read8,
+ .read16 = oct5066_read16,
+ .read32 = oct5066_read32,
.copy_from = oct5066_copy_from,
- .write = oct5066_write8,
+ .write8 = oct5066_write8,
+ .write16 = oct5066_write16,
+ .write32 = oct5066_write32,
.copy_to = oct5066_copy_to,
.map_priv_1 = 1<<6
},
.name = "Octagon 5066 Internal Flash",
.phys = NO_XIP,
.size = 2 * 1024 * 1024,
- .bankwidth = 1,
- .read = oct5066_read8,
+ .buswidth = 1,
+ .read8 = oct5066_read8,
+ .read16 = oct5066_read16,
+ .read32 = oct5066_read32,
.copy_from = oct5066_copy_from,
- .write = oct5066_write8,
+ .write8 = oct5066_write8,
+ .write16 = oct5066_write16,
+ .write32 = oct5066_write32,
.copy_to = oct5066_copy_to,
.map_priv_1 = 2<<6
}
+++ /dev/null
-/*
- * NOR Flash memory access on TI Toto board
- *
- * jzhang@ti.com (C) 2003 Texas Instruments.
- *
- * (C) 2002 MontVista Software, Inc.
- *
- * $Id: omap-toto-flash.c,v 1.2 2004/07/12 21:59:44 dwmw2 Exp $
- */
-
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-
-#include <linux/errno.h>
-#include <linux/init.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-
-#include <asm/hardware.h>
-#include <asm/io.h>
-
-
-#ifndef CONFIG_ARCH_OMAP
-#error This is for OMAP architecture only
-#endif
-
-//these lines need be moved to a hardware header file
-#define OMAP_TOTO_FLASH_BASE 0xd8000000
-#define OMAP_TOTO_FLASH_SIZE 0x80000
-
-static struct map_info omap_toto_map_flash = {
- .name = "OMAP Toto flash",
- .bankwidth = 2,
- .virt = OMAP_TOTO_FLASH_BASE,
-};
-
-
-static struct mtd_partition toto_flash_partitions[] = {
- {
- .name = "BootLoader",
- .size = 0x00040000, /* hopefully u-boot will stay 128k + 128*/
- .offset = 0,
- .mask_flags = MTD_WRITEABLE, /* force read-only */
- }, {
- .name = "ReservedSpace",
- .size = 0x00030000,
- .offset = MTDPART_OFS_APPEND,
- //mask_flags: MTD_WRITEABLE, /* force read-only */
- }, {
- .name = "EnvArea", /* bottom 64KiB for env vars */
- .size = MTDPART_SIZ_FULL,
- .offset = MTDPART_OFS_APPEND,
- }
-};
-
-static struct mtd_partition *parsed_parts;
-
-static struct mtd_info *flash_mtd;
-
-static int __init init_flash (void)
-{
-
- struct mtd_partition *parts;
- int nb_parts = 0;
- int parsed_nr_parts = 0;
- const char *part_type;
-
- /*
- * Static partition definition selection
- */
- part_type = "static";
-
- parts = toto_flash_partitions;
- nb_parts = ARRAY_SIZE(toto_flash_partitions);
- omap_toto_map_flash.size = OMAP_TOTO_FLASH_SIZE;
- omap_toto_map_flash.phys = virt_to_phys(OMAP_TOTO_FLASH_BASE);
-
- simple_map_init(&omap_toto_map_flash);
- /*
- * Now let's probe for the actual flash. Do it here since
- * specific machine settings might have been set above.
- */
- printk(KERN_NOTICE "OMAP toto flash: probing %d-bit flash bus\n",
- omap_toto_map_flash.bankwidth*8);
- flash_mtd = do_map_probe("jedec_probe", &omap_toto_map_flash);
- if (!flash_mtd)
- return -ENXIO;
-
- if (parsed_nr_parts > 0) {
- parts = parsed_parts;
- nb_parts = parsed_nr_parts;
- }
-
- if (nb_parts == 0) {
- printk(KERN_NOTICE "OMAP toto flash: no partition info available,"
- "registering whole flash at once\n");
- if (add_mtd_device(flash_mtd)){
- return -ENXIO;
- }
- } else {
- printk(KERN_NOTICE "Using %s partition definition\n",
- part_type);
- return add_mtd_partitions(flash_mtd, parts, nb_parts);
- }
- return 0;
-}
-
-int __init omap_toto_mtd_init(void)
-{
- int status;
-
- if (status = init_flash()) {
- printk(KERN_ERR "OMAP Toto Flash: unable to init map for toto flash\n");
- }
- return status;
-}
-
-static void __exit omap_toto_mtd_cleanup(void)
-{
- if (flash_mtd) {
- del_mtd_partitions(flash_mtd);
- map_destroy(flash_mtd);
- if (parsed_parts)
- kfree(parsed_parts);
- }
-}
-
-module_init(omap_toto_mtd_init);
-module_exit(omap_toto_mtd_cleanup);
-
-MODULE_AUTHOR("Jian Zhang");
-MODULE_DESCRIPTION("OMAP Toto board map driver");
-MODULE_LICENSE("GPL");
+++ /dev/null
-/*
- * Flash memory access on Alchemy Pb1550 board
- *
- * $Id: pb1550-flash.c,v 1.4 2004/07/14 17:45:40 dwmw2 Exp $
- *
- * (C) 2004 Embedded Edge, LLC, based on pb1550-flash.c:
- * (C) 2003 Pete Popov <ppopov@pacbell.net>
- *
- */
-
-#include <linux/config.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-
-#include <asm/io.h>
-#include <asm/au1000.h>
-#include <asm/pb1550.h>
-
-#ifdef DEBUG_RW
-#define DBG(x...) printk(x)
-#else
-#define DBG(x...)
-#endif
-
-static unsigned long window_addr;
-static unsigned long window_size;
-
-
-static struct map_info pb1550_map = {
- .name = "Pb1550 flash",
-};
-
-static unsigned char flash_bankwidth = 4;
-
-/*
- * Support only 64MB NOR Flash parts
- */
-
-#ifdef PB1550_BOTH_BANKS
-/* both banks will be used. Combine the first bank and the first
- * part of the second bank together into a single jffs/jffs2
- * partition.
- */
-static struct mtd_partition pb1550_partitions[] = {
- /* assume boot[2:0]:swap is '0000' or '1000', which translates to:
- * 1C00 0000 1FFF FFFF CE0 64MB Boot NOR Flash
- * 1800 0000 1BFF FFFF CE0 64MB Param NOR Flash
- */
- {
- .name = "User FS",
- .size = (0x1FC00000 - 0x18000000),
- .offset = 0x0000000
- },{
- .name = "yamon",
- .size = 0x0100000,
- .offset = MTDPART_OFS_APPEND,
- .mask_flags = MTD_WRITEABLE
- },{
- .name = "raw kernel",
- .size = (0x300000 - 0x40000), /* last 256KB is yamon env */
- .offset = MTDPART_OFS_APPEND,
- }
-};
-#elif defined(PB1550_BOOT_ONLY)
-static struct mtd_partition pb1550_partitions[] = {
- /* assume boot[2:0]:swap is '0000' or '1000', which translates to:
- * 1C00 0000 1FFF FFFF CE0 64MB Boot NOR Flash
- */
- {
- .name = "User FS",
- .size = 0x03c00000,
- .offset = 0x0000000
- },{
- .name = "yamon",
- .size = 0x0100000,
- .offset = MTDPART_OFS_APPEND,
- .mask_flags = MTD_WRITEABLE
- },{
- .name = "raw kernel",
- .size = (0x300000-0x40000), /* last 256KB is yamon env */
- .offset = MTDPART_OFS_APPEND,
- }
-};
-#elif defined(PB1550_USER_ONLY)
-static struct mtd_partition pb1550_partitions[] = {
- /* assume boot[2:0]:swap is '0000' or '1000', which translates to:
- * 1800 0000 1BFF FFFF CE0 64MB Param NOR Flash
- */
- {
- .name = "User FS",
- .size = (0x4000000 - 0x200000), /* reserve 2MB for raw kernel */
- .offset = 0x0000000
- },{
- .name = "raw kernel",
- .size = MTDPART_SIZ_FULL,
- .offset = MTDPART_OFS_APPEND,
- }
-};
-#else
-#error MTD_PB1550 define combo error /* should never happen */
-#endif
-
-#define NB_OF(x) (sizeof(x)/sizeof(x[0]))
-
-static struct mtd_info *mymtd;
-
-/*
- * Probe the flash density and setup window address and size
- * based on user CONFIG options. There are times when we don't
- * want the MTD driver to be probing the boot or user flash,
- * so having the option to enable only one bank is important.
- */
-int setup_flash_params(void)
-{
- u16 boot_swapboot;
- boot_swapboot = (au_readl(MEM_STSTAT) & (0x7<<1)) |
- ((bcsr->status >> 6) & 0x1);
- printk("Pb1550 MTD: boot:swap %d\n", boot_swapboot);
-
- switch (boot_swapboot) {
- case 0: /* 512Mbit devices, both enabled */
- case 1:
- case 8:
- case 9:
-#if defined(PB1550_BOTH_BANKS)
- window_addr = 0x18000000;
- window_size = 0x8000000;
-#elif defined(PB1550_BOOT_ONLY)
- window_addr = 0x1C000000;
- window_size = 0x4000000;
-#else /* USER ONLY */
- window_addr = 0x1E000000;
- window_size = 0x4000000;
-#endif
- break;
- case 0xC:
- case 0xD:
- case 0xE:
- case 0xF:
- /* 64 MB Boot NOR Flash is disabled */
- /* and the start address is moved to 0x0C00000 */
- window_addr = 0x0C000000;
- window_size = 0x4000000;
- default:
- printk("Pb1550 MTD: unsupported boot:swap setting\n");
- return 1;
- }
- return 0;
-}
-
-int __init pb1550_mtd_init(void)
-{
- struct mtd_partition *parts;
- int nb_parts = 0;
-
- /* Default flash bankwidth */
- pb1550_map.bankwidth = flash_bankwidth;
-
- if (setup_flash_params())
- return -ENXIO;
-
- /*
- * Static partition definition selection
- */
- parts = pb1550_partitions;
- nb_parts = NB_OF(pb1550_partitions);
- pb1550_map.size = window_size;
-
- /*
- * Now let's probe for the actual flash. Do it here since
- * specific machine settings might have been set above.
- */
- printk(KERN_NOTICE "Pb1550 flash: probing %d-bit flash bus\n",
- pb1550_map.bankwidth*8);
- pb1550_map.virt =
- (unsigned long)ioremap(window_addr, window_size);
- mymtd = do_map_probe("cfi_probe", &pb1550_map);
- if (!mymtd) return -ENXIO;
- mymtd->owner = THIS_MODULE;
-
- add_mtd_partitions(mymtd, parts, nb_parts);
- return 0;
-}
-
-static void __exit pb1550_mtd_cleanup(void)
-{
- if (mymtd) {
- del_mtd_partitions(mymtd);
- map_destroy(mymtd);
- }
-}
-
-module_init(pb1550_mtd_init);
-module_exit(pb1550_mtd_cleanup);
-
-MODULE_AUTHOR("Embedded Edge, LLC");
-MODULE_DESCRIPTION("Pb1550 mtd map driver");
-MODULE_LICENSE("GPL");
*
* (C) 2001 Pete Popov <ppopov@mvista.com>
*
- * $Id: pb1xxx-flash.c,v 1.11 2004/07/12 21:59:44 dwmw2 Exp $
+ * $Id: pb1xxx-flash.c,v 1.9 2003/06/23 11:48:18 dwmw2 Exp $
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
-#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#endif
#ifdef CONFIG_MIPS_PB1000
-
#define WINDOW_ADDR 0x1F800000
#define WINDOW_SIZE 0x800000
+#endif
+
+
+static struct map_info pb1xxx_map = {
+ .name = "Pb1xxx flash",
+};
+
+
+#ifdef CONFIG_MIPS_PB1000
+static unsigned long flash_size = 0x00800000;
+static unsigned char flash_buswidth = 4;
static struct mtd_partition pb1xxx_partitions[] = {
{
- .name = "yamon env",
- .size = 0x00020000,
- .offset = 0,
- .mask_flags = MTD_WRITEABLE},
- {
- .name = "User FS",
- .size = 0x003e0000,
- .offset = 0x20000,},
- {
- .name = "boot code",
- .size = 0x100000,
- .offset = 0x400000,
- .mask_flags = MTD_WRITEABLE},
- {
- .name = "raw/kernel",
- .size = 0x300000,
- .offset = 0x500000}
+ .name = "yamon env",
+ .size = 0x00020000,
+ .offset = 0,
+ .mask_flags = MTD_WRITEABLE
+ },{
+ .name = "User FS",
+ .size = 0x003e0000,
+ .offset = 0x20000,
+ },{
+ .name = "boot code",
+ .size = 0x100000,
+ .offset = 0x400000,
+ .mask_flags = MTD_WRITEABLE
+ },{
+ .name = "raw/kernel",
+ .size = 0x300000,
+ .offset = 0x500000
+ }
};
#elif defined(CONFIG_MIPS_PB1500) || defined(CONFIG_MIPS_PB1100)
+static unsigned char flash_buswidth = 4;
#if defined(CONFIG_MTD_PB1500_BOOT) && defined(CONFIG_MTD_PB1500_USER)
-/* both 32MB banks will be used. Combine the first 32MB bank and the
- * first 28MB of the second bank together into a single jffs/jffs2
+/* both 32MiB banks will be used. Combine the first 32MiB bank and the
+ * first 28MiB of the second bank together into a single jffs/jffs2
* partition.
*/
+static unsigned long flash_size = 0x04000000;
#define WINDOW_ADDR 0x1C000000
#define WINDOW_SIZE 0x4000000
static struct mtd_partition pb1xxx_partitions[] = {
{
- .name = "User FS",
- .size = 0x3c00000,
- .offset = 0x0000000
+ .name = "User FS",
+ .size = 0x3c00000,
+ .offset = 0x0000000
},{
- .name = "yamon",
- .size = 0x0100000,
- .offset = 0x3c00000,
- .mask_flags = MTD_WRITEABLE
+ .name = "yamon",
+ .size = 0x0100000,
+ .offset = 0x3c00000,
+ .mask_flags = MTD_WRITEABLE
},{
- .name = "raw kernel",
- .size = 0x02c0000,
- .offset = 0x3d00000
+ .name = "raw kernel",
+ .size = 0x02c0000,
+ .offset = 0x3d00000
}
};
#elif defined(CONFIG_MTD_PB1500_BOOT) && !defined(CONFIG_MTD_PB1500_USER)
+static unsigned long flash_size = 0x02000000;
#define WINDOW_ADDR 0x1E000000
#define WINDOW_SIZE 0x2000000
static struct mtd_partition pb1xxx_partitions[] = {
{
- .name = "User FS",
- .size = 0x1c00000,
- .offset = 0x0000000
+ .name = "User FS",
+ .size = 0x1c00000,
+ .offset = 0x0000000
},{
- .name = "yamon",
- .size = 0x0100000,
- .offset = 0x1c00000,
- .mask_flags = MTD_WRITEABLE
+ .name = "yamon",
+ .size = 0x0100000,
+ .offset = 0x1c00000,
+ .mask_flags = MTD_WRITEABLE
},{
- .name = "raw kernel",
- .size = 0x02c0000,
- .offset = 0x1d00000
+ .name = "raw kernel",
+ .size = 0x02c0000,
+ .offset = 0x1d00000
}
};
#elif !defined(CONFIG_MTD_PB1500_BOOT) && defined(CONFIG_MTD_PB1500_USER)
+static unsigned long flash_size = 0x02000000;
#define WINDOW_ADDR 0x1C000000
#define WINDOW_SIZE 0x2000000
static struct mtd_partition pb1xxx_partitions[] = {
{
- .name = "User FS",
- .size = 0x1e00000,
- .offset = 0x0000000
+ .name = "User FS",
+ .size = 0x1e00000,
+ .offset = 0x0000000
},{
- .name = "raw kernel",
- .size = 0x0200000,
- .offset = 0x1e00000,
+ .name = "raw kernel",
+ .size = 0x0200000,
+ .offset = 0x1e00000,
}
};
#else
#error Unsupported board
#endif
-#define NAME "Pb1x00 Linux Flash"
-#define PADDR WINDOW_ADDR
-#define BUSWIDTH 4
-#define SIZE WINDOW_SIZE
-#define PARTITIONS 4
-
-static struct map_info pb1xxx_mtd_map = {
- .name = NAME,
- .size = SIZE,
- .bankwidth = BUSWIDTH,
- .phys = PADDR,
-};
-
-static struct mtd_info *pb1xxx_mtd;
+static struct mtd_partition *parsed_parts;
+static struct mtd_info *mymtd;
int __init pb1xxx_mtd_init(void)
{
int nb_parts = 0;
char *part_type;
+ /* Default flash buswidth */
+ pb1xxx_map.buswidth = flash_buswidth;
+
/*
* Static partition definition selection
*/
part_type = "static";
parts = pb1xxx_partitions;
nb_parts = ARRAY_SIZE(pb1xxx_partitions);
+ pb1xxx_map.size = flash_size;
/*
* Now let's probe for the actual flash. Do it here since
* specific machine settings might have been set above.
*/
printk(KERN_NOTICE "Pb1xxx flash: probing %d-bit flash bus\n",
- BUSWIDTH*8);
- pb1xxx_mtd_map.virt = (unsigned long)ioremap(WINDOW_ADDR, WINDOW_SIZE);
+ pb1xxx_map.buswidth*8);
+ pb1xxx_map.phys = WINDOW_ADDR;
+ pb1xxx_map.virt = (unsigned long)ioremap(WINDOW_ADDR, WINDOW_SIZE);
- simple_map_init(&pb1xxx_mtd_map);
+ simple_map_init(&pb1xxx_map);
- pb1xxx_mtd = do_map_probe("cfi_probe", &pb1xxx_mtd_map);
- if (!pb1xxx_mtd) return -ENXIO;
- pb1xxx_mtd->owner = THIS_MODULE;
+ mymtd = do_map_probe("cfi_probe", &pb1xxx_map);
+ if (!mymtd) {
+ iounmap(pb1xxx_map.virt);
+ return -ENXIO;
+ }
+ mymtd->owner = THIS_MODULE;
- add_mtd_partitions(pb1xxx_mtd, parts, nb_parts);
+ add_mtd_partitions(mymtd, parts, nb_parts);
return 0;
}
static void __exit pb1xxx_mtd_cleanup(void)
{
- if (pb1xxx_mtd) {
- del_mtd_partitions(pb1xxx_mtd);
- map_destroy(pb1xxx_mtd);
- iounmap((void *) pb1xxx_mtd_map.virt);
+ if (mymtd) {
+ del_mtd_partitions(mymtd);
+ map_destroy(mymtd);
+ if (parsed_parts)
+ kfree(parsed_parts);
}
+ if (pb1xxx_map.virt)
+ iounmap(pb1xxx_map.virt);
}
module_init(pb1xxx_mtd_init);
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
- * $Id: pci.c,v 1.8 2004/07/12 22:38:29 dwmw2 Exp $
+ * $Id: pci.c,v 1.5 2003/05/20 20:59:31 dwmw2 Exp $
*
* Generic PCI memory map driver. We support the following boards:
* - Intel IQ80310 ATU.
struct pci_dev *dev;
};
-static map_word mtd_pci_read8(struct map_info *_map, unsigned long ofs)
-{
- struct map_pci_info *map = (struct map_pci_info *)_map;
- map_word val;
- val.x[0]= readb(map->base + map->translate(map, ofs));
-// printk("read8 : %08lx => %02x\n", ofs, val.x[0]);
- return val;
-}
-
-#if 0
-static map_word mtd_pci_read16(struct map_info *_map, unsigned long ofs)
-{
- struct map_pci_info *map = (struct map_pci_info *)_map;
- map_word val;
- val.x[0] = readw(map->base + map->translate(map, ofs));
-// printk("read16: %08lx => %04x\n", ofs, val.x[0]);
- return val;
-}
-#endif
-static map_word mtd_pci_read32(struct map_info *_map, unsigned long ofs)
-{
- struct map_pci_info *map = (struct map_pci_info *)_map;
- map_word val;
- val.x[0] = readl(map->base + map->translate(map, ofs));
-// printk("read32: %08lx => %08x\n", ofs, val.x[0]);
- return val;
-}
-
-static void mtd_pci_copyfrom(struct map_info *_map, void *to, unsigned long from, ssize_t len)
-{
- struct map_pci_info *map = (struct map_pci_info *)_map;
- memcpy_fromio(to, map->base + map->translate(map, from), len);
-}
-
-static void mtd_pci_write8(struct map_info *_map, map_word val, unsigned long ofs)
-{
- struct map_pci_info *map = (struct map_pci_info *)_map;
-// printk("write8 : %08lx <= %02x\n", ofs, val.x[0]);
- writeb(val.x[0], map->base + map->translate(map, ofs));
-}
-
-#if 0
-static void mtd_pci_write16(struct map_info *_map, map_word val, unsigned long ofs)
-{
- struct map_pci_info *map = (struct map_pci_info *)_map;
-// printk("write16: %08lx <= %04x\n", ofs, val.x[0]);
- writew(val.x[0], map->base + map->translate(map, ofs));
-}
-#endif
-static void mtd_pci_write32(struct map_info *_map, map_word val, unsigned long ofs)
-{
- struct map_pci_info *map = (struct map_pci_info *)_map;
-// printk("write32: %08lx <= %08x\n", ofs, val.x[0]);
- writel(val.x[0], map->base + map->translate(map, ofs));
-}
-
-static void mtd_pci_copyto(struct map_info *_map, unsigned long to, const void *from, ssize_t len)
-{
- struct map_pci_info *map = (struct map_pci_info *)_map;
- memcpy_toio(map->base + map->translate(map, to), from, len);
-}
-
-static struct map_info mtd_pci_map = {
- .phys = NO_XIP,
- .copy_from = mtd_pci_copyfrom,
- .copy_to = mtd_pci_copyto,
-};
-
/*
* Intel IOP80310 Flash driver
*/
{
u32 win_base;
- map->map.bankwidth = 1;
- map->map.read = mtd_pci_read8,
- map->map.write = mtd_pci_write8,
-
+ map->map.buswidth = 1;
map->map.size = 0x00800000;
map->base = ioremap_nocache(pci_resource_start(dev, 0),
pci_resource_len(dev, 0));
if (!len || !base)
return -ENXIO;
- map->map.bankwidth = 4;
- map->map.read = mtd_pci_read32,
- map->map.write = mtd_pci_write32,
+ map->map.buswidth = 4;
map->map.size = len;
map->base = ioremap_nocache(base, len);
* Generic code follows.
*/
+static u8 mtd_pci_read8(struct map_info *_map, unsigned long ofs)
+{
+ struct map_pci_info *map = (struct map_pci_info *)_map;
+ u8 val = readb(map->base + map->translate(map, ofs));
+// printk("read8 : %08lx => %02x\n", ofs, val);
+ return val;
+}
+
+static u16 mtd_pci_read16(struct map_info *_map, unsigned long ofs)
+{
+ struct map_pci_info *map = (struct map_pci_info *)_map;
+ u16 val = readw(map->base + map->translate(map, ofs));
+// printk("read16: %08lx => %04x\n", ofs, val);
+ return val;
+}
+
+static u32 mtd_pci_read32(struct map_info *_map, unsigned long ofs)
+{
+ struct map_pci_info *map = (struct map_pci_info *)_map;
+ u32 val = readl(map->base + map->translate(map, ofs));
+// printk("read32: %08lx => %08x\n", ofs, val);
+ return val;
+}
+
+static void mtd_pci_copyfrom(struct map_info *_map, void *to, unsigned long from, ssize_t len)
+{
+ struct map_pci_info *map = (struct map_pci_info *)_map;
+ memcpy_fromio(to, map->base + map->translate(map, from), len);
+}
+
+static void mtd_pci_write8(struct map_info *_map, u8 val, unsigned long ofs)
+{
+ struct map_pci_info *map = (struct map_pci_info *)_map;
+// printk("write8 : %08lx <= %02x\n", ofs, val);
+ writeb(val, map->base + map->translate(map, ofs));
+}
+
+static void mtd_pci_write16(struct map_info *_map, u16 val, unsigned long ofs)
+{
+ struct map_pci_info *map = (struct map_pci_info *)_map;
+// printk("write16: %08lx <= %04x\n", ofs, val);
+ writew(val, map->base + map->translate(map, ofs));
+}
+
+static void mtd_pci_write32(struct map_info *_map, u32 val, unsigned long ofs)
+{
+ struct map_pci_info *map = (struct map_pci_info *)_map;
+// printk("write32: %08lx <= %08x\n", ofs, val);
+ writel(val, map->base + map->translate(map, ofs));
+}
+
+static void mtd_pci_copyto(struct map_info *_map, unsigned long to, const void *from, ssize_t len)
+{
+ struct map_pci_info *map = (struct map_pci_info *)_map;
+ memcpy_toio(map->base + map->translate(map, to), from, len);
+}
+
+static struct map_info mtd_pci_map = {
+ .phys = NO_XIP,
+ .read8 = mtd_pci_read8,
+ .read16 = mtd_pci_read16,
+ .read32 = mtd_pci_read32,
+ .copy_from = mtd_pci_copyfrom,
+ .write8 = mtd_pci_write8,
+ .write16 = mtd_pci_write16,
+ .write32 = mtd_pci_write32,
+ .copy_to = mtd_pci_copyto,
+};
+
static int __devinit
mtd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
/*
- * $Id: pcmciamtd.c,v 1.51 2004/07/12 22:38:29 dwmw2 Exp $
+ * $Id: pcmciamtd.c,v 1.48 2003/06/24 07:14:38 spse Exp $
*
* pcmciamtd.c - MTD driver for PCMCIA flash memory cards
*
#define DRIVER_DESC "PCMCIA Flash memory card driver"
-#define DRIVER_VERSION "$Revision: 1.51 $"
+#define DRIVER_VERSION "$Revision: 1.48 $"
/* Size of the PCMCIA address space: 26 bits = 64 MB */
#define MAX_PCMCIA_ADDR 0x4000000
/* Module parameters */
/* 2 = do 16-bit transfers, 1 = do 8-bit transfers */
-static int bankwidth = 2;
+static int buswidth = 2;
/* Speed of memory accesses, in ns */
static int mem_speed;
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Simon Evans <spse@secret.org.uk>");
MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_PARM(bankwidth, "i");
-MODULE_PARM_DESC(bankwidth, "Set bankwidth (1=8 bit, 2=16 bit, default=2)");
+MODULE_PARM(buswidth, "i");
+MODULE_PARM_DESC(buswidth, "Set buswidth (1=8 bit, 2=16 bit, default=2)");
MODULE_PARM(mem_speed, "i");
MODULE_PARM_DESC(mem_speed, "Set memory access speed in ns");
MODULE_PARM(force_size, "i");
}
-static map_word pcmcia_read8_remap(struct map_info *map, unsigned long ofs)
+static u8 pcmcia_read8_remap(struct map_info *map, unsigned long ofs)
{
caddr_t addr;
- map_word d = {{0}};
+ u8 d;
addr = remap_window(map, ofs);
if(!addr)
- return d;
+ return 0;
- d.x[0] = readb(addr);
- DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%02x", ofs, addr, d.x[0]);
+ d = readb(addr);
+ DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%02x", ofs, addr, d);
return d;
}
-static map_word pcmcia_read16_remap(struct map_info *map, unsigned long ofs)
+static u16 pcmcia_read16_remap(struct map_info *map, unsigned long ofs)
{
caddr_t addr;
- map_word d = {{0}};
+ u16 d;
addr = remap_window(map, ofs);
if(!addr)
- return d;
+ return 0;
- d.x[0] = readw(addr);
- DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%04x", ofs, addr, d.x[0]);
+ d = readw(addr);
+ DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%04x", ofs, addr, d);
return d;
}
}
-static void pcmcia_write8_remap(struct map_info *map, map_word d, unsigned long adr)
+static void pcmcia_write8_remap(struct map_info *map, u8 d, unsigned long adr)
{
caddr_t addr = remap_window(map, adr);
if(!addr)
return;
- DEBUG(3, "adr = 0x%08lx (%p) data = 0x%02x", adr, addr, d.x[0]);
- writeb(d.x[0], addr);
+ DEBUG(3, "adr = 0x%08lx (%p) data = 0x%02x", adr, addr, d);
+ writeb(d, addr);
}
-static void pcmcia_write16_remap(struct map_info *map, map_word d, unsigned long adr)
+static void pcmcia_write16_remap(struct map_info *map, u16 d, unsigned long adr)
{
caddr_t addr = remap_window(map, adr);
if(!addr)
return;
- DEBUG(3, "adr = 0x%08lx (%p) data = 0x%04x", adr, addr, d.x[0]);
- writew(d.x[0], addr);
+ DEBUG(3, "adr = 0x%08lx (%p) data = 0x%04x", adr, addr, d);
+ writew(d, addr);
}
#define DEV_REMOVED(x) (!(*(u_int *)x->map_priv_1 & DEV_PRESENT))
-static map_word pcmcia_read8(struct map_info *map, unsigned long ofs)
+static u8 pcmcia_read8(struct map_info *map, unsigned long ofs)
{
caddr_t win_base = (caddr_t)map->map_priv_2;
- map_word d = {{0}};
+ u8 d;
if(DEV_REMOVED(map))
- return d;
+ return 0;
- d.x[0] = readb(win_base + ofs);
- DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%02x", ofs, win_base + ofs, d.x[0]);
+ d = readb(win_base + ofs);
+ DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%02x", ofs, win_base + ofs, d);
return d;
}
-static map_word pcmcia_read16(struct map_info *map, unsigned long ofs)
+static u16 pcmcia_read16(struct map_info *map, unsigned long ofs)
{
caddr_t win_base = (caddr_t)map->map_priv_2;
- map_word d = {{0}};
+ u16 d;
if(DEV_REMOVED(map))
- return d;
+ return 0;
- d.x[0] = readw(win_base + ofs);
- DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%04x", ofs, win_base + ofs, d.x[0]);
+ d = readw(win_base + ofs);
+ DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%04x", ofs, win_base + ofs, d);
return d;
}
case CISTPL_DEVICE_GEO: {
cistpl_device_geo_t *t = &parse.device_geo;
int i;
- dev->pcmcia_map.bankwidth = t->geo[0].buswidth;
+ dev->pcmcia_map.buswidth = t->geo[0].buswidth;
for(i = 0; i < t->ngeo; i++) {
- DEBUG(2, "region: %d bankwidth = %u", i, t->geo[i].buswidth);
+ DEBUG(2, "region: %d buswidth = %u", i, t->geo[i].buswidth);
DEBUG(2, "region: %d erase_block = %u", i, t->geo[i].erase_block);
DEBUG(2, "region: %d read_block = %u", i, t->geo[i].read_block);
DEBUG(2, "region: %d write_block = %u", i, t->geo[i].write_block);
if(!dev->pcmcia_map.size)
dev->pcmcia_map.size = MAX_PCMCIA_ADDR;
- if(!dev->pcmcia_map.bankwidth)
- dev->pcmcia_map.bankwidth = 2;
+ if(!dev->pcmcia_map.buswidth)
+ dev->pcmcia_map.buswidth = 2;
if(force_size) {
dev->pcmcia_map.size = force_size << 20;
DEBUG(2, "size forced to %dM", force_size);
}
- if(bankwidth) {
- dev->pcmcia_map.bankwidth = bankwidth;
- DEBUG(2, "bankwidth forced to %d", bankwidth);
+ if(buswidth) {
+ dev->pcmcia_map.buswidth = buswidth;
+ DEBUG(2, "buswidth forced to %d", buswidth);
}
dev->pcmcia_map.name = dev->mtd_name;
}
DEBUG(1, "Device: Size: %lu Width:%d Name: %s",
- dev->pcmcia_map.size, dev->pcmcia_map.bankwidth << 3, dev->mtd_name);
+ dev->pcmcia_map.size, dev->pcmcia_map.buswidth << 3, dev->mtd_name);
}
card_settings(dev, link, &new_name);
dev->pcmcia_map.phys = NO_XIP;
+ dev->pcmcia_map.read8 = pcmcia_read8_remap;
+ dev->pcmcia_map.read16 = pcmcia_read16_remap;
dev->pcmcia_map.copy_from = pcmcia_copy_from_remap;
+ dev->pcmcia_map.write8 = pcmcia_write8_remap;
+ dev->pcmcia_map.write16 = pcmcia_write16_remap;
dev->pcmcia_map.copy_to = pcmcia_copy_to_remap;
- if (dev->pcmcia_map.bankwidth == 1) {
- dev->pcmcia_map.read = pcmcia_read8_remap;
- dev->pcmcia_map.write = pcmcia_write8_remap;
- } else {
- dev->pcmcia_map.read = pcmcia_read16_remap;
- dev->pcmcia_map.write = pcmcia_write16_remap;
- }
if(setvpp == 1)
dev->pcmcia_map.set_vpp = pcmciamtd_set_vpp;
whole card - otherwise we try smaller windows until we succeed */
req.Attributes = WIN_MEMORY_TYPE_CM | WIN_ENABLE;
- req.Attributes |= (dev->pcmcia_map.bankwidth == 1) ? WIN_DATA_WIDTH_8 : WIN_DATA_WIDTH_16;
+ req.Attributes |= (dev->pcmcia_map.buswidth == 1) ? WIN_DATA_WIDTH_8 : WIN_DATA_WIDTH_16;
req.Base = 0;
req.AccessSpeed = mem_speed;
link->win = (window_handle_t)link->handle;
DEBUG(1, "Using non remapping memory functions");
dev->pcmcia_map.map_priv_1 = (unsigned long)&(dev->link.state);
dev->pcmcia_map.map_priv_2 = (unsigned long)dev->win_base;
- if (dev->pcmcia_map.bankwidth == 1) {
- dev->pcmcia_map.read = pcmcia_read8;
- dev->pcmcia_map.write = pcmcia_write8;
- } else {
- dev->pcmcia_map.read = pcmcia_read16;
- dev->pcmcia_map.write = pcmcia_write16;
- }
+ dev->pcmcia_map.read8 = pcmcia_read8;
+ dev->pcmcia_map.read16 = pcmcia_read16;
dev->pcmcia_map.copy_from = pcmcia_copy_from;
+ dev->pcmcia_map.write8 = pcmcia_write8;
+ dev->pcmcia_map.write16 = pcmcia_write16;
dev->pcmcia_map.copy_to = pcmcia_copy_to;
}
{
info(DRIVER_DESC " " DRIVER_VERSION);
- if(bankwidth && bankwidth != 1 && bankwidth != 2) {
- info("bad bankwidth (%d), using default", bankwidth);
- bankwidth = 2;
+ if(buswidth && buswidth != 1 && buswidth != 2) {
+ info("bad buswidth (%d), using default", buswidth);
+ buswidth = 2;
}
if(force_size && (force_size < 1 || force_size > 64)) {
info("bad force_size (%d), using default", force_size);
/*
- * $Id: physmap.c,v 1.34 2004/07/21 00:16:14 jwboyer Exp $
+ * $Id: physmap.c,v 1.29 2003/05/29 09:24:10 dwmw2 Exp $
*
* Normal mappings of chips in physical memory
- *
- * Copyright (C) 2003 MontaVista Software Inc.
- * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
- *
- * 031022 - [jsun] add run-time configure and partition setup
*/
#include <linux/module.h>
#include <linux/config.h>
#include <linux/mtd/partitions.h>
+#define WINDOW_ADDR CONFIG_MTD_PHYSMAP_START
+#define WINDOW_SIZE CONFIG_MTD_PHYSMAP_LEN
+#define BUSWIDTH CONFIG_MTD_PHYSMAP_BUSWIDTH
+
static struct mtd_info *mymtd;
+
struct map_info physmap_map = {
- .name = "phys_mapped_flash",
- .phys = CONFIG_MTD_PHYSMAP_START,
- .size = CONFIG_MTD_PHYSMAP_LEN,
- .bankwidth = CONFIG_MTD_PHYSMAP_BANKWIDTH,
+ .name = "Physically mapped flash",
+ .size = WINDOW_SIZE,
+ .buswidth = BUSWIDTH,
+ .phys = WINDOW_ADDR,
};
#ifdef CONFIG_MTD_PARTITIONS
static struct mtd_partition *mtd_parts;
static int mtd_parts_nb;
-static int num_physmap_partitions;
-static struct mtd_partition *physmap_partitions;
+static struct mtd_partition physmap_partitions[] = {
+#if 0
+/* Put your own partition definitions here */
+ {
+ .name = "bootROM",
+ .size = 0x80000,
+ .offset = 0,
+ .mask_flags = MTD_WRITEABLE, /* force read-only */
+ }, {
+ .name = "zImage",
+ .size = 0x100000,
+ .offset = MTDPART_OFS_APPEND,
+ .mask_flags = MTD_WRITEABLE, /* force read-only */
+ }, {
+ .name = "ramdisk.gz",
+ .size = 0x300000,
+ .offset = MTDPART_OFS_APPEND,
+ .mask_flags = MTD_WRITEABLE, /* force read-only */
+ }, {
+ .name = "User FS",
+ .size = MTDPART_SIZ_FULL,
+ .offset = MTDPART_OFS_APPEND,
+ }
+#endif
+};
-static const char *part_probes[] __initdata = {"cmdlinepart", "RedBoot", NULL};
+#define NUM_PARTITIONS (sizeof(physmap_partitions)/sizeof(struct mtd_partition))
+const char *part_probes[] = {"cmdlinepart", "RedBoot", NULL};
-void physmap_set_partitions(struct mtd_partition *parts, int num_parts)
-{
- physmap_partitions=parts;
- num_physmap_partitions=num_parts;
-}
#endif /* CONFIG_MTD_PARTITIONS */
-static int __init init_physmap(void)
+int __init init_physmap(void)
{
static const char *rom_probe_types[] = { "cfi_probe", "jedec_probe", "map_rom", NULL };
const char **type;
- printk(KERN_NOTICE "physmap flash device: %lx at %lx\n", physmap_map.size, physmap_map.phys);
- physmap_map.virt = (unsigned long)ioremap(physmap_map.phys, physmap_map.size);
+ printk(KERN_NOTICE "physmap flash device: %x at %x\n", WINDOW_SIZE, WINDOW_ADDR);
+ physmap_map.virt = (unsigned long)ioremap(WINDOW_ADDR, WINDOW_SIZE);
if (!physmap_map.virt) {
printk("Failed to ioremap\n");
return 0;
}
- if (num_physmap_partitions != 0)
+ if (NUM_PARTITIONS != 0)
{
printk(KERN_NOTICE
"Using physmap partition definition\n");
- add_mtd_partitions (mymtd, physmap_partitions, num_physmap_partitions);
+ add_mtd_partitions (mymtd, physmap_partitions, NUM_PARTITIONS);
return 0;
}
if (mtd_parts_nb) {
del_mtd_partitions(mymtd);
kfree(mtd_parts);
- } else if (num_physmap_partitions) {
+ } else if (NUM_PARTITIONS) {
del_mtd_partitions(mymtd);
} else {
del_mtd_device(mymtd);
*
* This code is GPL
*
- * $Id: pnc2000.c,v 1.15 2004/07/12 21:59:44 dwmw2 Exp $
+ * $Id: pnc2000.c,v 1.14 2003/05/21 12:45:19 dwmw2 Exp $
*/
#include <linux/module.h>
struct map_info pnc_map = {
.name = "PNC-2000",
.size = WINDOW_SIZE,
- .bankwidth = 4,
+ .buswidth = 4,
.phys = 0xFFFFFFFF,
.virt = WINDOW_ADDR,
};
/*
- * $Id: redwood.c,v 1.8 2004/07/12 21:59:44 dwmw2 Exp $
+ * $Id: redwood.c,v 1.6 2003/05/21 12:45:19 dwmw2 Exp $
*
* drivers/mtd/maps/redwood.c
*
* FLASH map for the IBM Redwood 4/5/6 boards.
*
- * Author: MontaVista Software, Inc. <source@mvista.com>
*
- * 2001-2003 (c) MontaVista, Software, Inc. This file is licensed under
+ * Author: Armin Kuster <akuster@mvista.com>
+ *
+ * 2001-2002 (c) MontaVista, Software, Inc. This file is licensed under
* the terms of the GNU General Public License version 2. This program
* is licensed "as is" without any warranty of any kind, whether express
* or implied.
static struct mtd_partition redwood_flash_partitions[] = {
{
- .name = "Redwood filesystem",
+ .name = "Redwood kernel",
.offset = RW_PART0_OF,
.size = RW_PART0_SZ
},
.mask_flags = MTD_WRITEABLE /* force read-only */
},
{
- .name = "Redwood kernel",
+ .name = "Redwood filesystem",
.offset = RW_PART2_OF,
.size = RW_PART2_SZ
},
struct map_info redwood_flash_map = {
.name = "IBM Redwood",
.size = WINDOW_SIZE,
- .bankwidth = 2,
+ .buswidth = 2,
.phys = WINDOW_ADDR,
};
module_exit(cleanup_redwood_flash);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("MontaVista Software <source@mvista.com>");
+MODULE_AUTHOR("Armin Kuster <akuster@mvista.com>");
MODULE_DESCRIPTION("MTD map driver for the IBM Redwood reference boards");
/*
- * $Id: rpxlite.c,v 1.20 2004/07/12 21:59:44 dwmw2 Exp $
+ * $Id: rpxlite.c,v 1.19 2003/05/21 12:45:19 dwmw2 Exp $
*
* Handle mapping of the flash on the RPX Lite and CLLF boards
*/
static struct map_info rpxlite_map = {
.name = "RPX",
.size = WINDOW_SIZE,
- .bankwidth = 4,
+ .buswidth = 4,
.phys = WINDOW_ADDR,
};
*
* (C) 2000 Nicolas Pitre <nico@cam.org>
*
- * $Id: sa1100-flash.c,v 1.39 2004/07/12 21:59:44 dwmw2 Exp $
+ * $Id: sa1100-flash.c,v 1.36 2003/05/29 08:59:35 dwmw2 Exp $
*/
#include <linux/config.h>
sa[i].map->virt = (unsigned long)sa[i].vbase;
sa[i].map->phys = sa[i].base;
sa[i].map->set_vpp = sa[i].set_vpp;
- sa[i].map->bankwidth = sa[i].width;
+ sa[i].map->buswidth = sa[i].width;
sa[i].map->size = sa[i].size;
simple_map_init(sa[i].map);
return;
}
- sa1100_probe_map.bankwidth = msc & MSC_RBW ? 2 : 4;
+ sa1100_probe_map.buswidth = msc & MSC_RBW ? 2 : 4;
sa1100_probe_map.size = SZ_1M;
sa1100_probe_map.phys = phys;
sa1100_probe_map.virt = (unsigned long)ioremap(phys, SZ_1M);
return nr;
/*
- * Retrieve the bankwidth from the MSC registers.
+ * Retrieve the buswidth from the MSC registers.
* We currently only implement CS0 and CS1 here.
*/
for (i = 0; i < nr; i++) {
+++ /dev/null
-/*
- * Handle mapping of the flash memory access routines on the SBC8240 board.
- *
- * Carolyn Smith, Tektronix, Inc.
- *
- * This code is GPLed
- *
- * $Id: sbc8240.c,v 1.4 2004/07/12 22:38:29 dwmw2 Exp $
- *
- */
-
-/*
- * The SBC8240 has 2 flash banks.
- * Bank 0 is a 512 KiB AMD AM29F040B; 8 x 64 KiB sectors.
- * It contains the U-Boot code (7 sectors) and the environment (1 sector).
- * Bank 1 is 4 x 1 MiB AMD AM29LV800BT; 15 x 64 KiB sectors, 1 x 32 KiB sector,
- * 2 x 8 KiB sectors, 1 x 16 KiB sectors.
- * Both parts are JEDEC compatible.
- */
-
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <asm/io.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/cfi.h>
-
-#ifdef CONFIG_MTD_PARTITIONS
-#include <linux/mtd/partitions.h>
-#endif
-
-#define DEBUG
-
-#ifdef DEBUG
-# define debugk(fmt,args...) printk(fmt ,##args)
-#else
-# define debugk(fmt,args...)
-#endif
-
-
-#define WINDOW_ADDR0 0xFFF00000 /* 512 KiB */
-#define WINDOW_SIZE0 0x00080000
-#define BUSWIDTH0 1
-
-#define WINDOW_ADDR1 0xFF000000 /* 4 MiB */
-#define WINDOW_SIZE1 0x00400000
-#define BUSWIDTH1 8
-
-#define MSG_PREFIX "sbc8240:" /* prefix for our printk()'s */
-#define MTDID "sbc8240-%d" /* for mtdparts= partitioning */
-
-
-static struct map_info sbc8240_map[2] = {
- {
- .name = "sbc8240 Flash Bank #0",
- .size = WINDOW_SIZE0,
- .bankwidth = BUSWIDTH0,
- },
- {
- .name = "sbc8240 Flash Bank #1",
- .size = WINDOW_SIZE1,
- .bankwidth = BUSWIDTH1,
- }
-};
-
-#define NUM_FLASH_BANKS (sizeof(sbc8240_map) / sizeof(struct map_info))
-
-/*
- * The following defines the partition layout of SBC8240 boards.
- *
- * See include/linux/mtd/partitions.h for definition of the
- * mtd_partition structure.
- *
- * The *_max_flash_size is the maximum possible mapped flash size
- * which is not necessarily the actual flash size. It must correspond
- * to the value specified in the mapping definition defined by the
- * "struct map_desc *_io_desc" for the corresponding machine.
- */
-
-#ifdef CONFIG_MTD_PARTITIONS
-
-static struct mtd_partition sbc8240_uboot_partitions [] = {
- /* Bank 0 */
- {
- .name = "U-boot", /* U-Boot Firmware */
- .offset = 0,
- .size = 0x00070000, /* 7 x 64 KiB sectors */
- .mask_flags = MTD_WRITEABLE, /* force read-only */
- },
- {
- .name = "environment", /* U-Boot environment */
- .offset = 0x00070000,
- .size = 0x00010000, /* 1 x 64 KiB sector */
- },
-};
-
-static struct mtd_partition sbc8240_fs_partitions [] = {
- {
- .name = "jffs", /* JFFS filesystem */
- .offset = 0,
- .size = 0x003C0000, /* 4 * 15 * 64KiB */
- },
- {
- .name = "tmp32",
- .offset = 0x003C0000,
- .size = 0x00020000, /* 4 * 32KiB */
- },
- {
- .name = "tmp8a",
- .offset = 0x003E0000,
- .size = 0x00008000, /* 4 * 8KiB */
- },
- {
- .name = "tmp8b",
- .offset = 0x003E8000,
- .size = 0x00008000, /* 4 * 8KiB */
- },
- {
- .name = "tmp16",
- .offset = 0x003F0000,
- .size = 0x00010000, /* 4 * 16KiB */
- }
-};
-
-#define NB_OF(x) (sizeof (x) / sizeof (x[0]))
-
-/* trivial struct to describe partition information */
-struct mtd_part_def
-{
- int nums;
- unsigned char *type;
- struct mtd_partition* mtd_part;
-};
-
-static struct mtd_info *sbc8240_mtd[NUM_FLASH_BANKS];
-static struct mtd_part_def sbc8240_part_banks[NUM_FLASH_BANKS];
-
-
-#endif /* CONFIG_MTD_PARTITIONS */
-
-
-int __init init_sbc8240_mtd (void)
-{
- static struct _cjs {
- u_long addr;
- u_long size;
- } pt[NUM_FLASH_BANKS] = {
- {
- .addr = WINDOW_ADDR0,
- .size = WINDOW_SIZE0
- },
- {
- .addr = WINDOW_ADDR1,
- .size = WINDOW_SIZE1
- },
- };
-
- int devicesfound = 0;
- int i;
-
- for (i = 0; i < NUM_FLASH_BANKS; i++) {
- printk (KERN_NOTICE MSG_PREFIX
- "Probing 0x%08lx at 0x%08lx\n", pt[i].size, pt[i].addr);
-
- sbc8240_map[i].map_priv_1 =
- (unsigned long) ioremap (pt[i].addr, pt[i].size);
- if (!sbc8240_map[i].map_priv_1) {
- printk (MSG_PREFIX "failed to ioremap\n");
- return -EIO;
- }
- simple_map_init(&sbc8240_mtd[i]);
-
- sbc8240_mtd[i] = do_map_probe("jedec_probe", &sbc8240_map[i]);
-
- if (sbc8240_mtd[i]) {
- sbc8240_mtd[i]->module = THIS_MODULE;
- devicesfound++;
- }
- }
-
- if (!devicesfound) {
- printk(KERN_NOTICE MSG_PREFIX
- "No suppported flash chips found!\n");
- return -ENXIO;
- }
-
-#ifdef CONFIG_MTD_PARTITIONS
- sbc8240_part_banks[0].mtd_part = sbc8240_uboot_partitions;
- sbc8240_part_banks[0].type = "static image";
- sbc8240_part_banks[0].nums = NB_OF(sbc8240_uboot_partitions);
- sbc8240_part_banks[1].mtd_part = sbc8240_fs_partitions;
- sbc8240_part_banks[1].type = "static file system";
- sbc8240_part_banks[1].nums = NB_OF(sbc8240_fs_partitions);
-
- for (i = 0; i < NUM_FLASH_BANKS; i++) {
-
- if (!sbc8240_mtd[i]) continue;
- if (sbc8240_part_banks[i].nums == 0) {
- printk (KERN_NOTICE MSG_PREFIX
- "No partition info available, registering whole device\n");
- add_mtd_device(sbc8240_mtd[i]);
- } else {
- printk (KERN_NOTICE MSG_PREFIX
- "Using %s partition definition\n", sbc8240_part_banks[i].mtd_part->name);
- add_mtd_partitions (sbc8240_mtd[i],
- sbc8240_part_banks[i].mtd_part,
- sbc8240_part_banks[i].nums);
- }
- }
-#else
- printk(KERN_NOTICE MSG_PREFIX
- "Registering %d flash banks at once\n", devicesfound);
-
- for (i = 0; i < devicesfound; i++) {
- add_mtd_device(sbc8240_mtd[i]);
- }
-#endif /* CONFIG_MTD_PARTITIONS */
-
- return devicesfound == 0 ? -ENXIO : 0;
-}
-
-static void __exit cleanup_sbc8240_mtd (void)
-{
- int i;
-
- for (i = 0; i < NUM_FLASH_BANKS; i++) {
- if (sbc8240_mtd[i]) {
- del_mtd_device (sbc8240_mtd[i]);
- map_destroy (sbc8240_mtd[i]);
- }
- if (sbc8240_map[i].map_priv_1) {
- iounmap ((void *) sbc8240_map[i].map_priv_1);
- sbc8240_map[i].map_priv_1 = 0;
- }
- }
-}
-
-module_init (init_sbc8240_mtd);
-module_exit (cleanup_sbc8240_mtd);
-
-MODULE_LICENSE ("GPL");
-MODULE_AUTHOR ("Carolyn Smith <carolyn.smith@tektronix.com>");
-MODULE_DESCRIPTION ("MTD map driver for SBC8240 boards");
-
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
- $Id: sbc_gxx.c,v 1.29 2004/07/12 22:38:29 dwmw2 Exp $
+ $Id: sbc_gxx.c,v 1.26 2003/05/26 08:50:36 dwmw2 Exp $
The SBC-MediaGX / SBC-GXx has up to 16 MiB of
Intel StrataFlash (28F320/28F640) in x8 mode.
}
-static map_word sbc_gxx_read8(struct map_info *map, unsigned long ofs)
+static __u8 sbc_gxx_read8(struct map_info *map, unsigned long ofs)
{
- map_word ret;
+ __u8 ret;
spin_lock(&sbc_gxx_spin);
sbc_gxx_page(map, ofs);
- ret.x[0] = readb(iomapadr + (ofs & WINDOW_MASK));
+ ret = readb(iomapadr + (ofs & WINDOW_MASK));
+ spin_unlock(&sbc_gxx_spin);
+ return ret;
+}
+
+static __u16 sbc_gxx_read16(struct map_info *map, unsigned long ofs)
+{
+ __u16 ret;
+ spin_lock(&sbc_gxx_spin);
+ sbc_gxx_page(map, ofs);
+ ret = readw(iomapadr + (ofs & WINDOW_MASK));
+ spin_unlock(&sbc_gxx_spin);
+ return ret;
+}
+
+static __u32 sbc_gxx_read32(struct map_info *map, unsigned long ofs)
+{
+ __u32 ret;
+ spin_lock(&sbc_gxx_spin);
+ sbc_gxx_page(map, ofs);
+ ret = readl(iomapadr + (ofs & WINDOW_MASK));
spin_unlock(&sbc_gxx_spin);
return ret;
}
}
}
-static void sbc_gxx_write8(struct map_info *map, map_word d, unsigned long adr)
+static void sbc_gxx_write8(struct map_info *map, __u8 d, unsigned long adr)
+{
+ spin_lock(&sbc_gxx_spin);
+ sbc_gxx_page(map, adr);
+ writeb(d, iomapadr + (adr & WINDOW_MASK));
+ spin_unlock(&sbc_gxx_spin);
+}
+
+static void sbc_gxx_write16(struct map_info *map, __u16 d, unsigned long adr)
+{
+ spin_lock(&sbc_gxx_spin);
+ sbc_gxx_page(map, adr);
+ writew(d, iomapadr + (adr & WINDOW_MASK));
+ spin_unlock(&sbc_gxx_spin);
+}
+
+static void sbc_gxx_write32(struct map_info *map, __u32 d, unsigned long adr)
{
spin_lock(&sbc_gxx_spin);
sbc_gxx_page(map, adr);
- writeb(d.x[0], iomapadr + (adr & WINDOW_MASK));
+ writel(d, iomapadr + (adr & WINDOW_MASK));
spin_unlock(&sbc_gxx_spin);
}
.size = MAX_SIZE_KiB*1024, /* this must be set to a maximum possible amount
of flash so the cfi probe routines find all
the chips */
- .bankwidth = 1,
- .read = sbc_gxx_read8,
+ .buswidth = 1,
+ .read8 = sbc_gxx_read8,
+ .read16 = sbc_gxx_read16,
+ .read32 = sbc_gxx_read32,
.copy_from = sbc_gxx_copy_from,
- .write = sbc_gxx_write8,
+ .write8 = sbc_gxx_write8,
+ .write16 = sbc_gxx_write16,
+ .write32 = sbc_gxx_write32,
.copy_to = sbc_gxx_copy_to
};
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*
- * $Id: sc520cdp.c,v 1.16 2004/07/12 21:59:45 dwmw2 Exp $
+ * $Id: sc520cdp.c,v 1.15 2003/05/21 12:45:20 dwmw2 Exp $
*
*
* The SC520CDP is an evaluation board for the Elan SC520 processor available
{
.name = "SC520CDP Flash Bank #0",
.size = WINDOW_SIZE_0,
- .bankwidth = 4,
+ .buswidth = 4,
.phys = WINDOW_ADDR_0
},
{
.name = "SC520CDP Flash Bank #1",
.size = WINDOW_SIZE_1,
- .bankwidth = 4,
+ .buswidth = 4,
.phys = WINDOW_ADDR_1
},
{
.name = "SC520CDP DIL Flash",
.size = WINDOW_SIZE_2,
- .bankwidth = 1,
+ .buswidth = 1,
.phys = WINDOW_ADDR_2
},
};
/*
* MTD map driver for BIOS Flash on Intel SCB2 boards
- * $Id: scb2_flash.c,v 1.8 2004/07/12 21:59:45 dwmw2 Exp $
+ * $Id: scb2_flash.c,v 1.6 2003/05/21 12:45:20 dwmw2 Exp $
* Copyright (C) 2002 Sun Microsystems, Inc.
* Tim Hockin <thockin@sun.com>
*
struct map_info scb2_map = {
.name = "SCB2 BIOS Flash",
.size = 0,
- .bankwidth = 1,
+ .buswidth = 1,
};
static int region_fail;
Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com>
- $Id: scx200_docflash.c,v 1.6 2004/07/12 21:59:45 dwmw2 Exp $
+ $Id: scx200_docflash.c,v 1.5 2003/05/21 12:45:20 dwmw2 Exp $
National Semiconductor SCx200 flash mapped with DOCCS
*/
scx200_docflash_map.size = size;
if (width == 8)
- scx200_docflash_map.bankwidth = 1;
+ scx200_docflash_map.buswidth = 1;
else
- scx200_docflash_map.bankwidth = 2;
+ scx200_docflash_map.buswidth = 2;
simple_map_init(&scx200_docflash_map);
/*
- * $Id: solutionengine.c,v 1.13 2004/07/12 21:59:45 dwmw2 Exp $
+ * $Id: solutionengine.c,v 1.10 2003/05/21 12:45:20 dwmw2 Exp $
*
* Flash and EPROM on Hitachi Solution Engine and similar boards.
*
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
#include <linux/config.h>
-#include <linux/errno.h>
+
static struct mtd_info *flash_mtd;
static struct mtd_info *eprom_mtd;
struct map_info soleng_eprom_map = {
.name = "Solution Engine EPROM",
.size = 0x400000,
- .bankwidth = 4,
+ .buswidth = 4,
};
struct map_info soleng_flash_map = {
.name = "Solution Engine FLASH",
.size = 0x400000,
- .bankwidth = 4,
+ .buswidth = 4,
};
static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
-/* $Id: sun_uflash.c,v 1.9 2004/07/12 21:59:45 dwmw2 Exp $
+/* $Id: sun_uflash.c,v 1.7 2003/05/20 20:59:32 dwmw2 Exp $
*
* sun_uflash - Driver implementation for user-programmable flash
* present on many Sun Microsystems SME boardsets.
struct map_info uflash_map_templ = {
.name = "SUNW,???-????",
.size = UFLASH_WINDOW_SIZE,
- .bankwidth = UFLASH_BUSWIDTH,
+ .buswidth = UFLASH_BUSWIDTH,
};
int uflash_devinit(struct linux_ebus_device* edev)
* Handle mapping of the flash memory access routines
* on TQM8xxL based devices.
*
- * $Id: tqm8xxl.c,v 1.11 2004/07/12 21:59:45 dwmw2 Exp $
+ * $Id: tqm8xxl.c,v 1.9 2003/06/23 11:48:18 dwmw2 Exp $
*
* based on rpxlite.c
*
sprintf(map_banks[idx]->name, "TQM8xxL%d", idx);
map_banks[idx]->size = flash_size;
- map_banks[idx]->bankwidth = 4;
+ map_banks[idx]->buswidth = 4;
simple_map_init(map_banks[idx]);
* tsunami_flash.c
*
* flash chip on alpha ds10...
- * $Id: tsunami_flash.c,v 1.9 2004/07/14 09:52:55 dwmw2 Exp $
+ * $Id: tsunami_flash.c,v 1.6 2003/05/21 15:15:08 dwmw2 Exp $
*/
#include <asm/io.h>
#include <asm/core_tsunami.h>
#define FLASH_DISABLE_BYTE 0x00
#define MAX_TIG_FLASH_SIZE (12*1024*1024)
-static inline map_word tsunami_flash_read8(struct map_info *map, unsigned long offset)
+static inline __u8 tsunami_flash_read8(struct map_info *map, unsigned long offset)
{
- map_word val;
- val.x[0] = tsunami_tig_readb(offset);
- return val;
+ return tsunami_tig_readb(offset);
}
-static void tsunami_flash_write8(struct map_info *map, map_word value, unsigned long offset)
+static void tsunami_flash_write8(struct map_info *map, __u8 value, unsigned long offset)
{
- tsunami_tig_writeb(value.x[0], offset);
+ tsunami_tig_writeb(value, offset);
}
static void tsunami_flash_copy_from(
.name = "flash chip on the Tsunami TIG bus",
.size = MAX_TIG_FLASH_SIZE,
.phys = NO_XIP;
- .bankwidth = 1,
- .read = tsunami_flash_read8,
+ .buswidth = 1,
+ .read8 = tsunami_flash_read8,
.copy_from = tsunami_flash_copy_from,
- .write = tsunami_flash_write8,
+ .write8 = tsunami_flash_write8,
.copy_to = tsunami_flash_copy_to,
};
static int __init init_tsunami_flash(void)
{
- static const char *rom_probe_types[] = { "cfi_probe", "jedec_probe", "map_rom", NULL };
+ static const char *rom_probe_types[] = { "cfi_probe", "jedec_probe", "map_rom", 0 };
char **type;
tsunami_tig_writeb(FLASH_ENABLE_BYTE, FLASH_ENABLE_PORT);
*
* (C) Copyright 2002, Greg Ungerer (gerg@snapgear.com)
*
- * $Id: uclinux.c,v 1.7 2004/07/12 21:59:45 dwmw2 Exp $
+ * $Id: uclinux.c,v 1.5 2003/05/20 20:59:32 dwmw2 Exp $
*/
/****************************************************************************/
mapp = &uclinux_ram_map;
mapp->phys = (unsigned long) &_ebss;
mapp->size = PAGE_ALIGN(*((unsigned long *)((&_ebss) + 8)));
- mapp->bankwidth = 4;
+ mapp->buswidth = 4;
printk("uclinux[mtd]: RAM probe address=0x%x size=0x%x\n",
(int) mapp->map_priv_2, (int) mapp->size);
-// $Id: vmax301.c,v 1.30 2004/07/12 22:38:29 dwmw2 Exp $
+// $Id: vmax301.c,v 1.28 2003/05/21 15:15:08 dwmw2 Exp $
/* ######################################################################
Tempustech VMAX SBC301 MTD Driver.
__vmax301_page(map, page);
}
-static map_word vmax301_read8(struct map_info *map, unsigned long ofs)
+static __u8 vmax301_read8(struct map_info *map, unsigned long ofs)
{
- map_word ret;
+ __u8 ret;
spin_lock(&vmax301_spin);
vmax301_page(map, ofs);
- ret.x[0] = readb(map->map_priv_2 + (ofs & WINDOW_MASK));
+ ret = readb(map->map_priv_2 + (ofs & WINDOW_MASK));
+ spin_unlock(&vmax301_spin);
+ return ret;
+}
+
+static __u16 vmax301_read16(struct map_info *map, unsigned long ofs)
+{
+ __u16 ret;
+ spin_lock(&vmax301_spin);
+ vmax301_page(map, ofs);
+ ret = readw(map->map_priv_2 + (ofs & WINDOW_MASK));
+ spin_unlock(&vmax301_spin);
+ return ret;
+}
+
+static __u32 vmax301_read32(struct map_info *map, unsigned long ofs)
+{
+ __u32 ret;
+ spin_lock(&vmax301_spin);
+ vmax301_page(map, ofs);
+ ret = readl(map->map_priv_2 + (ofs & WINDOW_MASK));
spin_unlock(&vmax301_spin);
return ret;
}
}
}
-static void vmax301_write8(struct map_info *map, map_word d, unsigned long adr)
+static void vmax301_write8(struct map_info *map, __u8 d, unsigned long adr)
+{
+ spin_lock(&vmax301_spin);
+ vmax301_page(map, adr);
+ writeb(d, map->map_priv_2 + (adr & WINDOW_MASK));
+ spin_unlock(&vmax301_spin);
+}
+
+static void vmax301_write16(struct map_info *map, __u16 d, unsigned long adr)
+{
+ spin_lock(&vmax301_spin);
+ vmax301_page(map, adr);
+ writew(d, map->map_priv_2 + (adr & WINDOW_MASK));
+ spin_unlock(&vmax301_spin);
+}
+
+static void vmax301_write32(struct map_info *map, __u32 d, unsigned long adr)
{
spin_lock(&vmax301_spin);
vmax301_page(map, adr);
- writeb(d.x[0], map->map_priv_2 + (adr & WINDOW_MASK));
+ writel(d, map->map_priv_2 + (adr & WINDOW_MASK));
spin_unlock(&vmax301_spin);
}
.name = "VMAX301 Internal Flash",
.phys = NO_XIP,
.size = 3*2*1024*1024,
- .bankwidth = 1,
- .read = vmax301_read8,
+ .buswidth = 1,
+ .read8 = vmax301_read8,
+ .read16 = vmax301_read16,
+ .read32 = vmax301_read32,
.copy_from = vmax301_copy_from,
- .write = vmax301_write8,
+ .write8 = vmax301_write8,
+ .write16 = vmax301_write16,
+ .write32 = vmax301_write32,
.copy_to = vmax301_copy_to,
.map_priv_1 = WINDOW_START + WINDOW_LENGTH,
.map_priv_2 = 0xFFFFFFFF
.name = "VMAX301 Socket",
.phys = NO_XIP,
.size = 0,
- .bankwidth = 1,
- .read = vmax301_read8,
+ .buswidth = 1,
+ .read8 = vmax301_read8,
+ .read16 = vmax301_read16,
+ .read32 = vmax301_read32,
.copy_from = vmax301_copy_from,
- .write = vmax301_write8,
+ .write8 = vmax301_write8,
+ .write16 = vmax301_write16,
+ .write32 = vmax301_write32,
.copy_to = vmax301_copy_to,
.map_priv_1 = WINDOW_START + (3*WINDOW_LENGTH),
.map_priv_2 = 0xFFFFFFFF
/*
- * $Id: wr_sbc82xx_flash.c,v 1.5 2004/07/15 14:52:02 dwmw2 Exp $
+ * $Id: wr_sbc82xx_flash.c,v 1.1 2004/06/07 10:21:32 dwmw2 Exp $
*
* Map for flash chips on Wind River PowerQUICC II SBC82xx board.
*
static struct mtd_partition bigflash_parts[] = {
{
.name = "bootloader",
- .size = 0x00100000,
+ .size = 0x80000,
.offset = 0,
}, {
.name = "file system",
- .size = 0x01f00000,
- .offset = MTDPART_OFS_APPEND,
- }, {
- .name = "boot config",
- .size = 0x00100000,
- .offset = MTDPART_OFS_APPEND,
- }, {
- .name = "space",
- .size = 0x01f00000,
+ .size = MTDPART_SIZ_FULL,
.offset = MTDPART_OFS_APPEND,
}
};
static const char *part_probes[] __initdata = {"cmdlinepart", "RedBoot", NULL};
-#define init_sbc82xx_one_flash(map, br, or) \
-do { \
- (map).phys = (br & 1) ? (br & 0xffff8000) : 0; \
- (map).size = (br & 1) ? (~(or & 0xffff8000) + 1) : 0; \
- switch (br & 0x00001800) { \
- case 0x00000000: \
- case 0x00000800: (map).bankwidth = 1; break; \
- case 0x00001000: (map).bankwidth = 2; break; \
- case 0x00001800: (map).bankwidth = 4; break; \
- } \
-} while (0);
-
int __init init_sbc82xx_flash(void)
{
- volatile memctl_cpm2_t *mc = &cpm2_immr->im_memctl;
+ volatile memctl_cpm2_t *mc = &cpm2_immr->im_memctl;
int bigflash;
int i;
-#ifdef CONFIG_SBC8560
- mc = ioremap(0xff700000 + 0x5000, sizeof(memctl_cpm2_t));
-#else
- mc = &cpm2_immr->im_memctl;
-#endif
-
- bigflash = 1;
- if ((mc->memc_br0 & 0x00001800) == 0x00001800)
+ /* First, register the boot flash, whichever we're booting from */
+ if ((mc->memc_br0 & 0x00001800) == 0x00001800) {
bigflash = 0;
+ } else if ((mc->memc_br0 & 0x00001800) == 0x00000800) {
+ bigflash = 1;
+ } else {
+ printk(KERN_WARNING "Bus Controller register BR0 is %08x. Cannot determine flash configuration\n", mc->memc_br0);
+ return 1;
+ }
+
+ /* Set parameters for the big flash chip (CS6 or CS0) */
+ sbc82xx_flash_map[bigflash].buswidth = 4;
+ sbc82xx_flash_map[bigflash].size = 0x4000000;
+
+ /* Set parameters for the small flash chip (CS0 or CS6) */
+ sbc82xx_flash_map[!bigflash].buswidth = 1;
+ sbc82xx_flash_map[!bigflash].size = 0x200000;
- init_sbc82xx_one_flash(sbc82xx_flash_map[0], mc->memc_br0, mc->memc_or0);
- init_sbc82xx_one_flash(sbc82xx_flash_map[1], mc->memc_br6, mc->memc_or6);
- init_sbc82xx_one_flash(sbc82xx_flash_map[2], mc->memc_br1, mc->memc_or1);
+ /* Set parameters for the user flash chip (CS1) */
+ sbc82xx_flash_map[2].buswidth = 4;
+ sbc82xx_flash_map[2].size = 0x4000000;
-#ifdef CONFIG_SBC8560
- iounmap((void *) mc);
-#endif
+ sbc82xx_flash_map[0].phys = mc->memc_br0 & 0xffff8000;
+ sbc82xx_flash_map[1].phys = mc->memc_br6 & 0xffff8000;
+ sbc82xx_flash_map[2].phys = mc->memc_br1 & 0xffff8000;
for (i=0; i<3; i++) {
int8_t flashcs[3] = { 0, 6, 1 };
int nr_parts;
printk(KERN_NOTICE "PowerQUICC II %s (%ld MiB on CS%d",
- sbc82xx_flash_map[i].name,
- (sbc82xx_flash_map[i].size >> 20),
- flashcs[i]);
+ sbc82xx_flash_map[i].name, sbc82xx_flash_map[i].size >> 20, flashcs[i]);
if (!sbc82xx_flash_map[i].phys) {
/* We know it can't be at zero. */
printk("): disabled by bootloader.\n");
/*
- * $Id: mtd_blkdevs.c,v 1.22 2004/07/12 12:35:28 dwmw2 Exp $
+ * $Id: mtd_blkdevs.c,v 1.16 2003/06/23 13:34:43 dwmw2 Exp $
*
* (C) 2003 David Woodhouse <dwmw2@infradead.org>
*
snprintf(gd->devfs_name, sizeof(gd->devfs_name),
"%s/%c", tr->name, (tr->part_bits?'a':'0') + new->devnum);
- /* 2.5 has capacity in units of 512 bytes while still
- having BLOCK_SIZE_BITS set to 10. Just to keep us amused. */
- set_capacity(gd, (new->size * new->blksize) >> 9);
-
+ set_capacity(gd, new->size);
gd->private_data = new;
new->blkcore_priv = gd;
gd->queue = tr->blkcore_priv->rq;
/*
* Direct MTD block device access
*
- * $Id: mtdblock.c,v 1.64 2003/10/04 17:14:14 dwmw2 Exp $
+ * $Id: mtdblock.c,v 1.63 2003/06/23 12:00:08 dwmw2 Exp $
*
* (C) 2000-2003 Nicolas Pitre <nico@cam.org>
* (C) 1999-2003 David Woodhouse <dwmw2@infradead.org>
/*
- * $Id: mtdchar.c,v 1.64 2004/08/09 13:59:46 dwmw2 Exp $
+ * $Id: mtdchar.c,v 1.54 2003/05/21 10:50:43 dwmw2 Exp $
*
* Character-device access to raw MTD devices.
*
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
-#include <linux/mtd/compatmac.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/fs.h>
#ifdef CONFIG_DEVFS_FS
#include <linux/devfs_fs_kernel.h>
-
-static void mtd_notify_add(struct mtd_info* mtd)
-{
- if (!mtd)
- return;
-
- devfs_mk_cdev(MKDEV(MTD_CHAR_MAJOR, mtd->index*2),
- S_IFCHR | S_IRUGO | S_IWUGO, "mtd/%d", mtd->index);
-
- devfs_mk_cdev(MKDEV(MTD_CHAR_MAJOR, mtd->index*2+1),
- S_IFCHR | S_IRUGO, "mtd/%dro", mtd->index);
-}
-
-static void mtd_notify_remove(struct mtd_info* mtd)
-{
- if (!mtd)
- return;
- devfs_remove("mtd/%d", mtd->index);
- devfs_remove("mtd/%dro", mtd->index);
-}
+static void mtd_notify_add(struct mtd_info* mtd);
+static void mtd_notify_remove(struct mtd_info* mtd);
static struct mtd_notifier notifier = {
.add = mtd_notify_add,
.remove = mtd_notify_remove,
};
-static inline void mtdchar_devfs_init(void)
-{
- devfs_mk_dir("mtd");
- register_mtd_user(¬ifier);
-}
-
-static inline void mtdchar_devfs_exit(void)
-{
- unregister_mtd_user(¬ifier);
- devfs_remove("mtd");
-}
-#else /* !DEVFS */
-#define mtdchar_devfs_init() do { } while(0)
-#define mtdchar_devfs_exit() do { } while(0)
#endif
static loff_t mtd_lseek (struct file *file, loff_t offset, int orig)
IOCTL calls for getting device parameters.
======================================================================*/
-static void mtdchar_erase_callback (struct erase_info *instr)
+static void mtd_erase_callback (struct erase_info *instr)
{
wake_up((wait_queue_head_t *)instr->priv);
}
memset (erase,0,sizeof(struct erase_info));
if (copy_from_user(&erase->addr, argp,
- sizeof(struct erase_info_user))) {
+ 2 * sizeof(u_long))) {
kfree(erase);
return -EFAULT;
}
erase->mtd = mtd;
- erase->callback = mtdchar_erase_callback;
+ erase->callback = mtd_erase_callback;
erase->priv = (unsigned long)&waitq;
/*
ret = (mtd->write_oob)(mtd, buf.start, buf.length, &retlen, databuf);
- if (copy_to_user(argp + sizeof(uint32_t), &retlen, sizeof(uint32_t)))
+ if (copy_to_user(argp + sizeof(u_int32_t), &retlen, sizeof(u_int32_t)))
ret = -EFAULT;
kfree(databuf);
ret = (mtd->read_oob)(mtd, buf.start, buf.length, &retlen, databuf);
- if (put_user(retlen, (uint32_t __user *)argp))
+ if (copy_to_user(argp + sizeof(u_int32_t), &retlen, sizeof(u_int32_t)))
ret = -EFAULT;
else if (retlen && copy_to_user(buf.ptr, databuf, retlen))
ret = -EFAULT;
case MEMLOCK:
{
- struct erase_info_user info;
+ unsigned long adrs[2];
- if (copy_from_user(&info, argp, sizeof(info)))
+ if (copy_from_user(adrs, argp, 2* sizeof(unsigned long)))
return -EFAULT;
if (!mtd->lock)
ret = -EOPNOTSUPP;
else
- ret = mtd->lock(mtd, info.start, info.length);
+ ret = mtd->lock(mtd, adrs[0], adrs[1]);
break;
}
case MEMUNLOCK:
{
- struct erase_info_user info;
+ unsigned long adrs[2];
- if (copy_from_user(&info, argp, sizeof(info)))
+ if (copy_from_user(adrs, argp, 2* sizeof(unsigned long)))
return -EFAULT;
if (!mtd->unlock)
ret = -EOPNOTSUPP;
else
- ret = mtd->unlock(mtd, info.start, info.length);
+ ret = mtd->unlock(mtd, adrs[0], adrs[1]);
break;
}
return -EFAULT;
break;
}
-
- case MEMGETOOBSEL:
- {
- if (copy_to_user(argp, &(mtd->oobinfo), sizeof(struct nand_oobinfo)))
- return -EFAULT;
- break;
- }
-
- case MEMGETBADBLOCK:
- {
- loff_t offs;
- if (copy_from_user(&offs, argp, sizeof(loff_t)))
- return -EFAULT;
- if (!mtd->block_isbad)
- ret = -EOPNOTSUPP;
- else
- return mtd->block_isbad(mtd, offs);
- break;
- }
-
- case MEMSETBADBLOCK:
- {
- loff_t offs;
-
- if (copy_from_user(&offs, argp, sizeof(loff_t)))
- return -EFAULT;
- if (!mtd->block_markbad)
- ret = -EOPNOTSUPP;
- else
- return mtd->block_markbad(mtd, offs);
- break;
- }
-
default:
+ DEBUG(MTD_DEBUG_LEVEL0, "Invalid ioctl %x (MEMGETINFO = %x)\n", cmd, MEMGETINFO);
ret = -ENOTTY;
}
.release = mtd_close,
};
+
+#ifdef CONFIG_DEVFS_FS
+/* Notification that a new device has been added. Create the devfs entry for
+ * it. */
+
+static void mtd_notify_add(struct mtd_info* mtd)
+{
+ if (!mtd)
+ return;
+ devfs_mk_cdev(MKDEV(MTD_CHAR_MAJOR, mtd->index*2),
+ S_IFCHR | S_IRUGO | S_IWUGO, "mtd/%d", mtd->index);
+ devfs_mk_cdev(MKDEV(MTD_CHAR_MAJOR, mtd->index*2+1),
+ S_IFCHR | S_IRUGO | S_IWUGO, "mtd/%dro", mtd->index);
+}
+
+static void mtd_notify_remove(struct mtd_info* mtd)
+{
+ if (!mtd)
+ return;
+ devfs_remove("mtd/%d", mtd->index);
+ devfs_remove("mtd/%dro", mtd->index);
+}
+#endif
+
static int __init init_mtdchar(void)
{
if (register_chrdev(MTD_CHAR_MAJOR, "mtd", &mtd_fops)) {
return -EAGAIN;
}
- mtdchar_devfs_init();
+#ifdef CONFIG_DEVFS_FS
+ devfs_mk_dir("mtd");
+
+ register_mtd_user(¬ifier);
+#endif
return 0;
}
static void __exit cleanup_mtdchar(void)
{
- mtdchar_devfs_exit();
+#ifdef CONFIG_DEVFS_FS
+ unregister_mtd_user(¬ifier);
+ devfs_remove("mtd");
+#endif
unregister_chrdev(MTD_CHAR_MAJOR, "mtd");
}
*
* This code is GPL
*
- * $Id: mtdconcat.c,v 1.9 2004/06/30 15:17:41 dbrown Exp $
+ * $Id: mtdconcat.c,v 1.8 2003/06/30 11:01:26 dwmw2 Exp $
*/
#include <linux/module.h>
struct mtd_concat *concat = CONCAT(mtd);
struct mtd_info *subdev;
int i, err;
- u_int32_t length, offset = 0;
+ u_int32_t length;
struct erase_info *erase;
if (!(mtd->flags & MTD_WRITEABLE))
return -EINVAL;
}
- instr->fail_addr = 0xffffffff;
-
/* make a local copy of instr to avoid modifying the caller's struct */
erase = kmalloc(sizeof (struct erase_info), GFP_KERNEL);
*/
for (i = 0; i < concat->num_subdev; i++) {
subdev = concat->subdev[i];
- if (subdev->size <= erase->addr) {
+ if (subdev->size <= erase->addr)
erase->addr -= subdev->size;
- offset += subdev->size;
- } else {
+ else
break;
- }
}
/* must never happen since size limit has been verified above */
* block alignment has been checked above */
if (err == -EINVAL)
BUG();
- if (erase->fail_addr != 0xffffffff)
- instr->fail_addr = erase->fail_addr + offset;
break;
}
/*
* current subdevice, i.e. at offset zero.
*/
erase->addr = 0;
- offset += subdev->size;
}
- instr->state = erase->state;
kfree(erase);
if (err)
return err;
+ instr->state = MTD_ERASE_DONE;
if (instr->callback)
instr->callback(instr);
return 0;
/*
- * $Id: mtdcore.c,v 1.43 2004/07/23 15:20:46 dwmw2 Exp $
+ * $Id: mtdcore.c,v 1.39 2003/05/21 15:15:03 dwmw2 Exp $
*
* Core registration and callback routines for MTD
* drivers and users.
*
*/
+#include <linux/version.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
/* Support for /proc/mtd */
#ifdef CONFIG_PROC_FS
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
static struct proc_dir_entry *proc_mtd;
+#endif
static inline int mtd_proc_info (char *buf, int i)
{
this->erasesize, this->name);
}
-static int mtd_read_proc (char *page, char **start, off_t off, int count,
- int *eof, void *data_unused)
+static int mtd_read_proc ( char *page, char **start, off_t off,int count
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
+ ,int *eof, void *data_unused
+#else
+ ,int unused
+#endif
+ )
{
int len, l, i;
off_t begin = 0;
}
}
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
*eof = 1;
+#endif
done:
up(&mtd_table_mutex);
return ((count < begin+len-off) ? count : begin+len-off);
}
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,2,0)
+struct proc_dir_entry mtd_proc_entry = {
+ 0, /* low_ino: the inode -- dynamic */
+ 3, "mtd", /* len of name and name */
+ S_IFREG | S_IRUGO, /* mode */
+ 1, 0, 0, /* nlinks, owner, group */
+ 0, NULL, /* size - unused; operations -- use default */
+ &mtd_read_proc, /* function used to read data */
+ /* nothing more */
+ };
+#endif
+
#endif /* CONFIG_PROC_FS */
/*====================================================================*/
int __init init_mtd(void)
{
#ifdef CONFIG_PROC_FS
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
if ((proc_mtd = create_proc_entry( "mtd", 0, NULL )))
- proc_mtd->read_proc = mtd_read_proc;
+ proc_mtd->read_proc = mtd_read_proc;
+#else
+ proc_register_dynamic(&proc_root,&mtd_proc_entry);
+#endif
+#endif
+
+#if LINUX_VERSION_CODE < 0x20212
+ init_mtd_devices();
#endif
#ifdef CONFIG_PM
#endif
#ifdef CONFIG_PROC_FS
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
if (proc_mtd)
- remove_proc_entry( "mtd", NULL);
+ remove_proc_entry( "mtd", NULL);
+#else
+ proc_unregister(&proc_root,mtd_proc_entry.low_ino);
+#endif
#endif
}
*
* This code is GPL
*
- * $Id: mtdpart.c,v 1.50 2004/08/10 16:18:34 dwmw2 Exp $
+ * $Id: mtdpart.c,v 1.41 2003/06/18 14:53:02 dwmw2 Exp $
*
* 02-21-2002 Thomas Gleixner <gleixner@autronix.de>
* added support for read_oob, write_oob
static int part_erase (struct mtd_info *mtd, struct erase_info *instr)
{
struct mtd_part *part = PART(mtd);
- int ret;
if (!(mtd->flags & MTD_WRITEABLE))
return -EROFS;
if (instr->addr >= mtd->size)
return -EINVAL;
instr->addr += part->offset;
- ret = part->master->erase(part->master, instr);
- return ret;
-}
-
-void mtd_erase_callback(struct erase_info *instr)
-{
- if (instr->mtd->erase == part_erase) {
- struct mtd_part *part = PART(instr->mtd);
-
- if (instr->fail_addr != 0xffffffff)
- instr->fail_addr -= part->offset;
- instr->addr -= part->offset;
- }
- if (instr->callback)
- instr->callback(instr);
+ return part->master->erase(part->master, instr);
}
-EXPORT_SYMBOL_GPL(mtd_erase_callback);
static int part_lock (struct mtd_info *mtd, loff_t ofs, size_t len)
{
part->master->resume(part->master);
}
-static int part_block_isbad (struct mtd_info *mtd, loff_t ofs)
-{
- struct mtd_part *part = PART(mtd);
- if (ofs >= mtd->size)
- return -EINVAL;
- ofs += part->offset;
- return part->master->block_isbad(part->master, ofs);
-}
-
-static int part_block_markbad (struct mtd_info *mtd, loff_t ofs)
-{
- struct mtd_part *part = PART(mtd);
- if (!(mtd->flags & MTD_WRITEABLE))
- return -EROFS;
- if (ofs >= mtd->size)
- return -EINVAL;
- ofs += part->offset;
- return part->master->block_markbad(part->master, ofs);
-}
-
/*
* This function unregisters and destroy all slave MTD objects which are
* attached to the given master MTD object.
*/
int add_mtd_partitions(struct mtd_info *master,
- const struct mtd_partition *parts,
+ struct mtd_partition *parts,
int nbparts)
{
struct mtd_part *slave;
slave->mtd.lock = part_lock;
if (master->unlock)
slave->mtd.unlock = part_unlock;
- if (master->block_isbad)
- slave->mtd.block_isbad = part_block_isbad;
- if (master->block_markbad)
- slave->mtd.block_markbad = part_block_markbad;
slave->mtd.erase = part_erase;
slave->master = master;
slave->offset = parts[i].offset;
parts[i].name);
}
- /* copy oobinfo from master */
- memcpy(&slave->mtd.oobinfo, &master->oobinfo, sizeof(slave->mtd.oobinfo));
-
if(parts[i].mtdp)
{ /* store the object pointer (caller may or may not register it */
*parts[i].mtdp = &slave->mtd;
# drivers/mtd/nand/Kconfig
-# $Id: Kconfig,v 1.17 2004/08/10 14:24:07 dwmw2 Exp $
+# $Id: Kconfig,v 1.4 2003/05/28 10:04:23 dwmw2 Exp $
menu "NAND Flash Device Drivers"
depends on MTD!=n
depends on MTD
help
This enables support for accessing all type of NAND flash
- devices. For further information see
+ devices with an 8-bit data bus interface. For further information see
<http://www.linux-mtd.infradead.org/tech/nand.html>.
config MTD_NAND_VERIFY_WRITE
help
If you had to ask, you don't have one. Say 'N'.
-config MTD_NAND_TOTO
- tristate "NAND Flash device on TOTO board"
- depends on ARM && ARCH_OMAP && MTD_NAND
- help
- Support for NAND flash on Texas Instruments Toto platform.
-
config MTD_NAND_IDS
tristate
default y if MTD_NAND = y || MTD_DOC2000 = y || MTD_DOC2001 = y || MTD_DOC2001PLUS = y
default m if MTD_NAND = m || MTD_DOC2000 = m || MTD_DOC2001 = m || MTD_DOC2001PLUS = m
-
-config MTD_NAND_TX4925NDFMC
- tristate "SmartMedia Card on Toshiba RBTX4925 reference board"
- depends on TOSHIBA_RBTX4925 && MTD_NAND && TOSHIBA_RBTX4925_MPLEX_NAND
- help
- This enables the driver for the NAND flash device found on the
- Toshiba RBTX4925 reference board, which is a SmartMediaCard.
-
-config MTD_NAND_TX4938NDFMC
- tristate "NAND Flash device on Toshiba RBTX4938 reference board"
- depends on TOSHIBA_RBTX4938 && MTD_NAND && TOSHIBA_RBTX4938_MPLEX_NAND
- help
- This enables the driver for the NAND flash device found on the
- Toshiba RBTX4938 reference board.
-
-config MTD_NAND_AU1550
- tristate "Au1550 NAND support"
- depends on SOC_AU1550 && MTD_NAND
- help
- This enables the driver for the NAND flash controller on the
- AMD/Alchemy 1550 SOC.
-
-config MTD_NAND_PPCHAMELEONEVB
- tristate "NAND Flash device on PPChameleonEVB board"
- depends on PPCHAMELEONEVB && MTD_NAND
- help
- This enables the NAND flash driver on the PPChameleon EVB Board.
-
-config MTD_NAND_DISKONCHIP
- tristate "DiskOnChip 2000, Millennium and Millennium Plus (NAND reimplementation) (EXPERIMENTAL)"
- depends on MTD_NAND && EXPERIMENTAL
- help
- This is a reimplementation of M-Systems DiskOnChip 2000,
- Millennium and Millennium Plus as a standard NAND device driver,
- as opposed to the earlier self-contained MTD device drivers.
- This should enable, among other things, proper JFFS2 operation on
- these devices.
-
-config MTD_NAND_DISKONCHIP_PROBE_ADVANCED
- bool "Advanced detection options for DiskOnChip"
- depends on MTD_NAND_DISKONCHIP
- help
- This option allows you to specify nonstandard address at which to
- probe for a DiskOnChip, or to change the detection options. You
- are unlikely to need any of this unless you are using LinuxBIOS.
- Say 'N'.
-
-config MTD_NAND_DISKONCHIP_PROBE_ADDRESS
- hex "Physical address of DiskOnChip" if MTD_NAND_DISKONCHIP_PROBE_ADVANCED
- depends on MTD_NAND_DISKONCHIP
- default "0"
- ---help---
- By default, the probe for DiskOnChip devices will look for a
- DiskOnChip at every multiple of 0x2000 between 0xC8000 and 0xEE000.
- This option allows you to specify a single address at which to probe
- for the device, which is useful if you have other devices in that
- range which get upset when they are probed.
-
- (Note that on PowerPC, the normal probe will only check at
- 0xE4000000.)
-
- Normally, you should leave this set to zero, to allow the probe at
- the normal addresses.
-
-config MTD_NAND_DISKONCHIP_PROBE_HIGH
- bool "Probe high addresses"
- depends on MTD_NAND_DISKONCHIP_PROBE_ADVANCED
- help
- By default, the probe for DiskOnChip devices will look for a
- DiskOnChip at every multiple of 0x2000 between 0xC8000 and 0xEE000.
- This option changes to make it probe between 0xFFFC8000 and
- 0xFFFEE000. Unless you are using LinuxBIOS, this is unlikely to be
- useful to you. Say 'N'.
-
-config MTD_NAND_DISKONCHIP_BBTWRITE
- bool "Allow BBT writes on DiskOnChip Millennium and 2000TSOP"
- depends on MTD_NAND_DISKONCHIP
- help
- On DiskOnChip devices shipped with the INFTL filesystem (Millennium
- and 2000 TSOP/Alon), Linux reserves some space at the end of the
- device for the Bad Block Table (BBT). If you have existing INFTL
- data on your device (created by non-Linux tools such as M-Systems'
- DOS drivers), your data might overlap the area Linux wants to use for
- the BBT. If this is a concern for you, leave this option disabled and
- Linux will not write BBT data into this area.
- The downside of leaving this option disabled is that if bad blocks
- are detected by Linux, they will not be recorded in the BBT, which
- could cause future problems.
- Once you enable this option, new filesystems (INFTL or others, created
- in Linux or other operating systems) will not use the reserved area.
- The only reason not to enable this option is to prevent damage to
- preexisting filesystems.
- Even if you leave this disabled, you can enable BBT writes at module
- load time (assuming you build diskonchip as a module) with the module
- parameter "inftl_bbt_write=1".
+
endmenu
+
#
# linux/drivers/nand/Makefile
#
-# $Id: Makefile.common,v 1.9 2004/07/12 16:07:31 dwmw2 Exp $
+# $Id: Makefile.common,v 1.2 2003/05/28 11:38:54 dwmw2 Exp $
-obj-$(CONFIG_MTD_NAND) += nand.o nand_ecc.o
-obj-$(CONFIG_MTD_NAND_IDS) += nand_ids.o
-
-obj-$(CONFIG_MTD_NAND_SPIA) += spia.o
-obj-$(CONFIG_MTD_NAND_TOTO) += toto.o
-obj-$(CONFIG_MTD_NAND_AUTCPU12) += autcpu12.o
-obj-$(CONFIG_MTD_NAND_EDB7312) += edb7312.o
-obj-$(CONFIG_MTD_NAND_TX4925NDFMC) += tx4925ndfmc.o
-obj-$(CONFIG_MTD_NAND_TX4938NDFMC) += tx4938ndfmc.o
-obj-$(CONFIG_MTD_NAND_AU1550) += au1550nd.o
-obj-$(CONFIG_MTD_NAND_PPCHAMELEONEVB) += ppchameleonevb.o
-obj-$(CONFIG_MTD_NAND_DISKONCHIP) += diskonchip.o
-
-nand-objs = nand_base.o nand_bbt.o
+obj-$(CONFIG_MTD_NAND) += nand.o nand_ecc.o
+obj-$(CONFIG_MTD_NAND_SPIA) += spia.o
+obj-$(CONFIG_MTD_NAND_AUTCPU12) += autcpu12.o
+obj-$(CONFIG_MTD_NAND_EDB7312) += edb7312.o
+obj-$(CONFIG_MTD_NAND_IDS) += nand_ids.o
+++ /dev/null
-/*
- * drivers/mtd/nand/au1550nd.c
- *
- * Copyright (C) 2004 Embedded Edge, LLC
- *
- * $Id: au1550nd.c,v 1.5 2004/05/17 07:19:35 ppopov Exp $
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/mtd/partitions.h>
-#include <asm/io.h>
-#include <asm/au1000.h>
-#ifdef CONFIG_MIPS_PB1550
-#include <asm/pb1550.h>
-#endif
-#ifdef CONFIG_MIPS_DB1550
-#include <asm/db1x00.h>
-#endif
-
-
-/*
- * MTD structure for NAND controller
- */
-static struct mtd_info *au1550_mtd = NULL;
-static volatile u32 p_nand;
-static int nand_width = 1; /* default, only x8 supported for now */
-
-/* Internal buffers. Page buffer and oob buffer for one block*/
-static u_char data_buf[512 + 16];
-static u_char oob_buf[16 * 32];
-
-/*
- * Define partitions for flash device
- */
-const static struct mtd_partition partition_info[] = {
-#ifdef CONFIG_MIPS_PB1550
-#define NUM_PARTITIONS 2
- {
- .name = "Pb1550 NAND FS 0",
- .offset = 0,
- .size = 8*1024*1024
- },
- {
- .name = "Pb1550 NAND FS 1",
- .offset = MTDPART_OFS_APPEND,
- .size = MTDPART_SIZ_FULL
- }
-#endif
-#ifdef CONFIG_MIPS_DB1550
-#define NUM_PARTITIONS 2
- {
- .name = "Db1550 NAND FS 0",
- .offset = 0,
- .size = 8*1024*1024
- },
- {
- .name = "Db1550 NAND FS 1",
- .offset = MTDPART_OFS_APPEND,
- .size = MTDPART_SIZ_FULL
- }
-#endif
-};
-
-static inline void write_cmd_reg(u8 cmd)
-{
- if (nand_width)
- *((volatile u8 *)(p_nand + MEM_STNAND_CMD)) = cmd;
- else
- *((volatile u16 *)(p_nand + MEM_STNAND_CMD)) = cmd;
- au_sync();
-}
-
-static inline void write_addr_reg(u8 addr)
-{
- if (nand_width)
- *((volatile u8 *)(p_nand + MEM_STNAND_ADDR)) = addr;
- else
- *((volatile u16 *)(p_nand + MEM_STNAND_ADDR)) = addr;
- au_sync();
-}
-
-static inline void write_data_reg(u8 data)
-{
- if (nand_width)
- *((volatile u8 *)(p_nand + MEM_STNAND_DATA)) = data;
- else
- *((volatile u16 *)(p_nand + MEM_STNAND_DATA)) = data;
- au_sync();
-}
-
-static inline u32 read_data_reg(void)
-{
- u32 data;
- if (nand_width) {
- data = *((volatile u8 *)(p_nand + MEM_STNAND_DATA));
- au_sync();
- }
- else {
- data = *((volatile u16 *)(p_nand + MEM_STNAND_DATA));
- au_sync();
- }
- return data;
-}
-
-void au1550_hwcontrol(struct mtd_info *mtd, int cmd)
-{
-}
-
-int au1550_device_ready(struct mtd_info *mtd)
-{
- int ready;
- ready = (au_readl(MEM_STSTAT) & 0x1) ? 1 : 0;
- return ready;
-}
-
-static u_char au1550_nand_read_byte(struct mtd_info *mtd)
-{
- u_char ret;
- ret = read_data_reg();
- return ret;
-}
-
-static void au1550_nand_write_byte(struct mtd_info *mtd, u_char byte)
-{
- write_data_reg((u8)byte);
-}
-
-static void
-au1550_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
-{
- int i;
-
- for (i=0; i<len; i++)
- write_data_reg(buf[i]);
-}
-
-static void
-au1550_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
-{
- int i;
-
- for (i=0; i<len; i++)
- buf[i] = (u_char)read_data_reg();
-}
-
-static int
-au1550_nand_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
-{
- int i;
-
- for (i=0; i<len; i++)
- if (buf[i] != (u_char)read_data_reg())
- return -EFAULT;
-
- return 0;
-}
-
-static void au1550_nand_select_chip(struct mtd_info *mtd, int chip)
-{
- switch(chip) {
- case -1:
- /* deassert chip enable */
- au_writel(au_readl(MEM_STNDCTL) & ~0x20 , MEM_STNDCTL);
- break;
- case 0:
- /* assert (force assert) chip enable */
- au_writel(au_readl(MEM_STNDCTL) | 0x20 , MEM_STNDCTL);
- break;
-
- default:
- BUG();
- }
-}
-
-static void au1550_nand_command (struct mtd_info *mtd, unsigned command,
- int column, int page_addr)
-{
- register struct nand_chip *this = mtd->priv;
-
- /*
- * Write out the command to the device.
- */
- if (command == NAND_CMD_SEQIN) {
- int readcmd;
-
- if (column >= mtd->oobblock) {
- /* OOB area */
- column -= mtd->oobblock;
- readcmd = NAND_CMD_READOOB;
- } else if (column < 256) {
- /* First 256 bytes --> READ0 */
- readcmd = NAND_CMD_READ0;
- } else {
- column -= 256;
- readcmd = NAND_CMD_READ1;
- }
- write_cmd_reg(readcmd);
- }
- write_cmd_reg(command);
-
- if (column != -1 || page_addr != -1) {
-
- /* Serially input address */
- if (column != -1)
- write_addr_reg(column);
- if (page_addr != -1) {
- write_addr_reg((unsigned char) (page_addr & 0xff));
- write_addr_reg(((page_addr >> 8) & 0xff));
- /* One more address cycle for higher density devices */
- if (mtd->size & 0x0c000000)
- write_addr_reg((unsigned char) ((page_addr >> 16) & 0x0f));
- }
- }
-
- switch (command) {
-
- case NAND_CMD_PAGEPROG:
- case NAND_CMD_ERASE1:
- case NAND_CMD_ERASE2:
- case NAND_CMD_SEQIN:
- case NAND_CMD_STATUS:
- break;
-
- case NAND_CMD_RESET:
- if (this->dev_ready)
- break;
- udelay(this->chip_delay);
- write_cmd_reg(NAND_CMD_STATUS);
- while ( !(read_data_reg() & 0x40));
- return;
-
- /* This applies to read commands */
- default:
- udelay (this->chip_delay);
- }
-
- /* wait until command is processed */
- while (!this->dev_ready(mtd));
-}
-
-
-/*
- * Main initialization routine
- */
-int __init au1550_init (void)
-{
- struct nand_chip *this;
- u16 boot_swapboot = 0; /* default value */
- u32 mem_time;
-
- /* Allocate memory for MTD device structure and private data */
- au1550_mtd = kmalloc (sizeof(struct mtd_info) +
- sizeof (struct nand_chip), GFP_KERNEL);
- if (!au1550_mtd) {
- printk ("Unable to allocate NAND MTD dev structure.\n");
- return -ENOMEM;
- }
-
- /* Get pointer to private data */
- this = (struct nand_chip *) (&au1550_mtd[1]);
-
- /* Initialize structures */
- memset((char *) au1550_mtd, 0, sizeof(struct mtd_info));
- memset((char *) this, 0, sizeof(struct nand_chip));
-
- /* Link the private data with the MTD structure */
- au1550_mtd->priv = this;
-
- /* disable interrupts */
- au_writel(au_readl(MEM_STNDCTL) & ~(1<<8), MEM_STNDCTL);
-
- /* disable NAND boot */
- au_writel(au_readl(MEM_STNDCTL) & ~(1<<0), MEM_STNDCTL);
-
-#ifdef CONFIG_MIPS_PB1550
- /* set gpio206 high */
- au_writel(au_readl(GPIO2_DIR) & ~(1<<6), GPIO2_DIR);
-
- boot_swapboot = (au_readl(MEM_STSTAT) & (0x7<<1)) |
- ((bcsr->status >> 6) & 0x1);
- switch (boot_swapboot) {
- case 0:
- case 2:
- case 8:
- case 0xC:
- case 0xD:
- /* x16 NAND Flash */
- nand_width = 0;
- printk("Pb1550 NAND: 16-bit NAND not supported by MTD\n");
- break;
- case 1:
- case 9:
- case 3:
- case 0xE:
- case 0xF:
- /* x8 NAND Flash */
- nand_width = 1;
- break;
- default:
- printk("Pb1550 NAND: bad boot:swap\n");
- kfree(au1550_mtd);
- return 1;
- }
-
- /* Configure RCE1 - should be done by YAMON */
- au_writel(0x5 | (nand_width << 22), MEM_STCFG1);
- au_writel(NAND_TIMING, MEM_STTIME1);
- mem_time = au_readl(MEM_STTIME1);
- au_sync();
-
- /* setup and enable chip select */
- /* we really need to decode offsets only up till 0x20 */
- au_writel((1<<28) | (NAND_PHYS_ADDR>>4) |
- (((NAND_PHYS_ADDR + 0x1000)-1) & (0x3fff<<18)>>18),
- MEM_STADDR1);
- au_sync();
-#endif
-
-#ifdef CONFIG_MIPS_DB1550
- /* Configure RCE1 - should be done by YAMON */
- au_writel(0x00400005, MEM_STCFG1);
- au_writel(0x00007774, MEM_STTIME1);
- au_writel(0x12000FFF, MEM_STADDR1);
-#endif
-
- p_nand = (volatile struct nand_regs *)ioremap(NAND_PHYS_ADDR, 0x1000);
-
- /* Set address of hardware control function */
- this->hwcontrol = au1550_hwcontrol;
- this->dev_ready = au1550_device_ready;
- /* 30 us command delay time */
- this->chip_delay = 30;
-
- this->cmdfunc = au1550_nand_command;
- this->select_chip = au1550_nand_select_chip;
- this->write_byte = au1550_nand_write_byte;
- this->read_byte = au1550_nand_read_byte;
- this->write_buf = au1550_nand_write_buf;
- this->read_buf = au1550_nand_read_buf;
- this->verify_buf = au1550_nand_verify_buf;
- this->eccmode = NAND_ECC_SOFT;
-
- /* Set internal data buffer */
- this->data_buf = data_buf;
- this->oob_buf = oob_buf;
-
- /* Scan to find existence of the device */
- if (nand_scan (au1550_mtd, 1)) {
- kfree (au1550_mtd);
- return -ENXIO;
- }
-
- /* Register the partitions */
- add_mtd_partitions(au1550_mtd, partition_info, NUM_PARTITIONS);
-
- return 0;
-}
-
-module_init(au1550_init);
-
-/*
- * Clean up routine
- */
-#ifdef MODULE
-static void __exit au1550_cleanup (void)
-{
- struct nand_chip *this = (struct nand_chip *) &au1550_mtd[1];
-
- iounmap ((void *)p_nand);
-
- /* Unregister partitions */
- del_mtd_partitions(au1550_mtd);
-
- /* Unregister the device */
- del_mtd_device (au1550_mtd);
-
- /* Free the MTD device structure */
- kfree (au1550_mtd);
-}
-module_exit(au1550_cleanup);
-#endif
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Embedded Edge, LLC");
-MODULE_DESCRIPTION("Board-specific glue layer for NAND flash on Pb1550 board");
* Derived from drivers/mtd/spia.c
* Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
*
- * $Id: autcpu12.c,v 1.20 2004/07/20 02:44:26 dwmw2 Exp $
+ * $Id: autcpu12.c,v 1.11 2003/06/04 17:04:09 gleixner Exp $
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* Overview:
* This is a device driver for the NAND flash device found on the
* autronix autcpu12 board, which is a SmartMediaCard. It supports
- * 16MiB, 32MiB and 64MiB cards.
+ * 16MB, 32MB and 64MB cards.
*
*
* 02-12-2002 TG Cleanup of module params
*/
static struct mtd_info *autcpu12_mtd = NULL;
+/*
+ * Module stuff
+ */
+#if LINUX_VERSION_CODE < 0x20212 && defined(MODULE)
+#define autcpu12_init init_module
+#define autcpu12_cleanup cleanup_module
+#endif
+
static int autcpu12_io_base = CS89712_VIRT_BASE;
static int autcpu12_fio_pbase = AUTCPU12_PHYS_SMC;
static int autcpu12_fio_ctrl = AUTCPU12_SMC_SELECT_OFFSET;
/*
* Define partitions for flash devices
*/
+extern struct nand_oobinfo jffs2_oobinfo;
+
static struct mtd_partition partition_info16k[] = {
- { .name = "AUTCPU12 flash partition 1",
- .offset = 0,
- .size = 8 * SZ_1M },
- { .name = "AUTCPU12 flash partition 2",
- .offset = 8 * SZ_1M,
- .size = 8 * SZ_1M },
+ { .name = "AUTCPU12 flash partition 1",
+ .offset = 0,
+ .size = 8 * SZ_1M },
+ { .name = "AUTCPU12 flash partition 2",
+ .offset = 8 * SZ_1M,
+ .size = 8 * SZ_1M },
};
static struct mtd_partition partition_info32k[] = {
- { .name = "AUTCPU12 flash partition 1",
- .offset = 0,
- .size = 8 * SZ_1M },
- { .name = "AUTCPU12 flash partition 2",
- .offset = 8 * SZ_1M,
- .size = 24 * SZ_1M },
+ { .name = "AUTCPU12 flash partition 1",
+ .offset = 0,
+ .size = 8 * SZ_1M },
+ { .name = "AUTCPU12 flash partition 2",
+ .offset = 8 * SZ_1M,
+ .size = 24 * SZ_1M },
};
static struct mtd_partition partition_info64k[] = {
- { .name = "AUTCPU12 flash partition 1",
- .offset = 0,
- .size = 16 * SZ_1M },
- { .name = "AUTCPU12 flash partition 2",
- .offset = 16 * SZ_1M,
- .size = 48 * SZ_1M },
+ { .name = "AUTCPU12 flash partition 1",
+ .offset = 0,
+ .size = 16 * SZ_1M },
+ { .name = "AUTCPU12 flash partition 2",
+ .offset = 16 * SZ_1M,
+ .size = 48 * SZ_1M },
};
static struct mtd_partition partition_info128k[] = {
- { .name = "AUTCPU12 flash partition 1",
- .offset = 0,
- .size = 16 * SZ_1M },
- { .name = "AUTCPU12 flash partition 2",
- .offset = 16 * SZ_1M,
- .size = 112 * SZ_1M },
+ { .name = "AUTCPU12 flash partition 1",
+ .offset = 0,
+ .size = 16 * SZ_1M },
+ { .name = "AUTCPU12 flash partition 2",
+ .offset = 16 * SZ_1M,
+ .size = 112 * SZ_1M },
};
#define NUM_PARTITIONS16K 2
/*
* hardware specific access to control-lines
*/
-static void autcpu12_hwcontrol(struct mtd_info *mtd, int cmd)
+void autcpu12_hwcontrol(int cmd)
{
switch(cmd){
/*
* read device ready pin
*/
-int autcpu12_device_ready(struct mtd_info *mtd)
+int autcpu12_device_ready(void)
{
return ( (*(volatile unsigned char *) (autcpu12_io_base + autcpu12_pedr)) & AUTCPU12_SMC_RDY) ? 1 : 0;
}
-
/*
* Main initialization routine
*/
this->chip_delay = 20;
this->eccmode = NAND_ECC_SOFT;
- /* Enable the following for a flash based bad block table */
- /*
- this->options = NAND_USE_FLASH_BBT;
- */
- this->options = NAND_USE_FLASH_BBT;
-
/* Scan to find existance of the device */
- if (nand_scan (autcpu12_mtd, 1)) {
+ if (nand_scan (autcpu12_mtd)) {
err = -ENXIO;
goto out_ior;
}
-
+
+ /* Allocate memory for internal data buffer */
+ this->data_buf = kmalloc (sizeof(u_char) * (autcpu12_mtd->oobblock + autcpu12_mtd->oobsize), GFP_KERNEL);
+ if (!this->data_buf) {
+ printk ("Unable to allocate NAND data buffer for AUTCPU12.\n");
+ err = -ENOMEM;
+ goto out_ior;
+ }
+
/* Register the partitions */
switch(autcpu12_mtd->size){
case SZ_16M: add_mtd_partitions(autcpu12_mtd, partition_info16k, NUM_PARTITIONS16K); break;
default: {
printk ("Unsupported SmartMedia device\n");
err = -ENXIO;
- goto out_ior;
+ goto out_buf;
}
}
goto out;
+out_buf:
+ kfree (this->data_buf);
out_ior:
iounmap((void *)autcpu12_fio_base);
out_mtd:
#ifdef MODULE
static void __exit autcpu12_cleanup (void)
{
- /* Release resources, unregister device */
- nand_release (autcpu12_mtd);
+ struct nand_chip *this = (struct nand_chip *) &autcpu12_mtd[1];
+
+ /* Unregister partitions */
+ del_mtd_partitions(autcpu12_mtd);
+
+ /* Unregister the device */
+ del_mtd_device (autcpu12_mtd);
+
+ /* Free internal data buffers */
+ kfree (this->data_buf);
/* unmap physical adress */
iounmap((void *)autcpu12_fio_base);
-
+
/* Free the MTD device structure */
kfree (autcpu12_mtd);
}
+++ /dev/null
-/*
- * drivers/mtd/nand/diskonchip.c
- *
- * (C) 2003 Red Hat, Inc.
- * (C) 2004 Dan Brown <dan_brown@ieee.org>
- * (C) 2004 Kalev Lember <kalev@smartlink.ee>
- *
- * Author: David Woodhouse <dwmw2@infradead.org>
- * Additional Diskonchip 2000 and Millennium support by Dan Brown <dan_brown@ieee.org>
- * Diskonchip Millennium Plus support by Kalev Lember <kalev@smartlink.ee>
- *
- * Interface to generic NAND code for M-Systems DiskOnChip devices
- *
- * $Id: diskonchip.c,v 1.34 2004/08/09 19:41:12 dbrown Exp $
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/delay.h>
-#include <asm/io.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/mtd/doc2000.h>
-#include <linux/mtd/compatmac.h>
-#include <linux/mtd/partitions.h>
-#include <linux/mtd/inftl.h>
-
-/* Where to look for the devices? */
-#ifndef CONFIG_MTD_DISKONCHIP_PROBE_ADDRESS
-#define CONFIG_MTD_DISKONCHIP_PROBE_ADDRESS 0
-#endif
-
-static unsigned long __initdata doc_locations[] = {
-#if defined (__alpha__) || defined(__i386__) || defined(__x86_64__)
-#ifdef CONFIG_MTD_DISKONCHIP_PROBE_HIGH
- 0xfffc8000, 0xfffca000, 0xfffcc000, 0xfffce000,
- 0xfffd0000, 0xfffd2000, 0xfffd4000, 0xfffd6000,
- 0xfffd8000, 0xfffda000, 0xfffdc000, 0xfffde000,
- 0xfffe0000, 0xfffe2000, 0xfffe4000, 0xfffe6000,
- 0xfffe8000, 0xfffea000, 0xfffec000, 0xfffee000,
-#else /* CONFIG_MTD_DOCPROBE_HIGH */
- 0xc8000, 0xca000, 0xcc000, 0xce000,
- 0xd0000, 0xd2000, 0xd4000, 0xd6000,
- 0xd8000, 0xda000, 0xdc000, 0xde000,
- 0xe0000, 0xe2000, 0xe4000, 0xe6000,
- 0xe8000, 0xea000, 0xec000, 0xee000,
-#endif /* CONFIG_MTD_DOCPROBE_HIGH */
-#elif defined(__PPC__)
- 0xe4000000,
-#elif defined(CONFIG_MOMENCO_OCELOT)
- 0x2f000000,
- 0xff000000,
-#elif defined(CONFIG_MOMENCO_OCELOT_G) || defined (CONFIG_MOMENCO_OCELOT_C)
- 0xff000000,
-##else
-#warning Unknown architecture for DiskOnChip. No default probe locations defined
-#endif
- 0xffffffff };
-
-static struct mtd_info *doclist = NULL;
-
-struct doc_priv {
- unsigned long virtadr;
- unsigned long physadr;
- u_char ChipID;
- u_char CDSNControl;
- int chips_per_floor; /* The number of chips detected on each floor */
- int curfloor;
- int curchip;
- int mh0_page;
- int mh1_page;
- struct mtd_info *nextdoc;
-};
-
-/* Max number of eraseblocks to scan (from start of device) for the (I)NFTL
- MediaHeader. The spec says to just keep going, I think, but that's just
- silly. */
-#define MAX_MEDIAHEADER_SCAN 8
-
-/* This is the syndrome computed by the HW ecc generator upon reading an empty
- page, one with all 0xff for data and stored ecc code. */
-static u_char empty_read_syndrome[6] = { 0x26, 0xff, 0x6d, 0x47, 0x73, 0x7a };
-/* This is the ecc value computed by the HW ecc generator upon writing an empty
- page, one with all 0xff for data. */
-static u_char empty_write_ecc[6] = { 0x4b, 0x00, 0xe2, 0x0e, 0x93, 0xf7 };
-
-#define INFTL_BBT_RESERVED_BLOCKS 4
-
-#define DoC_is_MillenniumPlus(doc) ((doc)->ChipID == DOC_ChipID_DocMilPlus16 || (doc)->ChipID == DOC_ChipID_DocMilPlus32)
-#define DoC_is_Millennium(doc) ((doc)->ChipID == DOC_ChipID_DocMil)
-#define DoC_is_2000(doc) ((doc)->ChipID == DOC_ChipID_Doc2k)
-
-static void doc200x_hwcontrol(struct mtd_info *mtd, int cmd);
-static void doc200x_select_chip(struct mtd_info *mtd, int chip);
-
-static int debug=0;
-MODULE_PARM(debug, "i");
-
-static int try_dword=1;
-MODULE_PARM(try_dword, "i");
-
-static int no_ecc_failures=0;
-MODULE_PARM(no_ecc_failures, "i");
-
-static int no_autopart=0;
-MODULE_PARM(no_autopart, "i");
-
-#ifdef MTD_NAND_DISKONCHIP_BBTWRITE
-static int inftl_bbt_write=1;
-#else
-static int inftl_bbt_write=0;
-#endif
-MODULE_PARM(inftl_bbt_write, "i");
-
-static unsigned long doc_config_location = CONFIG_MTD_DISKONCHIP_PROBE_ADDRESS;
-MODULE_PARM(doc_config_location, "l");
-MODULE_PARM_DESC(doc_config_location, "Physical memory address at which to probe for DiskOnChip");
-
-static void DoC_Delay(struct doc_priv *doc, unsigned short cycles)
-{
- volatile char dummy;
- int i;
-
- for (i = 0; i < cycles; i++) {
- if (DoC_is_Millennium(doc))
- dummy = ReadDOC(doc->virtadr, NOP);
- else if (DoC_is_MillenniumPlus(doc))
- dummy = ReadDOC(doc->virtadr, Mplus_NOP);
- else
- dummy = ReadDOC(doc->virtadr, DOCStatus);
- }
-
-}
-
-#define CDSN_CTRL_FR_B_MASK (CDSN_CTRL_FR_B0 | CDSN_CTRL_FR_B1)
-
-/* DOC_WaitReady: Wait for RDY line to be asserted by the flash chip */
-static int _DoC_WaitReady(struct doc_priv *doc)
-{
- unsigned long docptr = doc->virtadr;
- unsigned long timeo = jiffies + (HZ * 10);
-
- if(debug) printk("_DoC_WaitReady...\n");
- /* Out-of-line routine to wait for chip response */
- if (DoC_is_MillenniumPlus(doc)) {
- while ((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK) {
- if (time_after(jiffies, timeo)) {
- printk("_DoC_WaitReady timed out.\n");
- return -EIO;
- }
- udelay(1);
- cond_resched();
- }
- } else {
- while (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) {
- if (time_after(jiffies, timeo)) {
- printk("_DoC_WaitReady timed out.\n");
- return -EIO;
- }
- udelay(1);
- cond_resched();
- }
- }
-
- return 0;
-}
-
-static inline int DoC_WaitReady(struct doc_priv *doc)
-{
- unsigned long docptr = doc->virtadr;
- int ret = 0;
-
- if (DoC_is_MillenniumPlus(doc)) {
- DoC_Delay(doc, 4);
-
- if ((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK)
- /* Call the out-of-line routine to wait */
- ret = _DoC_WaitReady(doc);
- } else {
- DoC_Delay(doc, 4);
-
- if (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B))
- /* Call the out-of-line routine to wait */
- ret = _DoC_WaitReady(doc);
- DoC_Delay(doc, 2);
- }
-
- if(debug) printk("DoC_WaitReady OK\n");
- return ret;
-}
-
-static void doc2000_write_byte(struct mtd_info *mtd, u_char datum)
-{
- struct nand_chip *this = mtd->priv;
- struct doc_priv *doc = (void *)this->priv;
- unsigned long docptr = doc->virtadr;
-
- if(debug)printk("write_byte %02x\n", datum);
- WriteDOC(datum, docptr, CDSNSlowIO);
- WriteDOC(datum, docptr, 2k_CDSN_IO);
-}
-
-static u_char doc2000_read_byte(struct mtd_info *mtd)
-{
- struct nand_chip *this = mtd->priv;
- struct doc_priv *doc = (void *)this->priv;
- unsigned long docptr = doc->virtadr;
- u_char ret;
-
- ReadDOC(docptr, CDSNSlowIO);
- DoC_Delay(doc, 2);
- ret = ReadDOC(docptr, 2k_CDSN_IO);
- if (debug) printk("read_byte returns %02x\n", ret);
- return ret;
-}
-
-static void doc2000_writebuf(struct mtd_info *mtd,
- const u_char *buf, int len)
-{
- struct nand_chip *this = mtd->priv;
- struct doc_priv *doc = (void *)this->priv;
- unsigned long docptr = doc->virtadr;
- int i;
- if (debug)printk("writebuf of %d bytes: ", len);
- for (i=0; i < len; i++) {
- WriteDOC_(buf[i], docptr, DoC_2k_CDSN_IO + i);
- if (debug && i < 16)
- printk("%02x ", buf[i]);
- }
- if (debug) printk("\n");
-}
-
-static void doc2000_readbuf(struct mtd_info *mtd,
- u_char *buf, int len)
-{
- struct nand_chip *this = mtd->priv;
- struct doc_priv *doc = (void *)this->priv;
- unsigned long docptr = doc->virtadr;
- int i;
-
- if (debug)printk("readbuf of %d bytes: ", len);
-
- for (i=0; i < len; i++) {
- buf[i] = ReadDOC(docptr, 2k_CDSN_IO + i);
- }
-}
-
-static void doc2000_readbuf_dword(struct mtd_info *mtd,
- u_char *buf, int len)
-{
- struct nand_chip *this = mtd->priv;
- struct doc_priv *doc = (void *)this->priv;
- unsigned long docptr = doc->virtadr;
- int i;
-
- if (debug) printk("readbuf_dword of %d bytes: ", len);
-
- if (unlikely((((unsigned long)buf)|len) & 3)) {
- for (i=0; i < len; i++) {
- *(uint8_t *)(&buf[i]) = ReadDOC(docptr, 2k_CDSN_IO + i);
- }
- } else {
- for (i=0; i < len; i+=4) {
- *(uint32_t*)(&buf[i]) = readl(docptr + DoC_2k_CDSN_IO + i);
- }
- }
-}
-
-static int doc2000_verifybuf(struct mtd_info *mtd,
- const u_char *buf, int len)
-{
- struct nand_chip *this = mtd->priv;
- struct doc_priv *doc = (void *)this->priv;
- unsigned long docptr = doc->virtadr;
- int i;
-
- for (i=0; i < len; i++)
- if (buf[i] != ReadDOC(docptr, 2k_CDSN_IO))
- return -EFAULT;
- return 0;
-}
-
-static uint16_t __init doc200x_ident_chip(struct mtd_info *mtd, int nr)
-{
- struct nand_chip *this = mtd->priv;
- struct doc_priv *doc = (void *)this->priv;
- uint16_t ret;
-
- doc200x_select_chip(mtd, nr);
- doc200x_hwcontrol(mtd, NAND_CTL_SETCLE);
- this->write_byte(mtd, NAND_CMD_READID);
- doc200x_hwcontrol(mtd, NAND_CTL_CLRCLE);
- doc200x_hwcontrol(mtd, NAND_CTL_SETALE);
- this->write_byte(mtd, 0);
- doc200x_hwcontrol(mtd, NAND_CTL_CLRALE);
-
- ret = this->read_byte(mtd) << 8;
- ret |= this->read_byte(mtd);
-
- if (doc->ChipID == DOC_ChipID_Doc2k && try_dword && !nr) {
- /* First chip probe. See if we get same results by 32-bit access */
- union {
- uint32_t dword;
- uint8_t byte[4];
- } ident;
- unsigned long docptr = doc->virtadr;
-
- doc200x_hwcontrol(mtd, NAND_CTL_SETCLE);
- doc2000_write_byte(mtd, NAND_CMD_READID);
- doc200x_hwcontrol(mtd, NAND_CTL_CLRCLE);
- doc200x_hwcontrol(mtd, NAND_CTL_SETALE);
- doc2000_write_byte(mtd, 0);
- doc200x_hwcontrol(mtd, NAND_CTL_CLRALE);
-
- ident.dword = readl(docptr + DoC_2k_CDSN_IO);
- if (((ident.byte[0] << 8) | ident.byte[1]) == ret) {
- printk(KERN_INFO "DiskOnChip 2000 responds to DWORD access\n");
- this->read_buf = &doc2000_readbuf_dword;
- }
- }
-
- return ret;
-}
-
-static void __init doc2000_count_chips(struct mtd_info *mtd)
-{
- struct nand_chip *this = mtd->priv;
- struct doc_priv *doc = (void *)this->priv;
- uint16_t mfrid;
- int i;
-
- /* Max 4 chips per floor on DiskOnChip 2000 */
- doc->chips_per_floor = 4;
-
- /* Find out what the first chip is */
- mfrid = doc200x_ident_chip(mtd, 0);
-
- /* Find how many chips in each floor. */
- for (i = 1; i < 4; i++) {
- if (doc200x_ident_chip(mtd, i) != mfrid)
- break;
- }
- doc->chips_per_floor = i;
- printk(KERN_DEBUG "Detected %d chips per floor.\n", i);
-}
-
-static int doc200x_wait(struct mtd_info *mtd, struct nand_chip *this, int state)
-{
- struct doc_priv *doc = (void *)this->priv;
-
- int status;
-
- DoC_WaitReady(doc);
- this->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
- DoC_WaitReady(doc);
- status = (int)this->read_byte(mtd);
-
- return status;
-}
-
-static void doc2001_write_byte(struct mtd_info *mtd, u_char datum)
-{
- struct nand_chip *this = mtd->priv;
- struct doc_priv *doc = (void *)this->priv;
- unsigned long docptr = doc->virtadr;
-
- WriteDOC(datum, docptr, CDSNSlowIO);
- WriteDOC(datum, docptr, Mil_CDSN_IO);
- WriteDOC(datum, docptr, WritePipeTerm);
-}
-
-static u_char doc2001_read_byte(struct mtd_info *mtd)
-{
- struct nand_chip *this = mtd->priv;
- struct doc_priv *doc = (void *)this->priv;
- unsigned long docptr = doc->virtadr;
-
- //ReadDOC(docptr, CDSNSlowIO);
- /* 11.4.5 -- delay twice to allow extended length cycle */
- DoC_Delay(doc, 2);
- ReadDOC(docptr, ReadPipeInit);
- //return ReadDOC(docptr, Mil_CDSN_IO);
- return ReadDOC(docptr, LastDataRead);
-}
-
-static void doc2001_writebuf(struct mtd_info *mtd,
- const u_char *buf, int len)
-{
- struct nand_chip *this = mtd->priv;
- struct doc_priv *doc = (void *)this->priv;
- unsigned long docptr = doc->virtadr;
- int i;
-
- for (i=0; i < len; i++)
- WriteDOC_(buf[i], docptr, DoC_Mil_CDSN_IO + i);
- /* Terminate write pipeline */
- WriteDOC(0x00, docptr, WritePipeTerm);
-}
-
-static void doc2001_readbuf(struct mtd_info *mtd,
- u_char *buf, int len)
-{
- struct nand_chip *this = mtd->priv;
- struct doc_priv *doc = (void *)this->priv;
- unsigned long docptr = doc->virtadr;
- int i;
-
- /* Start read pipeline */
- ReadDOC(docptr, ReadPipeInit);
-
- for (i=0; i < len-1; i++)
- buf[i] = ReadDOC(docptr, Mil_CDSN_IO + (i & 0xff));
-
- /* Terminate read pipeline */
- buf[i] = ReadDOC(docptr, LastDataRead);
-}
-
-static int doc2001_verifybuf(struct mtd_info *mtd,
- const u_char *buf, int len)
-{
- struct nand_chip *this = mtd->priv;
- struct doc_priv *doc = (void *)this->priv;
- unsigned long docptr = doc->virtadr;
- int i;
-
- /* Start read pipeline */
- ReadDOC(docptr, ReadPipeInit);
-
- for (i=0; i < len-1; i++)
- if (buf[i] != ReadDOC(docptr, Mil_CDSN_IO)) {
- ReadDOC(docptr, LastDataRead);
- return i;
- }
- if (buf[i] != ReadDOC(docptr, LastDataRead))
- return i;
- return 0;
-}
-
-static u_char doc2001plus_read_byte(struct mtd_info *mtd)
-{
- struct nand_chip *this = mtd->priv;
- struct doc_priv *doc = (void *)this->priv;
- unsigned long docptr = doc->virtadr;
- u_char ret;
-
- ReadDOC(docptr, Mplus_ReadPipeInit);
- ReadDOC(docptr, Mplus_ReadPipeInit);
- ret = ReadDOC(docptr, Mplus_LastDataRead);
- if (debug) printk("read_byte returns %02x\n", ret);
- return ret;
-}
-
-static void doc2001plus_writebuf(struct mtd_info *mtd,
- const u_char *buf, int len)
-{
- struct nand_chip *this = mtd->priv;
- struct doc_priv *doc = (void *)this->priv;
- unsigned long docptr = doc->virtadr;
- int i;
-
- if (debug)printk("writebuf of %d bytes: ", len);
- for (i=0; i < len; i++) {
- WriteDOC_(buf[i], docptr, DoC_Mil_CDSN_IO + i);
- if (debug && i < 16)
- printk("%02x ", buf[i]);
- }
- if (debug) printk("\n");
-}
-
-static void doc2001plus_readbuf(struct mtd_info *mtd,
- u_char *buf, int len)
-{
- struct nand_chip *this = mtd->priv;
- struct doc_priv *doc = (void *)this->priv;
- unsigned long docptr = doc->virtadr;
- int i;
-
- if (debug)printk("readbuf of %d bytes: ", len);
-
- /* Start read pipeline */
- ReadDOC(docptr, Mplus_ReadPipeInit);
- ReadDOC(docptr, Mplus_ReadPipeInit);
-
- for (i=0; i < len-2; i++) {
- buf[i] = ReadDOC(docptr, Mil_CDSN_IO);
- if (debug && i < 16)
- printk("%02x ", buf[i]);
- }
-
- /* Terminate read pipeline */
- buf[len-2] = ReadDOC(docptr, Mplus_LastDataRead);
- if (debug && i < 16)
- printk("%02x ", buf[len-2]);
- buf[len-1] = ReadDOC(docptr, Mplus_LastDataRead);
- if (debug && i < 16)
- printk("%02x ", buf[len-1]);
- if (debug) printk("\n");
-}
-
-static int doc2001plus_verifybuf(struct mtd_info *mtd,
- const u_char *buf, int len)
-{
- struct nand_chip *this = mtd->priv;
- struct doc_priv *doc = (void *)this->priv;
- unsigned long docptr = doc->virtadr;
- int i;
-
- if (debug)printk("verifybuf of %d bytes: ", len);
-
- /* Start read pipeline */
- ReadDOC(docptr, Mplus_ReadPipeInit);
- ReadDOC(docptr, Mplus_ReadPipeInit);
-
- for (i=0; i < len-2; i++)
- if (buf[i] != ReadDOC(docptr, Mil_CDSN_IO)) {
- ReadDOC(docptr, Mplus_LastDataRead);
- ReadDOC(docptr, Mplus_LastDataRead);
- return i;
- }
- if (buf[len-2] != ReadDOC(docptr, Mplus_LastDataRead))
- return len-2;
- if (buf[len-1] != ReadDOC(docptr, Mplus_LastDataRead))
- return len-1;
- return 0;
-}
-
-static void doc2001plus_select_chip(struct mtd_info *mtd, int chip)
-{
- struct nand_chip *this = mtd->priv;
- struct doc_priv *doc = (void *)this->priv;
- unsigned long docptr = doc->virtadr;
- int floor = 0;
-
- if(debug)printk("select chip (%d)\n", chip);
-
- if (chip == -1) {
- /* Disable flash internally */
- WriteDOC(0, docptr, Mplus_FlashSelect);
- return;
- }
-
- floor = chip / doc->chips_per_floor;
- chip -= (floor * doc->chips_per_floor);
-
- /* Assert ChipEnable and deassert WriteProtect */
- WriteDOC((DOC_FLASH_CE), docptr, Mplus_FlashSelect);
- this->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
-
- doc->curchip = chip;
- doc->curfloor = floor;
-}
-
-static void doc200x_select_chip(struct mtd_info *mtd, int chip)
-{
- struct nand_chip *this = mtd->priv;
- struct doc_priv *doc = (void *)this->priv;
- unsigned long docptr = doc->virtadr;
- int floor = 0;
-
- if(debug)printk("select chip (%d)\n", chip);
-
- if (chip == -1)
- return;
-
- floor = chip / doc->chips_per_floor;
- chip -= (floor * doc->chips_per_floor);
-
- /* 11.4.4 -- deassert CE before changing chip */
- doc200x_hwcontrol(mtd, NAND_CTL_CLRNCE);
-
- WriteDOC(floor, docptr, FloorSelect);
- WriteDOC(chip, docptr, CDSNDeviceSelect);
-
- doc200x_hwcontrol(mtd, NAND_CTL_SETNCE);
-
- doc->curchip = chip;
- doc->curfloor = floor;
-}
-
-static void doc200x_hwcontrol(struct mtd_info *mtd, int cmd)
-{
- struct nand_chip *this = mtd->priv;
- struct doc_priv *doc = (void *)this->priv;
- unsigned long docptr = doc->virtadr;
-
- switch(cmd) {
- case NAND_CTL_SETNCE:
- doc->CDSNControl |= CDSN_CTRL_CE;
- break;
- case NAND_CTL_CLRNCE:
- doc->CDSNControl &= ~CDSN_CTRL_CE;
- break;
- case NAND_CTL_SETCLE:
- doc->CDSNControl |= CDSN_CTRL_CLE;
- break;
- case NAND_CTL_CLRCLE:
- doc->CDSNControl &= ~CDSN_CTRL_CLE;
- break;
- case NAND_CTL_SETALE:
- doc->CDSNControl |= CDSN_CTRL_ALE;
- break;
- case NAND_CTL_CLRALE:
- doc->CDSNControl &= ~CDSN_CTRL_ALE;
- break;
- case NAND_CTL_SETWP:
- doc->CDSNControl |= CDSN_CTRL_WP;
- break;
- case NAND_CTL_CLRWP:
- doc->CDSNControl &= ~CDSN_CTRL_WP;
- break;
- }
- if (debug)printk("hwcontrol(%d): %02x\n", cmd, doc->CDSNControl);
- WriteDOC(doc->CDSNControl, docptr, CDSNControl);
- /* 11.4.3 -- 4 NOPs after CSDNControl write */
- DoC_Delay(doc, 4);
-}
-
-static void doc2001plus_command (struct mtd_info *mtd, unsigned command, int column, int page_addr)
-{
- struct nand_chip *this = mtd->priv;
- struct doc_priv *doc = (void *)this->priv;
- unsigned long docptr = doc->virtadr;
-
- /*
- * Must terminate write pipeline before sending any commands
- * to the device.
- */
- if (command == NAND_CMD_PAGEPROG) {
- WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
- WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
- }
-
- /*
- * Write out the command to the device.
- */
- if (command == NAND_CMD_SEQIN) {
- int readcmd;
-
- if (column >= mtd->oobblock) {
- /* OOB area */
- column -= mtd->oobblock;
- readcmd = NAND_CMD_READOOB;
- } else if (column < 256) {
- /* First 256 bytes --> READ0 */
- readcmd = NAND_CMD_READ0;
- } else {
- column -= 256;
- readcmd = NAND_CMD_READ1;
- }
- WriteDOC(readcmd, docptr, Mplus_FlashCmd);
- }
- WriteDOC(command, docptr, Mplus_FlashCmd);
- WriteDOC(0, docptr, Mplus_WritePipeTerm);
- WriteDOC(0, docptr, Mplus_WritePipeTerm);
-
- if (column != -1 || page_addr != -1) {
- /* Serially input address */
- if (column != -1) {
- /* Adjust columns for 16 bit buswidth */
- if (this->options & NAND_BUSWIDTH_16)
- column >>= 1;
- WriteDOC(column, docptr, Mplus_FlashAddress);
- }
- if (page_addr != -1) {
- WriteDOC((unsigned char) (page_addr & 0xff), docptr, Mplus_FlashAddress);
- WriteDOC((unsigned char) ((page_addr >> 8) & 0xff), docptr, Mplus_FlashAddress);
- /* One more address cycle for higher density devices */
- if (this->chipsize & 0x0c000000) {
- WriteDOC((unsigned char) ((page_addr >> 16) & 0x0f), docptr, Mplus_FlashAddress);
- printk("high density\n");
- }
- }
- WriteDOC(0, docptr, Mplus_WritePipeTerm);
- WriteDOC(0, docptr, Mplus_WritePipeTerm);
- /* deassert ALE */
- if (command == NAND_CMD_READ0 || command == NAND_CMD_READ1 || command == NAND_CMD_READOOB || command == NAND_CMD_READID)
- WriteDOC(0, docptr, Mplus_FlashControl);
- }
-
- /*
- * program and erase have their own busy handlers
- * status and sequential in needs no delay
- */
- switch (command) {
-
- case NAND_CMD_PAGEPROG:
- case NAND_CMD_ERASE1:
- case NAND_CMD_ERASE2:
- case NAND_CMD_SEQIN:
- case NAND_CMD_STATUS:
- return;
-
- case NAND_CMD_RESET:
- if (this->dev_ready)
- break;
- udelay(this->chip_delay);
- WriteDOC(NAND_CMD_STATUS, docptr, Mplus_FlashCmd);
- WriteDOC(0, docptr, Mplus_WritePipeTerm);
- WriteDOC(0, docptr, Mplus_WritePipeTerm);
- while ( !(this->read_byte(mtd) & 0x40));
- return;
-
- /* This applies to read commands */
- default:
- /*
- * If we don't have access to the busy pin, we apply the given
- * command delay
- */
- if (!this->dev_ready) {
- udelay (this->chip_delay);
- return;
- }
- }
-
- /* Apply this short delay always to ensure that we do wait tWB in
- * any case on any machine. */
- ndelay (100);
- /* wait until command is processed */
- while (!this->dev_ready(mtd));
-}
-
-static int doc200x_dev_ready(struct mtd_info *mtd)
-{
- struct nand_chip *this = mtd->priv;
- struct doc_priv *doc = (void *)this->priv;
- unsigned long docptr = doc->virtadr;
-
- if (DoC_is_MillenniumPlus(doc)) {
- /* 11.4.2 -- must NOP four times before checking FR/B# */
- DoC_Delay(doc, 4);
- if ((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK) {
- if(debug)
- printk("not ready\n");
- return 0;
- }
- if (debug)printk("was ready\n");
- return 1;
- } else {
- /* 11.4.2 -- must NOP four times before checking FR/B# */
- DoC_Delay(doc, 4);
- if (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) {
- if(debug)
- printk("not ready\n");
- return 0;
- }
- /* 11.4.2 -- Must NOP twice if it's ready */
- DoC_Delay(doc, 2);
- if (debug)printk("was ready\n");
- return 1;
- }
-}
-
-static int doc200x_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
-{
- /* This is our last resort if we couldn't find or create a BBT. Just
- pretend all blocks are good. */
- return 0;
-}
-
-static void doc200x_enable_hwecc(struct mtd_info *mtd, int mode)
-{
- struct nand_chip *this = mtd->priv;
- struct doc_priv *doc = (void *)this->priv;
- unsigned long docptr = doc->virtadr;
-
- /* Prime the ECC engine */
- switch(mode) {
- case NAND_ECC_READ:
- WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
- WriteDOC(DOC_ECC_EN, docptr, ECCConf);
- break;
- case NAND_ECC_WRITE:
- WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
- WriteDOC(DOC_ECC_EN | DOC_ECC_RW, docptr, ECCConf);
- break;
- }
-}
-
-static void doc2001plus_enable_hwecc(struct mtd_info *mtd, int mode)
-{
- struct nand_chip *this = mtd->priv;
- struct doc_priv *doc = (void *)this->priv;
- unsigned long docptr = doc->virtadr;
-
- /* Prime the ECC engine */
- switch(mode) {
- case NAND_ECC_READ:
- WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
- WriteDOC(DOC_ECC_EN, docptr, Mplus_ECCConf);
- break;
- case NAND_ECC_WRITE:
- WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
- WriteDOC(DOC_ECC_EN | DOC_ECC_RW, docptr, Mplus_ECCConf);
- break;
- }
-}
-
-/* This code is only called on write */
-static int doc200x_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
- unsigned char *ecc_code)
-{
- struct nand_chip *this = mtd->priv;
- struct doc_priv *doc = (void *)this->priv;
- unsigned long docptr = doc->virtadr;
- int i;
- int emptymatch = 1;
-
- /* flush the pipeline */
- if (DoC_is_2000(doc)) {
- WriteDOC(doc->CDSNControl & ~CDSN_CTRL_FLASH_IO, docptr, CDSNControl);
- WriteDOC(0, docptr, 2k_CDSN_IO);
- WriteDOC(0, docptr, 2k_CDSN_IO);
- WriteDOC(0, docptr, 2k_CDSN_IO);
- WriteDOC(doc->CDSNControl, docptr, CDSNControl);
- } else if (DoC_is_MillenniumPlus(doc)) {
- WriteDOC(0, docptr, Mplus_NOP);
- WriteDOC(0, docptr, Mplus_NOP);
- WriteDOC(0, docptr, Mplus_NOP);
- } else {
- WriteDOC(0, docptr, NOP);
- WriteDOC(0, docptr, NOP);
- WriteDOC(0, docptr, NOP);
- }
-
- for (i = 0; i < 6; i++) {
- if (DoC_is_MillenniumPlus(doc))
- ecc_code[i] = ReadDOC_(docptr, DoC_Mplus_ECCSyndrome0 + i);
- else
- ecc_code[i] = ReadDOC_(docptr, DoC_ECCSyndrome0 + i);
- if (ecc_code[i] != empty_write_ecc[i])
- emptymatch = 0;
- }
- if (DoC_is_MillenniumPlus(doc))
- WriteDOC(DOC_ECC_DIS, docptr, Mplus_ECCConf);
- else
- WriteDOC(DOC_ECC_DIS, docptr, ECCConf);
-#if 0
- /* If emptymatch=1, we might have an all-0xff data buffer. Check. */
- if (emptymatch) {
- /* Note: this somewhat expensive test should not be triggered
- often. It could be optimized away by examining the data in
- the writebuf routine, and remembering the result. */
- for (i = 0; i < 512; i++) {
- if (dat[i] == 0xff) continue;
- emptymatch = 0;
- break;
- }
- }
- /* If emptymatch still =1, we do have an all-0xff data buffer.
- Return all-0xff ecc value instead of the computed one, so
- it'll look just like a freshly-erased page. */
- if (emptymatch) memset(ecc_code, 0xff, 6);
-#endif
- return 0;
-}
-
-static int doc200x_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc)
-{
- int i, ret = 0;
- struct nand_chip *this = mtd->priv;
- struct doc_priv *doc = (void *)this->priv;
- unsigned long docptr = doc->virtadr;
- volatile u_char dummy;
- int emptymatch = 1;
-
- /* flush the pipeline */
- if (DoC_is_2000(doc)) {
- dummy = ReadDOC(docptr, 2k_ECCStatus);
- dummy = ReadDOC(docptr, 2k_ECCStatus);
- dummy = ReadDOC(docptr, 2k_ECCStatus);
- } else if (DoC_is_MillenniumPlus(doc)) {
- dummy = ReadDOC(docptr, Mplus_ECCConf);
- dummy = ReadDOC(docptr, Mplus_ECCConf);
- dummy = ReadDOC(docptr, Mplus_ECCConf);
- } else {
- dummy = ReadDOC(docptr, ECCConf);
- dummy = ReadDOC(docptr, ECCConf);
- dummy = ReadDOC(docptr, ECCConf);
- }
-
- /* Error occured ? */
- if (dummy & 0x80) {
- for (i = 0; i < 6; i++) {
- if (DoC_is_MillenniumPlus(doc))
- calc_ecc[i] = ReadDOC_(docptr, DoC_Mplus_ECCSyndrome0 + i);
- else
- calc_ecc[i] = ReadDOC_(docptr, DoC_ECCSyndrome0 + i);
- if (calc_ecc[i] != empty_read_syndrome[i])
- emptymatch = 0;
- }
- /* If emptymatch=1, the read syndrome is consistent with an
- all-0xff data and stored ecc block. Check the stored ecc. */
- if (emptymatch) {
- for (i = 0; i < 6; i++) {
- if (read_ecc[i] == 0xff) continue;
- emptymatch = 0;
- break;
- }
- }
- /* If emptymatch still =1, check the data block. */
- if (emptymatch) {
- /* Note: this somewhat expensive test should not be triggered
- often. It could be optimized away by examining the data in
- the readbuf routine, and remembering the result. */
- for (i = 0; i < 512; i++) {
- if (dat[i] == 0xff) continue;
- emptymatch = 0;
- break;
- }
- }
- /* If emptymatch still =1, this is almost certainly a freshly-
- erased block, in which case the ECC will not come out right.
- We'll suppress the error and tell the caller everything's
- OK. Because it is. */
- if (!emptymatch) ret = doc_decode_ecc (dat, calc_ecc);
- if (ret > 0)
- printk(KERN_ERR "doc200x_correct_data corrected %d errors\n", ret);
- }
- if (DoC_is_MillenniumPlus(doc))
- WriteDOC(DOC_ECC_DIS, docptr, Mplus_ECCConf);
- else
- WriteDOC(DOC_ECC_DIS, docptr, ECCConf);
- if (no_ecc_failures && (ret == -1)) {
- printk(KERN_ERR "suppressing ECC failure\n");
- ret = 0;
- }
- return ret;
-}
-
-//u_char mydatabuf[528];
-
-static struct nand_oobinfo doc200x_oobinfo = {
- .useecc = MTD_NANDECC_AUTOPLACE,
- .eccbytes = 6,
- .eccpos = {0, 1, 2, 3, 4, 5},
- .oobfree = { {8, 8} }
-};
-
-/* Find the (I)NFTL Media Header, and optionally also the mirror media header.
- On sucessful return, buf will contain a copy of the media header for
- further processing. id is the string to scan for, and will presumably be
- either "ANAND" or "BNAND". If findmirror=1, also look for the mirror media
- header. The page #s of the found media headers are placed in mh0_page and
- mh1_page in the DOC private structure. */
-static int __init find_media_headers(struct mtd_info *mtd, u_char *buf,
- const char *id, int findmirror)
-{
- struct nand_chip *this = mtd->priv;
- struct doc_priv *doc = (void *)this->priv;
- unsigned offs, end = (MAX_MEDIAHEADER_SCAN << this->phys_erase_shift);
- int ret;
- size_t retlen;
-
- end = min(end, mtd->size); // paranoia
- for (offs = 0; offs < end; offs += mtd->erasesize) {
- ret = mtd->read(mtd, offs, mtd->oobblock, &retlen, buf);
- if (retlen != mtd->oobblock) continue;
- if (ret) {
- printk(KERN_WARNING "ECC error scanning DOC at 0x%x\n",
- offs);
- }
- if (memcmp(buf, id, 6)) continue;
- printk(KERN_INFO "Found DiskOnChip %s Media Header at 0x%x\n", id, offs);
- if (doc->mh0_page == -1) {
- doc->mh0_page = offs >> this->page_shift;
- if (!findmirror) return 1;
- continue;
- }
- doc->mh1_page = offs >> this->page_shift;
- return 2;
- }
- if (doc->mh0_page == -1) {
- printk(KERN_WARNING "DiskOnChip %s Media Header not found.\n", id);
- return 0;
- }
- /* Only one mediaheader was found. We want buf to contain a
- mediaheader on return, so we'll have to re-read the one we found. */
- offs = doc->mh0_page << this->page_shift;
- ret = mtd->read(mtd, offs, mtd->oobblock, &retlen, buf);
- if (retlen != mtd->oobblock) {
- /* Insanity. Give up. */
- printk(KERN_ERR "Read DiskOnChip Media Header once, but can't reread it???\n");
- return 0;
- }
- return 1;
-}
-
-static inline int __init nftl_partscan(struct mtd_info *mtd,
- struct mtd_partition *parts)
-{
- struct nand_chip *this = mtd->priv;
- struct doc_priv *doc = (void *)this->priv;
- int ret = 0;
- u_char *buf;
- struct NFTLMediaHeader *mh;
- const unsigned psize = 1 << this->page_shift;
- unsigned blocks, maxblocks;
- int offs, numheaders;
-
- buf = kmalloc(mtd->oobblock, GFP_KERNEL);
- if (!buf) {
- printk(KERN_ERR "DiskOnChip mediaheader kmalloc failed!\n");
- return 0;
- }
- if (!(numheaders=find_media_headers(mtd, buf, "ANAND", 1))) goto out;
- mh = (struct NFTLMediaHeader *) buf;
-
-//#ifdef CONFIG_MTD_DEBUG_VERBOSE
-// if (CONFIG_MTD_DEBUG_VERBOSE >= 2)
- printk(KERN_INFO " DataOrgID = %s\n"
- " NumEraseUnits = %d\n"
- " FirstPhysicalEUN = %d\n"
- " FormattedSize = %d\n"
- " UnitSizeFactor = %d\n",
- mh->DataOrgID, mh->NumEraseUnits,
- mh->FirstPhysicalEUN, mh->FormattedSize,
- mh->UnitSizeFactor);
-//#endif
-
- blocks = mtd->size >> this->phys_erase_shift;
- maxblocks = min(32768U, mtd->erasesize - psize);
-
- if (mh->UnitSizeFactor == 0x00) {
- /* Auto-determine UnitSizeFactor. The constraints are:
- - There can be at most 32768 virtual blocks.
- - There can be at most (virtual block size - page size)
- virtual blocks (because MediaHeader+BBT must fit in 1).
- */
- mh->UnitSizeFactor = 0xff;
- while (blocks > maxblocks) {
- blocks >>= 1;
- maxblocks = min(32768U, (maxblocks << 1) + psize);
- mh->UnitSizeFactor--;
- }
- printk(KERN_WARNING "UnitSizeFactor=0x00 detected. Correct value is assumed to be 0x%02x.\n", mh->UnitSizeFactor);
- }
-
- /* NOTE: The lines below modify internal variables of the NAND and MTD
- layers; variables with have already been configured by nand_scan.
- Unfortunately, we didn't know before this point what these values
- should be. Thus, this code is somewhat dependant on the exact
- implementation of the NAND layer. */
- if (mh->UnitSizeFactor != 0xff) {
- this->bbt_erase_shift += (0xff - mh->UnitSizeFactor);
- mtd->erasesize <<= (0xff - mh->UnitSizeFactor);
- printk(KERN_INFO "Setting virtual erase size to %d\n", mtd->erasesize);
- blocks = mtd->size >> this->bbt_erase_shift;
- maxblocks = min(32768U, mtd->erasesize - psize);
- }
-
- if (blocks > maxblocks) {
- printk(KERN_ERR "UnitSizeFactor of 0x%02x is inconsistent with device size. Aborting.\n", mh->UnitSizeFactor);
- goto out;
- }
-
- /* Skip past the media headers. */
- offs = max(doc->mh0_page, doc->mh1_page);
- offs <<= this->page_shift;
- offs += mtd->erasesize;
-
- //parts[0].name = " DiskOnChip Boot / Media Header partition";
- //parts[0].offset = 0;
- //parts[0].size = offs;
-
- parts[0].name = " DiskOnChip BDTL partition";
- parts[0].offset = offs;
- parts[0].size = (mh->NumEraseUnits - numheaders) << this->bbt_erase_shift;
-
- offs += parts[0].size;
- if (offs < mtd->size) {
- parts[1].name = " DiskOnChip Remainder partition";
- parts[1].offset = offs;
- parts[1].size = mtd->size - offs;
- ret = 2;
- goto out;
- }
- ret = 1;
-out:
- kfree(buf);
- return ret;
-}
-
-/* This is a stripped-down copy of the code in inftlmount.c */
-static inline int __init inftl_partscan(struct mtd_info *mtd,
- struct mtd_partition *parts)
-{
- struct nand_chip *this = mtd->priv;
- struct doc_priv *doc = (void *)this->priv;
- int ret = 0;
- u_char *buf;
- struct INFTLMediaHeader *mh;
- struct INFTLPartition *ip;
- int numparts = 0;
- int blocks;
- int vshift, lastvunit = 0;
- int i;
- int end = mtd->size;
-
- if (inftl_bbt_write)
- end -= (INFTL_BBT_RESERVED_BLOCKS << this->phys_erase_shift);
-
- buf = kmalloc(mtd->oobblock, GFP_KERNEL);
- if (!buf) {
- printk(KERN_ERR "DiskOnChip mediaheader kmalloc failed!\n");
- return 0;
- }
-
- if (!find_media_headers(mtd, buf, "BNAND", 0)) goto out;
- doc->mh1_page = doc->mh0_page + (4096 >> this->page_shift);
- mh = (struct INFTLMediaHeader *) buf;
-
- mh->NoOfBootImageBlocks = le32_to_cpu(mh->NoOfBootImageBlocks);
- mh->NoOfBinaryPartitions = le32_to_cpu(mh->NoOfBinaryPartitions);
- mh->NoOfBDTLPartitions = le32_to_cpu(mh->NoOfBDTLPartitions);
- mh->BlockMultiplierBits = le32_to_cpu(mh->BlockMultiplierBits);
- mh->FormatFlags = le32_to_cpu(mh->FormatFlags);
- mh->PercentUsed = le32_to_cpu(mh->PercentUsed);
-
-//#ifdef CONFIG_MTD_DEBUG_VERBOSE
-// if (CONFIG_MTD_DEBUG_VERBOSE >= 2)
- printk(KERN_INFO " bootRecordID = %s\n"
- " NoOfBootImageBlocks = %d\n"
- " NoOfBinaryPartitions = %d\n"
- " NoOfBDTLPartitions = %d\n"
- " BlockMultiplerBits = %d\n"
- " FormatFlgs = %d\n"
- " OsakVersion = %d.%d.%d.%d\n"
- " PercentUsed = %d\n",
- mh->bootRecordID, mh->NoOfBootImageBlocks,
- mh->NoOfBinaryPartitions,
- mh->NoOfBDTLPartitions,
- mh->BlockMultiplierBits, mh->FormatFlags,
- ((unsigned char *) &mh->OsakVersion)[0] & 0xf,
- ((unsigned char *) &mh->OsakVersion)[1] & 0xf,
- ((unsigned char *) &mh->OsakVersion)[2] & 0xf,
- ((unsigned char *) &mh->OsakVersion)[3] & 0xf,
- mh->PercentUsed);
-//#endif
-
- vshift = this->phys_erase_shift + mh->BlockMultiplierBits;
-
- blocks = mtd->size >> vshift;
- if (blocks > 32768) {
- printk(KERN_ERR "BlockMultiplierBits=%d is inconsistent with device size. Aborting.\n", mh->BlockMultiplierBits);
- goto out;
- }
-
- blocks = doc->chips_per_floor << (this->chip_shift - this->phys_erase_shift);
- if (inftl_bbt_write && (blocks > mtd->erasesize)) {
- printk(KERN_ERR "Writeable BBTs spanning more than one erase block are not yet supported. FIX ME!\n");
- goto out;
- }
-
- /* Scan the partitions */
- for (i = 0; (i < 4); i++) {
- ip = &(mh->Partitions[i]);
- ip->virtualUnits = le32_to_cpu(ip->virtualUnits);
- ip->firstUnit = le32_to_cpu(ip->firstUnit);
- ip->lastUnit = le32_to_cpu(ip->lastUnit);
- ip->flags = le32_to_cpu(ip->flags);
- ip->spareUnits = le32_to_cpu(ip->spareUnits);
- ip->Reserved0 = le32_to_cpu(ip->Reserved0);
-
-//#ifdef CONFIG_MTD_DEBUG_VERBOSE
-// if (CONFIG_MTD_DEBUG_VERBOSE >= 2)
- printk(KERN_INFO " PARTITION[%d] ->\n"
- " virtualUnits = %d\n"
- " firstUnit = %d\n"
- " lastUnit = %d\n"
- " flags = 0x%x\n"
- " spareUnits = %d\n",
- i, ip->virtualUnits, ip->firstUnit,
- ip->lastUnit, ip->flags,
- ip->spareUnits);
-//#endif
-
-/*
- if ((i == 0) && (ip->firstUnit > 0)) {
- parts[0].name = " DiskOnChip IPL / Media Header partition";
- parts[0].offset = 0;
- parts[0].size = mtd->erasesize * ip->firstUnit;
- numparts = 1;
- }
-*/
-
- if (ip->flags & INFTL_BINARY)
- parts[numparts].name = " DiskOnChip BDK partition";
- else
- parts[numparts].name = " DiskOnChip BDTL partition";
- parts[numparts].offset = ip->firstUnit << vshift;
- parts[numparts].size = (1 + ip->lastUnit - ip->firstUnit) << vshift;
- numparts++;
- if (ip->lastUnit > lastvunit) lastvunit = ip->lastUnit;
- if (ip->flags & INFTL_LAST) break;
- }
- lastvunit++;
- if ((lastvunit << vshift) < end) {
- parts[numparts].name = " DiskOnChip Remainder partition";
- parts[numparts].offset = lastvunit << vshift;
- parts[numparts].size = end - parts[numparts].offset;
- numparts++;
- }
- ret = numparts;
-out:
- kfree(buf);
- return ret;
-}
-
-static int __init nftl_scan_bbt(struct mtd_info *mtd)
-{
- int ret, numparts;
- struct nand_chip *this = mtd->priv;
- struct doc_priv *doc = (void *)this->priv;
- struct mtd_partition parts[2];
-
- memset((char *) parts, 0, sizeof(parts));
- /* On NFTL, we have to find the media headers before we can read the
- BBTs, since they're stored in the media header eraseblocks. */
- numparts = nftl_partscan(mtd, parts);
- if (!numparts) return -EIO;
- this->bbt_td->options = NAND_BBT_ABSPAGE | NAND_BBT_8BIT |
- NAND_BBT_SAVECONTENT | NAND_BBT_WRITE |
- NAND_BBT_VERSION;
- this->bbt_td->veroffs = 7;
- this->bbt_td->pages[0] = doc->mh0_page + 1;
- if (doc->mh1_page != -1) {
- this->bbt_md->options = NAND_BBT_ABSPAGE | NAND_BBT_8BIT |
- NAND_BBT_SAVECONTENT | NAND_BBT_WRITE |
- NAND_BBT_VERSION;
- this->bbt_md->veroffs = 7;
- this->bbt_md->pages[0] = doc->mh1_page + 1;
- } else {
- this->bbt_md = NULL;
- }
-
- /* It's safe to set bd=NULL below because NAND_BBT_CREATE is not set.
- At least as nand_bbt.c is currently written. */
- if ((ret = nand_scan_bbt(mtd, NULL)))
- return ret;
- add_mtd_device(mtd);
-#ifdef CONFIG_MTD_PARTITIONS
- if (!no_autopart)
- add_mtd_partitions(mtd, parts, numparts);
-#endif
- return 0;
-}
-
-static int __init inftl_scan_bbt(struct mtd_info *mtd)
-{
- int ret, numparts;
- struct nand_chip *this = mtd->priv;
- struct doc_priv *doc = (void *)this->priv;
- struct mtd_partition parts[5];
-
- if (this->numchips > doc->chips_per_floor) {
- printk(KERN_ERR "Multi-floor INFTL devices not yet supported.\n");
- return -EIO;
- }
-
- if (DoC_is_MillenniumPlus(doc)) {
- this->bbt_td->options = NAND_BBT_2BIT | NAND_BBT_ABSPAGE;
- if (inftl_bbt_write)
- this->bbt_td->options |= NAND_BBT_WRITE;
- this->bbt_td->pages[0] = 2;
- this->bbt_md = NULL;
- } else {
- this->bbt_td->options = NAND_BBT_LASTBLOCK | NAND_BBT_8BIT |
- NAND_BBT_VERSION;
- if (inftl_bbt_write)
- this->bbt_td->options |= NAND_BBT_WRITE;
- this->bbt_td->offs = 8;
- this->bbt_td->len = 8;
- this->bbt_td->veroffs = 7;
- this->bbt_td->maxblocks = INFTL_BBT_RESERVED_BLOCKS;
- this->bbt_td->reserved_block_code = 0x01;
- this->bbt_td->pattern = "MSYS_BBT";
-
- this->bbt_md->options = NAND_BBT_LASTBLOCK | NAND_BBT_8BIT |
- NAND_BBT_VERSION;
- if (inftl_bbt_write)
- this->bbt_md->options |= NAND_BBT_WRITE;
- this->bbt_md->offs = 8;
- this->bbt_md->len = 8;
- this->bbt_md->veroffs = 7;
- this->bbt_md->maxblocks = INFTL_BBT_RESERVED_BLOCKS;
- this->bbt_md->reserved_block_code = 0x01;
- this->bbt_md->pattern = "TBB_SYSM";
- }
-
- /* It's safe to set bd=NULL below because NAND_BBT_CREATE is not set.
- At least as nand_bbt.c is currently written. */
- if ((ret = nand_scan_bbt(mtd, NULL)))
- return ret;
- memset((char *) parts, 0, sizeof(parts));
- numparts = inftl_partscan(mtd, parts);
- /* At least for now, require the INFTL Media Header. We could probably
- do without it for non-INFTL use, since all it gives us is
- autopartitioning, but I want to give it more thought. */
- if (!numparts) return -EIO;
- add_mtd_device(mtd);
-#ifdef CONFIG_MTD_PARTITIONS
- if (!no_autopart)
- add_mtd_partitions(mtd, parts, numparts);
-#endif
- return 0;
-}
-
-static inline int __init doc2000_init(struct mtd_info *mtd)
-{
- struct nand_chip *this = mtd->priv;
- struct doc_priv *doc = (void *)this->priv;
-
- this->write_byte = doc2000_write_byte;
- this->read_byte = doc2000_read_byte;
- this->write_buf = doc2000_writebuf;
- this->read_buf = doc2000_readbuf;
- this->verify_buf = doc2000_verifybuf;
- this->scan_bbt = nftl_scan_bbt;
-
- doc->CDSNControl = CDSN_CTRL_FLASH_IO | CDSN_CTRL_ECC_IO;
- doc2000_count_chips(mtd);
- mtd->name = "DiskOnChip 2000 (NFTL Model)";
- return (4 * doc->chips_per_floor);
-}
-
-static inline int __init doc2001_init(struct mtd_info *mtd)
-{
- struct nand_chip *this = mtd->priv;
- struct doc_priv *doc = (void *)this->priv;
-
- this->write_byte = doc2001_write_byte;
- this->read_byte = doc2001_read_byte;
- this->write_buf = doc2001_writebuf;
- this->read_buf = doc2001_readbuf;
- this->verify_buf = doc2001_verifybuf;
-
- ReadDOC(doc->virtadr, ChipID);
- ReadDOC(doc->virtadr, ChipID);
- ReadDOC(doc->virtadr, ChipID);
- if (ReadDOC(doc->virtadr, ChipID) != DOC_ChipID_DocMil) {
- /* It's not a Millennium; it's one of the newer
- DiskOnChip 2000 units with a similar ASIC.
- Treat it like a Millennium, except that it
- can have multiple chips. */
- doc2000_count_chips(mtd);
- mtd->name = "DiskOnChip 2000 (INFTL Model)";
- this->scan_bbt = inftl_scan_bbt;
- return (4 * doc->chips_per_floor);
- } else {
- /* Bog-standard Millennium */
- doc->chips_per_floor = 1;
- mtd->name = "DiskOnChip Millennium";
- this->scan_bbt = nftl_scan_bbt;
- return 1;
- }
-}
-
-static inline int __init doc2001plus_init(struct mtd_info *mtd)
-{
- struct nand_chip *this = mtd->priv;
- struct doc_priv *doc = (void *)this->priv;
-
- this->write_byte = NULL;
- this->read_byte = doc2001plus_read_byte;
- this->write_buf = doc2001plus_writebuf;
- this->read_buf = doc2001plus_readbuf;
- this->verify_buf = doc2001plus_verifybuf;
- this->scan_bbt = inftl_scan_bbt;
- this->hwcontrol = NULL;
- this->select_chip = doc2001plus_select_chip;
- this->cmdfunc = doc2001plus_command;
- this->enable_hwecc = doc2001plus_enable_hwecc;
-
- doc->chips_per_floor = 1;
- mtd->name = "DiskOnChip Millennium Plus";
-
- return 1;
-}
-
-static inline int __init doc_probe(unsigned long physadr)
-{
- unsigned char ChipID;
- struct mtd_info *mtd;
- struct nand_chip *nand;
- struct doc_priv *doc;
- unsigned long virtadr;
- unsigned char save_control;
- unsigned char tmp, tmpb, tmpc;
- int reg, len, numchips;
- int ret = 0;
-
- virtadr = (unsigned long)ioremap(physadr, DOC_IOREMAP_LEN);
- if (!virtadr) {
- printk(KERN_ERR "Diskonchip ioremap failed: 0x%x bytes at 0x%lx\n", DOC_IOREMAP_LEN, physadr);
- return -EIO;
- }
-
- /* It's not possible to cleanly detect the DiskOnChip - the
- * bootup procedure will put the device into reset mode, and
- * it's not possible to talk to it without actually writing
- * to the DOCControl register. So we store the current contents
- * of the DOCControl register's location, in case we later decide
- * that it's not a DiskOnChip, and want to put it back how we
- * found it.
- */
- save_control = ReadDOC(virtadr, DOCControl);
-
- /* Reset the DiskOnChip ASIC */
- WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_RESET,
- virtadr, DOCControl);
- WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_RESET,
- virtadr, DOCControl);
-
- /* Enable the DiskOnChip ASIC */
- WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_NORMAL,
- virtadr, DOCControl);
- WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_NORMAL,
- virtadr, DOCControl);
-
- ChipID = ReadDOC(virtadr, ChipID);
-
- switch(ChipID) {
- case DOC_ChipID_Doc2k:
- reg = DoC_2k_ECCStatus;
- break;
- case DOC_ChipID_DocMil:
- reg = DoC_ECCConf;
- break;
- case DOC_ChipID_DocMilPlus16:
- case DOC_ChipID_DocMilPlus32:
- case 0:
- /* Possible Millennium Plus, need to do more checks */
- /* Possibly release from power down mode */
- for (tmp = 0; (tmp < 4); tmp++)
- ReadDOC(virtadr, Mplus_Power);
-
- /* Reset the Millennium Plus ASIC */
- tmp = DOC_MODE_RESET | DOC_MODE_MDWREN | DOC_MODE_RST_LAT |
- DOC_MODE_BDECT;
- WriteDOC(tmp, virtadr, Mplus_DOCControl);
- WriteDOC(~tmp, virtadr, Mplus_CtrlConfirm);
-
- mdelay(1);
- /* Enable the Millennium Plus ASIC */
- tmp = DOC_MODE_NORMAL | DOC_MODE_MDWREN | DOC_MODE_RST_LAT |
- DOC_MODE_BDECT;
- WriteDOC(tmp, virtadr, Mplus_DOCControl);
- WriteDOC(~tmp, virtadr, Mplus_CtrlConfirm);
- mdelay(1);
-
- ChipID = ReadDOC(virtadr, ChipID);
-
- switch (ChipID) {
- case DOC_ChipID_DocMilPlus16:
- reg = DoC_Mplus_Toggle;
- break;
- case DOC_ChipID_DocMilPlus32:
- printk(KERN_ERR "DiskOnChip Millennium Plus 32MB is not supported, ignoring.\n");
- default:
- ret = -ENODEV;
- goto notfound;
- }
- break;
-
- default:
- ret = -ENODEV;
- goto notfound;
- }
- /* Check the TOGGLE bit in the ECC register */
- tmp = ReadDOC_(virtadr, reg) & DOC_TOGGLE_BIT;
- tmpb = ReadDOC_(virtadr, reg) & DOC_TOGGLE_BIT;
- tmpc = ReadDOC_(virtadr, reg) & DOC_TOGGLE_BIT;
- if ((tmp == tmpb) || (tmp != tmpc)) {
- printk(KERN_WARNING "Possible DiskOnChip at 0x%lx failed TOGGLE test, dropping.\n", physadr);
- ret = -ENODEV;
- goto notfound;
- }
-
- for (mtd = doclist; mtd; mtd = doc->nextdoc) {
- unsigned char oldval;
- unsigned char newval;
- nand = mtd->priv;
- doc = (void *)nand->priv;
- /* Use the alias resolution register to determine if this is
- in fact the same DOC aliased to a new address. If writes
- to one chip's alias resolution register change the value on
- the other chip, they're the same chip. */
- if (ChipID == DOC_ChipID_DocMilPlus16) {
- oldval = ReadDOC(doc->virtadr, Mplus_AliasResolution);
- newval = ReadDOC(virtadr, Mplus_AliasResolution);
- } else {
- oldval = ReadDOC(doc->virtadr, AliasResolution);
- newval = ReadDOC(virtadr, AliasResolution);
- }
- if (oldval != newval)
- continue;
- if (ChipID == DOC_ChipID_DocMilPlus16) {
- WriteDOC(~newval, virtadr, Mplus_AliasResolution);
- oldval = ReadDOC(doc->virtadr, Mplus_AliasResolution);
- WriteDOC(newval, virtadr, Mplus_AliasResolution); // restore it
- } else {
- WriteDOC(~newval, virtadr, AliasResolution);
- oldval = ReadDOC(doc->virtadr, AliasResolution);
- WriteDOC(newval, virtadr, AliasResolution); // restore it
- }
- newval = ~newval;
- if (oldval == newval) {
- printk(KERN_DEBUG "Found alias of DOC at 0x%lx to 0x%lx\n", doc->physadr, physadr);
- goto notfound;
- }
- }
-
- printk(KERN_NOTICE "DiskOnChip found at 0x%lx\n", physadr);
-
- len = sizeof(struct mtd_info) +
- sizeof(struct nand_chip) +
- sizeof(struct doc_priv) +
- (2 * sizeof(struct nand_bbt_descr));
- mtd = kmalloc(len, GFP_KERNEL);
- if (!mtd) {
- printk(KERN_ERR "DiskOnChip kmalloc (%d bytes) failed!\n", len);
- ret = -ENOMEM;
- goto fail;
- }
- memset(mtd, 0, len);
-
- nand = (struct nand_chip *) (mtd + 1);
- doc = (struct doc_priv *) (nand + 1);
- nand->bbt_td = (struct nand_bbt_descr *) (doc + 1);
- nand->bbt_md = nand->bbt_td + 1;
-
- mtd->priv = (void *) nand;
- mtd->owner = THIS_MODULE;
-
- nand->priv = (void *) doc;
- nand->select_chip = doc200x_select_chip;
- nand->hwcontrol = doc200x_hwcontrol;
- nand->dev_ready = doc200x_dev_ready;
- nand->waitfunc = doc200x_wait;
- nand->block_bad = doc200x_block_bad;
- nand->enable_hwecc = doc200x_enable_hwecc;
- nand->calculate_ecc = doc200x_calculate_ecc;
- nand->correct_data = doc200x_correct_data;
- //nand->data_buf
- nand->autooob = &doc200x_oobinfo;
- nand->eccmode = NAND_ECC_HW6_512;
- nand->options = NAND_USE_FLASH_BBT | NAND_HWECC_SYNDROME;
-
- doc->physadr = physadr;
- doc->virtadr = virtadr;
- doc->ChipID = ChipID;
- doc->curfloor = -1;
- doc->curchip = -1;
- doc->mh0_page = -1;
- doc->mh1_page = -1;
- doc->nextdoc = doclist;
-
- if (ChipID == DOC_ChipID_Doc2k)
- numchips = doc2000_init(mtd);
- else if (ChipID == DOC_ChipID_DocMilPlus16)
- numchips = doc2001plus_init(mtd);
- else
- numchips = doc2001_init(mtd);
-
- if ((ret = nand_scan(mtd, numchips))) {
- /* DBB note: i believe nand_release is necessary here, as
- buffers may have been allocated in nand_base. Check with
- Thomas. FIX ME! */
- /* nand_release will call del_mtd_device, but we haven't yet
- added it. This is handled without incident by
- del_mtd_device, as far as I can tell. */
- nand_release(mtd);
- kfree(mtd);
- goto fail;
- }
-
- /* Success! */
- doclist = mtd;
- return 0;
-
-notfound:
- /* Put back the contents of the DOCControl register, in case it's not
- actually a DiskOnChip. */
- WriteDOC(save_control, virtadr, DOCControl);
-fail:
- iounmap((void *)virtadr);
- return ret;
-}
-
-int __init init_nanddoc(void)
-{
- int i;
-
- if (doc_config_location) {
- printk(KERN_INFO "Using configured DiskOnChip probe address 0x%lx\n", doc_config_location);
- return doc_probe(doc_config_location);
- } else {
- for (i=0; (doc_locations[i] != 0xffffffff); i++) {
- doc_probe(doc_locations[i]);
- }
- }
- /* No banner message any more. Print a message if no DiskOnChip
- found, so the user knows we at least tried. */
- if (!doclist) {
- printk(KERN_INFO "No valid DiskOnChip devices found\n");
- return -ENODEV;
- }
- return 0;
-}
-
-void __exit cleanup_nanddoc(void)
-{
- struct mtd_info *mtd, *nextmtd;
- struct nand_chip *nand;
- struct doc_priv *doc;
-
- for (mtd = doclist; mtd; mtd = nextmtd) {
- nand = mtd->priv;
- doc = (void *)nand->priv;
-
- nextmtd = doc->nextdoc;
- nand_release(mtd);
- iounmap((void *)doc->virtadr);
- kfree(mtd);
- }
-}
-
-module_init(init_nanddoc);
-module_exit(cleanup_nanddoc);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
-MODULE_DESCRIPTION("M-Systems DiskOnChip 2000, Millennium and Millennium Plus device driver\n");
* Derived from drivers/mtd/nand/autcpu12.c
* Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de)
*
- * $Id: edb7312.c,v 1.8 2004/07/12 15:03:26 dwmw2 Exp $
+ * $Id: edb7312.c,v 1.5 2003/04/20 07:24:40 gleixner Exp $
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
/*
* hardware specific access to control-lines
*/
-static void ep7312_hwcontrol(struct mtd_info *mtd, int cmd)
+static void ep7312_hwcontrol(int cmd)
{
switch(cmd) {
/*
* read device ready pin
*/
-static int ep7312_device_ready(struct mtd_info *mtd)
+static int ep7312_device_ready(void)
{
return 1;
}
-#ifdef CONFIG_MTD_PARTITIONS
-const char *part_probes[] = { "cmdlinepart", NULL };
-#endif
/*
* Main initialization routine
this->chip_delay = 15;
/* Scan to find existence of the device */
- if (nand_scan (ep7312_mtd, 1)) {
+ if (nand_scan (ep7312_mtd)) {
iounmap((void *)ep7312_fio_base);
kfree (ep7312_mtd);
return -ENXIO;
return -ENOMEM;
}
-#ifdef CONFIG_PARTITIONS
- ep7312_mtd->name = "edb7312-nand";
- mtd_parts_nb = parse_mtd_partitions(ep7312_mtd, part_probes,
- &mtd_parts, 0);
+#ifdef CONFIG_MTD_CMDLINE_PARTS
+ mtd_parts_nb = parse_cmdline_partitions(ep7312_mtd, &mtd_parts,
+ "edb7312-nand");
if (mtd_parts_nb > 0)
- part_type = "command line";
+ part_type = "command line";
else
- mtd_parts_nb = 0;
+ mtd_parts_nb = 0;
#endif
- if (mtd_parts_nb == 0) {
+ if (mtd_parts_nb == 0)
+ {
mtd_parts = partition_info;
mtd_parts_nb = NUM_PARTITIONS;
part_type = "static";
--- /dev/null
+/*
+ * drivers/mtd/nand.c
+ *
+ * Overview:
+ * This is the generic MTD driver for NAND flash devices. It should be
+ * capable of working with almost all NAND chips currently available.
+ *
+ * Additional technical information is available on
+ * http://www.linux-mtd.infradead.org/tech/nand.html
+ *
+ * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
+ * 2002 Thomas Gleixner (tglx@linutronix.de)
+ *
+ * 10-29-2001 Thomas Gleixner (tglx@linutronix.de)
+ * - Changed nand_chip structure for controlline function to
+ * support different hardware structures (Access to
+ * controllines ALE,CLE,NCE via hardware specific function.
+ * - exit out of "failed erase block" changed, to avoid
+ * driver hangup
+ * - init_waitqueue_head added in function nand_scan !!
+ *
+ * 01-30-2002 Thomas Gleixner (tglx@linutronix.de)
+ * change in nand_writev to block invalid vecs entries
+ *
+ * 02-11-2002 Thomas Gleixner (tglx@linutronix.de)
+ * - major rewrite to avoid duplicated code
+ * common nand_write_page function
+ * common get_chip function
+ * - added oob_config structure for out of band layouts
+ * - write_oob changed for partial programming
+ * - read cache for faster access for subsequent reads
+ * from the same page.
+ * - support for different read/write address
+ * - support for device ready/busy line
+ * - read oob for more than one page enabled
+ *
+ * 02-27-2002 Thomas Gleixner (tglx@linutronix.de)
+ * - command-delay can be programmed
+ * - fixed exit from erase with callback-function enabled
+ *
+ * 03-21-2002 Thomas Gleixner (tglx@linutronix.de)
+ * - DEBUG improvements provided by Elizabeth Clarke
+ * (eclarke@aminocom.com)
+ * - added zero check for this->chip_delay
+ *
+ * 04-03-2002 Thomas Gleixner (tglx@linutronix.de)
+ * - added added hw-driver supplied command and wait functions
+ * - changed blocking for erase (erase suspend enabled)
+ * - check pointers before accessing flash provided by
+ * John Hall (john.hall@optionexist.co.uk)
+ *
+ * 04-09-2002 Thomas Gleixner (tglx@linutronix.de)
+ * - nand_wait repaired
+ *
+ * 04-28-2002 Thomas Gleixner (tglx@linutronix.de)
+ * - OOB config defines moved to nand.h
+ *
+ * 08-01-2002 Thomas Gleixner (tglx@linutronix.de)
+ * - changed my mailaddress, added pointer to tech/nand.html
+ *
+ * 08-07-2002 Thomas Gleixner (tglx@linutronix.de)
+ * forced bad block location to byte 5 of OOB, even if
+ * CONFIG_MTD_NAND_ECC_JFFS2 is not set, to prevent
+ * erase /dev/mtdX from erasing bad blocks and destroying
+ * bad block info
+ *
+ * 08-10-2002 Thomas Gleixner (tglx@linutronix.de)
+ * Fixed writing tail of data. Thanks to Alice Hennessy
+ * <ahennessy@mvista.com>.
+ *
+ * 08-10-2002 Thomas Gleixner (tglx@linutronix.de)
+ * nand_read_ecc and nand_write_page restructured to support
+ * hardware ECC. Thanks to Steven Hein (ssh@sgi.com)
+ * for basic implementation and suggestions.
+ * 3 new pointers in nand_chip structure:
+ * calculate_ecc, correct_data, enabled_hwecc
+ * forcing all hw-drivers to support page cache
+ * eccvalid_pos is now mandatory
+ *
+ * 08-17-2002 tglx: fixed signed/unsigned missmatch in write.c
+ * Thanks to Ken Offer <koffer@arlut.utexas.edu>
+ *
+ * 08-29-2002 tglx: use buffered read/write only for non pagealigned
+ * access, speed up the aligned path by using the fs-buffer
+ * reset chip removed from nand_select(), implicit done
+ * only, when erase is interrupted
+ * waitfuntion use yield, instead of schedule_timeout
+ * support for 6byte/512byte hardware ECC
+ * read_ecc, write_ecc extended for different oob-layout
+ * selections: Implemented NAND_NONE_OOB, NAND_JFFS2_OOB,
+ * NAND_YAFFS_OOB. fs-driver gives one of these constants
+ * to select the oob-layout fitting the filesystem.
+ * oobdata can be read together with the raw data, when
+ * the fs-driver supplies a big enough buffer.
+ * size = 12 * number of pages to read (256B pagesize)
+ * 24 * number of pages to read (512B pagesize)
+ * the buffer contains 8/16 byte oobdata and 4/8 byte
+ * returncode from calculate_ecc
+ * oobdata can be given from filesystem to program them
+ * in one go together with the raw data. ECC codes are
+ * filled in at the place selected by oobsel.
+ *
+ * 09-04-2002 tglx: fixed write_verify (John Hall (john.hall@optionexist.co.uk))
+ *
+ * 11-11-2002 tglx: fixed debug output in nand_write_page
+ * (John Hall (john.hall@optionexist.co.uk))
+ *
+ * 11-25-2002 tglx: Moved device ID/ manufacturer ID from nand_ids.h
+ * Splitted device ID and manufacturer ID table.
+ * Removed CONFIG_MTD_NAND_ECC, as it defaults to ECC_NONE for
+ * mtd->read / mtd->write and is controllable by the fs driver
+ * for mtd->read_ecc / mtd->write_ecc
+ * some minor cleanups
+ *
+ * 12-05-2002 tglx: Dave Ellis (DGE@sixnetio) provided the fix for
+ * WRITE_VERIFY long time ago. Thanks for remembering me.
+ *
+ * 02-14-2003 tglx: Reject non page aligned writes
+ * Fixed ecc select in nand_write_page to match semantics.
+ *
+ * 02-18-2003 tglx: Changed oobsel to pointer. Added a default oob-selector
+ *
+ * 02-18-2003 tglx: Implemented oobsel again. Now it uses a pointer to
+ + a structure, which will be supplied by a filesystem driver
+ * If NULL is given, then the defaults (none or defaults
+ * supplied by ioctl (MEMSETOOBSEL) are used.
+ * For partitions the partition defaults are used (mtdpart.c)
+ *
+ * 06-04-2003 tglx: fix compile errors and fix write verify problem for
+ * some chips, which need either a delay between the readback
+ * and the next write command or have the CE removed. The
+ * CE disable/enable is much faster than a 20us delay and
+ * it should work on all available chips.
+ *
+ * $Id: nand.c,v 1.46 2003/06/04 17:10:36 gleixner Exp $
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/compatmac.h>
+#include <linux/interrupt.h>
+#include <asm/io.h>
+
+/*
+ * Macros for low-level register control
+ */
+#define nand_select() this->hwcontrol(NAND_CTL_SETNCE);
+#define nand_deselect() this->hwcontrol(NAND_CTL_CLRNCE);
+
+/*
+ * NAND low-level MTD interface functions
+ */
+static int nand_read (struct mtd_info *mtd, loff_t from, size_t len, size_t * retlen, u_char * buf);
+static int nand_read_ecc (struct mtd_info *mtd, loff_t from, size_t len,
+ size_t * retlen, u_char * buf, u_char * eccbuf, struct nand_oobinfo *oobsel);
+static int nand_read_oob (struct mtd_info *mtd, loff_t from, size_t len, size_t * retlen, u_char * buf);
+static int nand_write (struct mtd_info *mtd, loff_t to, size_t len, size_t * retlen, const u_char * buf);
+static int nand_write_ecc (struct mtd_info *mtd, loff_t to, size_t len,
+ size_t * retlen, const u_char * buf, u_char * eccbuf, struct nand_oobinfo *oobsel);
+static int nand_write_oob (struct mtd_info *mtd, loff_t to, size_t len, size_t * retlen, const u_char *buf);
+static int nand_writev (struct mtd_info *mtd, const struct kvec *vecs,
+ unsigned long count, loff_t to, size_t * retlen);
+static int nand_writev_ecc (struct mtd_info *mtd, const struct kvec *vecs,
+ unsigned long count, loff_t to, size_t * retlen, u_char *eccbuf, struct nand_oobinfo *oobsel);
+static int nand_erase (struct mtd_info *mtd, struct erase_info *instr);
+static void nand_sync (struct mtd_info *mtd);
+static int nand_write_page (struct mtd_info *mtd, struct nand_chip *this, int page, u_char *oob_buf, struct nand_oobinfo *oobsel);
+
+
+/*
+ * Send command to NAND device
+ */
+static void nand_command (struct mtd_info *mtd, unsigned command, int column, int page_addr)
+{
+ register struct nand_chip *this = mtd->priv;
+ register unsigned long NAND_IO_ADDR = this->IO_ADDR_W;
+
+ /* Begin command latch cycle */
+ this->hwcontrol (NAND_CTL_SETCLE);
+ /*
+ * Write out the command to the device.
+ */
+ if (command != NAND_CMD_SEQIN)
+ writeb (command, NAND_IO_ADDR);
+ else {
+ if (mtd->oobblock == 256 && column >= 256) {
+ column -= 256;
+ writeb (NAND_CMD_READOOB, NAND_IO_ADDR);
+ writeb (NAND_CMD_SEQIN, NAND_IO_ADDR);
+ } else if (mtd->oobblock == 512 && column >= 256) {
+ if (column < 512) {
+ column -= 256;
+ writeb (NAND_CMD_READ1, NAND_IO_ADDR);
+ writeb (NAND_CMD_SEQIN, NAND_IO_ADDR);
+ } else {
+ column -= 512;
+ writeb (NAND_CMD_READOOB, NAND_IO_ADDR);
+ writeb (NAND_CMD_SEQIN, NAND_IO_ADDR);
+ }
+ } else {
+ writeb (NAND_CMD_READ0, NAND_IO_ADDR);
+ writeb (NAND_CMD_SEQIN, NAND_IO_ADDR);
+ }
+ }
+
+ /* Set ALE and clear CLE to start address cycle */
+ this->hwcontrol (NAND_CTL_CLRCLE);
+
+ if (column != -1 || page_addr != -1) {
+ this->hwcontrol (NAND_CTL_SETALE);
+
+ /* Serially input address */
+ if (column != -1)
+ writeb (column, NAND_IO_ADDR);
+ if (page_addr != -1) {
+ writeb ((unsigned char) (page_addr & 0xff), NAND_IO_ADDR);
+ writeb ((unsigned char) ((page_addr >> 8) & 0xff), NAND_IO_ADDR);
+ /* One more address cycle for higher density devices */
+ if (mtd->size & 0x0c000000)
+ writeb ((unsigned char) ((page_addr >> 16) & 0x0f), NAND_IO_ADDR);
+ }
+ /* Latch in address */
+ this->hwcontrol (NAND_CTL_CLRALE);
+ }
+
+ /*
+ * program and erase have their own busy handlers
+ * status and sequential in needs no delay
+ */
+ switch (command) {
+
+ case NAND_CMD_PAGEPROG:
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_ERASE2:
+ case NAND_CMD_SEQIN:
+ case NAND_CMD_STATUS:
+ return;
+
+ case NAND_CMD_RESET:
+ if (this->dev_ready)
+ break;
+ this->hwcontrol (NAND_CTL_SETCLE);
+ writeb (NAND_CMD_STATUS, NAND_IO_ADDR);
+ this->hwcontrol (NAND_CTL_CLRCLE);
+ while ( !(readb (this->IO_ADDR_R) & 0x40));
+ return;
+
+ /* This applies to read commands */
+ default:
+ /*
+ * If we don't have access to the busy pin, we apply the given
+ * command delay
+ */
+ if (!this->dev_ready) {
+ udelay (this->chip_delay);
+ return;
+ }
+ }
+
+ /* wait until command is processed */
+ while (!this->dev_ready());
+}
+
+/*
+ * Get chip for selected access
+ */
+static inline void nand_get_chip (struct nand_chip *this, struct mtd_info *mtd, int new_state, int *erase_state)
+{
+
+ DECLARE_WAITQUEUE (wait, current);
+
+ /*
+ * Grab the lock and see if the device is available
+ * For erasing, we keep the spinlock until the
+ * erase command is written.
+ */
+retry:
+ spin_lock_bh (&this->chip_lock);
+
+ if (this->state == FL_READY) {
+ this->state = new_state;
+ if (new_state != FL_ERASING)
+ spin_unlock_bh (&this->chip_lock);
+ return;
+ }
+
+ if (this->state == FL_ERASING) {
+ if (new_state != FL_ERASING) {
+ this->state = new_state;
+ spin_unlock_bh (&this->chip_lock);
+ nand_select (); /* select in any case */
+ this->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+ return;
+ }
+ }
+
+ set_current_state (TASK_UNINTERRUPTIBLE);
+ add_wait_queue (&this->wq, &wait);
+ spin_unlock_bh (&this->chip_lock);
+ schedule ();
+ remove_wait_queue (&this->wq, &wait);
+ goto retry;
+}
+
+/*
+ * Wait for command done. This applies to erase and program only
+ * Erase can take up to 400ms and program up to 20ms according to
+ * general NAND and SmartMedia specs
+ *
+*/
+static int nand_wait(struct mtd_info *mtd, struct nand_chip *this, int state)
+{
+
+ unsigned long timeo = jiffies;
+ int status;
+
+ if (state == FL_ERASING)
+ timeo += (HZ * 400) / 1000;
+ else
+ timeo += (HZ * 20) / 1000;
+
+ spin_lock_bh (&this->chip_lock);
+ this->cmdfunc (mtd, NAND_CMD_STATUS, -1, -1);
+
+ while (time_before(jiffies, timeo)) {
+ /* Check, if we were interrupted */
+ if (this->state != state) {
+ spin_unlock_bh (&this->chip_lock);
+ return 0;
+ }
+ if (this->dev_ready) {
+ if (this->dev_ready ())
+ break;
+ }
+ if (readb (this->IO_ADDR_R) & 0x40)
+ break;
+
+ spin_unlock_bh (&this->chip_lock);
+ yield ();
+ spin_lock_bh (&this->chip_lock);
+ }
+ status = (int) readb (this->IO_ADDR_R);
+ spin_unlock_bh (&this->chip_lock);
+
+ return status;
+}
+
+/*
+ * Nand_page_program function is used for write and writev !
+ * This function will always program a full page of data
+ * If you call it with a non page aligned buffer, you're lost :)
+ */
+/*
+ * nand_write_page - program one page of data plus the OOB area.
+ * @mtd:     MTD device structure
+ * @this:    NAND chip structure
+ * @page:    page number to program
+ * @oob_buf: caller-supplied OOB buffer, or NULL (OOB is then padded 0xff)
+ * @oobsel:  oob placement descriptor; useecc selects ECC mode, eccpos
+ *           gives the byte positions of the ECC code inside the OOB area
+ *
+ * Returns 0 on success, -EIO if the chip status reports a failed program
+ * or (with CONFIG_MTD_NAND_VERIFY_WRITE) the read-back verify fails.
+ * NOTE(review): caller is expected to have selected the chip and set
+ * this->data_poi to the page data - confirm against callers.
+ */
+static int nand_write_page (struct mtd_info *mtd, struct nand_chip *this, int page, u_char *oob_buf, struct nand_oobinfo *oobsel)
+{
+ int i, status;
+ u_char ecc_code[6], *oob_data;
+ int eccmode = oobsel->useecc ? this->eccmode : NAND_ECC_NONE;
+ int *oob_config = oobsel->eccpos;
+
+ /* pad oob area, if we have no oob buffer from fs-driver */
+ if (!oob_buf) {
+ oob_data = &this->data_buf[mtd->oobblock];
+ for (i = 0; i < mtd->oobsize; i++)
+ oob_data[i] = 0xff;
+ } else
+ oob_data = oob_buf;
+
+ /* Send command to begin auto page programming */
+ this->cmdfunc (mtd, NAND_CMD_SEQIN, 0x00, page);
+
+ /* Write out complete page of data, take care of eccmode */
+ switch (eccmode) {
+ /* No ecc and software ecc 3/256, write all */
+ case NAND_ECC_NONE:
+ printk (KERN_WARNING "Writing data without ECC to NAND-FLASH is not recommended\n");
+ for (i = 0; i < mtd->oobblock; i++)
+ writeb ( this->data_poi[i] , this->IO_ADDR_W);
+ break;
+ case NAND_ECC_SOFT:
+ /* 3 ECC bytes per 256 bytes of data, placed via oob_config[] */
+ this->calculate_ecc (&this->data_poi[0], &(ecc_code[0]));
+ for (i = 0; i < 3; i++)
+ oob_data[oob_config[i]] = ecc_code[i];
+ /* Calculate and write the second ECC for 512 Byte page size */
+ if (mtd->oobblock == 512) {
+ this->calculate_ecc (&this->data_poi[256], &(ecc_code[3]));
+ for (i = 3; i < 6; i++)
+ oob_data[oob_config[i]] = ecc_code[i];
+ }
+ for (i = 0; i < mtd->oobblock; i++)
+ writeb ( this->data_poi[i] , this->IO_ADDR_W);
+ break;
+
+ /* Hardware ecc 3 byte / 256 data, write first half, get ecc, then second, if 512 byte pagesize */
+ case NAND_ECC_HW3_256:
+ this->enable_hwecc (NAND_ECC_WRITE); /* enable hardware ecc logic for write */
+ for (i = 0; i < mtd->eccsize; i++)
+ writeb ( this->data_poi[i] , this->IO_ADDR_W);
+
+ /* NULL data pointer: ECC is read back from the hardware generator */
+ this->calculate_ecc (NULL, &(ecc_code[0]));
+ for (i = 0; i < 3; i++)
+ oob_data[oob_config[i]] = ecc_code[i];
+
+ if (mtd->oobblock == 512) {
+ this->enable_hwecc (NAND_ECC_WRITE); /* enable hardware ecc logic for write*/
+ for (i = mtd->eccsize; i < mtd->oobblock; i++)
+ writeb ( this->data_poi[i] , this->IO_ADDR_W);
+ this->calculate_ecc (NULL, &(ecc_code[3]));
+ for (i = 3; i < 6; i++)
+ oob_data[oob_config[i]] = ecc_code[i];
+ }
+ break;
+
+ /* Hardware ecc 3 byte / 512 byte data, write full page */
+ case NAND_ECC_HW3_512:
+ this->enable_hwecc (NAND_ECC_WRITE); /* enable hardware ecc logic */
+ for (i = 0; i < mtd->oobblock; i++)
+ writeb ( this->data_poi[i] , this->IO_ADDR_W);
+ this->calculate_ecc (NULL, &(ecc_code[0]));
+ for (i = 0; i < 3; i++)
+ oob_data[oob_config[i]] = ecc_code[i];
+ break;
+
+ /* Hardware ecc 6 byte / 512 byte data, write full page */
+ case NAND_ECC_HW6_512:
+ this->enable_hwecc (NAND_ECC_WRITE); /* enable hardware ecc logic */
+ for (i = 0; i < mtd->oobblock; i++)
+ writeb ( this->data_poi[i] , this->IO_ADDR_W);
+ this->calculate_ecc (NULL, &(ecc_code[0]));
+ for (i = 0; i < 6; i++)
+ oob_data[oob_config[i]] = ecc_code[i];
+ break;
+
+ default:
+ printk (KERN_WARNING "Invalid NAND_ECC_MODE %d\n", this->eccmode);
+ BUG();
+ }
+
+ /* Write out OOB data */
+ for (i = 0; i < mtd->oobsize; i++)
+ writeb ( oob_data[i] , this->IO_ADDR_W);
+
+ /* Send command to actually program the data */
+ this->cmdfunc (mtd, NAND_CMD_PAGEPROG, -1, -1);
+
+ /* call wait ready function */
+ status = this->waitfunc (mtd, this, FL_WRITING);
+
+ /* See if device thinks it succeeded; status bit 0 is the fail bit */
+ if (status & 0x01) {
+ DEBUG (MTD_DEBUG_LEVEL0, "%s: " "Failed write, page 0x%08x, ", __FUNCTION__, page);
+ return -EIO;
+ }
+
+#ifdef CONFIG_MTD_NAND_VERIFY_WRITE
+ /*
+ * The NAND device assumes that it is always writing to
+ * a cleanly erased page. Hence, it performs its internal
+ * write verification only on bits that transitioned from
+ * 1 to 0. The device does NOT verify the whole page on a
+ * byte by byte basis. It is possible that the page was
+ * not completely erased or the page is becoming unusable
+ * due to wear. The read with ECC would catch the error
+ * later when the ECC page check fails, but we would rather
+ * catch it early in the page write stage. Better to write
+ * no data than invalid data.
+ */
+
+ /* Send command to read back the page */
+ this->cmdfunc (mtd, NAND_CMD_READ0, 0, page);
+ /* Loop through and verify the data */
+ for (i = 0; i < mtd->oobblock; i++) {
+ if (this->data_poi[i] != readb (this->IO_ADDR_R)) {
+ DEBUG (MTD_DEBUG_LEVEL0, "%s: " "Failed write verify, page 0x%08x ", __FUNCTION__, page);
+ return -EIO;
+ }
+ }
+
+ /* check, if we have a fs-supplied oob-buffer */
+ if (oob_buf) {
+ /* Verify the whole OOB area byte by byte against what we wrote */
+ for (i = 0; i < mtd->oobsize; i++) {
+ if (oob_data[i] != readb (this->IO_ADDR_R)) {
+ DEBUG (MTD_DEBUG_LEVEL0, "%s: " "Failed write verify, page 0x%08x ", __FUNCTION__, page);
+ return -EIO;
+ }
+ }
+ } else {
+ /* No fs buffer: only the ECC bytes are meaningful, verify just those */
+ if (eccmode != NAND_ECC_NONE) {
+ int ecc_bytes = 0;
+
+ switch (this->eccmode) {
+ case NAND_ECC_SOFT:
+ case NAND_ECC_HW3_256: ecc_bytes = (mtd->oobblock == 512) ? 6 : 3; break;
+ case NAND_ECC_HW3_512: ecc_bytes = 3; break;
+ case NAND_ECC_HW6_512: ecc_bytes = 6; break;
+ }
+
+ for (i = 0; i < mtd->oobsize; i++)
+ oob_data[i] = readb (this->IO_ADDR_R);
+
+ for (i = 0; i < ecc_bytes; i++) {
+ if (oob_data[oob_config[i]] != ecc_code[i]) {
+ DEBUG (MTD_DEBUG_LEVEL0,
+ "%s: Failed ECC write "
+ "verify, page 0x%08x, " "%6i bytes were succesful\n", __FUNCTION__, page, i);
+ return -EIO;
+ }
+ }
+ }
+ }
+ /*
+ * Terminate the read command. This is faster than sending a reset command or
+ * applying a 20us delay before issuing the next programm sequence.
+ * This is not a problem for all chips, but I have found a bunch of them.
+ */
+ nand_deselect();
+ nand_select();
+#endif
+ return 0;
+}
+
+/*
+ * nand_read - MTD read entry point without caller-supplied oob info.
+ * Delegates to nand_read_ecc() with NULL oob buffer and NULL oobsel,
+ * so the chip default oobinfo (and therefore its ECC mode) is used.
+ */
+static int nand_read (struct mtd_info *mtd, loff_t from, size_t len, size_t * retlen, u_char * buf)
+{
+ return (nand_read_ecc (mtd, from, len, retlen, buf, NULL, NULL));
+}
+
+
+/*
+ * nand_read_ecc - read data with ECC handling.
+ * @mtd:     MTD device structure
+ * @from:    byte offset to read from
+ * @len:     number of bytes to read
+ * @retlen:  out: number of bytes actually read
+ * @buf:     destination buffer for the data
+ * @oob_buf: optional buffer for raw OOB data (+ per-half ECC status ints
+ *           appended after each page's oob bytes); NULL to discard OOB
+ * @oobsel:  oob placement descriptor; NULL selects mtd->oobinfo default
+ *
+ * Returns 0 on success, -EINVAL for reads past the device end, -EIO if
+ * any page had an uncorrectable ECC error (data is still returned and
+ * *retlen == len, so the fs driver can decide what to do).
+ */
+static int nand_read_ecc (struct mtd_info *mtd, loff_t from, size_t len,
+ size_t * retlen, u_char * buf, u_char * oob_buf, struct nand_oobinfo *oobsel)
+{
+ int j, col, page, end, ecc;
+ int erase_state = 0;
+ int read = 0, oob = 0, ecc_status = 0, ecc_failed = 0;
+ struct nand_chip *this = mtd->priv;
+ u_char *data_poi, *oob_data = oob_buf;
+ u_char ecc_calc[6];
+ u_char ecc_code[6];
+ int eccmode;
+ int *oob_config;
+
+ // use chip default if zero
+ if (oobsel == NULL)
+ oobsel = &mtd->oobinfo;
+
+ eccmode = oobsel->useecc ? this->eccmode : NAND_ECC_NONE;
+ oob_config = oobsel->eccpos;
+
+ DEBUG (MTD_DEBUG_LEVEL3, "nand_read_ecc: from = 0x%08x, len = %i\n", (unsigned int) from, (int) len);
+
+ /* Do not allow reads past end of device */
+ if ((from + len) > mtd->size) {
+ DEBUG (MTD_DEBUG_LEVEL0, "nand_read_ecc: Attempt read beyond end of device\n");
+ *retlen = 0;
+ return -EINVAL;
+ }
+
+ /* Grab the lock and see if the device is available */
+ nand_get_chip (this, mtd ,FL_READING, &erase_state);
+
+ /* Select the NAND device */
+ nand_select ();
+
+ /* First we calculate the starting page */
+ page = from >> this->page_shift;
+
+ /* Get raw starting column; oobblock is a power of two, so this masks */
+ col = from & (mtd->oobblock - 1);
+
+ end = mtd->oobblock;
+ ecc = mtd->eccsize;
+
+ /* Send the read command */
+ this->cmdfunc (mtd, NAND_CMD_READ0, 0x00, page);
+
+ /* Loop until all data read */
+ while (read < len) {
+
+ /* If we have consequent page reads, apply delay or wait for ready/busy pin */
+ if (read) {
+ if (!this->dev_ready)
+ udelay (this->chip_delay);
+ else
+ while (!this->dev_ready());
+ }
+
+ /*
+ * If the read is not page aligned, we have to read into data buffer
+ * due to ecc, else we read into return buffer direct
+ */
+ if (!col && (len - read) >= end)
+ data_poi = &buf[read];
+ else
+ data_poi = this->data_buf;
+
+ /* get oob area, if we have no oob buffer from fs-driver */
+ if (!oob_buf) {
+ oob_data = &this->data_buf[end];
+ oob = 0;
+ }
+
+ j = 0;
+ switch (eccmode) {
+ case NAND_ECC_NONE: /* No ECC, Read in a page */
+ printk (KERN_WARNING "Reading data from NAND FLASH without ECC is not recommended\n");
+ while (j < end)
+ data_poi[j++] = readb (this->IO_ADDR_R);
+ break;
+
+ case NAND_ECC_SOFT: /* Software ECC 3/256: Read in a page + oob data */
+ while (j < end)
+ data_poi[j++] = readb (this->IO_ADDR_R);
+ this->calculate_ecc (&data_poi[0], &ecc_calc[0]);
+ if (mtd->oobblock == 512)
+ this->calculate_ecc (&data_poi[256], &ecc_calc[3]);
+ break;
+
+ case NAND_ECC_HW3_256: /* Hardware ECC 3 byte /256 byte data: Read in first 256 byte, get ecc, */
+ this->enable_hwecc (NAND_ECC_READ);
+ while (j < ecc)
+ data_poi[j++] = readb (this->IO_ADDR_R);
+ this->calculate_ecc (&data_poi[0], &ecc_calc[0]); /* read from hardware */
+
+ if (mtd->oobblock == 512) { /* read second, if pagesize = 512 */
+ this->enable_hwecc (NAND_ECC_READ);
+ while (j < end)
+ data_poi[j++] = readb (this->IO_ADDR_R);
+ this->calculate_ecc (&data_poi[256], &ecc_calc[3]); /* read from hardware */
+ }
+ break;
+
+ case NAND_ECC_HW3_512:
+ case NAND_ECC_HW6_512: /* Hardware ECC 3/6 byte / 512 byte data : Read in a page */
+ this->enable_hwecc (NAND_ECC_READ);
+ while (j < end)
+ data_poi[j++] = readb (this->IO_ADDR_R);
+ this->calculate_ecc (&data_poi[0], &ecc_calc[0]); /* read from hardware */
+ break;
+
+ default:
+ printk (KERN_WARNING "Invalid NAND_ECC_MODE %d\n", this->eccmode);
+ BUG();
+ }
+
+ /* read oobdata */
+ for (j = 0; j < mtd->oobsize; j++)
+ oob_data[oob + j] = readb (this->IO_ADDR_R);
+
+ /* Skip ECC, if not active */
+ if (eccmode == NAND_ECC_NONE)
+ goto readdata;
+
+ /* Pick the ECC bytes out of the oob data */
+ for (j = 0; j < 6; j++)
+ ecc_code[j] = oob_data[oob + oob_config[j]];
+
+ /* correct data, if neccecary */
+ ecc_status = this->correct_data (&data_poi[0], &ecc_code[0], &ecc_calc[0]);
+ /* check, if we have a fs supplied oob-buffer; if so, append the
+ * ECC status as a raw int after this page's oob bytes */
+ if (oob_buf) {
+ oob += mtd->oobsize;
+ *((int *)&oob_data[oob]) = ecc_status;
+ oob += sizeof(int);
+ }
+ if (ecc_status == -1) {
+ DEBUG (MTD_DEBUG_LEVEL0, "nand_read_ecc: " "Failed ECC read, page 0x%08x\n", page);
+ ecc_failed++;
+ }
+
+ /* Second-half correction for 512 byte pages.
+ * NOTE(review): the condition only excludes HW3_512, so this also
+ * runs for NAND_ECC_HW6_512, whose single 6-byte code covers the
+ * whole page - confirm ecc_code[3..5]/ecc_calc[3..5] are valid
+ * inputs for correct_data in that mode. */
+ if (mtd->oobblock == 512 && eccmode != NAND_ECC_HW3_512) {
+ ecc_status = this->correct_data (&data_poi[256], &ecc_code[3], &ecc_calc[3]);
+ if (oob_buf) {
+ *((int *)&oob_data[oob]) = ecc_status;
+ oob += sizeof(int);
+ }
+ if (ecc_status == -1) {
+ DEBUG (MTD_DEBUG_LEVEL0, "nand_read_ecc: " "Failed ECC read, page 0x%08x\n", page);
+ ecc_failed++;
+ }
+ }
+readdata:
+ /* Partial page: copy the requested slice out of data_buf */
+ if (col || (len - read) < end) {
+ for (j = col; j < end && read < len; j++)
+ buf[read++] = data_poi[j];
+ } else
+ read += mtd->oobblock;
+ /* For subsequent reads align to page boundary. */
+ col = 0;
+ /* Increment page address */
+ page++;
+ }
+
+ /* De-select the NAND device */
+ nand_deselect ();
+
+ /* Wake up anyone waiting on the device */
+ spin_lock_bh (&this->chip_lock);
+ this->state = FL_READY;
+ wake_up (&this->wq);
+ spin_unlock_bh (&this->chip_lock);
+
+ /*
+ * Return success, if no ECC failures, else -EIO
+ * fs driver will take care of that, because
+ * retlen == desired len and result == -EIO
+ */
+ *retlen = read;
+ return ecc_failed ? -EIO : 0;
+}
+
+/*
+ * nand_read_oob - read out-of-band data only.
+ * @mtd:    MTD device structure
+ * @from:   byte offset inside the device (page + oob column)
+ * @len:    number of oob bytes to read
+ * @retlen: out: number of bytes actually read
+ * @buf:    destination buffer
+ *
+ * Returns 0 on success, -EINVAL for reads past the device end.
+ * NOTE(review): the column is masked with 0x0f here while nand_write_oob
+ * uses 0x1f - confirm which oob sizes this is meant to support.
+ */
+static int nand_read_oob (struct mtd_info *mtd, loff_t from, size_t len, size_t * retlen, u_char * buf)
+{
+ int i, col, page;
+ int erase_state = 0;
+ struct nand_chip *this = mtd->priv;
+
+ DEBUG (MTD_DEBUG_LEVEL3, "nand_read_oob: from = 0x%08x, len = %i\n", (unsigned int) from, (int) len);
+
+ /* Shift to get page */
+ page = ((int) from) >> this->page_shift;
+
+ /* Mask to get column */
+ col = from & 0x0f;
+
+ /* Initialize return length value */
+ *retlen = 0;
+
+ /* Do not allow reads past end of device */
+ if ((from + len) > mtd->size) {
+ DEBUG (MTD_DEBUG_LEVEL0, "nand_read_oob: Attempt read beyond end of device\n");
+ *retlen = 0;
+ return -EINVAL;
+ }
+
+ /* Grab the lock and see if the device is available */
+ nand_get_chip (this, mtd , FL_READING, &erase_state);
+
+ /* Select the NAND device */
+ nand_select ();
+
+ /* Send the read command */
+ this->cmdfunc (mtd, NAND_CMD_READOOB, col, page);
+ /*
+ * Read the data, if we read more than one page
+ * oob data, let the device transfer the data !
+ * The udelay at each oob boundary gives the chip time to load
+ * the next page's oob into its read register.
+ */
+ for (i = 0; i < len; i++) {
+ buf[i] = readb (this->IO_ADDR_R);
+ if ((col++ & (mtd->oobsize - 1)) == (mtd->oobsize - 1))
+ udelay (this->chip_delay);
+ }
+ /* De-select the NAND device */
+ nand_deselect ();
+
+ /* Wake up anyone waiting on the device */
+ spin_lock_bh (&this->chip_lock);
+ this->state = FL_READY;
+ wake_up (&this->wq);
+ spin_unlock_bh (&this->chip_lock);
+
+ /* Return happy */
+ *retlen = len;
+ return 0;
+}
+
+/* True if x is not aligned to the page (oobblock) size; uses the local
+ * 'mtd' in scope. Argument and expansion fully parenthesized so the
+ * macro composes safely in any expression context. */
+#define NOTALIGNED(x) (((x) & (mtd->oobblock-1)) != 0)
+
+/*
+ * nand_write - MTD write entry point without caller-supplied ecc buffer.
+ * Delegates to nand_write_ecc() with NULL eccbuf and NULL oobsel, so the
+ * chip default oobinfo (and therefore its ECC mode) is used.
+ */
+static int nand_write (struct mtd_info *mtd, loff_t to, size_t len, size_t * retlen, const u_char * buf)
+{
+ return (nand_write_ecc (mtd, to, len, retlen, buf, NULL, NULL));
+}
+/*
+ * nand_write_ecc - write data with ECC handling.
+ * @mtd:    MTD device structure
+ * @to:     byte offset to write to (must be page aligned)
+ * @len:    number of bytes to write (must be page aligned)
+ * @retlen: out: number of bytes actually written
+ * @buf:    source data
+ * @eccbuf: optional caller-supplied OOB data, mtd->oobsize bytes per
+ *          page; NULL lets nand_write_page pad the OOB with 0xff
+ * @oobsel: oob placement descriptor; NULL selects mtd->oobinfo default
+ *
+ * Returns 0 on success, -EINVAL for unaligned/out-of-range requests,
+ * -EIO if the device is write protected or a page program fails.
+ */
+static int nand_write_ecc (struct mtd_info *mtd, loff_t to, size_t len,
+ size_t * retlen, const u_char * buf, u_char * eccbuf, struct nand_oobinfo *oobsel)
+{
+ int page, ret = 0, oob = 0, written = 0;
+ struct nand_chip *this = mtd->priv;
+
+ DEBUG (MTD_DEBUG_LEVEL3, "nand_write_ecc: to = 0x%08x, len = %i\n", (unsigned int) to, (int) len);
+
+ /* Do not allow write past end of device */
+ if ((to + len) > mtd->size) {
+ DEBUG (MTD_DEBUG_LEVEL0, "nand_write_ecc: Attempt to write past end of page\n");
+ return -EINVAL;
+ }
+
+ /* reject writes, which are not page aligned */
+ if (NOTALIGNED (to) || NOTALIGNED(len)) {
+ printk (KERN_NOTICE "nand_write_ecc: Attempt to write not page aligned data\n");
+ return -EINVAL;
+ }
+
+ // if oobsel is NULL, use chip defaults
+ if (oobsel == NULL)
+ oobsel = &mtd->oobinfo;
+
+ /* Shift to get page */
+ page = ((int) to) >> this->page_shift;
+
+ /* Grab the lock and see if the device is available */
+ nand_get_chip (this, mtd, FL_WRITING, NULL);
+
+ /* Select the NAND device */
+ nand_select ();
+
+ /* Check the WP bit; status bit 7 clear means write protected */
+ this->cmdfunc (mtd, NAND_CMD_STATUS, -1, -1);
+ if (!(readb (this->IO_ADDR_R) & 0x80)) {
+ DEBUG (MTD_DEBUG_LEVEL0, "nand_write_ecc: Device is write protected!!!\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ /* Loop until all data is written */
+ while (written < len) {
+ int cnt = mtd->oobblock;
+ /* nand_write_page reads the page data from this->data_poi */
+ this->data_poi = (u_char*) &buf[written];
+ /* We use the same function for write and writev */
+ if (eccbuf) {
+ ret = nand_write_page (mtd, this, page, &eccbuf[oob], oobsel);
+ oob += mtd->oobsize;
+ } else
+ ret = nand_write_page (mtd, this, page, NULL, oobsel);
+
+ if (ret)
+ goto out;
+
+ /* Update written bytes count */
+ written += cnt;
+ /* Increment page address */
+ page++;
+ }
+
+out:
+ /* De-select the NAND device */
+ nand_deselect ();
+
+ /* Wake up anyone waiting on the device */
+ spin_lock_bh (&this->chip_lock);
+ this->state = FL_READY;
+ wake_up (&this->wq);
+ spin_unlock_bh (&this->chip_lock);
+
+ *retlen = written;
+ return ret;
+}
+
+/*
+ * nand_write_oob - write out-of-band data only.
+ * @mtd:    MTD device structure
+ * @to:     byte offset; low bits select the column inside the oob area
+ * @len:    number of oob bytes to write
+ * @retlen: out: number of bytes actually written
+ * @buf:    source data
+ *
+ * Pads the unwritten parts of the oob area with 0xff, so only the
+ * requested bytes can transition bits. Returns 0 on success, -EINVAL
+ * if the write would cross the oob area, -EIO on write protect,
+ * program failure, or failed read-back verify.
+ */
+static int nand_write_oob (struct mtd_info *mtd, loff_t to, size_t len, size_t * retlen, const u_char * buf)
+{
+ int i, column, page, status, ret = 0;
+ struct nand_chip *this = mtd->priv;
+
+ DEBUG (MTD_DEBUG_LEVEL3, "nand_write_oob: to = 0x%08x, len = %i\n", (unsigned int) to, (int) len);
+
+ /* Shift to get page */
+ page = ((int) to) >> this->page_shift;
+
+ /* Mask to get column */
+ column = to & 0x1f;
+
+ /* Initialize return length value */
+ *retlen = 0;
+
+ /* Do not allow write past end of page */
+ if ((column + len) > mtd->oobsize) {
+ DEBUG (MTD_DEBUG_LEVEL0, "nand_write_oob: Attempt to write past end of page\n");
+ return -EINVAL;
+ }
+
+ /* Grab the lock and see if the device is available */
+ nand_get_chip (this, mtd, FL_WRITING, NULL);
+
+ /* Select the NAND device */
+ nand_select ();
+
+ /* Check the WP bit; status bit 7 clear means write protected */
+ this->cmdfunc (mtd, NAND_CMD_STATUS, -1, -1);
+ if (!(readb (this->IO_ADDR_R) & 0x80)) {
+ DEBUG (MTD_DEBUG_LEVEL0, "nand_write_oob: Device is write protected!!!\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ /* Write out desired data; SEQIN at column mtd->oobblock points
+ * the program register at the start of the oob area */
+ this->cmdfunc (mtd, NAND_CMD_SEQIN, mtd->oobblock, page);
+ /* prepad 0xff for partial programming */
+ for (i = 0; i < column; i++)
+ writeb (0xff, this->IO_ADDR_W);
+ /* write data */
+ for (i = 0; i < len; i++)
+ writeb (buf[i], this->IO_ADDR_W);
+ /* postpad 0xff for partial programming */
+ for (i = len + column; i < mtd->oobsize; i++)
+ writeb (0xff, this->IO_ADDR_W);
+
+ /* Send command to program the OOB data */
+ this->cmdfunc (mtd, NAND_CMD_PAGEPROG, -1, -1);
+
+ status = this->waitfunc (mtd, this, FL_WRITING);
+
+ /* See if device thinks it succeeded; status bit 0 is the fail bit */
+ if (status & 0x01) {
+ DEBUG (MTD_DEBUG_LEVEL0, "nand_write_oob: " "Failed write, page 0x%08x\n", page);
+ ret = -EIO;
+ goto out;
+ }
+ /* Return happy */
+ *retlen = len;
+
+#ifdef CONFIG_MTD_NAND_VERIFY_WRITE
+ /* Send command to read back the data */
+ this->cmdfunc (mtd, NAND_CMD_READOOB, column, page);
+
+ /* Loop through and verify the data */
+ for (i = 0; i < len; i++) {
+ if (buf[i] != readb (this->IO_ADDR_R)) {
+ DEBUG (MTD_DEBUG_LEVEL0, "nand_write_oob: " "Failed write verify, page 0x%08x\n", page);
+ ret = -EIO;
+ goto out;
+ }
+ }
+#endif
+
+out:
+ /* De-select the NAND device */
+ nand_deselect ();
+
+ /* Wake up anyone waiting on the device */
+ spin_lock_bh (&this->chip_lock);
+ this->state = FL_READY;
+ wake_up (&this->wq);
+ spin_unlock_bh (&this->chip_lock);
+
+ return ret;
+}
+
+
+/*
+ * nand_writev - MTD vectored write entry point.
+ * Delegates to nand_writev_ecc() with NULL eccbuf and NULL oobsel, so
+ * the chip default oobinfo (and therefore its ECC mode) is used.
+ */
+static int nand_writev (struct mtd_info *mtd, const struct kvec *vecs, unsigned long count,
+ loff_t to, size_t * retlen)
+{
+ return (nand_writev_ecc (mtd, vecs, count, to, retlen, NULL, NULL));
+}
+
+/*
+ * nand_writev_ecc - write a kvec array with ECC handling.
+ * @mtd:    MTD device structure
+ * @vecs:   array of data vectors to write
+ * @count:  number of vectors
+ * @to:     byte offset to write to (must be page aligned)
+ * @retlen: out: number of bytes actually written
+ * @eccbuf: caller-supplied OOB data; NOTE(review): currently ignored -
+ *          nand_write_page is always called with a NULL oob buffer below
+ * @oobsel: oob placement descriptor; NULL selects mtd->oobinfo default
+ *
+ * Vectors that cover at least a full page are written in place; shorter
+ * vectors are gathered into this->data_buf until a page is full.
+ * Returns 0 on success, -EINVAL for unaligned/out-of-range requests,
+ * -EIO on write protect or page program failure.
+ */
+static int nand_writev_ecc (struct mtd_info *mtd, const struct kvec *vecs, unsigned long count,
+ loff_t to, size_t * retlen, u_char *eccbuf, struct nand_oobinfo *oobsel)
+{
+ int i, page, len, total_len, ret = 0, written = 0;
+ struct nand_chip *this = mtd->priv;
+
+ /* Calculate total length of data */
+ total_len = 0;
+ for (i = 0; i < count; i++)
+ total_len += (int) vecs[i].iov_len;
+
+ DEBUG (MTD_DEBUG_LEVEL3,
+ "nand_writev: to = 0x%08x, len = %i, count = %ld\n", (unsigned int) to, (unsigned int) total_len, count);
+
+ /* Do not allow write past end of page */
+ if ((to + total_len) > mtd->size) {
+ DEBUG (MTD_DEBUG_LEVEL0, "nand_writev: Attempted write past end of device\n");
+ return -EINVAL;
+ }
+
+ /* reject writes, which are not page aligned */
+ if (NOTALIGNED (to) || NOTALIGNED(total_len)) {
+ printk (KERN_NOTICE "nand_write_ecc: Attempt to write not page aligned data\n");
+ return -EINVAL;
+ }
+
+ // if oobsel is NULL, use chip defaults
+ if (oobsel == NULL)
+ oobsel = &mtd->oobinfo;
+
+ /* Shift to get page */
+ page = ((int) to) >> this->page_shift;
+
+ /* Grab the lock and see if the device is available */
+ nand_get_chip (this, mtd, FL_WRITING, NULL);
+
+ /* Select the NAND device */
+ nand_select ();
+
+ /* Check the WP bit; status bit 7 clear means write protected */
+ this->cmdfunc (mtd, NAND_CMD_STATUS, -1, -1);
+ if (!(readb (this->IO_ADDR_R) & 0x80)) {
+ DEBUG (MTD_DEBUG_LEVEL0, "nand_writev: Device is write protected!!!\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ /* Loop until all kvec' data has been written */
+ len = 0;
+ while (count) {
+ /*
+ * Check, if the tuple gives us not enough data for a
+ * full page write. Then we can use the iov direct,
+ * else we have to copy into data_buf.
+ */
+ if ((vecs->iov_len - len) >= mtd->oobblock) {
+ this->data_poi = vecs->iov_base;
+ this->data_poi += len;
+ len += mtd->oobblock;
+ /* Check, if we have to switch to the next tuple */
+ if (len >= (int) vecs->iov_len) {
+ vecs++;
+ len = 0;
+ count--;
+ }
+ } else {
+ /*
+ * Read data out of each tuple until we have a full page
+ * to write or we've read all the tuples.
+ */
+ int cnt = 0;
+ while ((cnt < mtd->oobblock) && count) {
+ if (vecs->iov_base != NULL && vecs->iov_len) {
+ this->data_buf[cnt++] = ((u_char *) vecs->iov_base)[len++];
+ }
+ /* Check, if we have to switch to the next tuple */
+ if (len >= (int) vecs->iov_len) {
+ vecs++;
+ len = 0;
+ count--;
+ }
+ }
+ this->data_poi = this->data_buf;
+ }
+
+ /* We use the same function for write and writev !) */
+ ret = nand_write_page (mtd, this, page, NULL, oobsel);
+ if (ret)
+ goto out;
+
+ /* Update written bytes count */
+ written += mtd->oobblock;
+
+ /* Increment page address */
+ page++;
+ }
+
+out:
+ /* De-select the NAND device */
+ nand_deselect ();
+
+ /* Wake up anyone waiting on the device */
+ spin_lock_bh (&this->chip_lock);
+ this->state = FL_READY;
+ wake_up (&this->wq);
+ spin_unlock_bh (&this->chip_lock);
+
+ *retlen = written;
+ return ret;
+}
+
+/*
+ * nand_erase - erase block(s).
+ * @mtd:   MTD device structure
+ * @instr: erase descriptor; addr and len must be erase-block aligned
+ *
+ * Skips (and fails on) blocks whose bad-block marker byte is not 0xff.
+ * Returns 0 on success and invokes instr->callback, -EINVAL for
+ * unaligned/out-of-range requests, -EIO on write protect, bad block,
+ * or a failed erase.
+ *
+ * NOTE(review): the loop unlocks chip_lock around waitfunc and re-takes
+ * it afterwards; this assumes nand_get_chip() returned with chip_lock
+ * held - confirm against nand_get_chip's definition (outside this view).
+ */
+static int nand_erase (struct mtd_info *mtd, struct erase_info *instr)
+{
+ int page, len, status, pages_per_block, ret;
+ struct nand_chip *this = mtd->priv;
+ DECLARE_WAITQUEUE (wait, current);
+
+ DEBUG (MTD_DEBUG_LEVEL3,
+ "nand_erase: start = 0x%08x, len = %i\n", (unsigned int) instr->addr, (unsigned int) instr->len);
+
+ /* Start address must align on block boundary */
+ if (instr->addr & (mtd->erasesize - 1)) {
+ DEBUG (MTD_DEBUG_LEVEL0, "nand_erase: Unaligned address\n");
+ return -EINVAL;
+ }
+
+ /* Length must align on block boundary */
+ if (instr->len & (mtd->erasesize - 1)) {
+ DEBUG (MTD_DEBUG_LEVEL0, "nand_erase: Length not block aligned\n");
+ return -EINVAL;
+ }
+
+ /* Do not allow erase past end of device */
+ if ((instr->len + instr->addr) > mtd->size) {
+ DEBUG (MTD_DEBUG_LEVEL0, "nand_erase: Erase past end of device\n");
+ return -EINVAL;
+ }
+
+ /* Grab the lock and see if the device is available */
+ nand_get_chip (this, mtd, FL_ERASING, NULL);
+
+ /* Shift to get first page */
+ page = (int) (instr->addr >> this->page_shift);
+
+ /* Calculate pages in each block */
+ pages_per_block = mtd->erasesize / mtd->oobblock;
+
+ /* Select the NAND device */
+ nand_select ();
+
+ /* Check the WP bit; status bit 7 clear means write protected */
+ this->cmdfunc (mtd, NAND_CMD_STATUS, -1, -1);
+ if (!(readb (this->IO_ADDR_R) & 0x80)) {
+ DEBUG (MTD_DEBUG_LEVEL0, "nand_erase: Device is write protected!!!\n");
+ instr->state = MTD_ERASE_FAILED;
+ goto erase_exit;
+ }
+
+ /* Loop through the pages */
+ len = instr->len;
+
+ instr->state = MTD_ERASING;
+
+ while (len) {
+ /* Check if we have a bad block, we do not erase bad blocks ! */
+ this->cmdfunc (mtd, NAND_CMD_READOOB, NAND_BADBLOCK_POS, page);
+ if (readb (this->IO_ADDR_R) != 0xff) {
+ printk (KERN_WARNING "nand_erase: attempt to erase a bad block at page 0x%08x\n", page);
+ instr->state = MTD_ERASE_FAILED;
+ goto erase_exit;
+ }
+
+ /* Send commands to erase a page */
+ this->cmdfunc (mtd, NAND_CMD_ERASE1, -1, page);
+ this->cmdfunc (mtd, NAND_CMD_ERASE2, -1, -1);
+
+ /* Drop the lock while waiting; waitfunc may sleep */
+ spin_unlock_bh (&this->chip_lock);
+ status = this->waitfunc (mtd, this, FL_ERASING);
+
+ /* Get spinlock, in case we exit */
+ spin_lock_bh (&this->chip_lock);
+ /* See if block erase succeeded; status bit 0 is the fail bit */
+ if (status & 0x01) {
+ DEBUG (MTD_DEBUG_LEVEL0, "nand_erase: " "Failed erase, page 0x%08x\n", page);
+ instr->state = MTD_ERASE_FAILED;
+ goto erase_exit;
+ }
+
+ /* Check, if we were interupted; only advance if nobody stole
+ * the chip state while we were waiting */
+ if (this->state == FL_ERASING) {
+ /* Increment page address and decrement length */
+ len -= mtd->erasesize;
+ page += pages_per_block;
+ }
+ /* Release the spin lock */
+ spin_unlock_bh (&this->chip_lock);
+erase_retry:
+ spin_lock_bh (&this->chip_lock);
+ /* Check the state and sleep if it changed */
+ if (this->state == FL_ERASING || this->state == FL_READY) {
+ /* Select the NAND device again, if we were interrupted */
+ this->state = FL_ERASING;
+ nand_select ();
+ continue;
+ } else {
+ /* Another user owns the chip: sleep until woken, then retry */
+ set_current_state (TASK_UNINTERRUPTIBLE);
+ add_wait_queue (&this->wq, &wait);
+ spin_unlock_bh (&this->chip_lock);
+ schedule ();
+ remove_wait_queue (&this->wq, &wait);
+ goto erase_retry;
+ }
+ }
+ instr->state = MTD_ERASE_DONE;
+
+erase_exit:
+ /* De-select the NAND device */
+ nand_deselect ();
+ spin_unlock_bh (&this->chip_lock);
+
+ ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO;
+ /* Do call back function */
+ if (!ret && instr->callback)
+ instr->callback (instr);
+
+ /* The device is ready */
+ spin_lock_bh (&this->chip_lock);
+ this->state = FL_READY;
+ spin_unlock_bh (&this->chip_lock);
+
+ /* Return more or less happy */
+ return ret;
+}
+
+/*
+ * nand_sync - wait until the chip is idle.
+ * @mtd: MTD device structure
+ *
+ * Sleeps on this->wq until the chip state is FL_READY (or already
+ * FL_SYNCING), briefly claims FL_SYNCING, then restores FL_READY and
+ * wakes any other waiters. No hardware access is performed.
+ */
+static void nand_sync (struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+ DECLARE_WAITQUEUE (wait, current);
+
+ DEBUG (MTD_DEBUG_LEVEL3, "nand_sync: called\n");
+
+retry:
+ /* Grab the spinlock */
+ spin_lock_bh (&this->chip_lock);
+
+ /* See what's going on */
+ switch (this->state) {
+ case FL_READY:
+ case FL_SYNCING:
+ this->state = FL_SYNCING;
+ spin_unlock_bh (&this->chip_lock);
+ break;
+
+ default:
+ /* Not an idle state: sleep until woken, then re-check */
+ add_wait_queue (&this->wq, &wait);
+ spin_unlock_bh (&this->chip_lock);
+ schedule ();
+
+ remove_wait_queue (&this->wq, &wait);
+ goto retry;
+ }
+
+ /* Lock the device */
+ spin_lock_bh (&this->chip_lock);
+
+ /* Set the device to be ready again */
+ if (this->state == FL_SYNCING) {
+ this->state = FL_READY;
+ wake_up (&this->wq);
+ }
+
+ /* Unlock the device */
+ spin_unlock_bh (&this->chip_lock);
+}
+
+/*
+ * nand_scan - probe for the NAND device and fill in MTD structures.
+ * @mtd: MTD device structure with mtd->priv pointing to a nand_chip
+ *       whose board-specific fields (IO_ADDR_*, eccmode, optional
+ *       cmdfunc/waitfunc/chip_delay) are already set up
+ *
+ * Reads the manufacturer/device ID, looks them up in nand_flash_ids[]
+ * and nand_manuf_ids[], derives geometry (page size, oob size, page
+ * shift), validates/falls back the ECC mode, initializes the chip
+ * state machine, and wires up all mtd_info method pointers.
+ *
+ * Returns 0 on success, 1 if no known device was found.
+ */
+int nand_scan (struct mtd_info *mtd)
+{
+ int i, nand_maf_id, nand_dev_id;
+ struct nand_chip *this = mtd->priv;
+
+ /* check for proper chip_delay setup, set 20us if not */
+ if (!this->chip_delay)
+ this->chip_delay = 20;
+
+ /* check, if a user supplied command function given */
+ if (this->cmdfunc == NULL)
+ this->cmdfunc = nand_command;
+
+ /* check, if a user supplied wait function given */
+ if (this->waitfunc == NULL)
+ this->waitfunc = nand_wait;
+
+ /* Select the device */
+ nand_select ();
+
+ /* Send the command for reading device ID */
+ this->cmdfunc (mtd, NAND_CMD_READID, 0x00, -1);
+
+ /* Read manufacturer and device IDs */
+ nand_maf_id = readb (this->IO_ADDR_R);
+ nand_dev_id = readb (this->IO_ADDR_R);
+
+ /* Print and store flash device information.
+ * !mtd->size guards against matching twice; it is also the
+ * "device found" flag tested after the loop. */
+ for (i = 0; nand_flash_ids[i].name != NULL; i++) {
+ if (nand_dev_id == nand_flash_ids[i].id && !mtd->size) {
+ mtd->name = nand_flash_ids[i].name;
+ mtd->erasesize = nand_flash_ids[i].erasesize;
+ mtd->size = (1 << nand_flash_ids[i].chipshift);
+ mtd->eccsize = 256;
+ if (nand_flash_ids[i].page256) {
+ mtd->oobblock = 256;
+ mtd->oobsize = 8;
+ this->page_shift = 8;
+ } else {
+ mtd->oobblock = 512;
+ mtd->oobsize = 16;
+ this->page_shift = 9;
+ }
+ /* Try to identify manufacturer (note: reuses i; the outer
+ * loop ends via break right after, so this is safe) */
+ for (i = 0; nand_manuf_ids[i].id != 0x0; i++) {
+ if (nand_manuf_ids[i].id == nand_maf_id)
+ break;
+ }
+ printk (KERN_INFO "NAND device: Manufacture ID:"
+ " 0x%02x, Chip ID: 0x%02x (%s %s)\n", nand_maf_id, nand_dev_id,
+ nand_manuf_ids[i].name , mtd->name);
+ break;
+ }
+ }
+
+ /*
+ * check ECC mode, default to software
+ * if 3byte/512byte hardware ECC is selected and we have 256 byte pagesize
+ * fallback to software ECC
+ */
+ this->eccsize = 256; /* set default eccsize */
+
+ switch (this->eccmode) {
+
+ case NAND_ECC_HW3_512:
+ if (mtd->oobblock == 256) {
+ printk (KERN_WARNING "512 byte HW ECC not possible on 256 Byte pagesize, fallback to SW ECC \n");
+ this->eccmode = NAND_ECC_SOFT;
+ this->calculate_ecc = nand_calculate_ecc;
+ this->correct_data = nand_correct_data;
+ break;
+ } else
+ this->eccsize = 512; /* set eccsize to 512 and fall through for function check */
+
+ /* fallthrough */
+ case NAND_ECC_HW3_256:
+ if (this->calculate_ecc && this->correct_data && this->enable_hwecc)
+ break;
+ printk (KERN_WARNING "No ECC functions supplied, Hardware ECC not possible\n");
+ BUG();
+
+ case NAND_ECC_NONE:
+ printk (KERN_WARNING "NAND_ECC_NONE selected by board driver. This is not recommended !!\n");
+ this->eccmode = NAND_ECC_NONE;
+ break;
+
+ case NAND_ECC_SOFT:
+ this->calculate_ecc = nand_calculate_ecc;
+ this->correct_data = nand_correct_data;
+ break;
+
+ default:
+ printk (KERN_WARNING "Invalid NAND_ECC_MODE %d\n", this->eccmode);
+ BUG();
+ }
+
+ /* Initialize state, waitqueue and spinlock */
+ this->state = FL_READY;
+ init_waitqueue_head (&this->wq);
+ spin_lock_init (&this->chip_lock);
+
+ /* De-select the device */
+ nand_deselect ();
+
+ /* Print warning message for no device */
+ if (!mtd->size) {
+ printk (KERN_WARNING "No NAND device found!!!\n");
+ return 1;
+ }
+
+ /* Fill in remaining MTD driver data */
+ mtd->type = MTD_NANDFLASH;
+ mtd->flags = MTD_CAP_NANDFLASH | MTD_ECC;
+ mtd->ecctype = MTD_ECC_SW;
+ mtd->erase = nand_erase;
+ mtd->point = NULL;
+ mtd->unpoint = NULL;
+ mtd->read = nand_read;
+ mtd->write = nand_write;
+ mtd->read_ecc = nand_read_ecc;
+ mtd->write_ecc = nand_write_ecc;
+ mtd->read_oob = nand_read_oob;
+ mtd->write_oob = nand_write_oob;
+ mtd->readv = NULL;
+ mtd->writev = nand_writev;
+ mtd->writev_ecc = nand_writev_ecc;
+ mtd->sync = nand_sync;
+ mtd->lock = NULL;
+ mtd->unlock = NULL;
+ mtd->suspend = NULL;
+ mtd->resume = NULL;
+ mtd->owner = THIS_MODULE;
+
+ /* Return happy */
+ return 0;
+}
+
+EXPORT_SYMBOL (nand_scan);
+
+MODULE_LICENSE ("GPL");
+MODULE_AUTHOR ("Steven J. Hill <sjhill@realitydiluted.com>, Thomas Gleixner <tglx@linutronix.de>");
+MODULE_DESCRIPTION ("Generic NAND flash driver code");
+++ /dev/null
-/*
- * drivers/mtd/nand.c
- *
- * Overview:
- * This is the generic MTD driver for NAND flash devices. It should be
- * capable of working with almost all NAND chips currently available.
- * Basic support for AG-AND chips is provided.
- *
- * Additional technical information is available on
- * http://www.linux-mtd.infradead.org/tech/nand.html
- *
- * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
- * 2002 Thomas Gleixner (tglx@linutronix.de)
- *
- * 02-08-2004 tglx: support for strange chips, which cannot auto increment
- * pages on read / read_oob
- *
- * 03-17-2004 tglx: Check ready before auto increment check. Simon Bayes
- * pointed this out, as he marked an auto increment capable chip
- * as NOAUTOINCR in the board driver.
- * Make reads over block boundaries work too
- *
- * 04-14-2004 tglx: first working version for 2k page size chips
- *
- * 05-19-2004 tglx: Basic support for Renesas AG-AND chips
- *
- * Credits:
- * David Woodhouse for adding multichip support
- *
- * Aleph One Ltd. and Toby Churchill Ltd. for supporting the
- * rework for 2K page size chips
- *
- * TODO:
- * Enable cached programming for 2k page size chips
- * Check, if mtd->ecctype should be set to MTD_ECC_HW
- * if we have HW ecc support.
- * The AG-AND chips have nice features for speed improvement,
- * which are not supported yet. Read / program 4 pages in one go.
- *
- * $Id: nand_base.c,v 1.115 2004/08/09 13:19:45 dwmw2 Exp $
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/mtd/nand_ecc.h>
-#include <linux/mtd/compatmac.h>
-#include <linux/interrupt.h>
-#include <linux/bitops.h>
-#include <asm/io.h>
-
-#ifdef CONFIG_MTD_PARTITIONS
-#include <linux/mtd/partitions.h>
-#endif
-
-/* Define default oob placement schemes for large and small page devices */
-static struct nand_oobinfo nand_oob_8 = {
- .useecc = MTD_NANDECC_AUTOPLACE,
- .eccbytes = 3,
- .eccpos = {0, 1, 2},
- .oobfree = { {3, 2}, {6, 2} }
-};
-
-static struct nand_oobinfo nand_oob_16 = {
- .useecc = MTD_NANDECC_AUTOPLACE,
- .eccbytes = 6,
- .eccpos = {0, 1, 2, 3, 6, 7},
- .oobfree = { {8, 8} }
-};
-
-static struct nand_oobinfo nand_oob_64 = {
- .useecc = MTD_NANDECC_AUTOPLACE,
- .eccbytes = 24,
- .eccpos = {
- 40, 41, 42, 43, 44, 45, 46, 47,
- 48, 49, 50, 51, 52, 53, 54, 55,
- 56, 57, 58, 59, 60, 61, 62, 63},
- .oobfree = { {2, 38} }
-};
-
-/* This is used for padding purposes in nand_write_oob */
-static u_char ffchars[] = {
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-};
-
-/*
- * NAND low-level MTD interface functions
- */
-static void nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len);
-static void nand_read_buf(struct mtd_info *mtd, u_char *buf, int len);
-static int nand_verify_buf(struct mtd_info *mtd, const u_char *buf, int len);
-
-static int nand_read (struct mtd_info *mtd, loff_t from, size_t len, size_t * retlen, u_char * buf);
-static int nand_read_ecc (struct mtd_info *mtd, loff_t from, size_t len,
- size_t * retlen, u_char * buf, u_char * eccbuf, struct nand_oobinfo *oobsel);
-static int nand_read_oob (struct mtd_info *mtd, loff_t from, size_t len, size_t * retlen, u_char * buf);
-static int nand_write (struct mtd_info *mtd, loff_t to, size_t len, size_t * retlen, const u_char * buf);
-static int nand_write_ecc (struct mtd_info *mtd, loff_t to, size_t len,
- size_t * retlen, const u_char * buf, u_char * eccbuf, struct nand_oobinfo *oobsel);
-static int nand_write_oob (struct mtd_info *mtd, loff_t to, size_t len, size_t * retlen, const u_char *buf);
-static int nand_writev (struct mtd_info *mtd, const struct kvec *vecs,
- unsigned long count, loff_t to, size_t * retlen);
-static int nand_writev_ecc (struct mtd_info *mtd, const struct kvec *vecs,
- unsigned long count, loff_t to, size_t * retlen, u_char *eccbuf, struct nand_oobinfo *oobsel);
-static int nand_erase (struct mtd_info *mtd, struct erase_info *instr);
-static void nand_sync (struct mtd_info *mtd);
-
-/* Some internal functions */
-static int nand_write_page (struct mtd_info *mtd, struct nand_chip *this, int page, u_char *oob_buf,
- struct nand_oobinfo *oobsel, int mode);
-#ifdef CONFIG_MTD_NAND_VERIFY_WRITE
-static int nand_verify_pages (struct mtd_info *mtd, struct nand_chip *this, int page, int numpages,
- u_char *oob_buf, struct nand_oobinfo *oobsel, int chipnr, int oobmode);
-#else
-#define nand_verify_pages(...) (0)
-#endif
-
-static void nand_get_chip (struct nand_chip *this, struct mtd_info *mtd, int new_state);
-
-/**
- * nand_release_chip - [GENERIC] release chip
- * @mtd: MTD device structure
- *
- * Deselect, release chip lock and wake up anyone waiting on the device
- */
-static void nand_release_chip (struct mtd_info *mtd)
-{
- struct nand_chip *this = mtd->priv;
-
- /* De-select the NAND device */
- this->select_chip(mtd, -1);
- /* Release the chip */
- spin_lock_bh (&this->chip_lock);
- this->state = FL_READY;
- wake_up (&this->wq);
- spin_unlock_bh (&this->chip_lock);
-}
-
-/**
- * nand_read_byte - [DEFAULT] read one byte from the chip
- * @mtd: MTD device structure
- *
- * Default read function for 8bit buswith
- */
-static u_char nand_read_byte(struct mtd_info *mtd)
-{
- struct nand_chip *this = mtd->priv;
- return readb(this->IO_ADDR_R);
-}
-
-/**
- * nand_write_byte - [DEFAULT] write one byte to the chip
- * @mtd: MTD device structure
- * @byte: pointer to data byte to write
- *
- * Default write function for 8it buswith
- */
-static void nand_write_byte(struct mtd_info *mtd, u_char byte)
-{
- struct nand_chip *this = mtd->priv;
- writeb(byte, this->IO_ADDR_W);
-}
-
-/**
- * nand_read_byte16 - [DEFAULT] read one byte endianess aware from the chip
- * @mtd: MTD device structure
- *
- * Default read function for 16bit buswith with
- * endianess conversion
- */
-static u_char nand_read_byte16(struct mtd_info *mtd)
-{
- struct nand_chip *this = mtd->priv;
- return (u_char) cpu_to_le16(readw(this->IO_ADDR_R));
-}
-
-/**
- * nand_write_byte16 - [DEFAULT] write one byte endianess aware to the chip
- * @mtd: MTD device structure
- * @byte: pointer to data byte to write
- *
- * Default write function for 16bit buswith with
- * endianess conversion
- */
-static void nand_write_byte16(struct mtd_info *mtd, u_char byte)
-{
- struct nand_chip *this = mtd->priv;
- writew(le16_to_cpu((u16) byte), this->IO_ADDR_W);
-}
-
-/**
- * nand_read_word - [DEFAULT] read one word from the chip
- * @mtd: MTD device structure
- *
- * Default read function for 16bit buswith without
- * endianess conversion
- */
-static u16 nand_read_word(struct mtd_info *mtd)
-{
- struct nand_chip *this = mtd->priv;
- return readw(this->IO_ADDR_R);
-}
-
-/**
- * nand_write_word - [DEFAULT] write one word to the chip
- * @mtd: MTD device structure
- * @word: data word to write
- *
- * Default write function for 16bit buswith without
- * endianess conversion
- */
-static void nand_write_word(struct mtd_info *mtd, u16 word)
-{
- struct nand_chip *this = mtd->priv;
- writew(word, this->IO_ADDR_W);
-}
-
-/**
- * nand_select_chip - [DEFAULT] control CE line
- * @mtd: MTD device structure
- * @chip: chipnumber to select, -1 for deselect
- *
- * Default select function for 1 chip devices.
- */
-static void nand_select_chip(struct mtd_info *mtd, int chip)
-{
- struct nand_chip *this = mtd->priv;
- switch(chip) {
- case -1:
- this->hwcontrol(mtd, NAND_CTL_CLRNCE);
- break;
- case 0:
- this->hwcontrol(mtd, NAND_CTL_SETNCE);
- break;
-
- default:
- BUG();
- }
-}
-
-/**
- * nand_write_buf - [DEFAULT] write buffer to chip
- * @mtd: MTD device structure
- * @buf: data buffer
- * @len: number of bytes to write
- *
- * Default write function for 8bit buswith
- */
-static void nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
-{
- int i;
- struct nand_chip *this = mtd->priv;
-
- for (i=0; i<len; i++)
- writeb(buf[i], this->IO_ADDR_W);
-}
-
-/**
- * nand_read_buf - [DEFAULT] read chip data into buffer
- * @mtd: MTD device structure
- * @buf: buffer to store date
- * @len: number of bytes to read
- *
- * Default read function for 8bit buswith
- */
-static void nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
-{
- int i;
- struct nand_chip *this = mtd->priv;
-
- for (i=0; i<len; i++)
- buf[i] = readb(this->IO_ADDR_R);
-}
-
-/**
- * nand_verify_buf - [DEFAULT] Verify chip data against buffer
- * @mtd: MTD device structure
- * @buf: buffer containing the data to compare
- * @len: number of bytes to compare
- *
- * Default verify function for 8bit buswith
- */
-static int nand_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
-{
- int i;
- struct nand_chip *this = mtd->priv;
-
- for (i=0; i<len; i++)
- if (buf[i] != readb(this->IO_ADDR_R))
- return -EFAULT;
-
- return 0;
-}
-
-/**
- * nand_write_buf16 - [DEFAULT] write buffer to chip
- * @mtd: MTD device structure
- * @buf: data buffer
- * @len: number of bytes to write
- *
- * Default write function for 16bit buswith
- */
-static void nand_write_buf16(struct mtd_info *mtd, const u_char *buf, int len)
-{
- int i;
- struct nand_chip *this = mtd->priv;
- u16 *p = (u16 *) buf;
- len >>= 1;
-
- for (i=0; i<len; i++)
- writew(p[i], this->IO_ADDR_W);
-
-}
-
-/**
- * nand_read_buf16 - [DEFAULT] read chip data into buffer
- * @mtd: MTD device structure
- * @buf: buffer to store date
- * @len: number of bytes to read
- *
- * Default read function for 16bit buswith
- */
-static void nand_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
-{
- int i;
- struct nand_chip *this = mtd->priv;
- u16 *p = (u16 *) buf;
- len >>= 1;
-
- for (i=0; i<len; i++)
- p[i] = readw(this->IO_ADDR_R);
-}
-
-/**
- * nand_verify_buf16 - [DEFAULT] Verify chip data against buffer
- * @mtd: MTD device structure
- * @buf: buffer containing the data to compare
- * @len: number of bytes to compare
- *
- * Default verify function for 16bit buswith
- */
-static int nand_verify_buf16(struct mtd_info *mtd, const u_char *buf, int len)
-{
- int i;
- struct nand_chip *this = mtd->priv;
- u16 *p = (u16 *) buf;
- len >>= 1;
-
- for (i=0; i<len; i++)
- if (p[i] != readw(this->IO_ADDR_R))
- return -EFAULT;
-
- return 0;
-}
-
-/**
- * nand_block_bad - [DEFAULT] Read bad block marker from the chip
- * @mtd: MTD device structure
- * @ofs: offset from device start
- * @getchip: 0, if the chip is already selected
- *
- * Check, if the block is bad.
- */
-static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
-{
- int page, chipnr, res = 0;
- struct nand_chip *this = mtd->priv;
- u16 bad;
-
- if (getchip) {
- page = (int)(ofs >> this->page_shift);
- chipnr = (int)(ofs >> this->chip_shift);
-
- /* Grab the lock and see if the device is available */
- nand_get_chip (this, mtd, FL_READING);
-
- /* Select the NAND device */
- this->select_chip(mtd, chipnr);
- } else
- page = (int) ofs;
-
- if (this->options & NAND_BUSWIDTH_16) {
- this->cmdfunc (mtd, NAND_CMD_READOOB, this->badblockpos & 0xFE, page & this->pagemask);
- bad = cpu_to_le16(this->read_word(mtd));
- if (this->badblockpos & 0x1)
- bad >>= 1;
- if ((bad & 0xFF) != 0xff)
- res = 1;
- } else {
- this->cmdfunc (mtd, NAND_CMD_READOOB, this->badblockpos, page & this->pagemask);
- if (this->read_byte(mtd) != 0xff)
- res = 1;
- }
-
- if (getchip) {
- /* Deselect and wake up anyone waiting on the device */
- nand_release_chip(mtd);
- }
-
- return res;
-}
-
-/**
- * nand_default_block_markbad - [DEFAULT] mark a block bad
- * @mtd: MTD device structure
- * @ofs: offset from device start
- *
- * This is the default implementation, which can be overridden by
- * a hardware specific driver.
-*/
-static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
-{
- struct nand_chip *this = mtd->priv;
- u_char buf[2] = {0, 0};
- size_t retlen;
- int block;
-
- /* Get block number */
- block = ((int) ofs) >> this->bbt_erase_shift;
- this->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1);
-
- /* Do we have a flash based bad block table ? */
- if (this->options & NAND_USE_FLASH_BBT)
- return nand_update_bbt (mtd, ofs);
-
- /* We write two bytes, so we dont have to mess with 16 bit access */
- ofs += mtd->oobsize + (this->badblockpos & ~0x01);
- return nand_write_oob (mtd, ofs , 2, &retlen, buf);
-}
-
-/**
- * nand_check_wp - [GENERIC] check if the chip is write protected
- * @mtd: MTD device structure
- * Check, if the device is write protected
- *
- * The function expects, that the device is already selected
- */
-static int nand_check_wp (struct mtd_info *mtd)
-{
- struct nand_chip *this = mtd->priv;
- /* Check the WP bit */
- this->cmdfunc (mtd, NAND_CMD_STATUS, -1, -1);
- return (this->read_byte(mtd) & 0x80) ? 0 : 1;
-}
-
-/**
- * nand_block_checkbad - [GENERIC] Check if a block is marked bad
- * @mtd: MTD device structure
- * @ofs: offset from device start
- * @getchip: 0, if the chip is already selected
- * @allowbbt: 1, if its allowed to access the bbt area
- *
- * Check, if the block is bad. Either by reading the bad block table or
- * calling of the scan function.
- */
-static int nand_block_checkbad (struct mtd_info *mtd, loff_t ofs, int getchip, int allowbbt)
-{
- struct nand_chip *this = mtd->priv;
-
- if (!this->bbt)
- return this->block_bad(mtd, ofs, getchip);
-
- /* Return info from the table */
- return nand_isbad_bbt (mtd, ofs, allowbbt);
-}
-
-/**
- * nand_command - [DEFAULT] Send command to NAND device
- * @mtd: MTD device structure
- * @command: the command to be sent
- * @column: the column address for this command, -1 if none
- * @page_addr: the page address for this command, -1 if none
- *
- * Send command to NAND device. This function is used for small page
- * devices (256/512 Bytes per page)
- */
-static void nand_command (struct mtd_info *mtd, unsigned command, int column, int page_addr)
-{
- register struct nand_chip *this = mtd->priv;
-
- /* Begin command latch cycle */
- this->hwcontrol(mtd, NAND_CTL_SETCLE);
- /*
- * Write out the command to the device.
- */
- if (command == NAND_CMD_SEQIN) {
- int readcmd;
-
- if (column >= mtd->oobblock) {
- /* OOB area */
- column -= mtd->oobblock;
- readcmd = NAND_CMD_READOOB;
- } else if (column < 256) {
- /* First 256 bytes --> READ0 */
- readcmd = NAND_CMD_READ0;
- } else {
- column -= 256;
- readcmd = NAND_CMD_READ1;
- }
- this->write_byte(mtd, readcmd);
- }
- this->write_byte(mtd, command);
-
- /* Set ALE and clear CLE to start address cycle */
- this->hwcontrol(mtd, NAND_CTL_CLRCLE);
-
- if (column != -1 || page_addr != -1) {
- this->hwcontrol(mtd, NAND_CTL_SETALE);
-
- /* Serially input address */
- if (column != -1) {
- /* Adjust columns for 16 bit buswidth */
- if (this->options & NAND_BUSWIDTH_16)
- column >>= 1;
- this->write_byte(mtd, column);
- }
- if (page_addr != -1) {
- this->write_byte(mtd, (unsigned char) (page_addr & 0xff));
- this->write_byte(mtd, (unsigned char) ((page_addr >> 8) & 0xff));
- /* One more address cycle for higher density devices */
- if (this->chipsize & 0x0c000000)
- this->write_byte(mtd, (unsigned char) ((page_addr >> 16) & 0x0f));
- }
- /* Latch in address */
- this->hwcontrol(mtd, NAND_CTL_CLRALE);
- }
-
- /*
- * program and erase have their own busy handlers
- * status and sequential in needs no delay
- */
- switch (command) {
-
- case NAND_CMD_PAGEPROG:
- case NAND_CMD_ERASE1:
- case NAND_CMD_ERASE2:
- case NAND_CMD_SEQIN:
- case NAND_CMD_STATUS:
- return;
-
- case NAND_CMD_RESET:
- if (this->dev_ready)
- break;
- udelay(this->chip_delay);
- this->hwcontrol(mtd, NAND_CTL_SETCLE);
- this->write_byte(mtd, NAND_CMD_STATUS);
- this->hwcontrol(mtd, NAND_CTL_CLRCLE);
- while ( !(this->read_byte(mtd) & 0x40));
- return;
-
- /* This applies to read commands */
- default:
- /*
- * If we don't have access to the busy pin, we apply the given
- * command delay
- */
- if (!this->dev_ready) {
- udelay (this->chip_delay);
- return;
- }
- }
-
- /* Apply this short delay always to ensure that we do wait tWB in
- * any case on any machine. */
- ndelay (100);
- /* wait until command is processed */
- while (!this->dev_ready(mtd));
-}
-
-/**
- * nand_command_lp - [DEFAULT] Send command to NAND large page device
- * @mtd: MTD device structure
- * @command: the command to be sent
- * @column: the column address for this command, -1 if none
- * @page_addr: the page address for this command, -1 if none
- *
- * Send command to NAND device. This is the version for the new large page devices
- * We dont have the seperate regions as we have in the small page devices.
- * We must emulate NAND_CMD_READOOB to keep the code compatible.
- *
- */
-static void nand_command_lp (struct mtd_info *mtd, unsigned command, int column, int page_addr)
-{
- register struct nand_chip *this = mtd->priv;
-
- /* Emulate NAND_CMD_READOOB */
- if (command == NAND_CMD_READOOB) {
- column += mtd->oobblock;
- command = NAND_CMD_READ0;
- }
-
-
- /* Begin command latch cycle */
- this->hwcontrol(mtd, NAND_CTL_SETCLE);
- /* Write out the command to the device. */
- this->write_byte(mtd, command);
- /* End command latch cycle */
- this->hwcontrol(mtd, NAND_CTL_CLRCLE);
-
- if (column != -1 || page_addr != -1) {
- this->hwcontrol(mtd, NAND_CTL_SETALE);
-
- /* Serially input address */
- if (column != -1) {
- /* Adjust columns for 16 bit buswidth */
- if (this->options & NAND_BUSWIDTH_16)
- column >>= 1;
- this->write_byte(mtd, column & 0xff);
- this->write_byte(mtd, column >> 8);
- }
- if (page_addr != -1) {
- this->write_byte(mtd, (unsigned char) (page_addr & 0xff));
- this->write_byte(mtd, (unsigned char) ((page_addr >> 8) & 0xff));
- /* One more address cycle for devices > 128MiB */
- if (this->chipsize > (128 << 20))
- this->write_byte(mtd, (unsigned char) ((page_addr >> 16) & 0xff));
- }
- /* Latch in address */
- this->hwcontrol(mtd, NAND_CTL_CLRALE);
- }
-
- /*
- * program and erase have their own busy handlers
- * status and sequential in needs no delay
- */
- switch (command) {
-
- case NAND_CMD_CACHEDPROG:
- case NAND_CMD_PAGEPROG:
- case NAND_CMD_ERASE1:
- case NAND_CMD_ERASE2:
- case NAND_CMD_SEQIN:
- case NAND_CMD_STATUS:
- return;
-
-
- case NAND_CMD_RESET:
- if (this->dev_ready)
- break;
- udelay(this->chip_delay);
- this->hwcontrol(mtd, NAND_CTL_SETCLE);
- this->write_byte(mtd, NAND_CMD_STATUS);
- this->hwcontrol(mtd, NAND_CTL_CLRCLE);
- while ( !(this->read_byte(mtd) & 0x40));
- return;
-
- case NAND_CMD_READ0:
- /* Begin command latch cycle */
- this->hwcontrol(mtd, NAND_CTL_SETCLE);
- /* Write out the start read command */
- this->write_byte(mtd, NAND_CMD_READSTART);
- /* End command latch cycle */
- this->hwcontrol(mtd, NAND_CTL_CLRCLE);
- /* Fall through into ready check */
-
- /* This applies to read commands */
- default:
- /*
- * If we don't have access to the busy pin, we apply the given
- * command delay
- */
- if (!this->dev_ready) {
- udelay (this->chip_delay);
- return;
- }
- }
-
- /* Apply this short delay always to ensure that we do wait tWB in
- * any case on any machine. */
- ndelay (100);
- /* wait until command is processed */
- while (!this->dev_ready(mtd));
-}
-
-/**
- * nand_get_chip - [GENERIC] Get chip for selected access
- * @this: the nand chip descriptor
- * @mtd: MTD device structure
- * @new_state: the state which is requested
- *
- * Get the device and lock it for exclusive access
- */
-static void nand_get_chip (struct nand_chip *this, struct mtd_info *mtd, int new_state)
-{
-
- DECLARE_WAITQUEUE (wait, current);
-
- /*
- * Grab the lock and see if the device is available
- */
-retry:
- spin_lock_bh (&this->chip_lock);
-
- if (this->state == FL_READY) {
- this->state = new_state;
- spin_unlock_bh (&this->chip_lock);
- return;
- }
-
- set_current_state (TASK_UNINTERRUPTIBLE);
- add_wait_queue (&this->wq, &wait);
- spin_unlock_bh (&this->chip_lock);
- schedule ();
- remove_wait_queue (&this->wq, &wait);
- goto retry;
-}
-
-/**
- * nand_wait - [DEFAULT] wait until the command is done
- * @mtd: MTD device structure
- * @this: NAND chip structure
- * @state: state to select the max. timeout value
- *
- * Wait for command done. This applies to erase and program only
- * Erase can take up to 400ms and program up to 20ms according to
- * general NAND and SmartMedia specs
- *
-*/
-static int nand_wait(struct mtd_info *mtd, struct nand_chip *this, int state)
-{
-
- unsigned long timeo = jiffies;
- int status;
-
- if (state == FL_ERASING)
- timeo += (HZ * 400) / 1000;
- else
- timeo += (HZ * 20) / 1000;
-
- /* Apply this short delay always to ensure that we do wait tWB in
- * any case on any machine. */
- ndelay (100);
-
- spin_lock_bh (&this->chip_lock);
- if ((state == FL_ERASING) && (this->options & NAND_IS_AND))
- this->cmdfunc (mtd, NAND_CMD_STATUS_MULTI, -1, -1);
- else
- this->cmdfunc (mtd, NAND_CMD_STATUS, -1, -1);
-
- while (time_before(jiffies, timeo)) {
- /* Check, if we were interrupted */
- if (this->state != state) {
- spin_unlock_bh (&this->chip_lock);
- return 0;
- }
- if (this->dev_ready) {
- if (this->dev_ready(mtd))
- break;
- }
- if (this->read_byte(mtd) & NAND_STATUS_READY)
- break;
-
- spin_unlock_bh (&this->chip_lock);
- yield ();
- spin_lock_bh (&this->chip_lock);
- }
- status = (int) this->read_byte(mtd);
- spin_unlock_bh (&this->chip_lock);
-
- return status;
-}
-
-/**
- * nand_write_page - [GENERIC] write one page
- * @mtd: MTD device structure
- * @this: NAND chip structure
- * @page: startpage inside the chip, must be called with (page & this->pagemask)
- * @oob_buf: out of band data buffer
- * @oobsel: out of band selecttion structre
- * @cached: 1 = enable cached programming if supported by chip
- *
- * Nand_page_program function is used for write and writev !
- * This function will always program a full page of data
- * If you call it with a non page aligned buffer, you're lost :)
- *
- * Cached programming is not supported yet.
- */
-static int nand_write_page (struct mtd_info *mtd, struct nand_chip *this, int page,
- u_char *oob_buf, struct nand_oobinfo *oobsel, int cached)
-{
- int i, status;
- u_char ecc_code[8];
- int eccmode = oobsel->useecc ? this->eccmode : NAND_ECC_NONE;
- int *oob_config = oobsel->eccpos;
- int datidx = 0, eccidx = 0, eccsteps = this->eccsteps;
- int eccbytes = 0;
-
- /* FIXME: Enable cached programming */
- cached = 0;
-
- /* Send command to begin auto page programming */
- this->cmdfunc (mtd, NAND_CMD_SEQIN, 0x00, page);
-
- /* Write out complete page of data, take care of eccmode */
- switch (eccmode) {
- /* No ecc, write all */
- case NAND_ECC_NONE:
- printk (KERN_WARNING "Writing data without ECC to NAND-FLASH is not recommended\n");
- this->write_buf(mtd, this->data_poi, mtd->oobblock);
- break;
-
- /* Software ecc 3/256, write all */
- case NAND_ECC_SOFT:
- for (; eccsteps; eccsteps--) {
- this->calculate_ecc(mtd, &this->data_poi[datidx], ecc_code);
- for (i = 0; i < 3; i++, eccidx++)
- oob_buf[oob_config[eccidx]] = ecc_code[i];
- datidx += this->eccsize;
- }
- this->write_buf(mtd, this->data_poi, mtd->oobblock);
- break;
-
- /* Hardware ecc 8 byte / 512 byte data */
- case NAND_ECC_HW8_512:
- eccbytes += 2;
- /* Hardware ecc 6 byte / 512 byte data */
- case NAND_ECC_HW6_512:
- eccbytes += 3;
- /* Hardware ecc 3 byte / 256 data */
- /* Hardware ecc 3 byte / 512 byte data */
- case NAND_ECC_HW3_256:
- case NAND_ECC_HW3_512:
- eccbytes += 3;
- for (; eccsteps; eccsteps--) {
- /* enable hardware ecc logic for write */
- this->enable_hwecc(mtd, NAND_ECC_WRITE);
- this->write_buf(mtd, &this->data_poi[datidx], this->eccsize);
- this->calculate_ecc(mtd, &this->data_poi[datidx], ecc_code);
- for (i = 0; i < eccbytes; i++, eccidx++)
- oob_buf[oob_config[eccidx]] = ecc_code[i];
- /* If the hardware ecc provides syndromes then
- * the ecc code must be written immidiately after
- * the data bytes (words) */
- if (this->options & NAND_HWECC_SYNDROME)
- this->write_buf(mtd, ecc_code, eccbytes);
-
- datidx += this->eccsize;
- }
- break;
-
- default:
- printk (KERN_WARNING "Invalid NAND_ECC_MODE %d\n", this->eccmode);
- BUG();
- }
-
- /* Write out OOB data */
- if (this->options & NAND_HWECC_SYNDROME)
- this->write_buf(mtd, &oob_buf[oobsel->eccbytes], mtd->oobsize - oobsel->eccbytes);
- else
- this->write_buf(mtd, oob_buf, mtd->oobsize);
-
- /* Send command to actually program the data */
- this->cmdfunc (mtd, cached ? NAND_CMD_CACHEDPROG : NAND_CMD_PAGEPROG, -1, -1);
-
- if (!cached) {
- /* call wait ready function */
- status = this->waitfunc (mtd, this, FL_WRITING);
- /* See if device thinks it succeeded */
- if (status & 0x01) {
- DEBUG (MTD_DEBUG_LEVEL0, "%s: " "Failed write, page 0x%08x, ", __FUNCTION__, page);
- return -EIO;
- }
- } else {
- /* FIXME: Implement cached programming ! */
- /* wait until cache is ready*/
- // status = this->waitfunc (mtd, this, FL_CACHEDRPG);
- }
- return 0;
-}
-
-#ifdef CONFIG_MTD_NAND_VERIFY_WRITE
-/**
- * nand_verify_pages - [GENERIC] verify the chip contents after a write
- * @mtd: MTD device structure
- * @this: NAND chip structure
- * @page: startpage inside the chip, must be called with (page & this->pagemask)
- * @numpages: number of pages to verify
- * @oob_buf: out of band data buffer
- * @oobsel: out of band selecttion structre
- * @chipnr: number of the current chip
- * @oobmode: 1 = full buffer verify, 0 = ecc only
- *
- * The NAND device assumes that it is always writing to a cleanly erased page.
- * Hence, it performs its internal write verification only on bits that
- * transitioned from 1 to 0. The device does NOT verify the whole page on a
- * byte by byte basis. It is possible that the page was not completely erased
- * or the page is becoming unusable due to wear. The read with ECC would catch
- * the error later when the ECC page check fails, but we would rather catch
- * it early in the page write stage. Better to write no data than invalid data.
- */
-static int nand_verify_pages (struct mtd_info *mtd, struct nand_chip *this, int page, int numpages,
- u_char *oob_buf, struct nand_oobinfo *oobsel, int chipnr, int oobmode)
-{
- int i, j, datidx = 0, oobofs = 0, res = -EIO;
- int eccsteps = this->eccsteps;
- int hweccbytes;
- u_char oobdata[64];
-
- hweccbytes = (this->options & NAND_HWECC_SYNDROME) ? (oobsel->eccbytes / eccsteps) : 0;
-
- /* Send command to read back the first page */
- this->cmdfunc (mtd, NAND_CMD_READ0, 0, page);
-
- for(;;) {
- for (j = 0; j < eccsteps; j++) {
- /* Loop through and verify the data */
- if (this->verify_buf(mtd, &this->data_poi[datidx], mtd->eccsize)) {
- DEBUG (MTD_DEBUG_LEVEL0, "%s: " "Failed write verify, page 0x%08x ", __FUNCTION__, page);
- goto out;
- }
- datidx += mtd->eccsize;
- /* Have we a hw generator layout ? */
- if (!hweccbytes)
- continue;
- if (this->verify_buf(mtd, &this->oob_buf[oobofs], hweccbytes)) {
- DEBUG (MTD_DEBUG_LEVEL0, "%s: " "Failed write verify, page 0x%08x ", __FUNCTION__, page);
- goto out;
- }
- oobofs += hweccbytes;
- }
-
- /* check, if we must compare all data or if we just have to
- * compare the ecc bytes
- */
- if (oobmode) {
- if (this->verify_buf(mtd, &oob_buf[oobofs], mtd->oobsize - hweccbytes * eccsteps)) {
- DEBUG (MTD_DEBUG_LEVEL0, "%s: " "Failed write verify, page 0x%08x ", __FUNCTION__, page);
- goto out;
- }
- } else {
- /* Read always, else autoincrement fails */
- this->read_buf(mtd, oobdata, mtd->oobsize - hweccbytes * eccsteps);
-
- if (oobsel->useecc != MTD_NANDECC_OFF && !hweccbytes) {
- int ecccnt = oobsel->eccbytes;
-
- for (i = 0; i < ecccnt; i++) {
- int idx = oobsel->eccpos[i];
- if (oobdata[idx] != oob_buf[oobofs + idx] ) {
- DEBUG (MTD_DEBUG_LEVEL0,
- "%s: Failed ECC write "
- "verify, page 0x%08x, " "%6i bytes were succesful\n", __FUNCTION__, page, i);
- goto out;
- }
- }
- }
- }
- oobofs += mtd->oobsize - hweccbytes * eccsteps;
- page++;
- numpages--;
-
- /* Apply delay or wait for ready/busy pin
- * Do this before the AUTOINCR check, so no problems
- * arise if a chip which does auto increment
- * is marked as NOAUTOINCR by the board driver.
- * Do this also before returning, so the chip is
- * ready for the next command.
- */
- if (!this->dev_ready)
- udelay (this->chip_delay);
- else
- while (!this->dev_ready(mtd));
-
- /* All done, return happy */
- if (!numpages)
- return 0;
-
-
- /* Check, if the chip supports auto page increment */
- if (!NAND_CANAUTOINCR(this))
- this->cmdfunc (mtd, NAND_CMD_READ0, 0x00, page);
- }
- /*
- * Terminate the read command. We come here in case of an error
- * So we must issue a reset command.
- */
-out:
- this->cmdfunc (mtd, NAND_CMD_RESET, -1, -1);
- return res;
-}
-#endif
-
-/**
- * nand_read - [MTD Interface] MTD compability function for nand_read_ecc
- * @mtd: MTD device structure
- * @from: offset to read from
- * @len: number of bytes to read
- * @retlen: pointer to variable to store the number of read bytes
- * @buf: the databuffer to put data
- *
- * This function simply calls nand_read_ecc with oob buffer and oobsel = NULL
-*/
-static int nand_read (struct mtd_info *mtd, loff_t from, size_t len, size_t * retlen, u_char * buf)
-{
- return nand_read_ecc (mtd, from, len, retlen, buf, NULL, NULL);
-}
-
-
-/**
- * nand_read_ecc - [MTD Interface] Read data with ECC
- * @mtd: MTD device structure
- * @from: offset to read from
- * @len: number of bytes to read
- * @retlen: pointer to variable to store the number of read bytes
- * @buf: the databuffer to put data
- * @oob_buf: filesystem supplied oob data buffer
- * @oobsel: oob selection structure
- *
- * NAND read with ECC
- */
-static int nand_read_ecc (struct mtd_info *mtd, loff_t from, size_t len,
- size_t * retlen, u_char * buf, u_char * oob_buf, struct nand_oobinfo *oobsel)
-{
- int i, j, col, realpage, page, end, ecc, chipnr, sndcmd = 1;
- int read = 0, oob = 0, ecc_status = 0, ecc_failed = 0;
- struct nand_chip *this = mtd->priv;
- u_char *data_poi, *oob_data = oob_buf;
- u_char ecc_calc[32];
- u_char ecc_code[32];
- int eccmode, eccsteps;
- int *oob_config, datidx;
- int blockcheck = (1 << (this->phys_erase_shift - this->page_shift)) - 1;
- int eccbytes = 3;
- int compareecc = 1;
- int oobreadlen;
-
-
- DEBUG (MTD_DEBUG_LEVEL3, "nand_read_ecc: from = 0x%08x, len = %i\n", (unsigned int) from, (int) len);
-
- /* Do not allow reads past end of device */
- if ((from + len) > mtd->size) {
- DEBUG (MTD_DEBUG_LEVEL0, "nand_read_ecc: Attempt read beyond end of device\n");
- *retlen = 0;
- return -EINVAL;
- }
-
- /* Grab the lock and see if the device is available */
- nand_get_chip (this, mtd ,FL_READING);
-
- /* use userspace supplied oobinfo, if zero */
- if (oobsel == NULL)
- oobsel = &mtd->oobinfo;
-
- /* Autoplace of oob data ? Use the default placement scheme */
- if (oobsel->useecc == MTD_NANDECC_AUTOPLACE)
- oobsel = this->autooob;
-
- eccmode = oobsel->useecc ? this->eccmode : NAND_ECC_NONE;
- oob_config = oobsel->eccpos;
-
- /* Select the NAND device */
- chipnr = (int)(from >> this->chip_shift);
- this->select_chip(mtd, chipnr);
-
- /* First we calculate the starting page */
- realpage = (int) (from >> this->page_shift);
- page = realpage & this->pagemask;
-
- /* Get raw starting column */
- col = from & (mtd->oobblock - 1);
-
- end = mtd->oobblock;
- ecc = this->eccsize;
- switch (eccmode) {
- case NAND_ECC_HW6_512: /* Hardware ECC 6 byte / 512 byte data */
- eccbytes = 6;
- break;
- case NAND_ECC_HW8_512: /* Hardware ECC 8 byte / 512 byte data */
- eccbytes = 8;
- break;
- case NAND_ECC_NONE:
- compareecc = 0;
- break;
- }
-
- if (this->options & NAND_HWECC_SYNDROME)
- compareecc = 0;
-
- oobreadlen = mtd->oobsize;
- if (this->options & NAND_HWECC_SYNDROME)
- oobreadlen -= oobsel->eccbytes;
-
- /* Loop until all data read */
- while (read < len) {
-
- int aligned = (!col && (len - read) >= end);
- /*
- * If the read is not page aligned, we have to read into data buffer
- * due to ecc, else we read into return buffer direct
- */
- if (aligned)
- data_poi = &buf[read];
- else
- data_poi = this->data_buf;
-
- /* Check, if we have this page in the buffer
- *
- * FIXME: Make it work when we must provide oob data too,
- * check the usage of data_buf oob field
- */
- if (realpage == this->pagebuf && !oob_buf) {
- /* aligned read ? */
- if (aligned)
- memcpy (data_poi, this->data_buf, end);
- goto readdata;
- }
-
- /* Check, if we must send the read command */
- if (sndcmd) {
- this->cmdfunc (mtd, NAND_CMD_READ0, 0x00, page);
- sndcmd = 0;
- }
-
- /* get oob area, if we have no oob buffer from fs-driver */
- if (!oob_buf || oobsel->useecc == MTD_NANDECC_AUTOPLACE)
- oob_data = &this->data_buf[end];
-
- eccsteps = this->eccsteps;
-
- switch (eccmode) {
- case NAND_ECC_NONE: { /* No ECC, Read in a page */
- static unsigned long lastwhinge = 0;
- if ((lastwhinge / HZ) != (jiffies / HZ)) {
- printk (KERN_WARNING "Reading data from NAND FLASH without ECC is not recommended\n");
- lastwhinge = jiffies;
- }
- this->read_buf(mtd, data_poi, end);
- break;
- }
-
- case NAND_ECC_SOFT: /* Software ECC 3/256: Read in a page + oob data */
- this->read_buf(mtd, data_poi, end);
- for (i = 0, datidx = 0; eccsteps; eccsteps--, i+=3, datidx += ecc)
- this->calculate_ecc(mtd, &data_poi[datidx], &ecc_calc[i]);
- break;
-
- case NAND_ECC_HW3_256: /* Hardware ECC 3 byte /256 byte data */
- case NAND_ECC_HW3_512: /* Hardware ECC 3 byte /512 byte data */
- case NAND_ECC_HW6_512: /* Hardware ECC 6 byte / 512 byte data */
- case NAND_ECC_HW8_512: /* Hardware ECC 8 byte / 512 byte data */
- for (i = 0, datidx = 0; eccsteps; eccsteps--, i+=eccbytes, datidx += ecc) {
- this->enable_hwecc(mtd, NAND_ECC_READ);
- this->read_buf(mtd, &data_poi[datidx], ecc);
-
- /* HW ecc with syndrome calculation must read the
- * syndrome from flash immidiately after the data */
- if (!compareecc) {
- /* Some hw ecc generators need to know when the
- * syndrome is read from flash */
- this->enable_hwecc(mtd, NAND_ECC_READSYN);
- this->read_buf(mtd, &oob_data[i], eccbytes);
- /* We calc error correction directly, it checks the hw
- * generator for an error, reads back the syndrome and
- * does the error correction on the fly */
- if (this->correct_data(mtd, &data_poi[datidx], &oob_data[i], &ecc_code[i]) == -1) {
- DEBUG (MTD_DEBUG_LEVEL0, "nand_read_ecc: "
- "Failed ECC read, page 0x%08x on chip %d\n", page, chipnr);
- ecc_failed++;
- }
- } else {
- this->calculate_ecc(mtd, &data_poi[datidx], &ecc_calc[i]);
- }
- }
- break;
-
- default:
- printk (KERN_WARNING "Invalid NAND_ECC_MODE %d\n", this->eccmode);
- BUG();
- }
-
- /* read oobdata */
- this->read_buf(mtd, &oob_data[mtd->oobsize - oobreadlen], oobreadlen);
-
- /* Skip ECC check, if not requested (ECC_NONE or HW_ECC with syndromes) */
- if (!compareecc)
- goto readoob;
-
- /* Pick the ECC bytes out of the oob data */
- for (j = 0; j < oobsel->eccbytes; j++)
- ecc_code[j] = oob_data[oob_config[j]];
-
- /* correct data, if neccecary */
- for (i = 0, j = 0, datidx = 0; i < this->eccsteps; i++, datidx += ecc) {
- ecc_status = this->correct_data(mtd, &data_poi[datidx], &ecc_code[j], &ecc_calc[j]);
-
- /* Get next chunk of ecc bytes */
- j += eccbytes;
-
- /* Check, if we have a fs supplied oob-buffer,
- * This is the legacy mode. Used by YAFFS1
- * Should go away some day
- */
- if (oob_buf && oobsel->useecc == MTD_NANDECC_PLACE) {
- int *p = (int *)(&oob_data[mtd->oobsize]);
- p[i] = ecc_status;
- }
-
- if (ecc_status == -1) {
- DEBUG (MTD_DEBUG_LEVEL0, "nand_read_ecc: " "Failed ECC read, page 0x%08x\n", page);
- ecc_failed++;
- }
- }
-
- readoob:
- /* check, if we have a fs supplied oob-buffer */
- if (oob_buf) {
- /* without autoplace. Legacy mode used by YAFFS1 */
- switch(oobsel->useecc) {
- case MTD_NANDECC_AUTOPLACE:
- /* Walk through the autoplace chunks */
- for (i = 0, j = 0; j < mtd->oobavail; i++) {
- int from = oobsel->oobfree[i][0];
- int num = oobsel->oobfree[i][1];
- memcpy(&oob_buf[oob], &oob_data[from], num);
- j+= num;
- }
- oob += mtd->oobavail;
- break;
- case MTD_NANDECC_PLACE:
- /* YAFFS1 legacy mode */
- oob_data += this->eccsteps * sizeof (int);
- default:
- oob_data += mtd->oobsize;
- }
- }
- readdata:
- /* Partial page read, transfer data into fs buffer */
- if (!aligned) {
- for (j = col; j < end && read < len; j++)
- buf[read++] = data_poi[j];
- this->pagebuf = realpage;
- } else
- read += mtd->oobblock;
-
- /* Apply delay or wait for ready/busy pin
- * Do this before the AUTOINCR check, so no problems
- * arise if a chip which does auto increment
- * is marked as NOAUTOINCR by the board driver.
- */
- if (!this->dev_ready)
- udelay (this->chip_delay);
- else
- while (!this->dev_ready(mtd));
-
- if (read == len)
- break;
-
- /* For subsequent reads align to page boundary. */
- col = 0;
- /* Increment page address */
- realpage++;
-
- page = realpage & this->pagemask;
- /* Check, if we cross a chip boundary */
- if (!page) {
- chipnr++;
- this->select_chip(mtd, -1);
- this->select_chip(mtd, chipnr);
- }
- /* Check, if the chip supports auto page increment
- * or if we have hit a block boundary.
- */
- if (!NAND_CANAUTOINCR(this) || !(page & blockcheck))
- sndcmd = 1;
- }
-
- /* Deselect and wake up anyone waiting on the device */
- nand_release_chip(mtd);
-
- /*
- * Return success, if no ECC failures, else -EBADMSG
- * fs driver will take care of that, because
- * retlen == desired len and result == -EBADMSG
- */
- *retlen = read;
- return ecc_failed ? -EBADMSG : 0;
-}
-
-/**
- * nand_read_oob - [MTD Interface] NAND read out-of-band
- * @mtd: MTD device structure
- * @from: offset to read from
- * @len: number of bytes to read
- * @retlen: pointer to variable to store the number of read bytes
- * @buf: the databuffer to put data
- *
- * NAND read out-of-band data from the spare area
- */
-static int nand_read_oob (struct mtd_info *mtd, loff_t from, size_t len, size_t * retlen, u_char * buf)
-{
- int i, col, page, chipnr;
- struct nand_chip *this = mtd->priv;
- int blockcheck = (1 << (this->phys_erase_shift - this->page_shift)) - 1;
-
- DEBUG (MTD_DEBUG_LEVEL3, "nand_read_oob: from = 0x%08x, len = %i\n", (unsigned int) from, (int) len);
-
- /* Shift to get page */
- page = (int)(from >> this->page_shift);
- chipnr = (int)(from >> this->chip_shift);
-
- /* Mask to get column */
- col = from & (mtd->oobsize - 1);
-
- /* Initialize return length value */
- *retlen = 0;
-
- /* Do not allow reads past end of device */
- if ((from + len) > mtd->size) {
- DEBUG (MTD_DEBUG_LEVEL0, "nand_read_oob: Attempt read beyond end of device\n");
- *retlen = 0;
- return -EINVAL;
- }
-
- /* Grab the lock and see if the device is available */
- nand_get_chip (this, mtd , FL_READING);
-
- /* Select the NAND device */
- this->select_chip(mtd, chipnr);
-
- /* Send the read command */
- this->cmdfunc (mtd, NAND_CMD_READOOB, col, page & this->pagemask);
- /*
- * Read the data, if we read more than one page
- * oob data, let the device transfer the data !
- */
- i = 0;
- while (i < len) {
- int thislen = mtd->oobsize - col;
- thislen = min_t(int, thislen, len);
- this->read_buf(mtd, &buf[i], thislen);
- i += thislen;
-
- /* Apply delay or wait for ready/busy pin
- * Do this before the AUTOINCR check, so no problems
- * arise if a chip which does auto increment
- * is marked as NOAUTOINCR by the board driver.
- */
- if (!this->dev_ready)
- udelay (this->chip_delay);
- else
- while (!this->dev_ready(mtd));
-
- /* Read more ? */
- if (i < len) {
- page++;
- col = 0;
-
- /* Check, if we cross a chip boundary */
- if (!(page & this->pagemask)) {
- chipnr++;
- this->select_chip(mtd, -1);
- this->select_chip(mtd, chipnr);
- }
-
- /* Check, if the chip supports auto page increment
- * or if we have hit a block boundary.
- */
- if (!NAND_CANAUTOINCR(this) || !(page & blockcheck)) {
- /* For subsequent page reads set offset to 0 */
- this->cmdfunc (mtd, NAND_CMD_READOOB, 0x0, page & this->pagemask);
- }
- }
- }
-
- /* Deselect and wake up anyone waiting on the device */
- nand_release_chip(mtd);
-
- /* Return happy */
- *retlen = len;
- return 0;
-}
-
-/**
- * nand_read_raw - [GENERIC] Read raw data including oob into buffer
- * @mtd: MTD device structure
- * @buf: temporary buffer
- * @from: offset to read from
- * @len: number of bytes to read
- * @ooblen: number of oob data bytes to read
- *
- * Read raw data including oob into buffer
- */
-int nand_read_raw (struct mtd_info *mtd, uint8_t *buf, loff_t from, size_t len, size_t ooblen)
-{
- struct nand_chip *this = mtd->priv;
- int page = (int) (from >> this->page_shift);
- int chip = (int) (from >> this->chip_shift);
- int sndcmd = 1;
- int cnt = 0;
- int pagesize = mtd->oobblock + mtd->oobsize;
- int blockcheck = (1 << (this->phys_erase_shift - this->page_shift)) - 1;
-
- /* Do not allow reads past end of device */
- if ((from + len) > mtd->size) {
- DEBUG (MTD_DEBUG_LEVEL0, "nand_read_raw: Attempt read beyond end of device\n");
- return -EINVAL;
- }
-
- /* Grab the lock and see if the device is available */
- nand_get_chip (this, mtd , FL_READING);
-
- this->select_chip (mtd, chip);
-
- /* Add requested oob length */
- len += ooblen;
-
- while (len) {
- if (sndcmd)
- this->cmdfunc (mtd, NAND_CMD_READ0, 0, page & this->pagemask);
- sndcmd = 0;
-
- this->read_buf (mtd, &buf[cnt], pagesize);
-
- len -= pagesize;
- cnt += pagesize;
- page++;
-
- if (!this->dev_ready)
- udelay (this->chip_delay);
- else
- while (!this->dev_ready(mtd));
-
- /* Check, if the chip supports auto page increment */
- if (!NAND_CANAUTOINCR(this) || !(page & blockcheck))
- sndcmd = 1;
- }
-
- /* Deselect and wake up anyone waiting on the device */
- nand_release_chip(mtd);
- return 0;
-}
-
-
-/**
- * nand_prepare_oobbuf - [GENERIC] Prepare the out of band buffer
- * @mtd: MTD device structure
- * @fsbuf: buffer given by fs driver
- * @oobsel: out of band selection structre
- * @autoplace: 1 = place given buffer into the oob bytes
- * @numpages: number of pages to prepare
- *
- * Return:
- * 1. Filesystem buffer available and autoplacement is off,
- * return filesystem buffer
- * 2. No filesystem buffer or autoplace is off, return internal
- * buffer
- * 3. Filesystem buffer is given and autoplace selected
- * put data from fs buffer into internal buffer and
- * retrun internal buffer
- *
- * Note: The internal buffer is filled with 0xff. This must
- * be done only once, when no autoplacement happens
- * Autoplacement sets the buffer dirty flag, which
- * forces the 0xff fill before using the buffer again.
- *
-*/
-static u_char * nand_prepare_oobbuf (struct mtd_info *mtd, u_char *fsbuf, struct nand_oobinfo *oobsel,
- int autoplace, int numpages)
-{
- struct nand_chip *this = mtd->priv;
- int i, len, ofs;
-
- /* Zero copy fs supplied buffer */
- if (fsbuf && !autoplace)
- return fsbuf;
-
- /* Check, if the buffer must be filled with ff again */
- if (this->oobdirty) {
- memset (this->oob_buf, 0xff,
- mtd->oobsize << (this->phys_erase_shift - this->page_shift));
- this->oobdirty = 0;
- }
-
- /* If we have no autoplacement or no fs buffer use the internal one */
- if (!autoplace || !fsbuf)
- return this->oob_buf;
-
- /* Walk through the pages and place the data */
- this->oobdirty = 1;
- ofs = 0;
- while (numpages--) {
- for (i = 0, len = 0; len < mtd->oobavail; i++) {
- int to = ofs + oobsel->oobfree[i][0];
- int num = oobsel->oobfree[i][1];
- memcpy (&this->oob_buf[to], fsbuf, num);
- len += num;
- fsbuf += num;
- }
- ofs += mtd->oobavail;
- }
- return this->oob_buf;
-}
-
-#define NOTALIGNED(x) (x & (mtd->oobblock-1)) != 0
-
-/**
- * nand_write - [MTD Interface] compability function for nand_write_ecc
- * @mtd: MTD device structure
- * @to: offset to write to
- * @len: number of bytes to write
- * @retlen: pointer to variable to store the number of written bytes
- * @buf: the data to write
- *
- * This function simply calls nand_write_ecc with oob buffer and oobsel = NULL
- *
-*/
-static int nand_write (struct mtd_info *mtd, loff_t to, size_t len, size_t * retlen, const u_char * buf)
-{
- return (nand_write_ecc (mtd, to, len, retlen, buf, NULL, NULL));
-}
-
-/**
- * nand_write_ecc - [MTD Interface] NAND write with ECC
- * @mtd: MTD device structure
- * @to: offset to write to
- * @len: number of bytes to write
- * @retlen: pointer to variable to store the number of written bytes
- * @buf: the data to write
- * @eccbuf: filesystem supplied oob data buffer
- * @oobsel: oob selection structure
- *
- * NAND write with ECC
- */
-static int nand_write_ecc (struct mtd_info *mtd, loff_t to, size_t len,
- size_t * retlen, const u_char * buf, u_char * eccbuf, struct nand_oobinfo *oobsel)
-{
- int startpage, page, ret = -EIO, oob = 0, written = 0, chipnr;
- int autoplace = 0, numpages, totalpages;
- struct nand_chip *this = mtd->priv;
- u_char *oobbuf, *bufstart;
- int ppblock = (1 << (this->phys_erase_shift - this->page_shift));
-
- DEBUG (MTD_DEBUG_LEVEL3, "nand_write_ecc: to = 0x%08x, len = %i\n", (unsigned int) to, (int) len);
-
- /* Initialize retlen, in case of early exit */
- *retlen = 0;
-
- /* Do not allow write past end of device */
- if ((to + len) > mtd->size) {
- DEBUG (MTD_DEBUG_LEVEL0, "nand_write_ecc: Attempt to write past end of page\n");
- return -EINVAL;
- }
-
- /* reject writes, which are not page aligned */
- if (NOTALIGNED (to) || NOTALIGNED(len)) {
- printk (KERN_NOTICE "nand_write_ecc: Attempt to write not page aligned data\n");
- return -EINVAL;
- }
-
- /* Grab the lock and see if the device is available */
- nand_get_chip (this, mtd, FL_WRITING);
-
- /* Calculate chipnr */
- chipnr = (int)(to >> this->chip_shift);
- /* Select the NAND device */
- this->select_chip(mtd, chipnr);
-
- /* Check, if it is write protected */
- if (nand_check_wp(mtd))
- goto out;
-
- /* if oobsel is NULL, use chip defaults */
- if (oobsel == NULL)
- oobsel = &mtd->oobinfo;
-
- /* Autoplace of oob data ? Use the default placement scheme */
- if (oobsel->useecc == MTD_NANDECC_AUTOPLACE) {
- oobsel = this->autooob;
- autoplace = 1;
- }
-
- /* Setup variables and oob buffer */
- totalpages = len >> this->page_shift;
- page = (int) (to >> this->page_shift);
- /* Invalidate the page cache, if we write to the cached page */
- if (page <= this->pagebuf && this->pagebuf < (page + totalpages))
- this->pagebuf = -1;
-
- /* Set it relative to chip */
- page &= this->pagemask;
- startpage = page;
- /* Calc number of pages we can write in one go */
- numpages = min (ppblock - (startpage & (ppblock - 1)), totalpages);
- oobbuf = nand_prepare_oobbuf (mtd, eccbuf, oobsel, autoplace, numpages);
- bufstart = (u_char *)buf;
-
- /* Loop until all data is written */
- while (written < len) {
-
- this->data_poi = (u_char*) &buf[written];
- /* Write one page. If this is the last page to write
- * or the last page in this block, then use the
- * real pageprogram command, else select cached programming
- * if supported by the chip.
- */
- ret = nand_write_page (mtd, this, page, &oobbuf[oob], oobsel, (--numpages > 0));
- if (ret) {
- DEBUG (MTD_DEBUG_LEVEL0, "nand_write_ecc: write_page failed %d\n", ret);
- goto out;
- }
- /* Next oob page */
- oob += mtd->oobsize;
- /* Update written bytes count */
- written += mtd->oobblock;
- if (written == len)
- goto cmp;
-
- /* Increment page address */
- page++;
-
- /* Have we hit a block boundary ? Then we have to verify and
- * if verify is ok, we have to setup the oob buffer for
- * the next pages.
- */
- if (!(page & (ppblock - 1))){
- int ofs;
- this->data_poi = bufstart;
- ret = nand_verify_pages (mtd, this, startpage,
- page - startpage,
- oobbuf, oobsel, chipnr, (eccbuf != NULL));
- if (ret) {
- DEBUG (MTD_DEBUG_LEVEL0, "nand_write_ecc: verify_pages failed %d\n", ret);
- goto out;
- }
- *retlen = written;
-
- ofs = autoplace ? mtd->oobavail : mtd->oobsize;
- if (eccbuf)
- eccbuf += (page - startpage) * ofs;
- totalpages -= page - startpage;
- numpages = min (totalpages, ppblock);
- page &= this->pagemask;
- startpage = page;
- oobbuf = nand_prepare_oobbuf (mtd, eccbuf, oobsel,
- autoplace, numpages);
- /* Check, if we cross a chip boundary */
- if (!page) {
- chipnr++;
- this->select_chip(mtd, -1);
- this->select_chip(mtd, chipnr);
- }
- }
- }
- /* Verify the remaining pages */
-cmp:
- this->data_poi = bufstart;
- ret = nand_verify_pages (mtd, this, startpage, totalpages,
- oobbuf, oobsel, chipnr, (eccbuf != NULL));
- if (!ret)
- *retlen = written;
- else
- DEBUG (MTD_DEBUG_LEVEL0, "nand_write_ecc: verify_pages failed %d\n", ret);
-
-out:
- /* Deselect and wake up anyone waiting on the device */
- nand_release_chip(mtd);
-
- return ret;
-}
-
-
-/**
- * nand_write_oob - [MTD Interface] NAND write out-of-band
- * @mtd: MTD device structure
- * @to: offset to write to
- * @len: number of bytes to write
- * @retlen: pointer to variable to store the number of written bytes
- * @buf: the data to write
- *
- * NAND write out-of-band
- */
-static int nand_write_oob (struct mtd_info *mtd, loff_t to, size_t len, size_t * retlen, const u_char * buf)
-{
- int column, page, status, ret = -EIO, chipnr;
- struct nand_chip *this = mtd->priv;
-
- DEBUG (MTD_DEBUG_LEVEL3, "nand_write_oob: to = 0x%08x, len = %i\n", (unsigned int) to, (int) len);
-
- /* Shift to get page */
- page = (int) (to >> this->page_shift);
- chipnr = (int) (to >> this->chip_shift);
-
- /* Mask to get column */
- column = to & (mtd->oobsize - 1);
-
- /* Initialize return length value */
- *retlen = 0;
-
- /* Do not allow write past end of page */
- if ((column + len) > mtd->oobsize) {
- DEBUG (MTD_DEBUG_LEVEL0, "nand_write_oob: Attempt to write past end of page\n");
- return -EINVAL;
- }
-
- /* Grab the lock and see if the device is available */
- nand_get_chip (this, mtd, FL_WRITING);
-
- /* Select the NAND device */
- this->select_chip(mtd, chipnr);
-
- /* Reset the chip. Some chips (like the Toshiba TC5832DC found
- in one of my DiskOnChip 2000 test units) will clear the whole
- data page too if we don't do this. I have no clue why, but
- I seem to have 'fixed' it in the doc2000 driver in
- August 1999. dwmw2. */
- this->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
-
- /* Check, if it is write protected */
- if (nand_check_wp(mtd))
- goto out;
-
- /* Invalidate the page cache, if we write to the cached page */
- if (page == this->pagebuf)
- this->pagebuf = -1;
-
- if (NAND_MUST_PAD(this)) {
- /* Write out desired data */
- this->cmdfunc (mtd, NAND_CMD_SEQIN, mtd->oobblock, page & this->pagemask);
- /* prepad 0xff for partial programming */
- this->write_buf(mtd, ffchars, column);
- /* write data */
- this->write_buf(mtd, buf, len);
- /* postpad 0xff for partial programming */
- this->write_buf(mtd, ffchars, mtd->oobsize - (len+column));
- } else {
- /* Write out desired data */
- this->cmdfunc (mtd, NAND_CMD_SEQIN, mtd->oobblock + column, page & this->pagemask);
- /* write data */
- this->write_buf(mtd, buf, len);
- }
- /* Send command to program the OOB data */
- this->cmdfunc (mtd, NAND_CMD_PAGEPROG, -1, -1);
-
- status = this->waitfunc (mtd, this, FL_WRITING);
-
- /* See if device thinks it succeeded */
- if (status & 0x01) {
- DEBUG (MTD_DEBUG_LEVEL0, "nand_write_oob: " "Failed write, page 0x%08x\n", page);
- ret = -EIO;
- goto out;
- }
- /* Return happy */
- *retlen = len;
-
-#ifdef CONFIG_MTD_NAND_VERIFY_WRITE
- /* Send command to read back the data */
- this->cmdfunc (mtd, NAND_CMD_READOOB, column, page & this->pagemask);
-
- if (this->verify_buf(mtd, buf, len)) {
- DEBUG (MTD_DEBUG_LEVEL0, "nand_write_oob: " "Failed write verify, page 0x%08x\n", page);
- ret = -EIO;
- goto out;
- }
-#endif
- ret = 0;
-out:
- /* Deselect and wake up anyone waiting on the device */
- nand_release_chip(mtd);
-
- return ret;
-}
-
-
-/**
- * nand_writev - [MTD Interface] compabilty function for nand_writev_ecc
- * @mtd: MTD device structure
- * @vecs: the iovectors to write
- * @count: number of vectors
- * @to: offset to write to
- * @retlen: pointer to variable to store the number of written bytes
- *
- * NAND write with kvec. This just calls the ecc function
- */
-static int nand_writev (struct mtd_info *mtd, const struct kvec *vecs, unsigned long count,
- loff_t to, size_t * retlen)
-{
- return (nand_writev_ecc (mtd, vecs, count, to, retlen, NULL, NULL));
-}
-
-/**
- * nand_writev_ecc - [MTD Interface] write with iovec with ecc
- * @mtd: MTD device structure
- * @vecs: the iovectors to write
- * @count: number of vectors
- * @to: offset to write to
- * @retlen: pointer to variable to store the number of written bytes
- * @eccbuf: filesystem supplied oob data buffer
- * @oobsel: oob selection structure
- *
- * NAND write with iovec with ecc
- */
-static int nand_writev_ecc (struct mtd_info *mtd, const struct kvec *vecs, unsigned long count,
- loff_t to, size_t * retlen, u_char *eccbuf, struct nand_oobinfo *oobsel)
-{
- int i, page, len, total_len, ret = -EIO, written = 0, chipnr;
- int oob, numpages, autoplace = 0, startpage;
- struct nand_chip *this = mtd->priv;
- int ppblock = (1 << (this->phys_erase_shift - this->page_shift));
- u_char *oobbuf, *bufstart;
-
- /* Preset written len for early exit */
- *retlen = 0;
-
- /* Calculate total length of data */
- total_len = 0;
- for (i = 0; i < count; i++)
- total_len += (int) vecs[i].iov_len;
-
- DEBUG (MTD_DEBUG_LEVEL3,
- "nand_writev: to = 0x%08x, len = %i, count = %ld\n", (unsigned int) to, (unsigned int) total_len, count);
-
- /* Do not allow write past end of page */
- if ((to + total_len) > mtd->size) {
- DEBUG (MTD_DEBUG_LEVEL0, "nand_writev: Attempted write past end of device\n");
- return -EINVAL;
- }
-
- /* reject writes, which are not page aligned */
- if (NOTALIGNED (to) || NOTALIGNED(total_len)) {
- printk (KERN_NOTICE "nand_write_ecc: Attempt to write not page aligned data\n");
- return -EINVAL;
- }
-
- /* Grab the lock and see if the device is available */
- nand_get_chip (this, mtd, FL_WRITING);
-
- /* Get the current chip-nr */
- chipnr = (int) (to >> this->chip_shift);
- /* Select the NAND device */
- this->select_chip(mtd, chipnr);
-
- /* Check, if it is write protected */
- if (nand_check_wp(mtd))
- goto out;
-
- /* if oobsel is NULL, use chip defaults */
- if (oobsel == NULL)
- oobsel = &mtd->oobinfo;
-
- /* Autoplace of oob data ? Use the default placement scheme */
- if (oobsel->useecc == MTD_NANDECC_AUTOPLACE) {
- oobsel = this->autooob;
- autoplace = 1;
- }
-
- /* Setup start page */
- page = (int) (to >> this->page_shift);
- /* Invalidate the page cache, if we write to the cached page */
- if (page <= this->pagebuf && this->pagebuf < ((to + total_len) >> this->page_shift))
- this->pagebuf = -1;
-
- startpage = page & this->pagemask;
-
- /* Loop until all kvec' data has been written */
- len = 0;
- while (count) {
- /* If the given tuple is >= pagesize then
- * write it out from the iov
- */
- if ((vecs->iov_len - len) >= mtd->oobblock) {
- /* Calc number of pages we can write
- * out of this iov in one go */
- numpages = (vecs->iov_len - len) >> this->page_shift;
- /* Do not cross block boundaries */
- numpages = min (ppblock - (startpage & (ppblock - 1)), numpages);
- oobbuf = nand_prepare_oobbuf (mtd, NULL, oobsel, autoplace, numpages);
- bufstart = (u_char *)vecs->iov_base;
- bufstart += len;
- this->data_poi = bufstart;
- oob = 0;
- for (i = 1; i <= numpages; i++) {
- /* Write one page. If this is the last page to write
- * then use the real pageprogram command, else select
- * cached programming if supported by the chip.
- */
- ret = nand_write_page (mtd, this, page & this->pagemask,
- &oobbuf[oob], oobsel, i != numpages);
- if (ret)
- goto out;
- this->data_poi += mtd->oobblock;
- len += mtd->oobblock;
- oob += mtd->oobsize;
- page++;
- }
- /* Check, if we have to switch to the next tuple */
- if (len >= (int) vecs->iov_len) {
- vecs++;
- len = 0;
- count--;
- }
- } else {
- /* We must use the internal buffer, read data out of each
- * tuple until we have a full page to write
- */
- int cnt = 0;
- while (cnt < mtd->oobblock) {
- if (vecs->iov_base != NULL && vecs->iov_len)
- this->data_buf[cnt++] = ((u_char *) vecs->iov_base)[len++];
- /* Check, if we have to switch to the next tuple */
- if (len >= (int) vecs->iov_len) {
- vecs++;
- len = 0;
- count--;
- }
- }
- this->pagebuf = page;
- this->data_poi = this->data_buf;
- bufstart = this->data_poi;
- numpages = 1;
- oobbuf = nand_prepare_oobbuf (mtd, NULL, oobsel, autoplace, numpages);
- ret = nand_write_page (mtd, this, page & this->pagemask,
- oobbuf, oobsel, 0);
- if (ret)
- goto out;
- page++;
- }
-
- this->data_poi = bufstart;
- ret = nand_verify_pages (mtd, this, startpage, numpages, oobbuf, oobsel, chipnr, 0);
- if (ret)
- goto out;
-
- written += mtd->oobblock * numpages;
- /* All done ? */
- if (!count)
- break;
-
- startpage = page & this->pagemask;
- /* Check, if we cross a chip boundary */
- if (!startpage) {
- chipnr++;
- this->select_chip(mtd, -1);
- this->select_chip(mtd, chipnr);
- }
- }
- ret = 0;
-out:
- /* Deselect and wake up anyone waiting on the device */
- nand_release_chip(mtd);
-
- *retlen = written;
- return ret;
-}
-
-/**
- * single_erease_cmd - [GENERIC] NAND standard block erase command function
- * @mtd: MTD device structure
- * @page: the page address of the block which will be erased
- *
- * Standard erase command for NAND chips
- */
-static void single_erase_cmd (struct mtd_info *mtd, int page)
-{
- struct nand_chip *this = mtd->priv;
- /* Send commands to erase a block */
- this->cmdfunc (mtd, NAND_CMD_ERASE1, -1, page);
- this->cmdfunc (mtd, NAND_CMD_ERASE2, -1, -1);
-}
-
-/**
- * multi_erease_cmd - [GENERIC] AND specific block erase command function
- * @mtd: MTD device structure
- * @page: the page address of the block which will be erased
- *
- * AND multi block erase command function
- * Erase 4 consecutive blocks
- */
-static void multi_erase_cmd (struct mtd_info *mtd, int page)
-{
- struct nand_chip *this = mtd->priv;
- /* Send commands to erase a block */
- this->cmdfunc (mtd, NAND_CMD_ERASE1, -1, page++);
- this->cmdfunc (mtd, NAND_CMD_ERASE1, -1, page++);
- this->cmdfunc (mtd, NAND_CMD_ERASE1, -1, page++);
- this->cmdfunc (mtd, NAND_CMD_ERASE1, -1, page);
- this->cmdfunc (mtd, NAND_CMD_ERASE2, -1, -1);
-}
-
-/**
- * nand_erase - [MTD Interface] erase block(s)
- * @mtd: MTD device structure
- * @instr: erase instruction
- *
- * Erase one ore more blocks
- */
-static int nand_erase (struct mtd_info *mtd, struct erase_info *instr)
-{
- return nand_erase_nand (mtd, instr, 0);
-}
-
-/**
- * nand_erase_intern - [NAND Interface] erase block(s)
- * @mtd: MTD device structure
- * @instr: erase instruction
- * @allowbbt: allow erasing the bbt area
- *
- * Erase one ore more blocks
- */
-int nand_erase_nand (struct mtd_info *mtd, struct erase_info *instr, int allowbbt)
-{
- int page, len, status, pages_per_block, ret, chipnr;
- struct nand_chip *this = mtd->priv;
-
- DEBUG (MTD_DEBUG_LEVEL3,
- "nand_erase: start = 0x%08x, len = %i\n", (unsigned int) instr->addr, (unsigned int) instr->len);
-
- /* Start address must align on block boundary */
- if (instr->addr & ((1 << this->phys_erase_shift) - 1)) {
- DEBUG (MTD_DEBUG_LEVEL0, "nand_erase: Unaligned address\n");
- return -EINVAL;
- }
-
- /* Length must align on block boundary */
- if (instr->len & ((1 << this->phys_erase_shift) - 1)) {
- DEBUG (MTD_DEBUG_LEVEL0, "nand_erase: Length not block aligned\n");
- return -EINVAL;
- }
-
- /* Do not allow erase past end of device */
- if ((instr->len + instr->addr) > mtd->size) {
- DEBUG (MTD_DEBUG_LEVEL0, "nand_erase: Erase past end of device\n");
- return -EINVAL;
- }
-
- instr->fail_addr = 0xffffffff;
-
- /* Grab the lock and see if the device is available */
- nand_get_chip (this, mtd, FL_ERASING);
-
- /* Shift to get first page */
- page = (int) (instr->addr >> this->page_shift);
- chipnr = (int) (instr->addr >> this->chip_shift);
-
- /* Calculate pages in each block */
- pages_per_block = 1 << (this->phys_erase_shift - this->page_shift);
-
- /* Select the NAND device */
- this->select_chip(mtd, chipnr);
-
- /* Check the WP bit */
- /* Check, if it is write protected */
- if (nand_check_wp(mtd)) {
- DEBUG (MTD_DEBUG_LEVEL0, "nand_erase: Device is write protected!!!\n");
- instr->state = MTD_ERASE_FAILED;
- goto erase_exit;
- }
-
- /* Loop through the pages */
- len = instr->len;
-
- instr->state = MTD_ERASING;
-
- while (len) {
- /* Check if we have a bad block, we do not erase bad blocks ! */
- if (nand_block_checkbad(mtd, ((loff_t) page) << this->page_shift, 0, allowbbt)) {
- printk (KERN_WARNING "nand_erase: attempt to erase a bad block at page 0x%08x\n", page);
- instr->state = MTD_ERASE_FAILED;
- goto erase_exit;
- }
-
- /* Invalidate the page cache, if we erase the block which contains
- the current cached page */
- if (page <= this->pagebuf && this->pagebuf < (page + pages_per_block))
- this->pagebuf = -1;
-
- this->erase_cmd (mtd, page & this->pagemask);
-
- status = this->waitfunc (mtd, this, FL_ERASING);
-
- /* See if block erase succeeded */
- if (status & 0x01) {
- DEBUG (MTD_DEBUG_LEVEL0, "nand_erase: " "Failed erase, page 0x%08x\n", page);
- instr->state = MTD_ERASE_FAILED;
- instr->fail_addr = (page << this->page_shift);
- goto erase_exit;
- }
-
- /* Increment page address and decrement length */
- len -= (1 << this->phys_erase_shift);
- page += pages_per_block;
-
- /* Check, if we cross a chip boundary */
- if (len && !(page & this->pagemask)) {
- chipnr++;
- this->select_chip(mtd, -1);
- this->select_chip(mtd, chipnr);
- }
- }
- instr->state = MTD_ERASE_DONE;
-
-erase_exit:
-
- ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO;
- /* Do call back function */
- if (!ret)
- mtd_erase_callback(instr);
-
- /* Deselect and wake up anyone waiting on the device */
- nand_release_chip(mtd);
-
- /* Return more or less happy */
- return ret;
-}
-
-/**
- * nand_sync - [MTD Interface] sync
- * @mtd: MTD device structure
- *
- * Sync is actually a wait for chip ready function
- */
-static void nand_sync (struct mtd_info *mtd)
-{
- struct nand_chip *this = mtd->priv;
- DECLARE_WAITQUEUE (wait, current);
-
- DEBUG (MTD_DEBUG_LEVEL3, "nand_sync: called\n");
-
-retry:
- /* Grab the spinlock */
- spin_lock_bh (&this->chip_lock);
-
- /* See what's going on */
- switch (this->state) {
- case FL_READY:
- case FL_SYNCING:
- this->state = FL_SYNCING;
- spin_unlock_bh (&this->chip_lock);
- break;
-
- default:
- /* Not an idle state */
- add_wait_queue (&this->wq, &wait);
- spin_unlock_bh (&this->chip_lock);
- schedule ();
-
- remove_wait_queue (&this->wq, &wait);
- goto retry;
- }
-
- /* Lock the device */
- spin_lock_bh (&this->chip_lock);
-
- /* Set the device to be ready again */
- if (this->state == FL_SYNCING) {
- this->state = FL_READY;
- wake_up (&this->wq);
- }
-
- /* Unlock the device */
- spin_unlock_bh (&this->chip_lock);
-}
-
-
-/**
- * nand_block_isbad - [MTD Interface] Check whether the block at the given offset is bad
- * @mtd: MTD device structure
- * @ofs: offset relative to mtd start
- */
-static int nand_block_isbad (struct mtd_info *mtd, loff_t ofs)
-{
- /* Check for invalid offset */
- if (ofs > mtd->size)
- return -EINVAL;
-
- return nand_block_checkbad (mtd, ofs, 1, 0);
-}
-
-/**
- * nand_block_markbad - [MTD Interface] Mark the block at the given offset as bad
- * @mtd: MTD device structure
- * @ofs: offset relative to mtd start
- */
-static int nand_block_markbad (struct mtd_info *mtd, loff_t ofs)
-{
- struct nand_chip *this = mtd->priv;
- int ret;
-
- if ((ret = nand_block_isbad(mtd, ofs))) {
- /* If it was bad already, return success and do nothing. */
- if (ret > 0)
- return 0;
- return ret;
- }
-
- return this->block_markbad(mtd, ofs);
-}
-
-/**
- * nand_scan - [NAND Interface] Scan for the NAND device
- * @mtd: MTD device structure
- * @maxchips: Number of chips to scan for
- *
- * This fills out all the not initialized function pointers
- * with the defaults.
- * The flash ID is read and the mtd/chip structures are
- * filled with the appropriate values. Buffers are allocated if
- * they are not provided by the board driver
- *
- */
-int nand_scan (struct mtd_info *mtd, int maxchips)
-{
- int i, j, nand_maf_id, nand_dev_id, busw;
- struct nand_chip *this = mtd->priv;
-
- /* Get buswidth to select the correct functions*/
- busw = this->options & NAND_BUSWIDTH_16;
-
- /* check for proper chip_delay setup, set 20us if not */
- if (!this->chip_delay)
- this->chip_delay = 20;
-
- /* check, if a user supplied command function given */
- if (this->cmdfunc == NULL)
- this->cmdfunc = nand_command;
-
- /* check, if a user supplied wait function given */
- if (this->waitfunc == NULL)
- this->waitfunc = nand_wait;
-
- if (!this->select_chip)
- this->select_chip = nand_select_chip;
- if (!this->write_byte)
- this->write_byte = busw ? nand_write_byte16 : nand_write_byte;
- if (!this->read_byte)
- this->read_byte = busw ? nand_read_byte16 : nand_read_byte;
- if (!this->write_word)
- this->write_word = nand_write_word;
- if (!this->read_word)
- this->read_word = nand_read_word;
- if (!this->block_bad)
- this->block_bad = nand_block_bad;
- if (!this->block_markbad)
- this->block_markbad = nand_default_block_markbad;
- if (!this->write_buf)
- this->write_buf = busw ? nand_write_buf16 : nand_write_buf;
- if (!this->read_buf)
- this->read_buf = busw ? nand_read_buf16 : nand_read_buf;
- if (!this->verify_buf)
- this->verify_buf = busw ? nand_verify_buf16 : nand_verify_buf;
- if (!this->scan_bbt)
- this->scan_bbt = nand_default_bbt;
-
- /* Select the device */
- this->select_chip(mtd, 0);
-
- /* Send the command for reading device ID */
- this->cmdfunc (mtd, NAND_CMD_READID, 0x00, -1);
-
- /* Read manufacturer and device IDs */
- nand_maf_id = this->read_byte(mtd);
- nand_dev_id = this->read_byte(mtd);
-
- /* Print and store flash device information */
- for (i = 0; nand_flash_ids[i].name != NULL; i++) {
-
- if (nand_dev_id != nand_flash_ids[i].id)
- continue;
-
- if (!mtd->name) mtd->name = nand_flash_ids[i].name;
- this->chipsize = nand_flash_ids[i].chipsize << 20;
-
- /* New devices have all the information in additional id bytes */
- if (!nand_flash_ids[i].pagesize) {
- int extid;
- /* The 3rd id byte contains non relevant data ATM */
- extid = this->read_byte(mtd);
- /* The 4th id byte is the important one */
- extid = this->read_byte(mtd);
- /* Calc pagesize */
- mtd->oobblock = 1024 << (extid & 0x3);
- extid >>= 2;
- /* Calc oobsize */
- mtd->oobsize = (8 << (extid & 0x03)) * (mtd->oobblock / 512);
- extid >>= 2;
- /* Calc blocksize. Blocksize is multiples of 64KiB */
- mtd->erasesize = (64 * 1024) << (extid & 0x03);
- extid >>= 2;
- /* Get buswidth information */
- busw = (extid & 0x01) ? NAND_BUSWIDTH_16 : 0;
-
- } else {
- /* Old devices have this data hardcoded in the
- * device id table */
- mtd->erasesize = nand_flash_ids[i].erasesize;
- mtd->oobblock = nand_flash_ids[i].pagesize;
- mtd->oobsize = mtd->oobblock / 32;
- busw = nand_flash_ids[i].options & NAND_BUSWIDTH_16;
- }
-
- /* Check, if buswidth is correct. Hardware drivers should set
- * this correct ! */
- if (busw != (this->options & NAND_BUSWIDTH_16)) {
- printk (KERN_INFO "NAND device: Manufacturer ID:"
- " 0x%02x, Chip ID: 0x%02x (%s %s)\n", nand_maf_id, nand_dev_id,
- nand_manuf_ids[i].name , mtd->name);
- printk (KERN_WARNING
- "NAND bus width %d instead %d bit\n",
- (this->options & NAND_BUSWIDTH_16) ? 16 : 8,
- busw ? 16 : 8);
- this->select_chip(mtd, -1);
- return 1;
- }
-
- /* Calculate the address shift from the page size */
- this->page_shift = ffs(mtd->oobblock) - 1;
- this->bbt_erase_shift = this->phys_erase_shift = ffs(mtd->erasesize) - 1;
- this->chip_shift = ffs(this->chipsize) - 1;
-
- /* Set the bad block position */
- this->badblockpos = mtd->oobblock > 512 ?
- NAND_LARGE_BADBLOCK_POS : NAND_SMALL_BADBLOCK_POS;
-
- /* Get chip options, preserve non chip based options */
- this->options &= ~NAND_CHIPOPTIONS_MSK;
- this->options |= nand_flash_ids[i].options & NAND_CHIPOPTIONS_MSK;
- /* Set this as a default. Board drivers can override it, if neccecary */
- this->options |= NAND_NO_AUTOINCR;
- /* Check if this is a not a samsung device. Do not clear the options
- * for chips which are not having an extended id.
- */
- if (nand_maf_id != NAND_MFR_SAMSUNG && !nand_flash_ids[i].pagesize)
- this->options &= ~NAND_SAMSUNG_LP_OPTIONS;
-
- /* Check for AND chips with 4 page planes */
- if (this->options & NAND_4PAGE_ARRAY)
- this->erase_cmd = multi_erase_cmd;
- else
- this->erase_cmd = single_erase_cmd;
-
- /* Do not replace user supplied command function ! */
- if (mtd->oobblock > 512 && this->cmdfunc == nand_command)
- this->cmdfunc = nand_command_lp;
-
- /* Try to identify manufacturer */
- for (j = 0; nand_manuf_ids[j].id != 0x0; j++) {
- if (nand_manuf_ids[j].id == nand_maf_id)
- break;
- }
- printk (KERN_INFO "NAND device: Manufacturer ID:"
- " 0x%02x, Chip ID: 0x%02x (%s %s)\n", nand_maf_id, nand_dev_id,
- nand_manuf_ids[j].name , nand_flash_ids[i].name);
- break;
- }
-
- if (!nand_flash_ids[i].name) {
- printk (KERN_WARNING "No NAND device found!!!\n");
- this->select_chip(mtd, -1);
- return 1;
- }
-
- for (i=1; i < maxchips; i++) {
- this->select_chip(mtd, i);
-
- /* Send the command for reading device ID */
- this->cmdfunc (mtd, NAND_CMD_READID, 0x00, -1);
-
- /* Read manufacturer and device IDs */
- if (nand_maf_id != this->read_byte(mtd) ||
- nand_dev_id != this->read_byte(mtd))
- break;
- }
- if (i > 1)
- printk(KERN_INFO "%d NAND chips detected\n", i);
-
- /* Allocate buffers, if neccecary */
- if (!this->oob_buf) {
- size_t len;
- len = mtd->oobsize << (this->phys_erase_shift - this->page_shift);
- this->oob_buf = kmalloc (len, GFP_KERNEL);
- if (!this->oob_buf) {
- printk (KERN_ERR "nand_scan(): Cannot allocate oob_buf\n");
- return -ENOMEM;
- }
- this->options |= NAND_OOBBUF_ALLOC;
- }
-
- if (!this->data_buf) {
- size_t len;
- len = mtd->oobblock + mtd->oobsize;
- this->data_buf = kmalloc (len, GFP_KERNEL);
- if (!this->data_buf) {
- if (this->options & NAND_OOBBUF_ALLOC)
- kfree (this->oob_buf);
- printk (KERN_ERR "nand_scan(): Cannot allocate data_buf\n");
- return -ENOMEM;
- }
- this->options |= NAND_DATABUF_ALLOC;
- }
-
- /* Store the number of chips and calc total size for mtd */
- this->numchips = i;
- mtd->size = i * this->chipsize;
- /* Convert chipsize to number of pages per chip -1. */
- this->pagemask = (this->chipsize >> this->page_shift) - 1;
- /* Preset the internal oob buffer */
- memset(this->oob_buf, 0xff, mtd->oobsize << (this->phys_erase_shift - this->page_shift));
-
- /* If no default placement scheme is given, select an
- * appropriate one */
- if (!this->autooob) {
- /* Select the appropriate default oob placement scheme for
- * placement agnostic filesystems */
- switch (mtd->oobsize) {
- case 8:
- this->autooob = &nand_oob_8;
- break;
- case 16:
- this->autooob = &nand_oob_16;
- break;
- case 64:
- this->autooob = &nand_oob_64;
- break;
- default:
- printk (KERN_WARNING "No oob scheme defined for oobsize %d\n",
- mtd->oobsize);
- BUG();
- }
- }
-
- /* The number of bytes available for the filesystem to place fs dependend
- * oob data */
- if (this->options & NAND_BUSWIDTH_16) {
- mtd->oobavail = mtd->oobsize - (this->autooob->eccbytes + 2);
- if (this->autooob->eccbytes & 0x01)
- mtd->oobavail--;
- } else
- mtd->oobavail = mtd->oobsize - (this->autooob->eccbytes + 1);
-
- /*
- * check ECC mode, default to software
- * if 3byte/512byte hardware ECC is selected and we have 256 byte pagesize
- * fallback to software ECC
- */
- this->eccsize = 256; /* set default eccsize */
-
- switch (this->eccmode) {
-
- case NAND_ECC_HW3_512:
- case NAND_ECC_HW6_512:
- case NAND_ECC_HW8_512:
- if (mtd->oobblock == 256) {
- printk (KERN_WARNING "512 byte HW ECC not possible on 256 Byte pagesize, fallback to SW ECC \n");
- this->eccmode = NAND_ECC_SOFT;
- this->calculate_ecc = nand_calculate_ecc;
- this->correct_data = nand_correct_data;
- break;
- } else
- this->eccsize = 512; /* set eccsize to 512 and fall through for function check */
-
- case NAND_ECC_HW3_256:
- if (this->calculate_ecc && this->correct_data && this->enable_hwecc)
- break;
- printk (KERN_WARNING "No ECC functions supplied, Hardware ECC not possible\n");
- BUG();
-
- case NAND_ECC_NONE:
- printk (KERN_WARNING "NAND_ECC_NONE selected by board driver. This is not recommended !!\n");
- this->eccmode = NAND_ECC_NONE;
- break;
-
- case NAND_ECC_SOFT:
- this->calculate_ecc = nand_calculate_ecc;
- this->correct_data = nand_correct_data;
- break;
-
- default:
- printk (KERN_WARNING "Invalid NAND_ECC_MODE %d\n", this->eccmode);
- BUG();
- }
-
- mtd->eccsize = this->eccsize;
-
- /* Set the number of read / write steps for one page to ensure ECC generation */
- switch (this->eccmode) {
- case NAND_ECC_HW3_512:
- case NAND_ECC_HW6_512:
- case NAND_ECC_HW8_512:
- this->eccsteps = mtd->oobblock / 512;
- break;
- case NAND_ECC_HW3_256:
- case NAND_ECC_SOFT:
- this->eccsteps = mtd->oobblock / 256;
- break;
-
- case NAND_ECC_NONE:
- this->eccsteps = 1;
- break;
- }
-
- /* Initialize state, waitqueue and spinlock */
- this->state = FL_READY;
- init_waitqueue_head (&this->wq);
- spin_lock_init (&this->chip_lock);
-
- /* De-select the device */
- this->select_chip(mtd, -1);
-
- /* Invalidate the pagebuffer reference */
- this->pagebuf = -1;
-
- /* Fill in remaining MTD driver data */
- mtd->type = MTD_NANDFLASH;
- mtd->flags = MTD_CAP_NANDFLASH | MTD_ECC;
- mtd->ecctype = MTD_ECC_SW;
- mtd->erase = nand_erase;
- mtd->point = NULL;
- mtd->unpoint = NULL;
- mtd->read = nand_read;
- mtd->write = nand_write;
- mtd->read_ecc = nand_read_ecc;
- mtd->write_ecc = nand_write_ecc;
- mtd->read_oob = nand_read_oob;
- mtd->write_oob = nand_write_oob;
- mtd->readv = NULL;
- mtd->writev = nand_writev;
- mtd->writev_ecc = nand_writev_ecc;
- mtd->sync = nand_sync;
- mtd->lock = NULL;
- mtd->unlock = NULL;
- mtd->suspend = NULL;
- mtd->resume = NULL;
- mtd->block_isbad = nand_block_isbad;
- mtd->block_markbad = nand_block_markbad;
-
- /* and make the autooob the default one */
- memcpy(&mtd->oobinfo, this->autooob, sizeof(mtd->oobinfo));
-
- mtd->owner = THIS_MODULE;
-
- /* Build bad block table */
- return this->scan_bbt (mtd);
-}
-
-/**
- * nand_release - [NAND Interface] Free resources held by the NAND device
- * @mtd: MTD device structure
-*/
-void nand_release (struct mtd_info *mtd)
-{
- struct nand_chip *this = mtd->priv;
-
-#ifdef CONFIG_MTD_PARTITIONS
- /* Deregister partitions */
- del_mtd_partitions (mtd);
-#endif
- /* Deregister the device */
- del_mtd_device (mtd);
-
- /* Free bad block table memory, if allocated */
- if (this->bbt)
- kfree (this->bbt);
- /* Buffer allocated by nand_scan ? */
- if (this->options & NAND_OOBBUF_ALLOC)
- kfree (this->oob_buf);
- /* Buffer allocated by nand_scan ? */
- if (this->options & NAND_DATABUF_ALLOC)
- kfree (this->data_buf);
-}
-
-EXPORT_SYMBOL (nand_scan);
-EXPORT_SYMBOL (nand_release);
-
-MODULE_LICENSE ("GPL");
-MODULE_AUTHOR ("Steven J. Hill <sjhill@realitydiluted.com>, Thomas Gleixner <tglx@linutronix.de>");
-MODULE_DESCRIPTION ("Generic NAND flash driver code");
+++ /dev/null
-/*
- * drivers/mtd/nand_bbt.c
- *
- * Overview:
- * Bad block table support for the NAND driver
- *
- * Copyright (C) 2004 Thomas Gleixner (tglx@linutronix.de)
- *
- * $Id: nand_bbt.c,v 1.24 2004/06/28 08:25:35 gleixner Exp $
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Description:
- *
- * When nand_scan_bbt is called, then it tries to find the bad block table
- * depending on the options in the bbt descriptor(s). If a bbt is found
- * then the contents are read and the memory based bbt is created. If a
- * mirrored bbt is selected then the mirror is searched too and the
- * versions are compared. If the mirror has a greater version number
- * than the mirror bbt is used to build the memory based bbt.
- * If the tables are not versioned, then we "or" the bad block information.
- * If one of the bbt's is out of date or does not exist it is (re)created.
- * If no bbt exists at all then the device is scanned for factory marked
- * good / bad blocks and the bad block tables are created.
- *
- * For manufacturer created bbts like the one found on M-SYS DOC devices
- * the bbt is searched and read but never created
- *
- * The autogenerated bad block table is located in the last good blocks
- * of the device. The table is mirrored, so it can be updated eventually.
- * The table is marked in the oob area with an ident pattern and a version
- * number which indicates which of both tables is more up to date.
- *
- * The table uses 2 bits per block
- * 11b: block is good
- * 00b: block is factory marked bad
- * 01b, 10b: block is marked bad due to wear
- *
- * The memory bad block table uses the following scheme:
- * 00b: block is good
- * 01b: block is marked bad due to wear
- * 10b: block is reserved (to protect the bbt area)
- * 11b: block is factory marked bad
- *
- * Multichip devices like DOC store the bad block info per floor.
- *
- * Following assumptions are made:
- * - bbts start at a page boundary, if autolocated on a block boundary
- * - the space neccecary for a bbt in FLASH does not exceed a block boundary
- *
- */
-
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/mtd/nand_ecc.h>
-#include <linux/mtd/compatmac.h>
-#include <linux/bitops.h>
-#include <linux/delay.h>
-
-
-/**
- * check_pattern - [GENERIC] check if a pattern is in the buffer
- * @buf: the buffer to search
- * @len: the length of buffer to search
- * @paglen: the pagelength
- * @td: search pattern descriptor
- *
- * Check for a pattern at the given place. Used to search bad block
- * tables and good / bad block identifiers.
- * If the SCAN_EMPTY option is set then check, if all bytes except the
- * pattern area contain 0xff
- *
-*/
-static int check_pattern (uint8_t *buf, int len, int paglen, struct nand_bbt_descr *td)
-{
- int i, end;
- uint8_t *p = buf;
-
- end = paglen + td->offs;
- if (td->options & NAND_BBT_SCANEMPTY) {
- for (i = 0; i < end; i++) {
- if (p[i] != 0xff)
- return -1;
- }
- }
- p += end;
-
- /* Compare the pattern */
- for (i = 0; i < td->len; i++) {
- if (p[i] != td->pattern[i])
- return -1;
- }
-
- p += td->len;
- end += td->len;
- if (td->options & NAND_BBT_SCANEMPTY) {
- for (i = end; i < len; i++) {
- if (*p++ != 0xff)
- return -1;
- }
- }
- return 0;
-}
-
-/**
- * read_bbt - [GENERIC] Read the bad block table starting from page
- * @mtd: MTD device structure
- * @buf: temporary buffer
- * @page: the starting page
- * @num: the number of bbt descriptors to read
- * @bits: number of bits per block
- * @offs: offset in the memory table
- *
- * Read the bad block table starting from page.
- *
- */
-static int read_bbt (struct mtd_info *mtd, uint8_t *buf, int page, int num,
- int bits, int offs, int reserved_block_code)
-{
- int res, i, j, act = 0;
- struct nand_chip *this = mtd->priv;
- size_t retlen, len, totlen;
- loff_t from;
- uint8_t msk = (uint8_t) ((1 << bits) - 1);
-
- totlen = (num * bits) >> 3;
- from = ((loff_t)page) << this->page_shift;
-
- while (totlen) {
- len = min (totlen, (size_t) (1 << this->bbt_erase_shift));
- res = mtd->read_ecc (mtd, from, len, &retlen, buf, NULL, this->autooob);
- if (res < 0) {
- if (retlen != len) {
- printk (KERN_INFO "nand_bbt: Error reading bad block table\n");
- return res;
- }
- printk (KERN_WARNING "nand_bbt: ECC error while reading bad block table\n");
- }
-
- /* Analyse data */
- for (i = 0; i < len; i++) {
- uint8_t dat = buf[i];
- for (j = 0; j < 8; j += bits, act += 2) {
- uint8_t tmp = (dat >> j) & msk;
- if (tmp == msk)
- continue;
- if (reserved_block_code &&
- (tmp == reserved_block_code)) {
- printk (KERN_DEBUG "nand_read_bbt: Reserved block at 0x%08x\n",
- ((offs << 2) + (act >> 1)) << this->bbt_erase_shift);
- this->bbt[offs + (act >> 3)] |= 0x2 << (act & 0x06);
- continue;
- }
- /* Leave it for now, if its matured we can move this
- * message to MTD_DEBUG_LEVEL0 */
- printk (KERN_DEBUG "nand_read_bbt: Bad block at 0x%08x\n",
- ((offs << 2) + (act >> 1)) << this->bbt_erase_shift);
- /* Factory marked bad or worn out ? */
- if (tmp == 0)
- this->bbt[offs + (act >> 3)] |= 0x3 << (act & 0x06);
- else
- this->bbt[offs + (act >> 3)] |= 0x1 << (act & 0x06);
- }
- }
- totlen -= len;
- from += len;
- }
- return 0;
-}
-
-/**
- * read_abs_bbt - [GENERIC] Read the bad block table starting at a given page
- * @mtd: MTD device structure
- * @buf: temporary buffer
- * @td: descriptor for the bad block table
- * @chip: read the table for a specific chip, -1 read all chips.
- * Applies only if NAND_BBT_PERCHIP option is set
- *
- * Read the bad block table for all chips starting at a given page
- * We assume that the bbt bits are in consecutive order.
-*/
-static int read_abs_bbt (struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td, int chip)
-{
- struct nand_chip *this = mtd->priv;
- int res = 0, i;
- int bits;
-
- bits = td->options & NAND_BBT_NRBITS_MSK;
- if (td->options & NAND_BBT_PERCHIP) {
- int offs = 0;
- for (i = 0; i < this->numchips; i++) {
- if (chip == -1 || chip == i)
- res = read_bbt (mtd, buf, td->pages[i], this->chipsize >> this->bbt_erase_shift, bits, offs, td->reserved_block_code);
- if (res)
- return res;
- offs += this->chipsize >> (this->bbt_erase_shift + 2);
- }
- } else {
- res = read_bbt (mtd, buf, td->pages[0], mtd->size >> this->bbt_erase_shift, bits, 0, td->reserved_block_code);
- if (res)
- return res;
- }
- return 0;
-}
-
-/**
- * read_abs_bbts - [GENERIC] Read the bad block table(s) for all chips starting at a given page
- * @mtd: MTD device structure
- * @buf: temporary buffer
- * @td: descriptor for the bad block table
- * @md: descriptor for the bad block table mirror
- *
- * Read the bad block table(s) for all chips starting at a given page
- * We assume that the bbt bits are in consecutive order.
- *
-*/
-static int read_abs_bbts (struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td,
- struct nand_bbt_descr *md)
-{
- struct nand_chip *this = mtd->priv;
-
- /* Read the primary version, if available */
- if (td->options & NAND_BBT_VERSION) {
- nand_read_raw (mtd, buf, td->pages[0] << this->page_shift, mtd->oobblock, mtd->oobsize);
- td->version[0] = buf[mtd->oobblock + td->veroffs];
- printk (KERN_DEBUG "Bad block table at page %d, version 0x%02X\n", td->pages[0], td->version[0]);
- }
-
- /* Read the mirror version, if available */
- if (md && (md->options & NAND_BBT_VERSION)) {
- nand_read_raw (mtd, buf, md->pages[0] << this->page_shift, mtd->oobblock, mtd->oobsize);
- md->version[0] = buf[mtd->oobblock + md->veroffs];
- printk (KERN_DEBUG "Bad block table at page %d, version 0x%02X\n", md->pages[0], md->version[0]);
- }
-
- return 1;
-}
-
-/**
- * create_bbt - [GENERIC] Create a bad block table by scanning the device
- * @mtd: MTD device structure
- * @buf: temporary buffer
- * @bd: descriptor for the good/bad block search pattern
- * @chip: create the table for a specific chip, -1 read all chips.
- * Applies only if NAND_BBT_PERCHIP option is set
- *
- * Create a bad block table by scanning the device
- * for the given good/bad block identify pattern
- */
-static void create_bbt (struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *bd, int chip)
-{
- struct nand_chip *this = mtd->priv;
- int i, j, numblocks, len, scanlen;
- int startblock;
- loff_t from;
- size_t readlen, ooblen;
-
- printk (KERN_INFO "Scanning device for bad blocks\n");
-
- if (bd->options & NAND_BBT_SCANALLPAGES)
- len = 1 << (this->bbt_erase_shift - this->page_shift);
- else {
- if (bd->options & NAND_BBT_SCAN2NDPAGE)
- len = 2;
- else
- len = 1;
- }
- scanlen = mtd->oobblock + mtd->oobsize;
- readlen = len * mtd->oobblock;
- ooblen = len * mtd->oobsize;
-
- if (chip == -1) {
- /* Note that numblocks is 2 * (real numblocks) here, see i+=2 below as it
- * makes shifting and masking less painful */
- numblocks = mtd->size >> (this->bbt_erase_shift - 1);
- startblock = 0;
- from = 0;
- } else {
- if (chip >= this->numchips) {
- printk (KERN_WARNING "create_bbt(): chipnr (%d) > available chips (%d)\n",
- chip + 1, this->numchips);
- return;
- }
- numblocks = this->chipsize >> (this->bbt_erase_shift - 1);
- startblock = chip * numblocks;
- numblocks += startblock;
- from = startblock << (this->bbt_erase_shift - 1);
- }
-
- for (i = startblock; i < numblocks;) {
- nand_read_raw (mtd, buf, from, readlen, ooblen);
- for (j = 0; j < len; j++) {
- if (check_pattern (&buf[j * scanlen], scanlen, mtd->oobblock, bd)) {
- this->bbt[i >> 3] |= 0x03 << (i & 0x6);
- printk (KERN_WARNING "Bad eraseblock %d at 0x%08x\n",
- i >> 1, (unsigned int) from);
- break;
- }
- }
- i += 2;
- from += (1 << this->bbt_erase_shift);
- }
-}
-
-/**
- * search_bbt - [GENERIC] scan the device for a specific bad block table
- * @mtd: MTD device structure
- * @buf: temporary buffer
- * @td: descriptor for the bad block table
- *
- * Read the bad block table by searching for a given ident pattern.
- * Search is preformed either from the beginning up or from the end of
- * the device downwards. The search starts always at the start of a
- * block.
- * If the option NAND_BBT_PERCHIP is given, each chip is searched
- * for a bbt, which contains the bad block information of this chip.
- * This is neccecary to provide support for certain DOC devices.
- *
- * The bbt ident pattern resides in the oob area of the first page
- * in a block.
- */
-static int search_bbt (struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td)
-{
- struct nand_chip *this = mtd->priv;
- int i, chips;
- int bits, startblock, block, dir;
- int scanlen = mtd->oobblock + mtd->oobsize;
- int bbtblocks;
-
- /* Search direction top -> down ? */
- if (td->options & NAND_BBT_LASTBLOCK) {
- startblock = (mtd->size >> this->bbt_erase_shift) -1;
- dir = -1;
- } else {
- startblock = 0;
- dir = 1;
- }
-
- /* Do we have a bbt per chip ? */
- if (td->options & NAND_BBT_PERCHIP) {
- chips = this->numchips;
- bbtblocks = this->chipsize >> this->bbt_erase_shift;
- startblock &= bbtblocks - 1;
- } else {
- chips = 1;
- bbtblocks = mtd->size >> this->bbt_erase_shift;
- }
-
- /* Number of bits for each erase block in the bbt */
- bits = td->options & NAND_BBT_NRBITS_MSK;
-
- for (i = 0; i < chips; i++) {
- /* Reset version information */
- td->version[i] = 0;
- td->pages[i] = -1;
- /* Scan the maximum number of blocks */
- for (block = 0; block < td->maxblocks; block++) {
- int actblock = startblock + dir * block;
- /* Read first page */
- nand_read_raw (mtd, buf, actblock << this->bbt_erase_shift, mtd->oobblock, mtd->oobsize);
- if (!check_pattern(buf, scanlen, mtd->oobblock, td)) {
- td->pages[i] = actblock << (this->bbt_erase_shift - this->page_shift);
- if (td->options & NAND_BBT_VERSION) {
- td->version[i] = buf[mtd->oobblock + td->veroffs];
- }
- break;
- }
- }
- startblock += this->chipsize >> this->bbt_erase_shift;
- }
- /* Check, if we found a bbt for each requested chip */
- for (i = 0; i < chips; i++) {
- if (td->pages[i] == -1)
- printk (KERN_WARNING "Bad block table not found for chip %d\n", i);
- else
- printk (KERN_DEBUG "Bad block table found at page %d, version 0x%02X\n", td->pages[i], td->version[i]);
- }
- return 0;
-}
-
-/**
- * search_read_bbts - [GENERIC] scan the device for bad block table(s)
- * @mtd: MTD device structure
- * @buf: temporary buffer
- * @td: descriptor for the bad block table
- * @md: descriptor for the bad block table mirror
- *
- * Search and read the bad block table(s)
-*/
-static int search_read_bbts (struct mtd_info *mtd, uint8_t *buf,
- struct nand_bbt_descr *td, struct nand_bbt_descr *md)
-{
- /* Search the primary table */
- search_bbt (mtd, buf, td);
-
- /* Search the mirror table */
- if (md)
- search_bbt (mtd, buf, md);
-
- /* Force result check */
- return 1;
-}
-
-
-/**
- * write_bbt - [GENERIC] (Re)write the bad block table
- *
- * @mtd: MTD device structure
- * @buf: temporary buffer
- * @td: descriptor for the bad block table
- * @md: descriptor for the bad block table mirror
- * @chipsel: selector for a specific chip, -1 for all
- *
- * (Re)write the bad block table
- *
-*/
-static int write_bbt (struct mtd_info *mtd, uint8_t *buf,
- struct nand_bbt_descr *td, struct nand_bbt_descr *md, int chipsel)
-{
- struct nand_chip *this = mtd->priv;
- struct nand_oobinfo oobinfo;
- struct erase_info einfo;
- int i, j, res, chip = 0;
- int bits, startblock, dir, page, offs, numblocks, sft, sftmsk;
- int nrchips, bbtoffs, pageoffs;
- uint8_t msk[4];
- uint8_t rcode = td->reserved_block_code;
- size_t retlen, len = 0;
- loff_t to;
-
- if (!rcode)
- rcode = 0xff;
- /* Write bad block table per chip rather than per device ? */
- if (td->options & NAND_BBT_PERCHIP) {
- numblocks = (int) (this->chipsize >> this->bbt_erase_shift);
- /* Full device write or specific chip ? */
- if (chipsel == -1) {
- nrchips = this->numchips;
- } else {
- nrchips = chipsel + 1;
- chip = chipsel;
- }
- } else {
- numblocks = (int) (mtd->size >> this->bbt_erase_shift);
- nrchips = 1;
- }
-
- /* Loop through the chips */
- for (; chip < nrchips; chip++) {
-
- /* There was already a version of the table, reuse the page
- * This applies for absolute placement too, as we have the
- * page nr. in td->pages.
- */
- if (td->pages[chip] != -1) {
- page = td->pages[chip];
- goto write;
- }
-
- /* Automatic placement of the bad block table */
- /* Search direction top -> down ? */
- if (td->options & NAND_BBT_LASTBLOCK) {
- startblock = numblocks * (chip + 1) - 1;
- dir = -1;
- } else {
- startblock = chip * numblocks;
- dir = 1;
- }
-
- for (i = 0; i < td->maxblocks; i++) {
- int block = startblock + dir * i;
- /* Check, if the block is bad */
- switch ((this->bbt[block >> 2] >> (2 * (block & 0x03))) & 0x03) {
- case 0x01:
- case 0x03:
- continue;
- }
- page = block << (this->bbt_erase_shift - this->page_shift);
- /* Check, if the block is used by the mirror table */
- if (!md || md->pages[chip] != page)
- goto write;
- }
- printk (KERN_ERR "No space left to write bad block table\n");
- return -ENOSPC;
-write:
-
- /* Set up shift count and masks for the flash table */
- bits = td->options & NAND_BBT_NRBITS_MSK;
- switch (bits) {
- case 1: sft = 3; sftmsk = 0x07; msk[0] = 0x00; msk[1] = 0x01; msk[2] = ~rcode; msk[3] = 0x01; break;
- case 2: sft = 2; sftmsk = 0x06; msk[0] = 0x00; msk[1] = 0x01; msk[2] = ~rcode; msk[3] = 0x03; break;
- case 4: sft = 1; sftmsk = 0x04; msk[0] = 0x00; msk[1] = 0x0C; msk[2] = ~rcode; msk[3] = 0x0f; break;
- case 8: sft = 0; sftmsk = 0x00; msk[0] = 0x00; msk[1] = 0x0F; msk[2] = ~rcode; msk[3] = 0xff; break;
- default: return -EINVAL;
- }
-
- bbtoffs = chip * (numblocks >> 2);
-
- to = ((loff_t) page) << this->page_shift;
-
- memcpy (&oobinfo, this->autooob, sizeof(oobinfo));
- oobinfo.useecc = MTD_NANDECC_PLACEONLY;
-
- /* Must we save the block contents ? */
- if (td->options & NAND_BBT_SAVECONTENT) {
- /* Make it block aligned */
- to &= ~((loff_t) ((1 << this->bbt_erase_shift) - 1));
- len = 1 << this->bbt_erase_shift;
- res = mtd->read_ecc (mtd, to, len, &retlen, buf, &buf[len], &oobinfo);
- if (res < 0) {
- if (retlen != len) {
- printk (KERN_INFO "nand_bbt: Error reading block for writing the bad block table\n");
- return res;
- }
- printk (KERN_WARNING "nand_bbt: ECC error while reading block for writing bad block table\n");
- }
- /* Calc the byte offset in the buffer */
- pageoffs = page - (int)(to >> this->page_shift);
- offs = pageoffs << this->page_shift;
- /* Preset the bbt area with 0xff */
- memset (&buf[offs], 0xff, (size_t)(numblocks >> sft));
- /* Preset the bbt's oob area with 0xff */
- memset (&buf[len + pageoffs * mtd->oobsize], 0xff,
- ((len >> this->page_shift) - pageoffs) * mtd->oobsize);
- if (td->options & NAND_BBT_VERSION) {
- buf[len + (pageoffs * mtd->oobsize) + td->veroffs] = td->version[chip];
- }
- } else {
- /* Calc length */
- len = (size_t) (numblocks >> sft);
- /* Make it page aligned ! */
- len = (len + (mtd->oobblock-1)) & ~(mtd->oobblock-1);
- /* Preset the buffer with 0xff */
- memset (buf, 0xff, len + (len >> this->page_shift) * mtd->oobsize);
- offs = 0;
- /* Pattern is located in oob area of first page */
- memcpy (&buf[len + td->offs], td->pattern, td->len);
- if (td->options & NAND_BBT_VERSION) {
- buf[len + td->veroffs] = td->version[chip];
- }
- }
-
- /* walk through the memory table */
- for (i = 0; i < numblocks; ) {
- uint8_t dat;
- dat = this->bbt[bbtoffs + (i >> 2)];
- for (j = 0; j < 4; j++ , i++) {
- int sftcnt = (i << (3 - sft)) & sftmsk;
- /* Do not store the reserved bbt blocks ! */
- buf[offs + (i >> sft)] &= ~(msk[dat & 0x03] << sftcnt);
- dat >>= 2;
- }
- }
-
- memset (&einfo, 0, sizeof (einfo));
- einfo.mtd = mtd;
- einfo.addr = (unsigned long) to;
- einfo.len = 1 << this->bbt_erase_shift;
- res = nand_erase_nand (mtd, &einfo, 1);
- if (res < 0) {
- printk (KERN_WARNING "nand_bbt: Error during block erase: %d\n", res);
- return res;
- }
-
- res = mtd->write_ecc (mtd, to, len, &retlen, buf, &buf[len], &oobinfo);
- if (res < 0) {
- printk (KERN_WARNING "nand_bbt: Error while writing bad block table %d\n", res);
- return res;
- }
- printk (KERN_DEBUG "Bad block table written to 0x%08x, version 0x%02X\n",
- (unsigned int) to, td->version[chip]);
-
- /* Mark it as used */
- td->pages[chip] = page;
- }
- return 0;
-}
-
-/**
- * nand_memory_bbt - [GENERIC] create a memory based bad block table
- * @mtd: MTD device structure
- * @bd: descriptor for the good/bad block search pattern
- *
- * The function creates a memory based bbt by scanning the device
- * for manufacturer / software marked good / bad blocks
-*/
-static int nand_memory_bbt (struct mtd_info *mtd, struct nand_bbt_descr *bd)
-{
- struct nand_chip *this = mtd->priv;
-
- /* Ensure that we only scan for the pattern and nothing else */
- bd->options = 0;
- create_bbt (mtd, this->data_buf, bd, -1);
- return 0;
-}
-
-/**
- * check_create - [GENERIC] create and write bbt(s) if neccecary
- * @mtd: MTD device structure
- * @buf: temporary buffer
- * @bd: descriptor for the good/bad block search pattern
- *
- * The function checks the results of the previous call to read_bbt
- * and creates / updates the bbt(s) if neccecary
- * Creation is neccecary if no bbt was found for the chip/device
- * Update is neccecary if one of the tables is missing or the
- * version nr. of one table is less than the other
-*/
-static int check_create (struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *bd)
-{
- int i, chips, writeops, chipsel, res;
- struct nand_chip *this = mtd->priv;
- struct nand_bbt_descr *td = this->bbt_td;
- struct nand_bbt_descr *md = this->bbt_md;
- struct nand_bbt_descr *rd, *rd2;
-
- /* Do we have a bbt per chip ? */
- if (td->options & NAND_BBT_PERCHIP)
- chips = this->numchips;
- else
- chips = 1;
-
- for (i = 0; i < chips; i++) {
- writeops = 0;
- rd = NULL;
- rd2 = NULL;
- /* Per chip or per device ? */
- chipsel = (td->options & NAND_BBT_PERCHIP) ? i : -1;
- /* Mirrored table avilable ? */
- if (md) {
- if (td->pages[i] == -1 && md->pages[i] == -1) {
- writeops = 0x03;
- goto create;
- }
-
- if (td->pages[i] == -1) {
- rd = md;
- td->version[i] = md->version[i];
- writeops = 1;
- goto writecheck;
- }
-
- if (md->pages[i] == -1) {
- rd = td;
- md->version[i] = td->version[i];
- writeops = 2;
- goto writecheck;
- }
-
- if (td->version[i] == md->version[i]) {
- rd = td;
- if (!(td->options & NAND_BBT_VERSION))
- rd2 = md;
- goto writecheck;
- }
-
- if (((int8_t) (td->version[i] - md->version[i])) > 0) {
- rd = td;
- md->version[i] = td->version[i];
- writeops = 2;
- } else {
- rd = md;
- td->version[i] = md->version[i];
- writeops = 1;
- }
-
- goto writecheck;
-
- } else {
- if (td->pages[i] == -1) {
- writeops = 0x01;
- goto create;
- }
- rd = td;
- goto writecheck;
- }
-create:
- /* Create the bad block table by scanning the device ? */
- if (!(td->options & NAND_BBT_CREATE))
- continue;
-
- /* Create the table in memory by scanning the chip(s) */
- create_bbt (mtd, buf, bd, chipsel);
-
- td->version[i] = 1;
- if (md)
- md->version[i] = 1;
-writecheck:
- /* read back first ? */
- if (rd)
- read_abs_bbt (mtd, buf, rd, chipsel);
- /* If they weren't versioned, read both. */
- if (rd2)
- read_abs_bbt (mtd, buf, rd2, chipsel);
-
- /* Write the bad block table to the device ? */
- if ((writeops & 0x01) && (td->options & NAND_BBT_WRITE)) {
- res = write_bbt (mtd, buf, td, md, chipsel);
- if (res < 0)
- return res;
- }
-
- /* Write the mirror bad block table to the device ? */
- if ((writeops & 0x02) && md && (md->options & NAND_BBT_WRITE)) {
- res = write_bbt (mtd, buf, md, td, chipsel);
- if (res < 0)
- return res;
- }
- }
- return 0;
-}
-
-/**
- * mark_bbt_regions - [GENERIC] mark the bad block table regions
- * @mtd: MTD device structure
- * @td: bad block table descriptor
- *
- * The bad block table regions are marked as "bad" to prevent
- * accidental erasures / writes. The regions are identified by
- * the mark 0x02.
-*/
-static void mark_bbt_region (struct mtd_info *mtd, struct nand_bbt_descr *td)
-{
- struct nand_chip *this = mtd->priv;
- int i, j, chips, block, nrblocks, update;
- uint8_t oldval, newval;
-
- /* Do we have a bbt per chip ? */
- if (td->options & NAND_BBT_PERCHIP) {
- chips = this->numchips;
- nrblocks = (int)(this->chipsize >> this->bbt_erase_shift);
- } else {
- chips = 1;
- nrblocks = (int)(mtd->size >> this->bbt_erase_shift);
- }
-
- for (i = 0; i < chips; i++) {
- if ((td->options & NAND_BBT_ABSPAGE) ||
- !(td->options & NAND_BBT_WRITE)) {
- if (td->pages[i] == -1) continue;
- block = td->pages[i] >> (this->bbt_erase_shift - this->page_shift);
- block <<= 1;
- oldval = this->bbt[(block >> 3)];
- newval = oldval | (0x2 << (block & 0x06));
- this->bbt[(block >> 3)] = newval;
- if ((oldval != newval) && td->reserved_block_code)
- nand_update_bbt(mtd, block << (this->bbt_erase_shift - 1));
- continue;
- }
- update = 0;
- if (td->options & NAND_BBT_LASTBLOCK)
- block = ((i + 1) * nrblocks) - td->maxblocks;
- else
- block = i * nrblocks;
- block <<= 1;
- for (j = 0; j < td->maxblocks; j++) {
- oldval = this->bbt[(block >> 3)];
- newval = oldval | (0x2 << (block & 0x06));
- this->bbt[(block >> 3)] = newval;
- if (oldval != newval) update = 1;
- block += 2;
- }
- /* If we want reserved blocks to be recorded to flash, and some
- new ones have been marked, then we need to update the stored
- bbts. This should only happen once. */
- if (update && td->reserved_block_code)
- nand_update_bbt(mtd, (block - 2) << (this->bbt_erase_shift - 1));
- }
-}
-
-/**
- * nand_scan_bbt - [NAND Interface] scan, find, read and maybe create bad block table(s)
- * @mtd: MTD device structure
- * @bd: descriptor for the good/bad block search pattern
- *
- * The function checks, if a bad block table(s) is/are already
- * available. If not it scans the device for manufacturer
- * marked good / bad blocks and writes the bad block table(s) to
- * the selected place.
- *
- * The bad block table memory is allocated here. It must be freed
- * by calling the nand_free_bbt function.
- *
-*/
-int nand_scan_bbt (struct mtd_info *mtd, struct nand_bbt_descr *bd)
-{
- struct nand_chip *this = mtd->priv;
- int len, res = 0;
- uint8_t *buf;
- struct nand_bbt_descr *td = this->bbt_td;
- struct nand_bbt_descr *md = this->bbt_md;
-
- len = mtd->size >> (this->bbt_erase_shift + 2);
- /* Allocate memory (2bit per block) */
- this->bbt = (uint8_t *) kmalloc (len, GFP_KERNEL);
- if (!this->bbt) {
- printk (KERN_ERR "nand_scan_bbt: Out of memory\n");
- return -ENOMEM;
- }
- /* Clear the memory bad block table */
- memset (this->bbt, 0x00, len);
-
- /* If no primary table decriptor is given, scan the device
- * to build a memory based bad block table
- */
- if (!td)
- return nand_memory_bbt(mtd, bd);
-
- /* Allocate a temporary buffer for one eraseblock incl. oob */
- len = (1 << this->bbt_erase_shift);
- len += (len >> this->page_shift) * mtd->oobsize;
- buf = kmalloc (len, GFP_KERNEL);
- if (!buf) {
- printk (KERN_ERR "nand_bbt: Out of memory\n");
- kfree (this->bbt);
- this->bbt = NULL;
- return -ENOMEM;
- }
-
- /* Is the bbt at a given page ? */
- if (td->options & NAND_BBT_ABSPAGE) {
- res = read_abs_bbts (mtd, buf, td, md);
- } else {
- /* Search the bad block table using a pattern in oob */
- res = search_read_bbts (mtd, buf, td, md);
- }
-
- if (res)
- res = check_create (mtd, buf, bd);
-
- /* Prevent the bbt regions from erasing / writing */
- mark_bbt_region (mtd, td);
- if (md)
- mark_bbt_region (mtd, md);
-
- kfree (buf);
- return res;
-}
-
-
-/**
- * nand_update_bbt - [NAND Interface] update bad block table(s)
- * @mtd: MTD device structure
- * @offs: the offset of the newly marked block
- *
- * The function updates the bad block table(s)
-*/
-int nand_update_bbt (struct mtd_info *mtd, loff_t offs)
-{
- struct nand_chip *this = mtd->priv;
- int len, res = 0, writeops = 0;
- int chip, chipsel;
- uint8_t *buf;
- struct nand_bbt_descr *td = this->bbt_td;
- struct nand_bbt_descr *md = this->bbt_md;
-
- if (!this->bbt || !td)
- return -EINVAL;
-
- len = mtd->size >> (this->bbt_erase_shift + 2);
- /* Allocate a temporary buffer for one eraseblock incl. oob */
- len = (1 << this->bbt_erase_shift);
- len += (len >> this->page_shift) * mtd->oobsize;
- buf = kmalloc (len, GFP_KERNEL);
- if (!buf) {
- printk (KERN_ERR "nand_update_bbt: Out of memory\n");
- return -ENOMEM;
- }
-
- writeops = md != NULL ? 0x03 : 0x01;
-
- /* Do we have a bbt per chip ? */
- if (td->options & NAND_BBT_PERCHIP) {
- chip = (int) (offs >> this->chip_shift);
- chipsel = chip;
- } else {
- chip = 0;
- chipsel = -1;
- }
-
- td->version[chip]++;
- if (md)
- md->version[chip]++;
-
- /* Write the bad block table to the device ? */
- if ((writeops & 0x01) && (td->options & NAND_BBT_WRITE)) {
- res = write_bbt (mtd, buf, td, md, chipsel);
- if (res < 0)
- goto out;
- }
- /* Write the mirror bad block table to the device ? */
- if ((writeops & 0x02) && md && (md->options & NAND_BBT_WRITE)) {
- res = write_bbt (mtd, buf, md, td, chipsel);
- }
-
-out:
- kfree (buf);
- return res;
-}
-
-/* Define some generic bad / good block scan pattern which are used
- * while scanning a device for factory marked good / bad blocks
- *
- * The memory based patterns just
- */
-static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
-
-static struct nand_bbt_descr smallpage_memorybased = {
- .options = 0,
- .offs = 5,
- .len = 1,
- .pattern = scan_ff_pattern
-};
-
-static struct nand_bbt_descr largepage_memorybased = {
- .options = 0,
- .offs = 0,
- .len = 2,
- .pattern = scan_ff_pattern
-};
-
-static struct nand_bbt_descr smallpage_flashbased = {
- .options = NAND_BBT_SCANEMPTY | NAND_BBT_SCANALLPAGES,
- .offs = 5,
- .len = 1,
- .pattern = scan_ff_pattern
-};
-
-static struct nand_bbt_descr largepage_flashbased = {
- .options = NAND_BBT_SCANEMPTY | NAND_BBT_SCANALLPAGES,
- .offs = 0,
- .len = 2,
- .pattern = scan_ff_pattern
-};
-
-static uint8_t scan_agand_pattern[] = { 0x1C, 0x71, 0xC7, 0x1C, 0x71, 0xC7 };
-
-static struct nand_bbt_descr agand_flashbased = {
- .options = NAND_BBT_SCANEMPTY | NAND_BBT_SCANALLPAGES,
- .offs = 0x20,
- .len = 6,
- .pattern = scan_agand_pattern
-};
-
-/* Generic flash bbt decriptors
-*/
-static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' };
-static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' };
-
-static struct nand_bbt_descr bbt_main_descr = {
- .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
- | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
- .offs = 8,
- .len = 4,
- .veroffs = 12,
- .maxblocks = 4,
- .pattern = bbt_pattern
-};
-
-static struct nand_bbt_descr bbt_mirror_descr = {
- .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
- | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
- .offs = 8,
- .len = 4,
- .veroffs = 12,
- .maxblocks = 4,
- .pattern = mirror_pattern
-};
-
-/**
- * nand_default_bbt - [NAND Interface] Select a default bad block table for the device
- * @mtd: MTD device structure
- *
- * This function selects the default bad block table
- * support for the device and calls the nand_scan_bbt function
- *
-*/
-int nand_default_bbt (struct mtd_info *mtd)
-{
- struct nand_chip *this = mtd->priv;
-
- /* Default for AG-AND. We must use a flash based
- * bad block table as the devices have factory marked
- * _good_ blocks. Erasing those blocks leads to loss
- * of the good / bad information, so we _must_ store
- * this information in a good / bad table during
- * startup
- */
- if (this->options & NAND_IS_AND) {
- /* Use the default pattern descriptors */
- if (!this->bbt_td) {
- this->bbt_td = &bbt_main_descr;
- this->bbt_md = &bbt_mirror_descr;
- }
- this->options |= NAND_USE_FLASH_BBT;
- return nand_scan_bbt (mtd, &agand_flashbased);
- }
-
- /* Is a flash based bad block table requested ? */
- if (this->options & NAND_USE_FLASH_BBT) {
- /* Use the default pattern descriptors */
- if (!this->bbt_td) {
- this->bbt_td = &bbt_main_descr;
- this->bbt_md = &bbt_mirror_descr;
- }
- if (mtd->oobblock > 512)
- return nand_scan_bbt (mtd, &largepage_flashbased);
- else
- return nand_scan_bbt (mtd, &smallpage_flashbased);
- } else {
- this->bbt_td = NULL;
- this->bbt_md = NULL;
- if (mtd->oobblock > 512)
- return nand_scan_bbt (mtd, &largepage_memorybased);
- else
- return nand_scan_bbt (mtd, &smallpage_memorybased);
- }
-}
-
-/**
- * nand_isbad_bbt - [NAND Interface] Check if a block is bad
- * @mtd: MTD device structure
- * @offs: offset in the device
- * @allowbbt: allow access to bad block table region
- *
-*/
-int nand_isbad_bbt (struct mtd_info *mtd, loff_t offs, int allowbbt)
-{
- struct nand_chip *this = mtd->priv;
- int block;
- uint8_t res;
-
- /* Get block number * 2 */
- block = (int) (offs >> (this->bbt_erase_shift - 1));
- res = (this->bbt[block >> 3] >> (block & 0x06)) & 0x03;
-
- DEBUG (MTD_DEBUG_LEVEL2, "nand_isbad_bbt(): bbt info for offs 0x%08x: (block %d) 0x%02x\n",
- (unsigned int)offs, res, block >> 1);
-
- switch ((int)res) {
- case 0x00: return 0;
- case 0x01: return 1;
- case 0x02: return allowbbt ? 0 : 1;
- }
- return 1;
-}
-
-EXPORT_SYMBOL (nand_scan_bbt);
-EXPORT_SYMBOL (nand_default_bbt);
/*
- * This file contains an ECC algorithm from Toshiba that detects and
- * corrects 1 bit errors in a 256 byte block of data.
+ * drivers/mtd/nand_ecc.c
*
- * drivers/mtd/nand/nand_ecc.c
+ * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
+ * Toshiba America Electronics Components, Inc.
*
- * Copyright (C) 2000-2004 Steven J. Hill (sjhill@realitydiluted.com)
- * Toshiba America Electronics Components, Inc.
+ * $Id: nand_ecc.c,v 1.9 2003/02/20 13:34:19 sjhill Exp $
*
- * $Id: nand_ecc.c,v 1.14 2004/06/16 15:34:37 gleixner Exp $
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public License
+ * version 2.1 as published by the Free Software Foundation.
*
- * This file is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 or (at your option) any
- * later version.
- *
- * This file is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this file; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * As a special exception, if other files instantiate templates or use
- * macros or inline functions from these files, or you compile these
- * files and link them with other works to produce a work based on these
- * files, these files do not by themselves cause the resulting work to be
- * covered by the GNU General Public License. However the source code for
- * these files must still be made available in accordance with section (3)
- * of the GNU General Public License.
- *
- * This exception does not invalidate any other reasons why a work based on
- * this file might be covered by the GNU General Public License.
+ * This file contains an ECC algorithm from Toshiba that detects and
+ * corrects 1 bit errors in a 256 byte block of data.
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/mtd/nand_ecc.h>
/*
* Pre-calculated 256-way 1 byte column parity
};
-/**
- * nand_trans_result - [GENERIC] create non-inverted ECC
- * @reg2: line parity reg 2
- * @reg3: line parity reg 3
- * @ecc_code: ecc
- *
+/*
* Creates non-inverted ECC code from line parity
*/
static void nand_trans_result(u_char reg2, u_char reg3,
ecc_code[1] = tmp2;
}
-/**
- * nand_calculate_ecc - [NAND Interface] Calculate 3 byte ECC code for 256 byte block
- * @mtd: MTD block structure
- * @dat: raw data
- * @ecc_code: buffer for ECC
+/*
+ * Calculate 3 byte ECC code for 256 byte block
*/
-int nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code)
+void nand_calculate_ecc (const u_char *dat, u_char *ecc_code)
{
u_char idx, reg1, reg2, reg3;
int j;
ecc_code[0] = ~ecc_code[0];
ecc_code[1] = ~ecc_code[1];
ecc_code[2] = ((~reg1) << 2) | 0x03;
- return 0;
}
-/**
- * nand_correct_data - [NAND Interface] Detect and correct bit error(s)
- * @mtd: MTD block structure
- * @dat: raw data read from the chip
- * @read_ecc: ECC from the chip
- * @calc_ecc: the ECC calculated from raw data
- *
+/*
* Detect and correct a 1 bit error for 256 byte block
*/
-int nand_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc)
+int nand_correct_data (u_char *dat, u_char *read_ecc, u_char *calc_ecc)
{
u_char a, b, c, d1, d2, d3, add, bit, i;
* drivers/mtd/nandids.c
*
* Copyright (C) 2002 Thomas Gleixner (tglx@linutronix.de)
- *
- * $Id: nand_ids.c,v 1.10 2004/05/26 13:40:12 gleixner Exp $
+ *
+ *
+ * $Id: nand_ids.c,v 1.4 2003/05/21 15:15:08 dwmw2 Exp $
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
*/
#include <linux/module.h>
#include <linux/mtd/nand.h>
+
/*
* Chip ID list
-*
-* Name. ID code, pagesize, chipsize in MegaByte, eraseblock size,
-* options
-*
-* Pagesize; 0, 256, 512
-* 0 get this information from the extended chip ID
-+ 256 256 Byte page size
-* 512 512 Byte page size
*/
struct nand_flash_dev nand_flash_ids[] = {
- {"NAND 1MiB 5V 8-bit", 0x6e, 256, 1, 0x1000, 0},
- {"NAND 2MiB 5V 8-bit", 0x64, 256, 2, 0x1000, 0},
- {"NAND 4MiB 5V 8-bit", 0x6b, 512, 4, 0x2000, 0},
- {"NAND 1MiB 3,3V 8-bit", 0xe8, 256, 1, 0x1000, 0},
- {"NAND 1MiB 3,3V 8-bit", 0xec, 256, 1, 0x1000, 0},
- {"NAND 2MiB 3,3V 8-bit", 0xea, 256, 2, 0x1000, 0},
- {"NAND 4MiB 3,3V 8-bit", 0xd5, 512, 4, 0x2000, 0},
- {"NAND 4MiB 3,3V 8-bit", 0xe3, 512, 4, 0x2000, 0},
- {"NAND 4MiB 3,3V 8-bit", 0xe5, 512, 4, 0x2000, 0},
- {"NAND 8MiB 3,3V 8-bit", 0xd6, 512, 8, 0x2000, 0},
-
- {"NAND 8MiB 1,8V 8-bit", 0x39, 512, 8, 0x2000, 0},
- {"NAND 8MiB 3,3V 8-bit", 0xe6, 512, 8, 0x2000, 0},
- {"NAND 8MiB 1,8V 16-bit", 0x49, 512, 8, 0x2000, NAND_BUSWIDTH_16},
- {"NAND 8MiB 3,3V 16-bit", 0x59, 512, 8, 0x2000, NAND_BUSWIDTH_16},
-
- {"NAND 16MiB 1,8V 8-bit", 0x33, 512, 16, 0x4000, 0},
- {"NAND 16MiB 3,3V 8-bit", 0x73, 512, 16, 0x4000, 0},
- {"NAND 16MiB 1,8V 16-bit", 0x43, 512, 16, 0x4000, NAND_BUSWIDTH_16},
- {"NAND 16MiB 3,3V 16-bit", 0x53, 512, 16, 0x4000, NAND_BUSWIDTH_16},
-
- {"NAND 32MiB 1,8V 8-bit", 0x35, 512, 32, 0x4000, 0},
- {"NAND 32MiB 3,3V 8-bit", 0x75, 512, 32, 0x4000, 0},
- {"NAND 32MiB 1,8V 16-bit", 0x45, 512, 32, 0x4000, NAND_BUSWIDTH_16},
- {"NAND 32MiB 3,3V 16-bit", 0x55, 512, 32, 0x4000, NAND_BUSWIDTH_16},
-
- {"NAND 64MiB 1,8V 8-bit", 0x36, 512, 64, 0x4000, 0},
- {"NAND 64MiB 3,3V 8-bit", 0x76, 512, 64, 0x4000, 0},
- {"NAND 64MiB 1,8V 16-bit", 0x46, 512, 64, 0x4000, NAND_BUSWIDTH_16},
- {"NAND 64MiB 3,3V 16-bit", 0x56, 512, 64, 0x4000, NAND_BUSWIDTH_16},
-
- {"NAND 128MiB 1,8V 8-bit", 0x78, 512, 128, 0x4000, 0},
- {"NAND 128MiB 3,3V 8-bit", 0x79, 512, 128, 0x4000, 0},
- {"NAND 128MiB 1,8V 16-bit", 0x72, 512, 128, 0x4000, NAND_BUSWIDTH_16},
- {"NAND 128MiB 3,3V 16-bit", 0x74, 512, 128, 0x4000, NAND_BUSWIDTH_16},
-
- {"NAND 256MiB 3,3V 8-bit", 0x71, 512, 256, 0x4000, 0},
-
- {"NAND 512MiB 3,3V 8-bit", 0xDC, 512, 512, 0x4000, 0},
-
- /* These are the new chips with large page size. The pagesize
- * and the erasesize is determined from the extended id bytes
- */
- /* 1 Gigabit */
- {"NAND 128MiB 1,8V 8-bit", 0xA1, 0, 128, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_NO_AUTOINCR},
- {"NAND 128MiB 3,3V 8-bit", 0xF1, 0, 128, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_NO_AUTOINCR},
- {"NAND 128MiB 1,8V 16-bit", 0xB1, 0, 128, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_BUSWIDTH_16 | NAND_NO_AUTOINCR},
- {"NAND 128MiB 3,3V 16-bit", 0xC1, 0, 128, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_BUSWIDTH_16 | NAND_NO_AUTOINCR},
-
- /* 2 Gigabit */
- {"NAND 256MiB 1,8V 8-bit", 0xAA, 0, 256, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_NO_AUTOINCR},
- {"NAND 256MiB 3,3V 8-bit", 0xDA, 0, 256, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_NO_AUTOINCR},
- {"NAND 256MiB 1,8V 16-bit", 0xBA, 0, 256, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_BUSWIDTH_16 | NAND_NO_AUTOINCR},
- {"NAND 256MiB 3,3V 16-bit", 0xCA, 0, 256, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_BUSWIDTH_16 | NAND_NO_AUTOINCR},
-
- /* 4 Gigabit */
- {"NAND 512MiB 1,8V 8-bit", 0xAC, 0, 512, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_NO_AUTOINCR},
- {"NAND 512MiB 3,3V 8-bit", 0xDC, 0, 512, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_NO_AUTOINCR},
- {"NAND 512MiB 1,8V 16-bit", 0xBC, 0, 512, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_BUSWIDTH_16 | NAND_NO_AUTOINCR},
- {"NAND 512MiB 3,3V 16-bit", 0xCC, 0, 512, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_BUSWIDTH_16 | NAND_NO_AUTOINCR},
-
- /* 8 Gigabit */
- {"NAND 1GiB 1,8V 8-bit", 0xA3, 0, 1024, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_NO_AUTOINCR},
- {"NAND 1GiB 3,3V 8-bit", 0xD3, 0, 1024, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_NO_AUTOINCR},
- {"NAND 1GiB 1,8V 16-bit", 0xB3, 0, 1024, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_BUSWIDTH_16 | NAND_NO_AUTOINCR},
- {"NAND 1GiB 3,3V 16-bit", 0xC3, 0, 1024, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_BUSWIDTH_16 | NAND_NO_AUTOINCR},
-
- /* 16 Gigabit */
- {"NAND 2GiB 1,8V 8-bit", 0xA5, 0, 2048, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_NO_AUTOINCR},
- {"NAND 2GiB 3,3V 8-bit", 0xD5, 0, 2048, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_NO_AUTOINCR},
- {"NAND 2GiB 1,8V 16-bit", 0xB5, 0, 2048, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_BUSWIDTH_16 | NAND_NO_AUTOINCR},
- {"NAND 2GiB 3,3V 16-bit", 0xC5, 0, 2048, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_BUSWIDTH_16 | NAND_NO_AUTOINCR},
-
- /* Renesas AND 1 Gigabit. Those chips do not support extended id and have a strange page/block layout !
- * The chosen minimum erasesize is 4 * 2 * 2048 = 16384 Byte, as those chips have an array of 4 page planes
- * 1 block = 2 pages, but due to plane arrangement the blocks 0-3 consists of page 0 + 4,1 + 5, 2 + 6, 3 + 7
- * Anyway JFFS2 would increase the eraseblock size so we chose a combined one which can be erased in one go
- * There are more speed improvements for reads and writes possible, but not implemented now
- */
- {"AND 128MiB 3,3V 8-bit", 0x01, 2048, 128, 0x4000, NAND_IS_AND | NAND_NO_AUTOINCR | NAND_4PAGE_ARRAY},
-
+ {"NAND 1MiB 5V", 0x6e, 20, 0x1000, 1},
+ {"NAND 2MiB 5V", 0x64, 21, 0x1000, 1},
+ {"NAND 4MiB 5V", 0x6b, 22, 0x2000, 0},
+ {"NAND 1MiB 3,3V", 0xe8, 20, 0x1000, 1},
+ {"NAND 1MiB 3,3V", 0xec, 20, 0x1000, 1},
+ {"NAND 2MiB 3,3V", 0xea, 21, 0x1000, 1},
+ {"NAND 4MiB 3,3V", 0xd5, 22, 0x2000, 0},
+ {"NAND 4MiB 3,3V", 0xe3, 22, 0x2000, 0},
+ {"NAND 4MiB 3,3V", 0xe5, 22, 0x2000, 0},
+ {"NAND 8MiB 3,3V", 0xd6, 23, 0x2000, 0},
+ {"NAND 8MiB 3,3V", 0xe6, 23, 0x2000, 0},
+ {"NAND 16MiB 3,3V", 0x73, 24, 0x4000, 0},
+ {"NAND 32MiB 3,3V", 0x75, 25, 0x4000, 0},
+ {"NAND 64MiB 3,3V", 0x76, 26, 0x4000, 0},
+ {"NAND 128MiB 3,3V", 0x79, 27, 0x4000, 0},
{NULL,}
};
{NAND_MFR_SAMSUNG, "Samsung"},
{NAND_MFR_FUJITSU, "Fujitsu"},
{NAND_MFR_NATIONAL, "National"},
- {NAND_MFR_RENESAS, "Renesas"},
- {NAND_MFR_STMICRO, "ST Micro"},
{0x0, "Unknown"}
};
+
EXPORT_SYMBOL (nand_manuf_ids);
EXPORT_SYMBOL (nand_flash_ids);
+++ /dev/null
-/*
- * drivers/mtd/nand/ppchameleonevb.c
- *
- * Copyright (C) 2003 DAVE Srl (info@wawnet.biz)
- *
- * Derived from drivers/mtd/nand/edb7312.c
- *
- *
- * $Id: ppchameleonevb.c,v 1.2 2004/05/05 22:09:54 gleixner Exp $
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Overview:
- * This is a device driver for the NAND flash devices found on the
- * PPChameleon/PPChameleonEVB system.
- * PPChameleon options (autodetected):
- * - BA model: no NAND
- * - ME model: 32MB (Samsung K9F5608U0B)
- * - HI model: 128MB (Samsung K9F1G08UOM)
- * PPChameleonEVB options:
- * - 32MB (Samsung K9F5608U0B)
- */
-
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/mtd/partitions.h>
-#include <asm/io.h>
-#include <platforms/PPChameleonEVB.h>
-
-#undef USE_READY_BUSY_PIN
-#define USE_READY_BUSY_PIN
-/* see datasheets (tR) */
-#define NAND_BIG_DELAY_US 25
-#define NAND_SMALL_DELAY_US 10
-
-/* handy sizes */
-#define SZ_4M 0x00400000
-#define NAND_SMALL_SIZE 0x02000000
-#define NAND_MTD_NAME "ppchameleon-nand"
-#define NAND_EVB_MTD_NAME "ppchameleonevb-nand"
-
-/* GPIO pins used to drive NAND chip mounted on processor module */
-#define NAND_nCE_GPIO_PIN (0x80000000 >> 1)
-#define NAND_CLE_GPIO_PIN (0x80000000 >> 2)
-#define NAND_ALE_GPIO_PIN (0x80000000 >> 3)
-#define NAND_RB_GPIO_PIN (0x80000000 >> 4)
-/* GPIO pins used to drive NAND chip mounted on EVB */
-#define NAND_EVB_nCE_GPIO_PIN (0x80000000 >> 14)
-#define NAND_EVB_CLE_GPIO_PIN (0x80000000 >> 15)
-#define NAND_EVB_ALE_GPIO_PIN (0x80000000 >> 16)
-#define NAND_EVB_RB_GPIO_PIN (0x80000000 >> 31)
-
-/*
- * MTD structure for PPChameleonEVB board
- */
-static struct mtd_info *ppchameleon_mtd = NULL;
-static struct mtd_info *ppchameleonevb_mtd = NULL;
-
-/*
- * Module stuff
- */
-static int ppchameleon_fio_pbase = CFG_NAND0_PADDR;
-static int ppchameleonevb_fio_pbase = CFG_NAND1_PADDR;
-
-#ifdef MODULE
-MODULE_PARM(ppchameleon_fio_pbase, "i");
-__setup("ppchameleon_fio_pbase=",ppchameleon_fio_pbase);
-MODULE_PARM(ppchameleonevb_fio_pbase, "i");
-__setup("ppchameleonevb_fio_pbase=",ppchameleonevb_fio_pbase);
-#endif
-
-/* Internal buffers. Page buffer and oob buffer for one block */
-static u_char data_buf[2048 + 64];
-static u_char oob_buf[64 * 64];
-static u_char data_buf_evb[512 + 16];
-static u_char oob_buf_evb[16 * 32];
-
-#ifdef CONFIG_MTD_PARTITIONS
-/*
- * Define static partitions for flash devices
- */
-static struct mtd_partition partition_info_hi[] = {
- { name: "PPChameleon HI Nand Flash",
- offset: 0,
- size: 128*1024*1024 }
-};
-
-static struct mtd_partition partition_info_me[] = {
- { name: "PPChameleon ME Nand Flash",
- offset: 0,
- size: 32*1024*1024 }
-};
-
-static struct mtd_partition partition_info_evb[] = {
- { name: "PPChameleonEVB Nand Flash",
- offset: 0,
- size: 32*1024*1024 }
-};
-
-#define NUM_PARTITIONS 1
-
-extern int parse_cmdline_partitions(struct mtd_info *master,
- struct mtd_partition **pparts,
- const char *mtd_id);
-#endif
-
-
-/*
- * hardware specific access to control-lines
- */
-static void ppchameleon_hwcontrol(struct mtd_info *mtdinfo, int cmd)
-{
- switch(cmd) {
-
- case NAND_CTL_SETCLE:
- MACRO_NAND_CTL_SETCLE((unsigned long)CFG_NAND0_PADDR);
- break;
- case NAND_CTL_CLRCLE:
- MACRO_NAND_CTL_CLRCLE((unsigned long)CFG_NAND0_PADDR);
- break;
- case NAND_CTL_SETALE:
- MACRO_NAND_CTL_SETALE((unsigned long)CFG_NAND0_PADDR);
- break;
- case NAND_CTL_CLRALE:
- MACRO_NAND_CTL_CLRALE((unsigned long)CFG_NAND0_PADDR);
- break;
- case NAND_CTL_SETNCE:
- MACRO_NAND_ENABLE_CE((unsigned long)CFG_NAND0_PADDR);
- break;
- case NAND_CTL_CLRNCE:
- MACRO_NAND_DISABLE_CE((unsigned long)CFG_NAND0_PADDR);
- break;
- }
-}
-
-static void ppchameleonevb_hwcontrol(struct mtd_info *mtdinfo, int cmd)
-{
- switch(cmd) {
-
- case NAND_CTL_SETCLE:
- MACRO_NAND_CTL_SETCLE((unsigned long)CFG_NAND1_PADDR);
- break;
- case NAND_CTL_CLRCLE:
- MACRO_NAND_CTL_CLRCLE((unsigned long)CFG_NAND1_PADDR);
- break;
- case NAND_CTL_SETALE:
- MACRO_NAND_CTL_SETALE((unsigned long)CFG_NAND1_PADDR);
- break;
- case NAND_CTL_CLRALE:
- MACRO_NAND_CTL_CLRALE((unsigned long)CFG_NAND1_PADDR);
- break;
- case NAND_CTL_SETNCE:
- MACRO_NAND_ENABLE_CE((unsigned long)CFG_NAND1_PADDR);
- break;
- case NAND_CTL_CLRNCE:
- MACRO_NAND_DISABLE_CE((unsigned long)CFG_NAND1_PADDR);
- break;
- }
-}
-
-#ifdef USE_READY_BUSY_PIN
-/*
- * read device ready pin
- */
-static int ppchameleon_device_ready(struct mtd_info *minfo)
-{
- if (in_be32((volatile unsigned*)GPIO0_IR) & NAND_RB_GPIO_PIN)
- return 1;
- return 0;
-}
-
-static int ppchameleonevb_device_ready(struct mtd_info *minfo)
-{
- if (in_be32((volatile unsigned*)GPIO0_IR) & NAND_EVB_RB_GPIO_PIN)
- return 1;
- return 0;
-}
-#endif
-
-#ifdef CONFIG_MTD_PARTITIONS
-const char *part_probes[] = { "cmdlinepart", NULL };
-const char *part_probes_evb[] = { "cmdlinepart", NULL };
-#endif
-
-/*
- * Main initialization routine
- */
-static int __init ppchameleonevb_init (void)
-{
- struct nand_chip *this;
- const char *part_type = 0;
- int mtd_parts_nb = 0;
- struct mtd_partition *mtd_parts = 0;
- int ppchameleon_fio_base;
- int ppchameleonevb_fio_base;
-
-
- /*********************************
- * Processor module NAND (if any) *
- *********************************/
- /* Allocate memory for MTD device structure and private data */
- ppchameleon_mtd = kmalloc(sizeof(struct mtd_info) +
- sizeof(struct nand_chip),
- GFP_KERNEL);
- if (!ppchameleon_mtd) {
- printk("Unable to allocate PPChameleon NAND MTD device structure.\n");
- return -ENOMEM;
- }
-
- /* map physical address */
- ppchameleon_fio_base = (unsigned long)ioremap(ppchameleon_fio_pbase, SZ_4M);
- if(!ppchameleon_fio_base) {
- printk("ioremap PPChameleon NAND flash failed\n");
- kfree(ppchameleon_mtd);
- return -EIO;
- }
-
- /* Get pointer to private data */
- this = (struct nand_chip *) (&ppchameleon_mtd[1]);
-
- /* Initialize structures */
- memset((char *) ppchameleon_mtd, 0, sizeof(struct mtd_info));
- memset((char *) this, 0, sizeof(struct nand_chip));
-
- /* Link the private data with the MTD structure */
- ppchameleon_mtd->priv = this;
-
- /* Initialize GPIOs */
- /* Pin mapping for NAND chip */
- /*
- CE GPIO_01
- CLE GPIO_02
- ALE GPIO_03
- R/B GPIO_04
- */
- /* output select */
- out_be32((volatile unsigned*)GPIO0_OSRH, in_be32((volatile unsigned*)GPIO0_OSRH) & 0xC0FFFFFF);
- /* three-state select */
- out_be32((volatile unsigned*)GPIO0_TSRH, in_be32((volatile unsigned*)GPIO0_TSRH) & 0xC0FFFFFF);
- /* enable output driver */
- out_be32((volatile unsigned*)GPIO0_TCR, in_be32((volatile unsigned*)GPIO0_TCR) | NAND_nCE_GPIO_PIN | NAND_CLE_GPIO_PIN | NAND_ALE_GPIO_PIN);
-#ifdef USE_READY_BUSY_PIN
- /* three-state select */
- out_be32((volatile unsigned*)GPIO0_TSRH, in_be32((volatile unsigned*)GPIO0_TSRH) & 0xFF3FFFFF);
- /* high-impedecence */
- out_be32((volatile unsigned*)GPIO0_TCR, in_be32((volatile unsigned*)GPIO0_TCR) & (~NAND_RB_GPIO_PIN));
- /* input select */
- out_be32((volatile unsigned*)GPIO0_ISR1H, (in_be32((volatile unsigned*)GPIO0_ISR1H) & 0xFF3FFFFF) | 0x00400000);
-#endif
-
- /* insert callbacks */
- this->IO_ADDR_R = ppchameleon_fio_base;
- this->IO_ADDR_W = ppchameleon_fio_base;
- this->hwcontrol = ppchameleon_hwcontrol;
-#ifdef USE_READY_BUSY_PIN
- this->dev_ready = ppchameleon_device_ready;
-#endif
- this->chip_delay = NAND_BIG_DELAY_US;
- /* ECC mode */
- this->eccmode = NAND_ECC_SOFT;
-
- /* Set internal data buffer */
- this->data_buf = data_buf;
- this->oob_buf = oob_buf;
-
- /* Scan to find existence of the device (it could not be mounted) */
- if (nand_scan (ppchameleon_mtd, 1)) {
- iounmap((void *)ppchameleon_fio_base);
- kfree (ppchameleon_mtd);
- goto nand_evb_init;
- }
-
-#ifndef USE_READY_BUSY_PIN
- /* Adjust delay if necessary */
- if (ppchameleon_mtd->size == NAND_SMALL_SIZE)
- this->chip_delay = NAND_SMALL_DELAY_US;
-#endif
-
-#ifdef CONFIG_MTD_PARTITIONS
- ppchameleon_mtd->name = "ppchameleon-nand";
- mtd_parts_nb = parse_mtd_partitions(ppchameleon_mtd, part_probes, &mtd_parts, 0);
- if (mtd_parts_nb > 0)
- part_type = "command line";
- else
- mtd_parts_nb = 0;
-#endif
- if (mtd_parts_nb == 0)
- {
- if (ppchameleon_mtd->size == NAND_SMALL_SIZE)
- mtd_parts = partition_info_me;
- else
- mtd_parts = partition_info_hi;
- mtd_parts_nb = NUM_PARTITIONS;
- part_type = "static";
- }
-
- /* Register the partitions */
- printk(KERN_NOTICE "Using %s partition definition\n", part_type);
- add_mtd_partitions(ppchameleon_mtd, mtd_parts, mtd_parts_nb);
-
-nand_evb_init:
- /****************************
- * EVB NAND (always present) *
- ****************************/
- /* Allocate memory for MTD device structure and private data */
- ppchameleonevb_mtd = kmalloc(sizeof(struct mtd_info) +
- sizeof(struct nand_chip),
- GFP_KERNEL);
- if (!ppchameleonevb_mtd) {
- printk("Unable to allocate PPChameleonEVB NAND MTD device structure.\n");
- return -ENOMEM;
- }
-
- /* map physical address */
- ppchameleonevb_fio_base = (unsigned long)ioremap(ppchameleonevb_fio_pbase, SZ_4M);
- if(!ppchameleonevb_fio_base) {
- printk("ioremap PPChameleonEVB NAND flash failed\n");
- kfree(ppchameleonevb_mtd);
- return -EIO;
- }
-
- /* Get pointer to private data */
- this = (struct nand_chip *) (&ppchameleonevb_mtd[1]);
-
- /* Initialize structures */
- memset((char *) ppchameleonevb_mtd, 0, sizeof(struct mtd_info));
- memset((char *) this, 0, sizeof(struct nand_chip));
-
- /* Link the private data with the MTD structure */
- ppchameleonevb_mtd->priv = this;
-
- /* Initialize GPIOs */
- /* Pin mapping for NAND chip */
- /*
- CE GPIO_14
- CLE GPIO_15
- ALE GPIO_16
- R/B GPIO_31
- */
- /* output select */
- out_be32((volatile unsigned*)GPIO0_OSRH, in_be32((volatile unsigned*)GPIO0_OSRH) & 0xFFFFFFF0);
- out_be32((volatile unsigned*)GPIO0_OSRL, in_be32((volatile unsigned*)GPIO0_OSRL) & 0x3FFFFFFF);
- /* three-state select */
- out_be32((volatile unsigned*)GPIO0_TSRH, in_be32((volatile unsigned*)GPIO0_TSRH) & 0xFFFFFFF0);
- out_be32((volatile unsigned*)GPIO0_TSRL, in_be32((volatile unsigned*)GPIO0_TSRL) & 0x3FFFFFFF);
- /* enable output driver */
- out_be32((volatile unsigned*)GPIO0_TCR, in_be32((volatile unsigned*)GPIO0_TCR) | NAND_EVB_nCE_GPIO_PIN | NAND_EVB_CLE_GPIO_PIN | NAND_EVB_ALE_GPIO_PIN);
-#ifdef USE_READY_BUSY_PIN
- /* three-state select */
- out_be32((volatile unsigned*)GPIO0_TSRL, in_be32((volatile unsigned*)GPIO0_TSRL) & 0xFFFFFFFC);
- /* high-impedecence */
- out_be32((volatile unsigned*)GPIO0_TCR, in_be32((volatile unsigned*)GPIO0_TCR) & (~NAND_EVB_RB_GPIO_PIN));
- /* input select */
- out_be32((volatile unsigned*)GPIO0_ISR1L, (in_be32((volatile unsigned*)GPIO0_ISR1L) & 0xFFFFFFFC) | 0x00000001);
-#endif
-
-
- /* insert callbacks */
- this->IO_ADDR_R = ppchameleonevb_fio_base;
- this->IO_ADDR_W = ppchameleonevb_fio_base;
- this->hwcontrol = ppchameleonevb_hwcontrol;
-#ifdef USE_READY_BUSY_PIN
- this->dev_ready = ppchameleonevb_device_ready;
-#endif
- this->chip_delay = NAND_SMALL_DELAY_US;
-
- /* ECC mode */
- this->eccmode = NAND_ECC_SOFT;
-
- /* Set internal data buffer */
- this->data_buf = data_buf_evb;
- this->oob_buf = oob_buf_evb;
-
- /* Scan to find existence of the device */
- if (nand_scan (ppchameleonevb_mtd, 1)) {
- iounmap((void *)ppchameleonevb_fio_base);
- kfree (ppchameleonevb_mtd);
- return -ENXIO;
- }
-
-#ifdef CONFIG_MTD_PARTITIONS
- ppchameleonevb_mtd->name = NAND_EVB_MTD_NAME;
- mtd_parts_nb = parse_mtd_partitions(ppchameleonevb_mtd, part_probes_evb, &mtd_parts, 0);
- if (mtd_parts_nb > 0)
- part_type = "command line";
- else
- mtd_parts_nb = 0;
-#endif
- if (mtd_parts_nb == 0)
- {
- mtd_parts = partition_info_evb;
- mtd_parts_nb = NUM_PARTITIONS;
- part_type = "static";
- }
-
- /* Register the partitions */
- printk(KERN_NOTICE "Using %s partition definition\n", part_type);
- add_mtd_partitions(ppchameleonevb_mtd, mtd_parts, mtd_parts_nb);
-
- /* Return happy */
- return 0;
-}
-module_init(ppchameleonevb_init);
-
-/*
- * Clean up routine
- */
-static void __exit ppchameleonevb_cleanup (void)
-{
- struct nand_chip *this = (struct nand_chip *) &ppchameleonevb_mtd[1];
-
- /* Unregister the device */
- del_mtd_device (ppchameleonevb_mtd);
-
- /* Free internal data buffer */
- kfree (this->data_buf);
-
- /* Free the MTD device structure */
- kfree (ppchameleonevb_mtd);
-}
-module_exit(ppchameleonevb_cleanup);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("DAVE Srl <support-ppchameleon@dave-tech.it>");
-MODULE_DESCRIPTION("MTD map driver for DAVE Srl PPChameleonEVB board");
* to controllines (due to change in nand.c)
* page_cache added
*
- * $Id: spia.c,v 1.21 2003/07/11 15:12:29 dwmw2 Exp $
+ * $Id: spia.c,v 1.19 2003/04/20 07:24:40 gleixner Exp $
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* a 64Mibit (8MiB x 8 bits) NAND flash device.
*/
-#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
/*
* Values specific to the SPIA board (used with EP7212 processor)
*/
-#define SPIA_IO_BASE 0xd0000000 /* Start of EP7212 IO address space */
-#define SPIA_FIO_BASE 0xf0000000 /* Address where flash is mapped */
-#define SPIA_PEDR 0x0080 /*
+#define SPIA_IO_ADDR = 0xd0000000 /* Start of EP7212 IO address space */
+#define SPIA_FIO_ADDR = 0xf0000000 /* Address where flash is mapped */
+#define SPIA_PEDR = 0x0080 /*
* IO offset to Port E data register
* where the CLE, ALE and NCE pins
* are wired to.
*/
-#define SPIA_PEDDR 0x00c0 /*
+#define SPIA_PEDDR = 0x00c0 /*
* IO offset to Port E data direction
* register so we can control the IO
* lines.
MODULE_PARM(spia_pedr, "i");
MODULE_PARM(spia_peddr, "i");
+__setup("spia_io_base=",spia_io_base);
+__setup("spia_fio_base=",spia_fio_base);
+__setup("spia_pedr=",spia_pedr);
+__setup("spia_peddr=",spia_peddr);
+
/*
* Define partitions for flash device
*/
/*
* hardware specific access to control-lines
*/
-static void spia_hwcontrol(struct mtd_info *mtd, int cmd){
+void spia_hwcontrol(int cmd){
switch(cmd){
this->chip_delay = 15;
/* Scan to find existence of the device */
- if (nand_scan (spia_mtd, 1)) {
+ if (nand_scan (spia_mtd)) {
kfree (spia_mtd);
return -ENXIO;
}
+++ /dev/null
-/*
- * drivers/mtd/nand/toto.c
- *
- * Copyright (c) 2003 Texas Instruments
- *
- * Derived from drivers/mtd/autcpu12.c
- *
- * Copyright (c) 2002 Thomas Gleixner <tgxl@linutronix.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Overview:
- * This is a device driver for the NAND flash device found on the
- * TI fido board. It supports 32MiB and 64MiB cards
- *
- * $Id: toto.c,v 1.2 2003/10/21 10:04:58 dwmw2 Exp $
- */
-
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/mtd/partitions.h>
-#include <asm/io.h>
-#include <asm/arch/hardware.h>
-#include <asm/sizes.h>
-#include <asm/arch/toto.h>
-#include <asm/arch-omap1510/hardware.h>
-#include <asm/arch/gpio.h>
-
-/*
- * MTD structure for TOTO board
- */
-static struct mtd_info *toto_mtd = NULL;
-
-static int toto_io_base = OMAP_FLASH_1_BASE;
-
-#define CONFIG_NAND_WORKAROUND 1
-
-#define NAND_NCE 0x4000
-#define NAND_CLE 0x1000
-#define NAND_ALE 0x0002
-#define NAND_MASK (NAND_CLE | NAND_ALE | NAND_NCE)
-
-#define T_NAND_CTL_CLRALE(iob) gpiosetout(NAND_ALE, 0)
-#define T_NAND_CTL_SETALE(iob) gpiosetout(NAND_ALE, NAND_ALE)
-#ifdef CONFIG_NAND_WORKAROUND /* "some" dev boards busted, blue wired to rts2 :( */
-#define T_NAND_CTL_CLRCLE(iob) gpiosetout(NAND_CLE, 0); rts2setout(2, 2)
-#define T_NAND_CTL_SETCLE(iob) gpiosetout(NAND_CLE, NAND_CLE); rts2setout(2, 0)
-#else
-#define T_NAND_CTL_CLRCLE(iob) gpiosetout(NAND_CLE, 0)
-#define T_NAND_CTL_SETCLE(iob) gpiosetout(NAND_CLE, NAND_CLE)
-#endif
-#define T_NAND_CTL_SETNCE(iob) gpiosetout(NAND_NCE, 0)
-#define T_NAND_CTL_CLRNCE(iob) gpiosetout(NAND_NCE, NAND_NCE)
-
-/*
- * Define partitions for flash devices
- */
-
-static struct mtd_partition partition_info64M[] = {
- { .name = "toto kernel partition 1",
- .offset = 0,
- .size = 2 * SZ_1M },
- { .name = "toto file sys partition 2",
- .offset = 2 * SZ_1M,
- .size = 14 * SZ_1M },
- { .name = "toto user partition 3",
- .offset = 16 * SZ_1M,
- .size = 16 * SZ_1M },
- { .name = "toto devboard extra partition 4",
- .offset = 32 * SZ_1M,
- .size = 32 * SZ_1M },
-};
-
-static struct mtd_partition partition_info32M[] = {
- { .name = "toto kernel partition 1",
- .offset = 0,
- .size = 2 * SZ_1M },
- { .name = "toto file sys partition 2",
- .offset = 2 * SZ_1M,
- .size = 14 * SZ_1M },
- { .name = "toto user partition 3",
- .offset = 16 * SZ_1M,
- .size = 16 * SZ_1M },
-};
-
-#define NUM_PARTITIONS32M 3
-#define NUM_PARTITIONS64M 4
-/*
- * hardware specific access to control-lines
-*/
-
-static void toto_hwcontrol(struct mtd_info *mtd, int cmd)
-{
-
- udelay(1); /* hopefully enough time for tc make proceding write to clear */
- switch(cmd){
-
- case NAND_CTL_SETCLE: T_NAND_CTL_SETCLE(cmd); break;
- case NAND_CTL_CLRCLE: T_NAND_CTL_CLRCLE(cmd); break;
-
- case NAND_CTL_SETALE: T_NAND_CTL_SETALE(cmd); break;
- case NAND_CTL_CLRALE: T_NAND_CTL_CLRALE(cmd); break;
-
- case NAND_CTL_SETNCE: T_NAND_CTL_SETNCE(cmd); break;
- case NAND_CTL_CLRNCE: T_NAND_CTL_CLRNCE(cmd); break;
- }
- udelay(1); /* allow time to ensure gpio state to over take memory write */
-}
-
-/*
- * Main initialization routine
- */
-int __init toto_init (void)
-{
- struct nand_chip *this;
- int err = 0;
-
- /* Allocate memory for MTD device structure and private data */
- toto_mtd = kmalloc (sizeof(struct mtd_info) + sizeof (struct nand_chip),
- GFP_KERNEL);
- if (!toto_mtd) {
- printk (KERN_WARNING "Unable to allocate toto NAND MTD device structure.\n");
- err = -ENOMEM;
- goto out;
- }
-
- /* Get pointer to private data */
- this = (struct nand_chip *) (&toto_mtd[1]);
-
- /* Initialize structures */
- memset((char *) toto_mtd, 0, sizeof(struct mtd_info));
- memset((char *) this, 0, sizeof(struct nand_chip));
-
- /* Link the private data with the MTD structure */
- toto_mtd->priv = this;
-
- /* Set address of NAND IO lines */
- this->IO_ADDR_R = toto_io_base;
- this->IO_ADDR_W = toto_io_base;
- this->hwcontrol = toto_hwcontrol;
- this->dev_ready = NULL;
- /* 25 us command delay time */
- this->chip_delay = 30;
- this->eccmode = NAND_ECC_SOFT;
-
- /* Scan to find existance of the device */
- if (nand_scan (toto_mtd, 1)) {
- err = -ENXIO;
- goto out_mtd;
- }
-
- /* Allocate memory for internal data buffer */
- this->data_buf = kmalloc (sizeof(u_char) * (toto_mtd->oobblock + toto_mtd->oobsize), GFP_KERNEL);
- if (!this->data_buf) {
- printk (KERN_WARNING "Unable to allocate NAND data buffer for toto.\n");
- err = -ENOMEM;
- goto out_mtd;
- }
-
- /* Register the partitions */
- switch(toto_mtd->size){
- case SZ_64M: add_mtd_partitions(toto_mtd, partition_info64M, NUM_PARTITIONS64M); break;
- case SZ_32M: add_mtd_partitions(toto_mtd, partition_info32M, NUM_PARTITIONS32M); break;
- default: {
- printk (KERN_WARNING "Unsupported Nand device\n");
- err = -ENXIO;
- goto out_buf;
- }
- }
-
- gpioreserve(NAND_MASK); /* claim our gpios */
- archflashwp(0,0); /* open up flash for writing */
-
- goto out;
-
-out_buf:
- kfree (this->data_buf);
-out_mtd:
- kfree (toto_mtd);
-out:
- return err;
-}
-
-module_init(toto_init);
-
-/*
- * Clean up routine
- */
-static void __exit toto_cleanup (void)
-{
- struct nand_chip *this = (struct nand_chip *) &toto_mtd[1];
-
- /* Unregister partitions */
- del_mtd_partitions(toto_mtd);
-
- /* Unregister the device */
- del_mtd_device (toto_mtd);
-
- /* Free internal data buffers */
- kfree (this->data_buf);
-
- /* Free the MTD device structure */
- kfree (toto_mtd);
-
- /* stop flash writes */
- archflashwp(0,1);
-
- /* release gpios to system */
- gpiorelease(NAND_MASK);
-}
-module_exit(toto_cleanup);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Richard Woodruff <r-woodruff2@ti.com>");
-MODULE_DESCRIPTION("Glue layer for NAND flash on toto board");
+++ /dev/null
-/*
- * drivers/mtd/tx4925ndfmc.c
- *
- * Overview:
- * This is a device driver for the NAND flash device found on the
- * Toshiba RBTX4925 reference board, which is a SmartMediaCard. It supports
- * 16MiB, 32MiB and 64MiB cards.
- *
- * Author: MontaVista Software, Inc. source@mvista.com
- *
- * Derived from drivers/mtd/autcpu12.c
- * Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de)
- *
- * $Id: tx4925ndfmc.c,v 1.3 2004/07/20 02:44:26 dwmw2 Exp $
- *
- * Copyright (C) 2001 Toshiba Corporation
- *
- * 2003 (c) MontaVista Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
- *
- */
-
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/mtd/partitions.h>
-#include <linux/delay.h>
-#include <asm/io.h>
-#include <asm/tx4925/tx4925_nand.h>
-
-extern struct nand_oobinfo jffs2_oobinfo;
-
-/*
- * MTD structure for RBTX4925 board
- */
-static struct mtd_info *tx4925ndfmc_mtd = NULL;
-
-/*
- * Define partitions for flash devices
- */
-
-static struct mtd_partition partition_info16k[] = {
- { .name = "RBTX4925 flash partition 1",
- .offset = 0,
- .size = 8 * 0x00100000 },
- { .name = "RBTX4925 flash partition 2",
- .offset = 8 * 0x00100000,
- .size = 8 * 0x00100000 },
-};
-
-static struct mtd_partition partition_info32k[] = {
- { .name = "RBTX4925 flash partition 1",
- .offset = 0,
- .size = 8 * 0x00100000 },
- { .name = "RBTX4925 flash partition 2",
- .offset = 8 * 0x00100000,
- .size = 24 * 0x00100000 },
-};
-
-static struct mtd_partition partition_info64k[] = {
- { .name = "User FS",
- .offset = 0,
- .size = 16 * 0x00100000 },
- { .name = "RBTX4925 flash partition 2",
- .offset = 16 * 0x00100000,
- .size = 48 * 0x00100000},
-};
-
-static struct mtd_partition partition_info128k[] = {
- { .name = "Skip bad section",
- .offset = 0,
- .size = 16 * 0x00100000 },
- { .name = "User FS",
- .offset = 16 * 0x00100000,
- .size = 112 * 0x00100000 },
-};
-#define NUM_PARTITIONS16K 2
-#define NUM_PARTITIONS32K 2
-#define NUM_PARTITIONS64K 2
-#define NUM_PARTITIONS128K 2
-
-/*
- * hardware specific access to control-lines
-*/
-static void tx4925ndfmc_hwcontrol(struct mtd_info *mtd, int cmd)
-{
-
- switch(cmd){
-
- case NAND_CTL_SETCLE:
- tx4925_ndfmcptr->mcr |= TX4925_NDFMCR_CLE;
- break;
- case NAND_CTL_CLRCLE:
- tx4925_ndfmcptr->mcr &= ~TX4925_NDFMCR_CLE;
- break;
- case NAND_CTL_SETALE:
- tx4925_ndfmcptr->mcr |= TX4925_NDFMCR_ALE;
- break;
- case NAND_CTL_CLRALE:
- tx4925_ndfmcptr->mcr &= ~TX4925_NDFMCR_ALE;
- break;
- case NAND_CTL_SETNCE:
- tx4925_ndfmcptr->mcr |= TX4925_NDFMCR_CE;
- break;
- case NAND_CTL_CLRNCE:
- tx4925_ndfmcptr->mcr &= ~TX4925_NDFMCR_CE;
- break;
- case NAND_CTL_SETWP:
- tx4925_ndfmcptr->mcr |= TX4925_NDFMCR_WE;
- break;
- case NAND_CTL_CLRWP:
- tx4925_ndfmcptr->mcr &= ~TX4925_NDFMCR_WE;
- break;
- }
-}
-
-/*
-* read device ready pin
-*/
-static int tx4925ndfmc_device_ready(struct mtd_info *mtd)
-{
- int ready;
- ready = (tx4925_ndfmcptr->sr & TX4925_NDSFR_BUSY) ? 0 : 1;
- return ready;
-}
-void tx4925ndfmc_enable_hwecc(struct mtd_info *mtd, int mode)
-{
- /* reset first */
- tx4925_ndfmcptr->mcr |= TX4925_NDFMCR_ECC_CNTL_MASK;
- tx4925_ndfmcptr->mcr &= ~TX4925_NDFMCR_ECC_CNTL_MASK;
- tx4925_ndfmcptr->mcr |= TX4925_NDFMCR_ECC_CNTL_ENAB;
-}
-static void tx4925ndfmc_disable_ecc(void)
-{
- tx4925_ndfmcptr->mcr &= ~TX4925_NDFMCR_ECC_CNTL_MASK;
-}
-static void tx4925ndfmc_enable_read_ecc(void)
-{
- tx4925_ndfmcptr->mcr &= ~TX4925_NDFMCR_ECC_CNTL_MASK;
- tx4925_ndfmcptr->mcr |= TX4925_NDFMCR_ECC_CNTL_READ;
-}
-void tx4925ndfmc_readecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code){
- int i;
- u_char *ecc = ecc_code;
- tx4925ndfmc_enable_read_ecc();
- for (i = 0;i < 6;i++,ecc++)
- *ecc = tx4925_read_nfmc(&(tx4925_ndfmcptr->dtr));
- tx4925ndfmc_disable_ecc();
-}
-void tx4925ndfmc_device_setup(void)
-{
-
- *(unsigned char *)0xbb005000 &= ~0x08;
-
- /* reset NDFMC */
- tx4925_ndfmcptr->rstr |= TX4925_NDFRSTR_RST;
- while (tx4925_ndfmcptr->rstr & TX4925_NDFRSTR_RST);
-
- /* setup BusSeparete, Hold Time, Strobe Pulse Width */
- tx4925_ndfmcptr->mcr = TX4925_BSPRT ? TX4925_NDFMCR_BSPRT : 0;
- tx4925_ndfmcptr->spr = TX4925_HOLD << 4 | TX4925_SPW;
-}
-static u_char tx4925ndfmc_nand_read_byte(struct mtd_info *mtd)
-{
- struct nand_chip *this = mtd->priv;
- return tx4925_read_nfmc(this->IO_ADDR_R);
-}
-
-static void tx4925ndfmc_nand_write_byte(struct mtd_info *mtd, u_char byte)
-{
- struct nand_chip *this = mtd->priv;
- tx4925_write_nfmc(byte, this->IO_ADDR_W);
-}
-
-static void tx4925ndfmc_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
-{
- int i;
- struct nand_chip *this = mtd->priv;
-
- for (i=0; i<len; i++)
- tx4925_write_nfmc(buf[i], this->IO_ADDR_W);
-}
-
-static void tx4925ndfmc_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
-{
- int i;
- struct nand_chip *this = mtd->priv;
-
- for (i=0; i<len; i++)
- buf[i] = tx4925_read_nfmc(this->IO_ADDR_R);
-}
-
-static int tx4925ndfmc_nand_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
-{
- int i;
- struct nand_chip *this = mtd->priv;
-
- for (i=0; i<len; i++)
- if (buf[i] != tx4925_read_nfmc(this->IO_ADDR_R))
- return -EFAULT;
-
- return 0;
-}
-
-/*
- * Send command to NAND device
- */
-static void tx4925ndfmc_nand_command (struct mtd_info *mtd, unsigned command, int column, int page_addr)
-{
- register struct nand_chip *this = mtd->priv;
-
- /* Begin command latch cycle */
- this->hwcontrol(mtd, NAND_CTL_SETCLE);
- /*
- * Write out the command to the device.
- */
- if (command == NAND_CMD_SEQIN) {
- int readcmd;
-
- if (column >= mtd->oobblock) {
- /* OOB area */
- column -= mtd->oobblock;
- readcmd = NAND_CMD_READOOB;
- } else if (column < 256) {
- /* First 256 bytes --> READ0 */
- readcmd = NAND_CMD_READ0;
- } else {
- column -= 256;
- readcmd = NAND_CMD_READ1;
- }
- this->write_byte(mtd, readcmd);
- }
- this->write_byte(mtd, command);
-
- /* Set ALE and clear CLE to start address cycle */
- this->hwcontrol(mtd, NAND_CTL_CLRCLE);
-
- if (column != -1 || page_addr != -1) {
- this->hwcontrol(mtd, NAND_CTL_SETALE);
-
- /* Serially input address */
- if (column != -1)
- this->write_byte(mtd, column);
- if (page_addr != -1) {
- this->write_byte(mtd, (unsigned char) (page_addr & 0xff));
- this->write_byte(mtd, (unsigned char) ((page_addr >> 8) & 0xff));
- /* One more address cycle for higher density devices */
- if (mtd->size & 0x0c000000)
- this->write_byte(mtd, (unsigned char) ((page_addr >> 16) & 0x0f));
- }
- /* Latch in address */
- this->hwcontrol(mtd, NAND_CTL_CLRALE);
- }
-
- /*
- * program and erase have their own busy handlers
- * status and sequential in needs no delay
- */
- switch (command) {
-
- case NAND_CMD_PAGEPROG:
- /* Turn off WE */
- this->hwcontrol (mtd, NAND_CTL_CLRWP);
- return;
-
- case NAND_CMD_SEQIN:
- /* Turn on WE */
- this->hwcontrol (mtd, NAND_CTL_SETWP);
- return;
-
- case NAND_CMD_ERASE1:
- case NAND_CMD_ERASE2:
- case NAND_CMD_STATUS:
- return;
-
- case NAND_CMD_RESET:
- if (this->dev_ready)
- break;
- this->hwcontrol(mtd, NAND_CTL_SETCLE);
- this->write_byte(mtd, NAND_CMD_STATUS);
- this->hwcontrol(mtd, NAND_CTL_CLRCLE);
- while ( !(this->read_byte(mtd) & 0x40));
- return;
-
- /* This applies to read commands */
- default:
- /*
- * If we don't have access to the busy pin, we apply the given
- * command delay
- */
- if (!this->dev_ready) {
- udelay (this->chip_delay);
- return;
- }
- }
-
- /* wait until command is processed */
- while (!this->dev_ready(mtd));
-}
-
-#ifdef CONFIG_MTD_CMDLINE_PARTS
-extern int parse_cmdline_partitions(struct mtd_info *master, struct mtd_partitio
-n **pparts, char *);
-#endif
-
-/*
- * Main initialization routine
- */
-extern int nand_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc);
-int __init tx4925ndfmc_init (void)
-{
- struct nand_chip *this;
- int err = 0;
-
- /* Allocate memory for MTD device structure and private data */
- tx4925ndfmc_mtd = kmalloc (sizeof(struct mtd_info) + sizeof (struct nand_chip),
- GFP_KERNEL);
- if (!tx4925ndfmc_mtd) {
- printk ("Unable to allocate RBTX4925 NAND MTD device structure.\n");
- err = -ENOMEM;
- goto out;
- }
-
- tx4925ndfmc_device_setup();
-
- /* io is indirect via a register so don't need to ioremap address */
-
- /* Get pointer to private data */
- this = (struct nand_chip *) (&tx4925ndfmc_mtd[1]);
-
- /* Initialize structures */
- memset((char *) tx4925ndfmc_mtd, 0, sizeof(struct mtd_info));
- memset((char *) this, 0, sizeof(struct nand_chip));
-
- /* Link the private data with the MTD structure */
- tx4925ndfmc_mtd->priv = this;
-
- /* Set address of NAND IO lines */
- this->IO_ADDR_R = (unsigned long)&(tx4925_ndfmcptr->dtr);
- this->IO_ADDR_W = (unsigned long)&(tx4925_ndfmcptr->dtr);
- this->hwcontrol = tx4925ndfmc_hwcontrol;
- this->enable_hwecc = tx4925ndfmc_enable_hwecc;
- this->calculate_ecc = tx4925ndfmc_readecc;
- this->correct_data = nand_correct_data;
- this->eccmode = NAND_ECC_HW6_512;
- this->dev_ready = tx4925ndfmc_device_ready;
- /* 20 us command delay time */
- this->chip_delay = 20;
- this->read_byte = tx4925ndfmc_nand_read_byte;
- this->write_byte = tx4925ndfmc_nand_write_byte;
- this->cmdfunc = tx4925ndfmc_nand_command;
- this->write_buf = tx4925ndfmc_nand_write_buf;
- this->read_buf = tx4925ndfmc_nand_read_buf;
- this->verify_buf = tx4925ndfmc_nand_verify_buf;
-
- /* Scan to find existance of the device */
- if (nand_scan (tx4925ndfmc_mtd, 1)) {
- err = -ENXIO;
- goto out_ior;
- }
-
- /* Allocate memory for internal data buffer */
- this->data_buf = kmalloc (sizeof(u_char) * (tx4925ndfmc_mtd->oobblock + tx4925ndfmc_mtd->oobsize), GFP_KERNEL);
- if (!this->data_buf) {
- printk ("Unable to allocate NAND data buffer for RBTX4925.\n");
- err = -ENOMEM;
- goto out_ior;
- }
-
- /* Register the partitions */
-#ifdef CONFIG_MTD_CMDLINE_PARTS
- {
- int mtd_parts_nb = 0;
- struct mtd_partition *mtd_parts = 0;
- mtd_parts_nb = parse_cmdline_partitions(tx4925ndfmc_mtd, &mtd_parts, "tx4925ndfmc");
- if (mtd_parts_nb > 0)
- add_mtd_partitions(tx4925ndfmc_mtd, mtd_parts, mtd_parts_nb);
- else
- add_mtd_device(tx4925ndfmc_mtd);
- }
-#else /* ifdef CONFIG_MTD_CMDLINE_PARTS */
- switch(tx4925ndfmc_mtd->size){
- case 0x01000000: add_mtd_partitions(tx4925ndfmc_mtd, partition_info16k, NUM_PARTITIONS16K); break;
- case 0x02000000: add_mtd_partitions(tx4925ndfmc_mtd, partition_info32k, NUM_PARTITIONS32K); break;
- case 0x04000000: add_mtd_partitions(tx4925ndfmc_mtd, partition_info64k, NUM_PARTITIONS64K); break;
- case 0x08000000: add_mtd_partitions(tx4925ndfmc_mtd, partition_info128k, NUM_PARTITIONS128K); break;
- default: {
- printk ("Unsupported SmartMedia device\n");
- err = -ENXIO;
- goto out_buf;
- }
- }
-#endif /* ifdef CONFIG_MTD_CMDLINE_PARTS */
- goto out;
-
-out_buf:
- kfree (this->data_buf);
-out_ior:
-out:
- return err;
-}
-
-module_init(tx4925ndfmc_init);
-
-/*
- * Clean up routine
- */
-#ifdef MODULE
-static void __exit tx4925ndfmc_cleanup (void)
-{
- struct nand_chip *this = (struct nand_chip *) &tx4925ndfmc_mtd[1];
-
- /* Unregister partitions */
- del_mtd_partitions(tx4925ndfmc_mtd);
-
- /* Unregister the device */
- del_mtd_device (tx4925ndfmc_mtd);
-
- /* Free internal data buffers */
- kfree (this->data_buf);
-
- /* Free the MTD device structure */
- kfree (tx4925ndfmc_mtd);
-}
-module_exit(tx4925ndfmc_cleanup);
-#endif
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Alice Hennessy <ahennessy@mvista.com>");
-MODULE_DESCRIPTION("Glue layer for SmartMediaCard on Toshiba RBTX4925");
+++ /dev/null
-/*
- * drivers/mtd/nand/tx4938ndfmc.c
- *
- * Overview:
- * This is a device driver for the NAND flash device connected to
- * TX4938 internal NAND Memory Controller.
- * TX4938 NDFMC is almost same as TX4925 NDFMC, but register size are 64 bit.
- *
- * Author: source@mvista.com
- *
- * Based on spia.c by Steven J. Hill
- *
- * $Id: tx4938ndfmc.c,v 1.2 2004/03/27 19:55:53 gleixner Exp $
- *
- * Copyright (C) 2000-2001 Toshiba Corporation
- *
- * 2003 (c) MontaVista Software, Inc. This file is licensed under the
- * terms of the GNU General Public License version 2. This program is
- * licensed "as is" without any warranty of any kind, whether express
- * or implied.
- */
-#include <linux/config.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/mtd/nand_ecc.h>
-#include <linux/mtd/partitions.h>
-#include <asm/io.h>
-#include <asm/bootinfo.h>
-#include <linux/delay.h>
-#include <asm/tx4938/rbtx4938.h>
-
-extern struct nand_oobinfo jffs2_oobinfo;
-
-/*
- * MTD structure for TX4938 NDFMC
- */
-static struct mtd_info *tx4938ndfmc_mtd;
-
-/*
- * Define partitions for flash device
- */
-#define flush_wb() (void)tx4938_ndfmcptr->mcr;
-
-#define NUM_PARTITIONS 3
-#define NUMBER_OF_CIS_BLOCKS 24
-#define SIZE_OF_BLOCK 0x00004000
-#define NUMBER_OF_BLOCK_PER_ZONE 1024
-#define SIZE_OF_ZONE (NUMBER_OF_BLOCK_PER_ZONE * SIZE_OF_BLOCK)
-#ifndef CONFIG_MTD_CMDLINE_PARTS
-/*
- * You can use the following sample of MTD partitions
- * on the NAND Flash Memory 32MB or more.
- *
- * The following figure shows the image of the sample partition on
- * the 32MB NAND Flash Memory.
- *
- * Block No.
- * 0 +-----------------------------+ ------
- * | CIS | ^
- * 24 +-----------------------------+ |
- * | kernel image | | Zone 0
- * | | |
- * +-----------------------------+ |
- * 1023 | unused area | v
- * +-----------------------------+ ------
- * 1024 | JFFS2 | ^
- * | | |
- * | | | Zone 1
- * | | |
- * | | |
- * | | v
- * 2047 +-----------------------------+ ------
- *
- */
-static struct mtd_partition partition_info[NUM_PARTITIONS] = {
- {
- .name = "RBTX4938 CIS Area",
- .offset = 0,
- .size = (NUMBER_OF_CIS_BLOCKS * SIZE_OF_BLOCK),
- .mask_flags = MTD_WRITEABLE /* This partition is NOT writable */
- },
- {
- .name = "RBTX4938 kernel image",
- .offset = MTDPART_OFS_APPEND,
- .size = 8 * 0x00100000, /* 8MB (Depends on size of kernel image) */
- .mask_flags = MTD_WRITEABLE /* This partition is NOT writable */
- },
- {
- .name = "Root FS (JFFS2)",
- .offset = (0 + SIZE_OF_ZONE), /* start address of next zone */
- .size = MTDPART_SIZ_FULL
- },
-};
-#endif
-
-static void tx4938ndfmc_hwcontrol(struct mtd_info *mtd, int cmd)
-{
- switch (cmd) {
- case NAND_CTL_SETCLE:
- tx4938_ndfmcptr->mcr |= TX4938_NDFMCR_CLE;
- break;
- case NAND_CTL_CLRCLE:
- tx4938_ndfmcptr->mcr &= ~TX4938_NDFMCR_CLE;
- break;
- case NAND_CTL_SETALE:
- tx4938_ndfmcptr->mcr |= TX4938_NDFMCR_ALE;
- break;
- case NAND_CTL_CLRALE:
- tx4938_ndfmcptr->mcr &= ~TX4938_NDFMCR_ALE;
- break;
- /* TX4938_NDFMCR_CE bit is 0:high 1:low */
- case NAND_CTL_SETNCE:
- tx4938_ndfmcptr->mcr |= TX4938_NDFMCR_CE;
- break;
- case NAND_CTL_CLRNCE:
- tx4938_ndfmcptr->mcr &= ~TX4938_NDFMCR_CE;
- break;
- case NAND_CTL_SETWP:
- tx4938_ndfmcptr->mcr |= TX4938_NDFMCR_WE;
- break;
- case NAND_CTL_CLRWP:
- tx4938_ndfmcptr->mcr &= ~TX4938_NDFMCR_WE;
- break;
- }
-}
-static int tx4938ndfmc_dev_ready(struct mtd_info *mtd)
-{
- flush_wb();
- return !(tx4938_ndfmcptr->sr & TX4938_NDFSR_BUSY);
-}
-static void tx4938ndfmc_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code)
-{
- u32 mcr = tx4938_ndfmcptr->mcr;
- mcr &= ~TX4938_NDFMCR_ECC_ALL;
- tx4938_ndfmcptr->mcr = mcr | TX4938_NDFMCR_ECC_OFF;
- tx4938_ndfmcptr->mcr = mcr | TX4938_NDFMCR_ECC_READ;
- ecc_code[1] = tx4938_ndfmcptr->dtr;
- ecc_code[0] = tx4938_ndfmcptr->dtr;
- ecc_code[2] = tx4938_ndfmcptr->dtr;
- tx4938_ndfmcptr->mcr = mcr | TX4938_NDFMCR_ECC_OFF;
-}
-static void tx4938ndfmc_enable_hwecc(struct mtd_info *mtd, int mode)
-{
- u32 mcr = tx4938_ndfmcptr->mcr;
- mcr &= ~TX4938_NDFMCR_ECC_ALL;
- tx4938_ndfmcptr->mcr = mcr | TX4938_NDFMCR_ECC_RESET;
- tx4938_ndfmcptr->mcr = mcr | TX4938_NDFMCR_ECC_OFF;
- tx4938_ndfmcptr->mcr = mcr | TX4938_NDFMCR_ECC_ON;
-}
-
-static u_char tx4938ndfmc_nand_read_byte(struct mtd_info *mtd)
-{
- struct nand_chip *this = mtd->priv;
- return tx4938_read_nfmc(this->IO_ADDR_R);
-}
-
-static void tx4938ndfmc_nand_write_byte(struct mtd_info *mtd, u_char byte)
-{
- struct nand_chip *this = mtd->priv;
- tx4938_write_nfmc(byte, this->IO_ADDR_W);
-}
-
-static void tx4938ndfmc_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
-{
- int i;
- struct nand_chip *this = mtd->priv;
-
- for (i=0; i<len; i++)
- tx4938_write_nfmc(buf[i], this->IO_ADDR_W);
-}
-
-static void tx4938ndfmc_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
-{
- int i;
- struct nand_chip *this = mtd->priv;
-
- for (i=0; i<len; i++)
- buf[i] = tx4938_read_nfmc(this->IO_ADDR_R);
-}
-
-static int tx4938ndfmc_nand_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
-{
- int i;
- struct nand_chip *this = mtd->priv;
-
- for (i=0; i<len; i++)
- if (buf[i] != tx4938_read_nfmc(this->IO_ADDR_R))
- return -EFAULT;
-
- return 0;
-}
-
-/*
- * Send command to NAND device
- */
-static void tx4938ndfmc_nand_command (struct mtd_info *mtd, unsigned command, int column, int page_addr)
-{
- register struct nand_chip *this = mtd->priv;
-
- /* Begin command latch cycle */
- this->hwcontrol(mtd, NAND_CTL_SETCLE);
- /*
- * Write out the command to the device.
- */
- if (command == NAND_CMD_SEQIN) {
- int readcmd;
-
- if (column >= mtd->oobblock) {
- /* OOB area */
- column -= mtd->oobblock;
- readcmd = NAND_CMD_READOOB;
- } else if (column < 256) {
- /* First 256 bytes --> READ0 */
- readcmd = NAND_CMD_READ0;
- } else {
- column -= 256;
- readcmd = NAND_CMD_READ1;
- }
- this->write_byte(mtd, readcmd);
- }
- this->write_byte(mtd, command);
-
- /* Set ALE and clear CLE to start address cycle */
- this->hwcontrol(mtd, NAND_CTL_CLRCLE);
-
- if (column != -1 || page_addr != -1) {
- this->hwcontrol(mtd, NAND_CTL_SETALE);
-
- /* Serially input address */
- if (column != -1)
- this->write_byte(mtd, column);
- if (page_addr != -1) {
- this->write_byte(mtd, (unsigned char) (page_addr & 0xff));
- this->write_byte(mtd, (unsigned char) ((page_addr >> 8) & 0xff));
- /* One more address cycle for higher density devices */
- if (mtd->size & 0x0c000000)
- this->write_byte(mtd, (unsigned char) ((page_addr >> 16) & 0x0f));
- }
- /* Latch in address */
- this->hwcontrol(mtd, NAND_CTL_CLRALE);
- }
-
- /*
- * program and erase have their own busy handlers
- * status and sequential in needs no delay
- */
- switch (command) {
-
- case NAND_CMD_PAGEPROG:
- /* Turn off WE */
- this->hwcontrol (mtd, NAND_CTL_CLRWP);
- return;
-
- case NAND_CMD_SEQIN:
- /* Turn on WE */
- this->hwcontrol (mtd, NAND_CTL_SETWP);
- return;
-
- case NAND_CMD_ERASE1:
- case NAND_CMD_ERASE2:
- case NAND_CMD_STATUS:
- return;
-
- case NAND_CMD_RESET:
- if (this->dev_ready)
- break;
- this->hwcontrol(mtd, NAND_CTL_SETCLE);
- this->write_byte(mtd, NAND_CMD_STATUS);
- this->hwcontrol(mtd, NAND_CTL_CLRCLE);
- while ( !(this->read_byte(mtd) & 0x40));
- return;
-
- /* This applies to read commands */
- default:
- /*
- * If we don't have access to the busy pin, we apply the given
- * command delay
- */
- if (!this->dev_ready) {
- udelay (this->chip_delay);
- return;
- }
- }
-
- /* wait until command is processed */
- while (!this->dev_ready(mtd));
-}
-
-#ifdef CONFIG_MTD_CMDLINE_PARTS
-extern int parse_cmdline_partitions(struct mtd_info *master, struct mtd_partition **pparts, char *);
-#endif
-/*
- * Main initialization routine
- */
-int __init tx4938ndfmc_init (void)
-{
- struct nand_chip *this;
- int bsprt = 0, hold = 0xf, spw = 0xf;
- int protected = 0;
-
- if ((*rbtx4938_piosel_ptr & 0x0c) != 0x08) {
- printk("TX4938 NDFMC: disabled by IOC PIOSEL\n");
- return -ENODEV;
- }
- bsprt = 1;
- hold = 2;
- spw = 9 - 1; /* 8 GBUSCLK = 80ns (@ GBUSCLK 100MHz) */
-
- if ((tx4938_ccfgptr->pcfg &
- (TX4938_PCFG_ATA_SEL|TX4938_PCFG_ISA_SEL|TX4938_PCFG_NDF_SEL))
- != TX4938_PCFG_NDF_SEL) {
- printk("TX4938 NDFMC: disabled by PCFG.\n");
- return -ENODEV;
- }
-
- /* reset NDFMC */
- tx4938_ndfmcptr->rstr |= TX4938_NDFRSTR_RST;
- while (tx4938_ndfmcptr->rstr & TX4938_NDFRSTR_RST)
- ;
- /* setup BusSeparete, Hold Time, Strobe Pulse Width */
- tx4938_ndfmcptr->mcr = bsprt ? TX4938_NDFMCR_BSPRT : 0;
- tx4938_ndfmcptr->spr = hold << 4 | spw;
-
- /* Allocate memory for MTD device structure and private data */
- tx4938ndfmc_mtd = kmalloc (sizeof(struct mtd_info) + sizeof (struct nand_chip),
- GFP_KERNEL);
- if (!tx4938ndfmc_mtd) {
- printk ("Unable to allocate TX4938 NDFMC MTD device structure.\n");
- return -ENOMEM;
- }
-
- /* Get pointer to private data */
- this = (struct nand_chip *) (&tx4938ndfmc_mtd[1]);
-
- /* Initialize structures */
- memset((char *) tx4938ndfmc_mtd, 0, sizeof(struct mtd_info));
- memset((char *) this, 0, sizeof(struct nand_chip));
-
- /* Link the private data with the MTD structure */
- tx4938ndfmc_mtd->priv = this;
-
- /* Set address of NAND IO lines */
- this->IO_ADDR_R = (unsigned long)&tx4938_ndfmcptr->dtr;
- this->IO_ADDR_W = (unsigned long)&tx4938_ndfmcptr->dtr;
- this->hwcontrol = tx4938ndfmc_hwcontrol;
- this->dev_ready = tx4938ndfmc_dev_ready;
- this->calculate_ecc = tx4938ndfmc_calculate_ecc;
- this->correct_data = nand_correct_data;
- this->enable_hwecc = tx4938ndfmc_enable_hwecc;
- this->eccmode = NAND_ECC_HW3_256;
- this->chip_delay = 100;
- this->read_byte = tx4938ndfmc_nand_read_byte;
- this->write_byte = tx4938ndfmc_nand_write_byte;
- this->cmdfunc = tx4938ndfmc_nand_command;
- this->write_buf = tx4938ndfmc_nand_write_buf;
- this->read_buf = tx4938ndfmc_nand_read_buf;
- this->verify_buf = tx4938ndfmc_nand_verify_buf;
-
- /* Scan to find existance of the device */
- if (nand_scan (tx4938ndfmc_mtd, 1)) {
- kfree (tx4938ndfmc_mtd);
- return -ENXIO;
- }
-
- /* Allocate memory for internal data buffer */
- this->data_buf = kmalloc (sizeof(u_char) * (tx4938ndfmc_mtd->oobblock + tx4938ndfmc_mtd->oobsize), GFP_KERNEL);
- if (!this->data_buf) {
- printk ("Unable to allocate NAND data buffer for TX4938.\n");
- kfree (tx4938ndfmc_mtd);
- return -ENOMEM;
- }
-
- if (protected) {
- printk(KERN_INFO "TX4938 NDFMC: write protected.\n");
- tx4938ndfmc_mtd->flags &= ~(MTD_WRITEABLE | MTD_ERASEABLE);
- }
-
-#ifdef CONFIG_MTD_CMDLINE_PARTS
- {
- int mtd_parts_nb = 0;
- struct mtd_partition *mtd_parts = 0;
- mtd_parts_nb = parse_cmdline_partitions(tx4938ndfmc_mtd, &mtd_parts, "tx4938ndfmc");
- if (mtd_parts_nb > 0)
- add_mtd_partitions(tx4938ndfmc_mtd, mtd_parts, mtd_parts_nb);
- else
- add_mtd_device(tx4938ndfmc_mtd);
- }
-#else
- add_mtd_partitions(tx4938ndfmc_mtd, partition_info, NUM_PARTITIONS );
-#endif
-
- return 0;
-}
-module_init(tx4938ndfmc_init);
-
-/*
- * Clean up routine
- */
-static void __exit tx4938ndfmc_cleanup (void)
-{
- struct nand_chip *this = (struct nand_chip *) tx4938ndfmc_mtd->priv;
-
- /* Unregister the device */
-#ifdef CONFIG_MTD_CMDLINE_PARTS
- del_mtd_partitions(tx4938ndfmc_mtd);
-#endif
- del_mtd_device (tx4938ndfmc_mtd);
-
- /* Free the MTD device structure */
- kfree (tx4938ndfmc_mtd);
-
- /* Free internal data buffer */
- kfree (this->data_buf);
-}
-module_exit(tx4938ndfmc_cleanup);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Alice Hennessy <ahennessy@mvista.com>");
-MODULE_DESCRIPTION("Board-specific glue layer for NAND flash on TX4938 NDFMC");
/* Linux driver for NAND Flash Translation Layer */
/* (c) 1999 Machine Vision Holdings, Inc. */
/* Author: David Woodhouse <dwmw2@infradead.org> */
-/* $Id: nftlcore.c,v 1.96 2004/06/28 13:52:55 dbrown Exp $ */
+/* $Id: nftlcore.c,v 1.94 2003/06/23 12:00:08 dwmw2 Exp $ */
/*
The contents of this file are distributed under the GNU General
struct NFTLrecord *nftl;
unsigned long temp;
- if (mtd->type != MTD_NANDFLASH)
- return;
- /* OK, this is moderately ugly. But probably safe. Alternatives? */
- if (memcmp(mtd->name, "DiskOnChip", 10))
+ if (mtd->ecctype != MTD_ECC_RS_DiskOnChip)
return;
- if (!mtd->block_isbad) {
- printk(KERN_ERR
-"NFTL no longer supports the old DiskOnChip drivers loaded via docprobe.\n"
-"Please use the new diskonchip driver under the NAND subsystem.\n");
- return;
- }
-
DEBUG(MTD_DEBUG_LEVEL1, "NFTL: add_mtd for %s\n", mtd->name);
nftl = kmalloc(sizeof(struct NFTLrecord), GFP_KERNEL);
nftl->mbd.devnum = -1;
nftl->mbd.blksize = 512;
nftl->mbd.tr = tr;
- memcpy(&nftl->oobinfo, &mtd->oobinfo, sizeof(struct nand_oobinfo));
- nftl->oobinfo.useecc = MTD_NANDECC_PLACEONLY;
if (NFTL_mount(nftl) < 0) {
printk(KERN_WARNING "NFTL: could not mount device\n");
if (BlockMap[block] == BLOCK_NIL)
continue;
- ret = MTD_READ(nftl->mbd.mtd, (nftl->EraseSize * BlockMap[block]) + (block * 512),
- 512, &retlen, movebuf);
+ ret = MTD_READECC(nftl->mbd.mtd, (nftl->EraseSize * BlockMap[block]) + (block * 512),
+ 512, &retlen, movebuf, (char *)&oob, NAND_ECC_DISKONCHIP);
if (ret < 0) {
- ret = MTD_READ(nftl->mbd.mtd, (nftl->EraseSize * BlockMap[block])
+ ret = MTD_READECC(nftl->mbd.mtd, (nftl->EraseSize * BlockMap[block])
+ (block * 512), 512, &retlen,
- movebuf);
+ movebuf, (char *)&oob, NAND_ECC_DISKONCHIP);
if (ret != -EIO)
printk("Error went away on retry.\n");
}
- memset(&oob, 0xff, sizeof(struct nftl_oob));
- oob.b.Status = oob.b.Status1 = SECTOR_USED;
MTD_WRITEECC(nftl->mbd.mtd, (nftl->EraseSize * targetEUN) + (block * 512),
- 512, &retlen, movebuf, (char *)&oob, &nftl->oobinfo);
+ 512, &retlen, movebuf, (char *)&oob, NAND_ECC_DISKONCHIP);
}
/* add the header so that it is now a valid chain */
if (NFTL_formatblock(nftl, thisEUN) < 0) {
/* could not erase : mark block as reserved
+ * FixMe: Update Bad Unit Table on disk
*/
nftl->ReplUnitTable[thisEUN] = BLOCK_RESERVED;
} else {
u16 writeEUN;
unsigned long blockofs = (block * 512) & (nftl->EraseSize - 1);
size_t retlen;
- struct nftl_oob oob;
+ u8 eccbuf[6];
writeEUN = NFTL_findwriteunit(nftl, block);
return 1;
}
- memset(&oob, 0xff, sizeof(struct nftl_oob));
- oob.b.Status = oob.b.Status1 = SECTOR_USED;
MTD_WRITEECC(nftl->mbd.mtd, (writeEUN * nftl->EraseSize) + blockofs,
- 512, &retlen, (char *)buffer, (char *)&oob, &nftl->oobinfo);
- /* need to write SECTOR_USED flags since they are not written in mtd_writeecc */
+ 512, &retlen, (char *)buffer, (char *)eccbuf, NAND_ECC_DISKONCHIP);
+ /* no need to write SECTOR_USED flags since they are written in mtd_writeecc */
return 0;
}
} else {
loff_t ptr = (lastgoodEUN * nftl->EraseSize) + blockofs;
size_t retlen;
- if (MTD_READ(nftl->mbd.mtd, ptr, 512, &retlen, buffer))
+ u_char eccbuf[6];
+ if (MTD_READECC(nftl->mbd.mtd, ptr, 512, &retlen, buffer, eccbuf, NAND_ECC_DISKONCHIP))
return -EIO;
}
return 0;
int __init init_nftl(void)
{
- printk(KERN_INFO "NFTL driver: nftlcore.c $Revision: 1.96 $, nftlmount.c %s\n", nftlmountrev);
+ printk(KERN_INFO "NFTL driver: nftlcore.c $Revision: 1.94 $, nftlmount.c %s\n", nftlmountrev);
return register_mtd_blktrans(&nftl_tr);
}
* Author: Fabrice Bellard (fabrice.bellard@netgem.com)
* Copyright (C) 2000 Netgem S.A.
*
- * $Id: nftlmount.c,v 1.36 2004/06/28 13:52:55 dbrown Exp $
+ * $Id: nftlmount.c,v 1.34 2003/05/21 10:54:10 dwmw2 Exp $
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
#define SECTORSIZE 512
-char nftlmountrev[]="$Revision: 1.36 $";
+char nftlmountrev[]="$Revision: 1.34 $";
/* find_boot_record: Find the NFTL Media Header and its Spare copy which contains the
* various device information of the NFTL partition and Bad Unit Table. Update
/* Assume logical EraseSize == physical erasesize for starting the scan.
We'll sort it out later if we find a MediaHeader which says otherwise */
- /* Actually, we won't. The new DiskOnChip driver has already scanned
- the MediaHeader and adjusted the virtual erasesize it presents in
- the mtd device accordingly. We could even get rid of
- nftl->EraseSize if there were any point in doing so. */
nftl->EraseSize = nftl->mbd.mtd->erasesize;
nftl->nb_blocks = nftl->mbd.mtd->size / nftl->EraseSize;
/* Check for ANAND header first. Then can whinge if it's found but later
checks fail */
- ret = MTD_READ(nftl->mbd.mtd, block * nftl->EraseSize, SECTORSIZE, &retlen, buf);
- /* We ignore ret in case the ECC of the MediaHeader is invalid
- (which is apparently acceptable) */
- if (retlen != SECTORSIZE) {
+ if ((ret = MTD_READ(nftl->mbd.mtd, block * nftl->EraseSize, SECTORSIZE, &retlen, buf))) {
static int warncount = 5;
if (warncount) {
/* Finally reread to check ECC */
if ((ret = MTD_READECC(nftl->mbd.mtd, block * nftl->EraseSize, SECTORSIZE,
- &retlen, buf, (char *)&oob, NULL) < 0)) {
+ &retlen, buf, (char *)&oob, NAND_ECC_DISKONCHIP) < 0)) {
printk(KERN_NOTICE "ANAND header found at 0x%x in mtd%d, but ECC read failed (err %d)\n",
block * nftl->EraseSize, nftl->mbd.mtd->index, ret);
continue;
memcpy(mh, buf, sizeof(struct NFTLMediaHeader));
/* Do some sanity checks on it */
-#if 0
-The new DiskOnChip driver scans the MediaHeader itself, and presents a virtual
-erasesize based on UnitSizeFactor. So the erasesize we read from the mtd
-device is already correct.
if (mh->UnitSizeFactor == 0) {
printk(KERN_NOTICE "NFTL: UnitSizeFactor 0x00 detected. This violates the spec but we think we know what it means...\n");
} else if (mh->UnitSizeFactor < 0xfc) {
nftl->EraseSize = nftl->mbd.mtd->erasesize << (0xff - mh->UnitSizeFactor);
nftl->nb_blocks = nftl->mbd.mtd->size / nftl->EraseSize;
}
-#endif
nftl->nb_boot_blocks = le16_to_cpu(mh->FirstPhysicalEUN);
if ((nftl->nb_boot_blocks + 2) >= nftl->nb_blocks) {
printk(KERN_NOTICE "NFTL Media Header sanity check failed:\n");
/* read the Bad Erase Unit Table and modify ReplUnitTable[] accordingly */
for (i = 0; i < nftl->nb_blocks; i++) {
-#if 0
-The new DiskOnChip driver already scanned the bad block table. Just query it.
if ((i & (SECTORSIZE - 1)) == 0) {
/* read one sector for every SECTORSIZE of blocks */
if ((ret = MTD_READECC(nftl->mbd.mtd, block * nftl->EraseSize +
i + SECTORSIZE, SECTORSIZE, &retlen, buf,
- (char *)&oob, NULL)) < 0) {
+ (char *)&oob, NAND_ECC_DISKONCHIP)) < 0) {
printk(KERN_NOTICE "Read of bad sector table failed (err %d)\n",
ret);
kfree(nftl->ReplUnitTable);
/* mark the Bad Erase Unit as RESERVED in ReplUnitTable */
if (buf[i & (SECTORSIZE - 1)] != 0xff)
nftl->ReplUnitTable[i] = BLOCK_RESERVED;
-#endif
- if (nftl->mbd.mtd->block_isbad(nftl->mbd.mtd, i * nftl->EraseSize))
- nftl->ReplUnitTable[i] = BLOCK_RESERVED;
}
nftl->MediaUnit = block;
int check_oob)
{
int i, retlen;
- u8 buf[SECTORSIZE + nftl->mbd.mtd->oobsize];
+ u8 buf[SECTORSIZE];
for (i = 0; i < len; i += SECTORSIZE) {
- if (MTD_READECC(nftl->mbd.mtd, address, SECTORSIZE, &retlen, buf, &buf[SECTORSIZE], &nftl->oobinfo) < 0)
+ /* we want to read the sector without ECC check here since a free
+ sector does not have ECC syndrome on it yet */
+ if (MTD_READ(nftl->mbd.mtd, address, SECTORSIZE, &retlen, buf) < 0)
return -1;
if (memcmpb(buf, 0xff, SECTORSIZE) != 0)
return -1;
if (check_oob) {
- if (memcmpb(buf + SECTORSIZE, 0xff, nftl->mbd.mtd->oobsize) != 0)
+ if (MTD_READOOB(nftl->mbd.mtd, address, nftl->mbd.mtd->oobsize,
+ &retlen, buf) < 0)
+ return -1;
+ if (memcmpb(buf, 0xff, nftl->mbd.mtd->oobsize) != 0)
return -1;
}
address += SECTORSIZE;
* Return: 0 when succeed, -1 on error.
*
* ToDo: 1. Is it neceressary to check_free_sector after erasing ??
+ * 2. UnitSizeFactor != 0xFF
*/
int NFTL_formatblock(struct NFTLrecord *nftl, int block)
{
MTD_ERASE(nftl->mbd.mtd, instr);
if (instr->state == MTD_ERASE_FAILED) {
+ /* could not format, FixMe: We should update the BadUnitTable
+ both in memory and on disk */
printk("Error while formatting block %d\n", block);
- goto fail;
- }
-
+ return -1;
+ } else {
/* increase and write Wear-Leveling info */
nb_erases = le32_to_cpu(uci.WearInfo);
nb_erases++;
* FixMe: is this check really necessary ? since we have check the
* return code after the erase operation. */
if (check_free_sectors(nftl, instr->addr, nftl->EraseSize, 1) != 0)
- goto fail;
+ return -1;
uci.WearInfo = le32_to_cpu(nb_erases);
if (MTD_WRITEOOB(nftl->mbd.mtd, block * nftl->EraseSize + SECTORSIZE + 8, 8,
&retlen, (char *)&uci) < 0)
- goto fail;
+ return -1;
return 0;
-fail:
- /* could not format, update the bad block table (caller is responsible
- for setting the ReplUnitTable to BLOCK_RESERVED on failure) */
- nftl->mbd.mtd->block_markbad(nftl->mbd.mtd, instr->addr);
- return -1;
+ }
}
/* check_sectors_in_chain: Check that each sector of a Virtual Unit Chain is correct.
printk("Formatting block %d\n", block);
if (NFTL_formatblock(nftl, block) < 0) {
- /* cannot format !!!! Mark it as Bad Unit */
+ /* cannot format !!!! Mark it as Bad Unit,
+ FixMe: update the BadUnitTable on disk */
nftl->ReplUnitTable[block] = BLOCK_RESERVED;
} else {
nftl->ReplUnitTable[block] = BLOCK_FREE;
/*
- * $Id: redboot.c,v 1.15 2004/08/10 07:55:16 dwmw2 Exp $
+ * $Id: redboot.c,v 1.11 2003/05/21 10:39:26 dwmw2 Exp $
*
* Parse RedBoot-style Flash Image System (FIS) tables and
* produce a Linux partition array to match.
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
-#include <linux/vmalloc.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
char *names;
char *nullname;
int namelen = 0;
- int nulllen = 0;
-#ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED
static char nullstring[] = "unallocated";
-#endif
- buf = vmalloc(master->erasesize);
+ buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!buf)
return -ENOMEM;
/* Read the start of the last erase block */
ret = master->read(master, master->size - master->erasesize,
- master->erasesize, &retlen, (void *)buf);
+ PAGE_SIZE, &retlen, (void *)buf);
if (ret)
goto out;
- if (retlen != master->erasesize) {
+ if (retlen != PAGE_SIZE) {
ret = -EIO;
goto out;
}
goto out;
}
- for (i = 0; i < master->erasesize / sizeof(struct fis_image_desc); i++) {
+ for (i = 0; i < PAGE_SIZE / sizeof(struct fis_image_desc); i++) {
struct fis_list *new_fl, **prev;
if (buf[i].name[0] == 0xff)
nrparts++;
}
-#ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED
- if (fl->img->flash_base) {
+ if (fl->img->flash_base)
nrparts++;
- nulllen = sizeof(nullstring);
- }
for (tmp_fl = fl; tmp_fl->next; tmp_fl = tmp_fl->next) {
- if (tmp_fl->img->flash_base + tmp_fl->img->size + master->erasesize <= tmp_fl->next->img->flash_base) {
+ if (tmp_fl->img->flash_base + tmp_fl->img->size + master->erasesize < tmp_fl->next->img->flash_base)
nrparts++;
- nulllen = sizeof(nullstring);
- }
}
-#endif
- parts = kmalloc(sizeof(*parts)*nrparts + nulllen + namelen, GFP_KERNEL);
+ parts = kmalloc(sizeof(*parts)*nrparts + sizeof(nullstring) + namelen, GFP_KERNEL);
if (!parts) {
ret = -ENOMEM;
goto out;
}
- memset(parts, 0, sizeof(*parts)*nrparts + nulllen + namelen);
+ memset(parts, 0, sizeof(*parts)*nrparts + namelen);
+ /* FIXME: Include nullname only if it's used */
nullname = (char *)&parts[nrparts];
-#ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED
- if (nulllen > 0) {
- strcpy(nullname, nullstring);
- }
-#endif
- names = nullname + nulllen;
+ sprintf(nullname, nullstring);
+ names = nullname + sizeof(nullstring);
i=0;
-#ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED
if (fl->img->flash_base) {
parts[0].name = nullname;
parts[0].size = fl->img->flash_base;
parts[0].offset = 0;
- i++;
}
-#endif
for ( ; i<nrparts; i++) {
parts[i].size = fl->img->size;
parts[i].offset = fl->img->flash_base;
parts[i].name = names;
strcpy(names, fl->img->name);
-#ifdef CONFIG_MTD_REDBOOT_PARTS_READONLY
- if (!memcmp(names, "RedBoot", 8) ||
- !memcmp(names, "RedBoot config", 15) ||
- !memcmp(names, "FIS directory", 14)) {
- parts[i].mask_flags = MTD_WRITEABLE;
- }
-#endif
names += strlen(names)+1;
-#ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED
- if(fl->next && fl->img->flash_base + fl->img->size + master->erasesize <= fl->next->img->flash_base) {
+ if(fl->next && fl->img->flash_base + fl->img->size + master->erasesize < fl->next->img->flash_base) {
i++;
parts[i].offset = parts[i-1].size + parts[i-1].offset;
parts[i].size = fl->next->img->flash_base - parts[i].offset;
parts[i].name = nullname;
}
-#endif
tmp_fl = fl;
fl = fl->next;
kfree(tmp_fl);
fl = fl->next;
kfree(old);
}
- vfree(buf);
+ kfree(buf);
return ret;
}
outb(PKT_BUF_SZ >> 8, ioaddr + TxFreeThreshold); /* Room for a packet. */
/* Clear the Tx ring. */
for (i = 0; i < TX_RING_SIZE; i++)
- vp->tx_skbuff[i] = NULL;
+ vp->tx_skbuff[i] = 0;
outl(0, ioaddr + DownListPtr);
}
/* Set receiver mode: presumably accept b-case and phys addr only. */
break; /* It still hasn't been processed. */
if (lp->tx_skbuff[entry]) {
dev_kfree_skb_irq(lp->tx_skbuff[entry]);
- lp->tx_skbuff[entry] = NULL;
+ lp->tx_skbuff[entry] = 0;
}
dirty_tx++;
}
for (i = 0; i < RX_RING_SIZE; i++)
if (vp->rx_skbuff[i]) {
dev_kfree_skb(vp->rx_skbuff[i]);
- vp->rx_skbuff[i] = NULL;
+ vp->rx_skbuff[i] = 0;
}
}
if (vp->full_bus_master_tx) { /* Free Boomerang bus master Tx buffers. */
for (i = 0; i < TX_RING_SIZE; i++)
if (vp->tx_skbuff[i]) {
dev_kfree_skb(vp->tx_skbuff[i]);
- vp->tx_skbuff[i] = NULL;
+ vp->tx_skbuff[i] = 0;
}
}
#endif
unsigned long flags;
- MPU_PORT(dev, PORT_RESET, NULL);
+ MPU_PORT(dev, PORT_RESET, 0);
udelay(100); /* Wait 100us - seems to help */
failed:
printk(KERN_CRIT "%s: Failed to initialise 82596\n", dev->name);
- MPU_PORT(dev, PORT_RESET, NULL);
+ MPU_PORT(dev, PORT_RESET, 0);
return -1;
}
config OAKNET
tristate "National DP83902AV (Oak ethernet) support"
- depends on NET_ETHERNET && PPC && BROKEN
+ depends on NET_ETHERNET && PPC
select CRC32
help
Say Y if your machine has this type of Ethernet network card.
config NE2000
tristate "NE2000/NE1000 support"
- depends on ISA || (Q40 && m)
select CRC32
---help---
If you have a network (Ethernet) card of this type, say Y and read
*/
#define ACE_MINI_SIZE 100
-#define ACE_MINI_BUFSIZE ACE_MINI_SIZE
-#define ACE_STD_BUFSIZE (ACE_STD_MTU + ETH_HLEN + 4)
-#define ACE_JUMBO_BUFSIZE (ACE_JUMBO_MTU + ETH_HLEN + 4)
+#define ACE_MINI_BUFSIZE (ACE_MINI_SIZE + 2 + 16)
+#define ACE_STD_BUFSIZE (ACE_STD_MTU + ETH_HLEN + 2+4+16)
+#define ACE_JUMBO_BUFSIZE (ACE_JUMBO_MTU + ETH_HLEN + 2+4+16)
/*
* There seems to be a magic difference in the effect between 995 and 996
ringp = &ap->skb->rx_std_skbuff[i];
mapping = pci_unmap_addr(ringp, mapping);
pci_unmap_page(ap->pdev, mapping,
- ACE_STD_BUFSIZE,
+ ACE_STD_BUFSIZE - (2 + 16),
PCI_DMA_FROMDEVICE);
ap->rx_std_ring[i].size = 0;
ringp = &ap->skb->rx_mini_skbuff[i];
mapping = pci_unmap_addr(ringp,mapping);
pci_unmap_page(ap->pdev, mapping,
- ACE_MINI_BUFSIZE,
+ ACE_MINI_BUFSIZE - (2 + 16),
PCI_DMA_FROMDEVICE);
ap->rx_mini_ring[i].size = 0;
ringp = &ap->skb->rx_jumbo_skbuff[i];
mapping = pci_unmap_addr(ringp, mapping);
pci_unmap_page(ap->pdev, mapping,
- ACE_JUMBO_BUFSIZE,
+ ACE_JUMBO_BUFSIZE - (2 + 16),
PCI_DMA_FROMDEVICE);
ap->rx_jumbo_ring[i].size = 0;
set_aceaddr(&info->stats2_ptr, (dma_addr_t) tmp_ptr);
set_aceaddr(&info->rx_std_ctrl.rngptr, ap->rx_ring_base_dma);
- info->rx_std_ctrl.max_len = ACE_STD_BUFSIZE;
+ info->rx_std_ctrl.max_len = ACE_STD_MTU + ETH_HLEN + 4;
info->rx_std_ctrl.flags =
RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | ACE_RCB_VLAN_FLAG;
struct rx_desc *rd;
dma_addr_t mapping;
- skb = alloc_skb(ACE_STD_BUFSIZE + NET_IP_ALIGN, GFP_ATOMIC);
+ skb = alloc_skb(ACE_STD_BUFSIZE, GFP_ATOMIC);
if (!skb)
break;
- skb_reserve(skb, NET_IP_ALIGN);
+ /*
+ * Make sure IP header starts on a fresh cache line.
+ */
+ skb_reserve(skb, 2 + 16);
mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
offset_in_page(skb->data),
- ACE_STD_BUFSIZE,
+ ACE_STD_BUFSIZE - (2 + 16),
PCI_DMA_FROMDEVICE);
ap->skb->rx_std_skbuff[idx].skb = skb;
pci_unmap_addr_set(&ap->skb->rx_std_skbuff[idx],
rd = &ap->rx_std_ring[idx];
set_aceaddr(&rd->addr, mapping);
- rd->size = ACE_STD_BUFSIZE;
+ rd->size = ACE_STD_MTU + ETH_HLEN + 4;
rd->idx = idx;
idx = (idx + 1) % RX_STD_RING_ENTRIES;
}
struct rx_desc *rd;
dma_addr_t mapping;
- skb = alloc_skb(ACE_MINI_BUFSIZE + NET_IP_ALIGN, GFP_ATOMIC);
+ skb = alloc_skb(ACE_MINI_BUFSIZE, GFP_ATOMIC);
if (!skb)
break;
- skb_reserve(skb, NET_IP_ALIGN);
+ /*
+ * Make sure the IP header ends up on a fresh cache line
+ */
+ skb_reserve(skb, 2 + 16);
mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
offset_in_page(skb->data),
- ACE_MINI_BUFSIZE,
+ ACE_MINI_BUFSIZE - (2 + 16),
PCI_DMA_FROMDEVICE);
ap->skb->rx_mini_skbuff[idx].skb = skb;
pci_unmap_addr_set(&ap->skb->rx_mini_skbuff[idx],
rd = &ap->rx_mini_ring[idx];
set_aceaddr(&rd->addr, mapping);
- rd->size = ACE_MINI_BUFSIZE;
+ rd->size = ACE_MINI_SIZE;
rd->idx = idx;
idx = (idx + 1) % RX_MINI_RING_ENTRIES;
}
struct rx_desc *rd;
dma_addr_t mapping;
- skb = alloc_skb(ACE_JUMBO_BUFSIZE + NET_IP_ALIGN, GFP_ATOMIC);
+ skb = alloc_skb(ACE_JUMBO_BUFSIZE, GFP_ATOMIC);
if (!skb)
break;
- skb_reserve(skb, NET_IP_ALIGN);
+ /*
+ * Make sure the IP header ends up on a fresh cache line
+ */
+ skb_reserve(skb, 2 + 16);
mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
offset_in_page(skb->data),
- ACE_JUMBO_BUFSIZE,
+ ACE_JUMBO_BUFSIZE - (2 + 16),
PCI_DMA_FROMDEVICE);
ap->skb->rx_jumbo_skbuff[idx].skb = skb;
pci_unmap_addr_set(&ap->skb->rx_jumbo_skbuff[idx],
rd = &ap->rx_jumbo_ring[idx];
set_aceaddr(&rd->addr, mapping);
- rd->size = ACE_JUMBO_BUFSIZE;
+ rd->size = ACE_JUMBO_MTU + ETH_HLEN + 4;
rd->idx = idx;
idx = (idx + 1) % RX_JUMBO_RING_ENTRIES;
}
*/
case 0:
rip = &ap->skb->rx_std_skbuff[skbidx];
- mapsize = ACE_STD_BUFSIZE;
+ mapsize = ACE_STD_BUFSIZE - (2 + 16);
rxdesc = &ap->rx_std_ring[skbidx];
std_count++;
break;
case BD_FLG_JUMBO:
rip = &ap->skb->rx_jumbo_skbuff[skbidx];
- mapsize = ACE_JUMBO_BUFSIZE;
+ mapsize = ACE_JUMBO_BUFSIZE - (2 + 16);
rxdesc = &ap->rx_jumbo_ring[skbidx];
atomic_dec(&ap->cur_jumbo_bufs);
break;
case BD_FLG_MINI:
rip = &ap->skb->rx_mini_skbuff[skbidx];
- mapsize = ACE_MINI_BUFSIZE;
+ mapsize = ACE_MINI_BUFSIZE - (2 + 16);
rxdesc = &ap->rx_mini_ring[skbidx];
mini_count++;
break;
#define tigonFwBssAddr 0x00015dd0
#define tigonFwBssLen 0x2080
#ifdef CONFIG_ACENIC_OMIT_TIGON_I
-#define tigonFwText NULL
-#define tigonFwData NULL
-#define tigonFwRodata NULL
+#define tigonFwText 0
+#define tigonFwData 0
+#define tigonFwRodata 0
#else
/* Generated by genfw.c */
static u32 tigonFwText[(MAX_TEXT_LEN/4) + 1] __initdata = {
lp->tx_skbuff[tx_index]->len,
PCI_DMA_TODEVICE);
dev_kfree_skb_irq (lp->tx_skbuff[tx_index]);
- lp->tx_skbuff[tx_index] = NULL;
+ lp->tx_skbuff[tx_index] = 0;
lp->tx_dma_addr[tx_index] = 0;
}
lp->tx_complete_idx++;
if( dev->mc_count == 0 ){
/* get only own packets */
mc_filter[1] = mc_filter[0] = 0;
- lp->mc_list = NULL;
+ lp->mc_list = 0;
lp->options &= ~OPTION_MULTICAST_ENABLE;
amd8111e_writeq(*(u64*)mc_filter,lp->mmio + LADRF);
/* disable promiscous mode */
return 0;
}
+#ifdef CONFIG_NET_FASTROUTE
+static int bond_accept_fastpath(struct net_device *bond_dev, struct dst_entry *dst)
+{
+ return -1;
+}
+#endif
+
/*------------------------- Device initialization ---------------------------*/
/*
bond_set_mode_ops(bond_dev, bond->params.mode);
bond_dev->destructor = free_netdev;
+#ifdef CONFIG_NET_FASTROUTE
+ bond_dev->accept_fastpath = bond_accept_fastpath;
+#endif
/* Initialize the device options */
bond_dev->tx_queue_len = 0;
{
if (lp->dma_buff) {
free_pages((unsigned long)(lp->dma_buff), get_order(lp->dmasize * 1024));
- lp->dma_buff = NULL;
+ lp->dma_buff = 0;
}
}
#endif
* $Id: asstruct.h,v 1.1.1.1 1994/10/23 05:08:32 rick Exp $
*/
-#ifdef ASSEMBLER
+#if ASSEMBLER
# define MO(t,a) (a)
# define VMO(t,a) (a)
/************************************************************************/
typedef volatile struct _I596_RBD
{
-#ifdef INTEL_RETENTIVE
+#if INTEL_RETENTIVE
ushort count; /* Length of data in buf */
ushort offset;
#else
#endif
vol struct _I596_RBD *next; /* Next buffer descriptor in list */
uchar *buf; /* Data buffer */
-#ifdef INTEL_RETENTIVE
+#if INTEL_RETENTIVE
ushort size; /* Size of buf (constant) */
ushort zero;
#else
/* Initialize Tx descriptors, TFDListPtr leaves in start_xmit(). */
for (i = 0; i < TX_RING_SIZE; i++) {
- np->tx_skbuff[i] = NULL;
+ np->tx_skbuff[i] = 0;
np->tx_ring[i].status = cpu_to_le64 (TFDDone);
np->tx_ring[i].next_desc = cpu_to_le64 (np->tx_ring_dma +
((i+1)%TX_RING_SIZE) *
sizeof (struct netdev_desc));
np->rx_ring[i].status = 0;
np->rx_ring[i].fraginfo = 0;
- np->rx_skbuff[i] = NULL;
+ np->rx_skbuff[i] = 0;
}
/* Allocate the rx buffers */
else
dev_kfree_skb (skb);
- np->tx_skbuff[entry] = NULL;
+ np->tx_skbuff[entry] = 0;
entry = (entry + 1) % TX_RING_SIZE;
tx_use++;
}
pci_unmap_single (np->pdev, np->rx_ring[i].fraginfo,
skb->len, PCI_DMA_FROMDEVICE);
dev_kfree_skb (skb);
- np->rx_skbuff[i] = NULL;
+ np->rx_skbuff[i] = 0;
}
}
for (i = 0; i < TX_RING_SIZE; i++) {
pci_unmap_single (np->pdev, np->tx_ring[i].fraginfo,
skb->len, PCI_DMA_TODEVICE);
dev_kfree_skb (skb);
- np->tx_skbuff[i] = NULL;
+ np->tx_skbuff[i] = 0;
}
}
{
}
+#ifdef CONFIG_NET_FASTROUTE
+static int dummy_accept_fastpath(struct net_device *dev, struct dst_entry *dst)
+{
+ return -1;
+}
+#endif
+
static void __init dummy_setup(struct net_device *dev)
{
/* Initialize the device structure. */
dev->hard_start_xmit = dummy_xmit;
dev->set_multicast_list = set_multicast_list;
dev->set_mac_address = dummy_set_address;
+#ifdef CONFIG_NET_FASTROUTE
+ dev->accept_fastpath = dummy_accept_fastpath;
+#endif
/* Fill in device structure with ethernet-generic values. */
ether_setup(dev);
#include "e1000_hw.h"
-#ifdef DBG
+#if DBG
#define E1000_DBG(args...) printk(KERN_DEBUG "e1000: " args)
#else
#define E1000_DBG(args...)
#define MSGOUT(S, A, B) printk(KERN_DEBUG S "\n", A, B)
-#ifdef DBG
+#if DBG
#define DEBUGOUT(S) printk(KERN_DEBUG S "\n")
#define DEBUGOUT1(S, A...) printk(KERN_DEBUG S "\n", A)
#else
/* Set up the Tx queue early.. */
sp->cur_tx = 0;
sp->dirty_tx = 0;
- sp->last_cmd = NULL;
+ sp->last_cmd = 0;
sp->tx_full = 0;
sp->in_interrupt = 0;
le32_to_cpu(sp->tx_ring[entry].tx_buf_addr0),
sp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
dev_kfree_skb_irq(sp->tx_skbuff[entry]);
- sp->tx_skbuff[entry] = NULL;
+ sp->tx_skbuff[entry] = 0;
}
sp->dirty_tx++;
}
le32_to_cpu(sp->tx_ring[entry].tx_buf_addr0),
sp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
dev_kfree_skb_irq(sp->tx_skbuff[entry]);
- sp->tx_skbuff[entry] = NULL;
+ sp->tx_skbuff[entry] = 0;
}
dirty_tx++;
}
/* Free all the skbuffs in the Rx and Tx queues. */
for (i = 0; i < RX_RING_SIZE; i++) {
struct sk_buff *skb = sp->rx_skbuff[i];
- sp->rx_skbuff[i] = NULL;
+ sp->rx_skbuff[i] = 0;
/* Clear the Rx descriptors. */
if (skb) {
pci_unmap_single(sp->pdev,
for (i = 0; i < TX_RING_SIZE; i++) {
struct sk_buff *skb = sp->tx_skbuff[i];
- sp->tx_skbuff[i] = NULL;
+ sp->tx_skbuff[i] = 0;
/* Clear the Tx descriptors. */
if (skb) {
pci_unmap_single(sp->pdev,
last_cmd = sp->last_cmd;
sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
- sp->tx_skbuff[entry] = NULL; /* Redundant. */
+ sp->tx_skbuff[entry] = 0; /* Redundant. */
sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdConfigure);
sp->tx_ring[entry].link =
cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
last_cmd = sp->last_cmd;
sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
- sp->tx_skbuff[entry] = NULL;
+ sp->tx_skbuff[entry] = 0;
sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdMulticastList);
sp->tx_ring[entry].link =
cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
sp->last_cmd = mc_setup_frm;
/* Change the command to a NoOp, pointing to the CmdMulti command. */
- sp->tx_skbuff[entry] = NULL;
+ sp->tx_skbuff[entry] = 0;
sp->tx_ring[entry].status = cpu_to_le32(CmdNOp);
sp->tx_ring[entry].link = cpu_to_le32(mc_blk->frame_dma);
ep->rx_ring[i].buflength = cpu_to_le32(ep->rx_buf_sz);
ep->rx_ring[i].next = ep->rx_ring_dma +
(i+1)*sizeof(struct epic_rx_desc);
- ep->rx_skbuff[i] = NULL;
+ ep->rx_skbuff[i] = 0;
}
/* Mark the last entry as wrapping the ring. */
ep->rx_ring[i-1].next = ep->rx_ring_dma;
/* The Tx buffer descriptor is filled in as needed, but we
do need to clear the ownership bit. */
for (i = 0; i < TX_RING_SIZE; i++) {
- ep->tx_skbuff[i] = NULL;
+ ep->tx_skbuff[i] = 0;
ep->tx_ring[i].txstatus = 0x0000;
ep->tx_ring[i].next = ep->tx_ring_dma +
(i+1)*sizeof(struct epic_tx_desc);
pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr,
skb->len, PCI_DMA_TODEVICE);
dev_kfree_skb_irq(skb);
- ep->tx_skbuff[entry] = NULL;
+ ep->tx_skbuff[entry] = 0;
}
#ifndef final_version
/* Free all the skbuffs in the Rx queue. */
for (i = 0; i < RX_RING_SIZE; i++) {
skb = ep->rx_skbuff[i];
- ep->rx_skbuff[i] = NULL;
+ ep->rx_skbuff[i] = 0;
ep->rx_ring[i].rxstatus = 0; /* Not owned by Epic chip. */
ep->rx_ring[i].buflength = 0;
if (skb) {
}
for (i = 0; i < TX_RING_SIZE; i++) {
skb = ep->tx_skbuff[i];
- ep->tx_skbuff[i] = NULL;
+ ep->tx_skbuff[i] = 0;
if (!skb)
continue;
pci_unmap_single(ep->pci_dev, ep->tx_ring[i].bufaddr,
static int __eql_insert_slave(slave_queue_t *queue, slave_t *slave)
{
if (!eql_is_full(queue)) {
- slave_t *duplicate_slave = NULL;
+ slave_t *duplicate_slave = 0;
duplicate_slave = __eql_find_slave_dev(queue, slave->dev);
if (duplicate_slave != 0)
#include <linux/config.h>
#include <linux/module.h>
-#include <linux/moduleparam.h>
+
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
static int ethertap_debug;
static int max_taps = 1;
-module_param(max_taps, int, 0);
+MODULE_PARM(max_taps, "i");
MODULE_PARM_DESC(max_taps,"Max number of ethernet tap devices");
static struct net_device **tap_map; /* Returns the tap device for a given netlink */
hmp->tx_ring[entry].addr, skb->len,
PCI_DMA_TODEVICE);
dev_kfree_skb(skb);
- hmp->tx_skbuff[entry] = NULL;
+ hmp->tx_skbuff[entry] = 0;
}
hmp->tx_ring[entry].status_n_length = 0;
if (entry >= TX_RING_SIZE-1)
pci_unmap_single(hmp->pci_dev, hmp->tx_ring[i].addr,
skb->len, PCI_DMA_TODEVICE);
dev_kfree_skb(skb);
- hmp->tx_skbuff[i] = NULL;
+ hmp->tx_skbuff[i] = 0;
}
}
pci_unmap_single(hmp->pci_dev, hmp->rx_ring[i].addr,
hmp->rx_buf_sz, PCI_DMA_FROMDEVICE);
dev_kfree_skb(skb);
- hmp->rx_skbuff[i] = NULL;
+ hmp->rx_skbuff[i] = 0;
}
}
/* Fill in the Rx buffers. Handle allocation failure gracefully. */
/* Initialize all Rx descriptors. */
for (i = 0; i < RX_RING_SIZE; i++) {
hmp->rx_ring[i].status_n_length = 0;
- hmp->rx_skbuff[i] = NULL;
+ hmp->rx_skbuff[i] = 0;
}
/* Fill in the Rx buffers. Handle allocation failure gracefully. */
for (i = 0; i < RX_RING_SIZE; i++) {
hmp->rx_ring[RX_RING_SIZE-1].status_n_length |= cpu_to_le32(DescEndRing);
for (i = 0; i < TX_RING_SIZE; i++) {
- hmp->tx_skbuff[i] = NULL;
+ hmp->tx_skbuff[i] = 0;
hmp->tx_ring[i].status_n_length = 0;
}
/* Mark the last entry of the ring */
skb->len,
PCI_DMA_TODEVICE);
dev_kfree_skb_irq(skb);
- hmp->tx_skbuff[entry] = NULL;
+ hmp->tx_skbuff[entry] = 0;
}
hmp->tx_ring[entry].status_n_length = 0;
if (entry >= TX_RING_SIZE-1)
hmp->rx_ring[i].addr, hmp->rx_buf_sz,
PCI_DMA_FROMDEVICE);
dev_kfree_skb(skb);
- hmp->rx_skbuff[i] = NULL;
+ hmp->rx_skbuff[i] = 0;
}
}
for (i = 0; i < TX_RING_SIZE; i++) {
hmp->tx_ring[i].addr, skb->len,
PCI_DMA_TODEVICE);
dev_kfree_skb(skb);
- hmp->tx_skbuff[i] = NULL;
+ hmp->tx_skbuff[i] = 0;
}
}
write_lock(&disc_data_lock);
sp = tty->disc_data;
- tty->disc_data = NULL;
+ tty->disc_data = 0;
write_unlock(&disc_data_lock);
if (sp == 0)
return;
unregister_netdev(sp->dev);
}
-static int sp_set_mac_address(struct net_device *dev, void __user *addr)
+static int sp_set_mac_address(struct net_device *dev, void *addr)
{
return copy_from_user(dev->dev_addr, addr, AX25_ADDR_LEN) ? -EFAULT : 0;
}
switch(cmd) {
case SIOCGIFNAME:
- err = copy_to_user((void __user *) arg, sp->dev->name,
+ err = copy_to_user((void *) arg, sp->dev->name,
strlen(sp->dev->name) + 1) ? -EFAULT : 0;
break;
case SIOCGIFENCAP:
- err = put_user(0, (int __user *)arg);
+ err = put_user(0, (int *)arg);
break;
case SIOCSIFENCAP:
- if (get_user(tmp, (int __user *) arg)) {
+ if (get_user(tmp, (int *) arg)) {
err = -EFAULT;
break;
}
break;
case SIOCSIFHWADDR:
- err = sp_set_mac_address(sp->dev, (void __user *) arg);
+ err = sp_set_mac_address(sp->dev, (void *) arg);
break;
/* Allow stty to read, but not set, the serial port */
unregister_netdev(ax->dev);
- tty->disc_data = NULL;
+ tty->disc_data = 0;
ax->tty = NULL;
ax_free(ax);
}
-static int ax_set_mac_address(struct net_device *dev, void __user *addr)
+static int ax_set_mac_address(struct net_device *dev, void *addr)
{
if (copy_from_user(dev->dev_addr, addr, AX25_ADDR_LEN))
return -EFAULT;
/* Perform I/O control on an active ax25 channel. */
-static int ax25_disp_ioctl(struct tty_struct *tty, void *file, int cmd, void __user *arg)
+static int ax25_disp_ioctl(struct tty_struct *tty, void *file, int cmd, void *arg)
{
struct ax_disp *ax = (struct ax_disp *) tty->disc_data;
unsigned int tmp;
return 0;
case SIOCGIFENCAP:
- return put_user(4, (int __user *)arg);
+ return put_user(4, (int *)arg);
case SIOCSIFENCAP:
- if (get_user(tmp, (int __user *)arg))
+ if (get_user(tmp, (int *)arg))
return -EFAULT;
ax->mode = tmp;
ax->dev->addr_len = AX25_ADDR_LEN; /* sizeof an AX.25 addr */
* ------------------
*
* You can find a subset of the documentation in
- * Documentation/networking/z8530drv.txt.
+ * linux/Documentation/networking/z8530drv.txt.
*/
/*
}
/* Set the wrap registers for string I/O reads. */
- outw((HP_START_PG + TX_PAGES/2) | ((HP_STOP_PG - 1) << 8), ioaddr + 14);
+ outw((HP_START_PG + TX_2X_PAGES) | ((HP_STOP_PG - 1) << 8), ioaddr + 14);
/* Set the base address to point to the NIC, not the "real" base! */
dev->base_addr = ioaddr + NIC_OFFSET;
ei_status.name = name;
ei_status.word16 = 0; /* Agggghhhhh! Debug time: 2 days! */
ei_status.tx_start_page = HP_START_PG;
- ei_status.rx_start_page = HP_START_PG + TX_PAGES/2;
+ ei_status.rx_start_page = HP_START_PG + TX_2X_PAGES;
ei_status.stop_page = HP_STOP_PG;
ei_status.reset_8390 = &hpp_reset_8390;
ei_status.block_output = &hpp_mem_block_output;
ei_status.get_8390_hdr = &hpp_mem_get_8390_hdr;
dev->mem_start = mem_start;
- ei_status.rmem_start = dev->mem_start + TX_PAGES/2*256;
+ ei_status.rmem_start = dev->mem_start + TX_2X_PAGES*256;
dev->mem_end = ei_status.rmem_end
= dev->mem_start + (HP_STOP_PG - HP_START_PG)*256;
}
/* Set the wrap registers for programmed-I/O operation. */
outw(HW_Page, ioaddr + HP_PAGING);
- outw((HP_START_PG + TX_PAGES/2) | ((HP_STOP_PG - 1) << 8), ioaddr + 14);
+ outw((HP_START_PG + TX_2X_PAGES) | ((HP_STOP_PG - 1) << 8), ioaddr + 14);
/* Select the operational page. */
outw(Perf_Page, ioaddr + HP_PAGING);
config TOSHIBA_FIR
tristate "Toshiba Type-O IR Port"
- depends on IRDA && PCI && !64BIT
+ depends on IRDA && !64BIT
help
Say Y here if you want to build support for the Toshiba Type-O IR
and Donau oboe chipsets. These chipsets are used by the Toshiba
config VIA_FIR
tristate "VIA VT8231/VT1211 SIR/MIR/FIR"
- depends on IRDA && ISA && PCI
+ depends on IRDA && ISA
help
Say Y here if you want to build support for the VIA VT8231
and VIA VT1211 IrDA controllers, found on the motherboards using
/* Delay a few ms just to allow the reset to complete */
msec_delay(IXGB_DELAY_AFTER_RESET);
ctrl_reg = IXGB_READ_REG(hw, CTRL0);
-#ifdef DBG
+#if DBG
/* Make sure the self-clearing global reset bit did self clear */
ASSERT(!(ctrl_reg & IXGB_CTRL0_RST));
#endif
#define ASSERT(x) if(!(x)) BUG()
#define MSGOUT(S, A, B) printk(KERN_DEBUG S "\n", A, B)
-#ifdef DBG
+#if DBG
#define DEBUGOUT(S) printk(KERN_DEBUG S "\n")
#define DEBUGOUT1(S, A...) printk(KERN_DEBUG S "\n", A)
#else
/* Free all the skbuffs in the Rx and Tx queues. */
for (i = 0; i < RX_RING_SIZE; i++) {
struct sk_buff *skb = lp->rx_skbuff[i];
- lp->rx_skbuff[i] = NULL;
+ lp->rx_skbuff[i] = 0;
lp->rx_ring[i].base = 0; /* Not owned by LANCE chip. */
if (skb)
dev_kfree_skb_any(skb);
/* The Tx buffer address is filled in as needed, but we do need to clear
the upper ownership bit. */
for (i = 0; i < TX_RING_SIZE; i++) {
- lp->tx_skbuff[i] = NULL;
+ lp->tx_skbuff[i] = 0;
lp->tx_ring[i].base = 0;
}
in the bounce buffer. */
if (lp->tx_skbuff[entry]) {
dev_kfree_skb_irq(lp->tx_skbuff[entry]);
- lp->tx_skbuff[entry] = NULL;
+ lp->tx_skbuff[entry] = 0;
}
dirty_tx++;
}
#include <asm/cache.h>
#include <asm/parisc-device.h>
-#define LASI_82596_DRIVER_VERSION "LASI 82596 driver - Revision: 1.30"
+static char version[] __devinitdata =
+ "82596.c $Revision: 1.29 $\n";
/* DEBUG flags
*/
do { dma_cache_sync((void *)addr, len, DMA_TO_DEVICE); } while (0)
#define CHECK_INV(addr,len) \
- do { dma_cache_sync((void *)addr, len, DMA_FROM_DEVICE); } while(0)
+ do { dma_cache_sync((void *)addr,len, DMA_FROM_DEVICE); } while(0)
#define CHECK_WBACK_INV(addr,len) \
- do { dma_cache_sync((void *)addr, len, DMA_BIDIRECTIONAL); } while (0)
+ do { dma_cache_sync((void *)addr,len, DMA_BIDIRECTIONAL); } while (0)
#define PA_I82596_RESET 0 /* Offsets relative to LASI-LAN-Addr.*/
rfd = lp->rfd_head;
printk("rfd_head = %p\n", rfd);
do {
- printk(" %p .stat %04x, .cmd %04x, b_next %08x, rbd %08x,"
+ printk (" %p .stat %04x, .cmd %04x, b_next %08x, rbd %08x,"
" count %04x\n",
rfd, rfd->stat, rfd->cmd, rfd->b_next, rfd->rbd,
rfd->count);
struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ + 4);
if (skb == NULL)
- panic("%s: alloc_skb() failed", __FILE__);
+ panic("82596: alloc_skb() failed");
skb_reserve(skb, 2);
dma_addr = dma_map_single(lp->dev, skb->tail,PKT_BUF_SZ,
DMA_FROM_DEVICE);
disable_irq(dev->irq); /* disable IRQs from LAN */
DEB(DEB_INIT,
- printk("RESET 82596 port: %p (with IRQ %d disabled)\n",
- (void*)(dev->base_addr + PA_I82596_RESET),
+ printk("RESET 82596 port: %08lX (with IRQ%d disabled)\n",
+ dev->base_addr + PA_I82596_RESET,
dev->irq));
gsc_writel(0, (void*)(dev->base_addr + PA_I82596_RESET)); /* Hard Reset */
lp->cmd_head = NULL;
lp->scb.cmd = I596_NULL;
- DEB(DEB_INIT, printk("%s: starting i82596.\n", dev->name));
+ DEB(DEB_INIT,printk("%s: starting i82596.\n", dev->name));
CHECK_WBACK(&(lp->scp), sizeof(struct i596_scp));
CHECK_WBACK(&(lp->iscp), sizeof(struct i596_iscp));
CA(dev);
- if (wait_istat(dev, lp, 1000, "initialization timed out"))
+ if (wait_istat(dev,lp,1000,"initialization timed out"))
goto failed;
- DEB(DEB_INIT, printk("%s: i82596 initialization successful\n", dev->name));
+ DEB(DEB_INIT,printk("%s: i82596 initialization successful\n", dev->name));
/* Ensure rx frame/buffer descriptors are tidy */
rebuild_rx_bufs(dev);
enable_irq(dev->irq); /* enable IRQs from LAN */
- DEB(DEB_INIT, printk("%s: queuing CmdConfigure\n", dev->name));
+ DEB(DEB_INIT,printk("%s: queuing CmdConfigure\n", dev->name));
memcpy(lp->cf_cmd.i596_config, init_setup, 14);
lp->cf_cmd.cmd.command = CmdConfigure;
CHECK_WBACK(&(lp->cf_cmd), sizeof(struct cf_cmd));
i596_add_cmd(dev, &lp->cf_cmd.cmd);
- DEB(DEB_INIT, printk("%s: queuing CmdSASetup\n", dev->name));
+ DEB(DEB_INIT,printk("%s: queuing CmdSASetup\n", dev->name));
memcpy(lp->sa_cmd.eth_addr, dev->dev_addr, 6);
lp->sa_cmd.cmd.command = CmdSASetup;
CHECK_WBACK(&(lp->sa_cmd), sizeof(struct sa_cmd));
i596_add_cmd(dev, &lp->sa_cmd.cmd);
- DEB(DEB_INIT, printk("%s: queuing CmdTDR\n", dev->name));
+ DEB(DEB_INIT,printk("%s: queuing CmdTDR\n", dev->name));
lp->tdr_cmd.cmd.command = CmdTDR;
CHECK_WBACK(&(lp->tdr_cmd), sizeof(struct tdr_cmd));
i596_add_cmd(dev, &lp->tdr_cmd.cmd);
spin_lock_irqsave (&lp->lock, flags);
- if (wait_cmd(dev, lp, 1000, "timed out waiting to issue RX_START")) {
+ if (wait_cmd(dev,lp,1000,"timed out waiting to issue RX_START")) {
spin_unlock_irqrestore (&lp->lock, flags);
goto failed;
}
- DEB(DEB_INIT, printk("%s: Issuing RX_START\n", dev->name));
+ DEB(DEB_INIT,printk("%s: Issuing RX_START\n", dev->name));
lp->scb.command = RX_START;
lp->scb.rfd = WSWAPrfd(virt_to_dma(lp,lp->rfds));
CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
spin_unlock_irqrestore (&lp->lock, flags);
- if (wait_cmd(dev, lp, 1000, "RX_START not processed"))
+ if (wait_cmd(dev,lp,1000,"RX_START not processed"))
goto failed;
- DEB(DEB_INIT, printk("%s: Receive unit started OK\n", dev->name));
+ DEB(DEB_INIT,printk("%s: Receive unit started OK\n", dev->name));
return 0;
struct i596_rbd *rbd;
int frames = 0;
- DEB(DEB_RXFRAME, printk("i596_rx(), rfd_head %p, rbd_head %p\n",
+ DEB(DEB_RXFRAME,printk ("i596_rx(), rfd_head %p, rbd_head %p\n",
lp->rfd_head, lp->rbd_head));
memory_squeeze:
if (skb == NULL) {
/* XXX tulip.c can defer packets here!! */
- printk("%s: i596_rx Memory squeeze, dropping packet.\n", dev->name);
+ printk ("%s: i596_rx Memory squeeze, dropping packet.\n", dev->name);
lp->stats.rx_dropped++;
}
else {
CHECK_INV(rfd, sizeof(struct i596_rfd));
}
- DEB(DEB_RXFRAME, printk("frames %d\n", frames));
+ DEB(DEB_RXFRAME,printk ("frames %d\n", frames));
return 0;
}
CHECK_WBACK_INV(ptr, sizeof(struct i596_cmd));
}
- wait_cmd(dev, lp, 100, "i596_cleanup_cmd timed out");
+ wait_cmd(dev,lp,100,"i596_cleanup_cmd timed out");
lp->scb.cmd = I596_NULL;
CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
}
{
unsigned long flags;
- DEB(DEB_RESET, printk("i596_reset\n"));
+ DEB(DEB_RESET,printk("i596_reset\n"));
spin_lock_irqsave (&lp->lock, flags);
- wait_cmd(dev, lp, 100, "i596_reset timed out");
+ wait_cmd(dev,lp,100,"i596_reset timed out");
netif_stop_queue(dev);
CA(dev);
/* wait for shutdown */
- wait_cmd(dev, lp, 1000, "i596_reset 2 timed out");
+ wait_cmd(dev,lp,1000,"i596_reset 2 timed out");
spin_unlock_irqrestore (&lp->lock, flags);
i596_cleanup_cmd(dev,lp);
struct i596_private *lp = dev->priv;
unsigned long flags;
- DEB(DEB_ADDCMD, printk("i596_add_cmd cmd_head %p\n", lp->cmd_head));
+ DEB(DEB_ADDCMD,printk("i596_add_cmd cmd_head %p\n", lp->cmd_head));
cmd->status = 0;
cmd->command |= (CMD_EOL | CMD_INTR);
CHECK_WBACK(lp->cmd_tail, sizeof(struct i596_cmd));
} else {
lp->cmd_head = cmd;
- wait_cmd(dev, lp, 100, "i596_add_cmd timed out");
+ wait_cmd(dev,lp,100,"i596_add_cmd timed out");
lp->scb.cmd = WSWAPcmd(virt_to_dma(lp,&cmd->status));
lp->scb.command = CUC_START;
CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
static int i596_open(struct net_device *dev)
{
- DEB(DEB_OPEN, printk("%s: i596_open() irq %d.\n", dev->name, dev->irq));
+ DEB(DEB_OPEN,printk("%s: i596_open() irq %d.\n", dev->name, dev->irq));
if (request_irq(dev->irq, &i596_interrupt, 0, "i82596", dev)) {
printk("%s: IRQ %d not free\n", dev->name, dev->irq);
struct i596_private *lp = dev->priv;
/* Transmitter timeout, serious problems. */
- DEB(DEB_ERRORS, printk("%s: transmit timed out, status resetting.\n",
+ DEB(DEB_ERRORS,printk("%s: transmit timed out, status resetting.\n",
dev->name));
lp->stats.tx_errors++;
/* Try to restart the adaptor */
if (lp->last_restart == lp->stats.tx_packets) {
- DEB(DEB_ERRORS, printk("Resetting board.\n"));
+ DEB(DEB_ERRORS,printk ("Resetting board.\n"));
/* Shutdown and restart */
i596_reset (dev, lp);
} else {
/* Issue a channel attention signal */
- DEB(DEB_ERRORS, printk("Kicking board.\n"));
+ DEB(DEB_ERRORS,printk ("Kicking board.\n"));
lp->scb.command = CUC_START | RX_START;
CHECK_WBACK_INV(&(lp->scb), sizeof(struct i596_scb));
CA (dev);
short length = skb->len;
dev->trans_start = jiffies;
- DEB(DEB_STARTTX, printk("%s: i596_start_xmit(%x,%p) called\n", dev->name,
+ DEB(DEB_STARTTX,printk("%s: i596_start_xmit(%x,%p) called\n", dev->name,
skb->len, skb->data));
if (length < ETH_ZLEN) {
tbd = lp->tbds + lp->next_tx_cmd;
if (tx_cmd->cmd.command) {
- DEB(DEB_ERRORS, printk("%s: xmit ring full, dropping packet.\n",
+ DEB(DEB_ERRORS,printk ("%s: xmit ring full, dropping packet.\n",
dev->name));
lp->stats.tx_dropped++;
/* This lot is ensure things have been cache line aligned. */
if (sizeof(struct i596_rfd) != 32) {
printk("82596: sizeof(struct i596_rfd) = %d\n",
- (int)sizeof(struct i596_rfd));
+ sizeof(struct i596_rfd));
return -ENODEV;
}
if ((sizeof(struct i596_rbd) % 32) != 0) {
printk("82596: sizeof(struct i596_rbd) = %d\n",
- (int)sizeof(struct i596_rbd));
+ sizeof(struct i596_rbd));
return -ENODEV;
}
if ((sizeof(struct tx_cmd) % 32) != 0) {
printk("82596: sizeof(struct tx_cmd) = %d\n",
- (int)sizeof(struct tx_cmd));
+ sizeof(struct tx_cmd));
return -ENODEV;
}
if (sizeof(struct i596_tbd) != 32) {
printk("82596: sizeof(struct i596_tbd) = %d\n",
- (int)sizeof(struct i596_tbd));
+ sizeof(struct i596_tbd));
return -ENODEV;
}
#ifndef __LP64__
if (sizeof(struct i596_private) > 4096) {
printk("82596: sizeof(struct i596_private) = %d\n",
- (int)sizeof(struct i596_private));
+ sizeof(struct i596_private));
return -ENODEV;
}
#endif
for (i=0; i < 6; i++) {
eth_addr[i] = gsc_readb(LAN_PROM_ADDR + i);
}
- printk(KERN_INFO "%s: MAC of HP700 LAN read from EEPROM\n", __FILE__);
+ printk("82596.c: MAC of HP700 LAN read from EEPROM\n");
}
dev->mem_start = (unsigned long) dma_alloc_noncoherent(gen_dev,
sizeof(struct i596_private), &dma_addr, GFP_KERNEL);
if (!dev->mem_start) {
- printk(KERN_ERR "%s: Couldn't get shared memory\n", __FILE__);
+ printk("%s: Couldn't get shared memory\n", dev->name);
return -ENOMEM;
}
+ DEB(DEB_PROBE,printk("%s: 82596 at %#3lx,", dev->name, dev->base_addr));
+
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = eth_addr[i];
+ DEB(DEB_PROBE,printk(" %2.2X", dev->dev_addr[i] = eth_addr[i]));
+
+ DEB(DEB_PROBE,printk(" IRQ %d.\n", dev->irq));
+
+ DEB(DEB_PROBE,printk(version));
/* The 82596-specific entries in the device structure. */
dev->open = i596_open;
dev->priv = (void *)(dev->mem_start);
lp = dev->priv;
+ DEB(DEB_INIT,printk ("%s: lp at 0x%08lx (%d bytes), lp->scb at 0x%08lx\n",
+ dev->name, (unsigned long)lp,
+ sizeof(struct i596_private), (unsigned long)&lp->scb));
memset(lp, 0, sizeof(struct i596_private));
lp->scb.command = 0;
CHECK_WBACK_INV(dev->mem_start, sizeof(struct i596_private));
- i = register_netdev(dev);
- if (i) {
- lp = dev->priv;
- dma_free_noncoherent(lp->dev, sizeof(struct i596_private),
- (void *)dev->mem_start, lp->dma_addr);
- return i;
- };
-
- DEB(DEB_PROBE, printk(KERN_INFO "%s: 82596 at %#3lx,", dev->name, dev->base_addr));
- for (i = 0; i < 6; i++)
- DEB(DEB_PROBE, printk(" %2.2X", dev->dev_addr[i]));
- DEB(DEB_PROBE, printk(" IRQ %d.\n", dev->irq));
- DEB(DEB_INIT, printk(KERN_INFO "%s: lp at 0x%p (%d bytes), lp->scb at 0x%p\n",
- dev->name, lp, (int)sizeof(struct i596_private), &lp->scb));
-
return 0;
}
unsigned short status, ack_cmd = 0;
if (dev == NULL) {
- printk("%s: irq %d for unknown device.\n", __FUNCTION__, irq);
+ printk("i596_interrupt(): irq %d for unknown device.\n", irq);
return IRQ_NONE;
}
spin_lock (&lp->lock);
- wait_cmd(dev, lp, 100, "i596 interrupt, timeout");
+ wait_cmd(dev,lp,100,"i596 interrupt, timeout");
status = lp->scb.status;
- DEB(DEB_INTS, printk("%s: i596 interrupt, IRQ %d, status %4.4x.\n",
+ DEB(DEB_INTS,printk("%s: i596 interrupt, IRQ %d, status %4.4x.\n",
dev->name, irq, status));
ack_cmd = status & 0xf000;
struct i596_cmd *ptr;
if ((status & 0x8000))
- DEB(DEB_INTS, printk("%s: i596 interrupt completed command.\n", dev->name));
+ DEB(DEB_INTS,printk("%s: i596 interrupt completed command.\n", dev->name));
if ((status & 0x2000))
- DEB(DEB_INTS, printk("%s: i596 interrupt command unit inactive %x.\n", dev->name, status & 0x0700));
+ DEB(DEB_INTS,printk("%s: i596 interrupt command unit inactive %x.\n", dev->name, status & 0x0700));
while (lp->cmd_head != NULL) {
CHECK_INV(lp->cmd_head, sizeof(struct i596_cmd));
ptr = lp->cmd_head;
- DEB(DEB_STATUS, printk("cmd_head->status = %04x, ->command = %04x\n",
+ DEB(DEB_STATUS,printk("cmd_head->status = %04x, ->command = %04x\n",
lp->cmd_head->status, lp->cmd_head->command));
lp->cmd_head = ptr->v_next;
lp->cmd_backlog--;
struct sk_buff *skb = tx_cmd->skb;
if ((ptr->status) & STAT_OK) {
- DEB(DEB_TXADDR, print_eth(skb->data, "tx-done"));
+ DEB(DEB_TXADDR,print_eth(skb->data, "tx-done"));
} else {
lp->stats.tx_errors++;
if ((ptr->status) & 0x0020)
unsigned short status = ((struct tdr_cmd *)ptr)->status;
if (status & 0x8000) {
- DEB(DEB_ANY, printk("%s: link ok.\n", dev->name));
+ DEB(DEB_ANY,printk("%s: link ok.\n", dev->name));
} else {
if (status & 0x4000)
printk("%s: Transceiver problem.\n", dev->name);
if (status & 0x1000)
printk("%s: Short circuit.\n", dev->name);
- DEB(DEB_TDR, printk("%s: Time %d.\n", dev->name, status & 0x07ff));
+ DEB(DEB_TDR,printk("%s: Time %d.\n", dev->name, status & 0x07ff));
}
break;
}
}
if ((status & 0x1000) || (status & 0x4000)) {
if ((status & 0x4000))
- DEB(DEB_INTS, printk("%s: i596 interrupt received a frame.\n", dev->name));
+ DEB(DEB_INTS,printk("%s: i596 interrupt received a frame.\n", dev->name));
i596_rx(dev);
/* Only RX_START if stopped - RGH 07-07-96 */
if (status & 0x1000) {
if (netif_running(dev)) {
- DEB(DEB_ERRORS, printk("%s: i596 interrupt receive unit inactive, status 0x%x\n", dev->name, status));
+ DEB(DEB_ERRORS,printk("%s: i596 interrupt receive unit inactive, status 0x%x\n", dev->name, status));
ack_cmd |= RX_START;
lp->stats.rx_errors++;
lp->stats.rx_fifo_errors++;
}
}
}
- wait_cmd(dev, lp, 100, "i596 interrupt, timeout");
+ wait_cmd(dev,lp,100,"i596 interrupt, timeout");
lp->scb.command = ack_cmd;
CHECK_WBACK(&lp->scb, sizeof(struct i596_scb));
CA(dev);
- wait_cmd(dev, lp, 100, "i596 interrupt, exit timeout");
- DEB(DEB_INTS, printk("%s: exiting interrupt.\n", dev->name));
+ wait_cmd(dev,lp,100,"i596 interrupt, exit timeout");
+ DEB(DEB_INTS,printk("%s: exiting interrupt.\n", dev->name));
spin_unlock (&lp->lock);
return IRQ_HANDLED;
netif_stop_queue(dev);
- DEB(DEB_INIT, printk("%s: Shutting down ethercard, status was %4.4x.\n",
+ DEB(DEB_INIT,printk("%s: Shutting down ethercard, status was %4.4x.\n",
dev->name, lp->scb.status));
spin_lock_irqsave(&lp->lock, flags);
- wait_cmd(dev, lp, 100, "close1 timed out");
+ wait_cmd(dev,lp,100,"close1 timed out");
lp->scb.command = CUC_ABORT | RX_ABORT;
CHECK_WBACK(&lp->scb, sizeof(struct i596_scb));
CA(dev);
- wait_cmd(dev, lp, 100, "close2 timed out");
+ wait_cmd(dev,lp,100,"close2 timed out");
spin_unlock_irqrestore(&lp->lock, flags);
DEB(DEB_STRUCT,i596_display_data(dev));
i596_cleanup_cmd(dev,lp);
struct i596_private *lp = dev->priv;
int config = 0, cnt;
- DEB(DEB_MULTI, printk("%s: set multicast list, %d entries, promisc %s, allmulti %s\n",
- dev->name, dev->mc_count, dev->flags & IFF_PROMISC ? "ON" : "OFF",
- dev->flags & IFF_ALLMULTI ? "ON" : "OFF"));
+ DEB(DEB_MULTI,printk("%s: set multicast list, %d entries, promisc %s, allmulti %s\n", dev->name, dev->mc_count, dev->flags & IFF_PROMISC ? "ON" : "OFF", dev->flags & IFF_ALLMULTI ? "ON" : "OFF"));
if ((dev->flags & IFF_PROMISC) && !(lp->cf_cmd.i596_config[8] & 0x01)) {
lp->cf_cmd.i596_config[8] |= 0x01;
for (dmi = dev->mc_list; cnt && dmi != NULL; dmi = dmi->next, cnt--, cp += 6) {
memcpy(cp, dmi->dmi_addr, 6);
if (i596_debug > 1)
- DEB(DEB_MULTI, printk("%s: Adding address %02x:%02x:%02x:%02x:%02x:%02x\n",
+ DEB(DEB_MULTI,printk("%s: Adding address %02x:%02x:%02x:%02x:%02x:%02x\n",
dev->name, cp[0],cp[1],cp[2],cp[3],cp[4],cp[5]));
}
CHECK_WBACK_INV(&lp->mc_cmd, sizeof(struct mc_cmd));
if (num_drivers >= MAX_DRIVERS) {
/* max count of possible i82596 drivers reached */
- return -ENOMEM;
+ return -ENODEV;
}
-
- if (num_drivers == 0)
- printk(KERN_INFO LASI_82596_DRIVER_VERSION "\n");
if (!dev->irq) {
- printk(KERN_ERR "%s: IRQ not found for i82596 at 0x%lx\n",
- __FILE__, dev->hpa);
+ printk(KERN_ERR __FILE__ ": IRQ not found for i82596 at 0x%lx\n", dev->hpa);
return -ENODEV;
}
return -ENODEV;
}
+ retval = register_netdev(netdevice);
+ if (retval) {
+ struct i596_private *lp = netdevice->priv;
+ printk(KERN_WARNING __FILE__ ": register_netdevice ret'd %d\n", retval);
+ dma_free_noncoherent(lp->dev, sizeof(struct i596_private),
+ (void *)netdevice->mem_start, lp->dma_addr);
+ free_netdev(netdevice);
+ return -ENODEV;
+ };
if (dev->id.sversion == 0x72) {
((struct i596_private *)netdevice->priv)->options = OPT_SWAP_PORT;
}
(void *)netdevice->mem_start, lp->dma_addr);
free_netdev(netdevice);
}
- num_drivers = 0;
unregister_parisc_driver(&lan_driver);
}
kfree(rfd);
} while (rfd != lp->rx_tail);
- lp->rx_tail = NULL;
+ lp->rx_tail = 0;
#if 0
for (lp->rbd_list) {
for (i = 0; i < N_RX_RING; ++i) {
if (mp->rx_bufs[i] != 0) {
dev_kfree_skb(mp->rx_bufs[i]);
- mp->rx_bufs[i] = NULL;
+ mp->rx_bufs[i] = 0;
}
}
for (i = mp->tx_empty; i != mp->tx_fill; ) {
cp->xfer_status = 0;
++cp;
}
- mp->rx_bufs[i] = NULL;
+ mp->rx_bufs[i] = 0;
st_le16(&cp->command, DBDMA_STOP);
mp->rx_fill = i;
mp->rx_empty = 0;
mp->stats.rx_bytes += skb->len;
netif_rx(skb);
dev->last_rx = jiffies;
- mp->rx_bufs[i] = NULL;
+ mp->rx_bufs[i] = 0;
++mp->stats.rx_packets;
}
} else {
static int __init myri_sbus_probe(void)
{
struct sbus_bus *bus;
- struct sbus_dev *sdev = NULL;
+ struct sbus_dev *sdev = 0;
static int called;
int cards = 0, v;
{"PCM-4823", "PCM-4823", {0x00, 0xc0, 0x6c}}, /* Broken Advantech MoBo */
{"REALTEK", "RTL8019", {0x00, 0x00, 0xe8}}, /* no-name with Realtek chip */
{"LCS-8834", "LCS-8836", {0x04, 0x04, 0x37}}, /* ShinyNet (SET) */
- {NULL,}
+ {0,}
};
#endif
static int bad[MAX_NE_CARDS]; /* 0xbad = bad sig or no reset ack */
MODULE_LICENSE("GPL");
+#ifdef MODULE_PARM
MODULE_PARM(io, "1-" __MODULE_STRING(MAX_NE_CARDS) "i");
MODULE_PARM(irq, "1-" __MODULE_STRING(MAX_NE_CARDS) "i");
MODULE_PARM(bad, "1-" __MODULE_STRING(MAX_NE_CARDS) "i");
MODULE_PARM_DESC(io, "(ignored)");
MODULE_PARM_DESC(irq, "(ignored)");
MODULE_PARM_DESC(bad, "(ignored)");
+#endif
/* Module code fixed by David Weinehall */
frag = skb_shinfo(skb)->frags;
if (!nr_frags)
- frag = NULL;
+ frag = 0;
extsts = 0;
if (skb->ip_summed == CHECKSUM_HW) {
extsts |= EXTSTS_IPPKT;
}
else if (ei_local->tx2 == 0)
{
- output_page = ei_local->tx_start_page + TX_PAGES/2;
+ output_page = ei_local->tx_start_page + TX_1X_PAGES;
ei_local->tx2 = send_length;
if (ei_debug && ei_local->tx1 > 0)
printk(KERN_DEBUG "%s: idle transmitter, tx1=%d, lasttx=%d, txing=%d.\n",
Modified from Am79C90 data sheet.
---------------------------------------------------------------------------- */
-#ifdef BROKEN_MULTICAST
+#if BROKEN_MULTICAST
static void updateCRC(int *CRC, int bit)
{
write_lock_irq(&disc_data_lock);
ap = tty->disc_data;
- tty->disc_data = NULL;
+ tty->disc_data = 0;
write_unlock_irq(&disc_data_lock);
if (ap == 0)
return;
ap->olim = buf;
kfree_skb(ap->tpkt);
- ap->tpkt = NULL;
+ ap->tpkt = 0;
return 1;
}
clear_bit(XMIT_BUSY, &ap->xmit_flags);
if (ap->tpkt != 0) {
kfree_skb(ap->tpkt);
- ap->tpkt = NULL;
+ ap->tpkt = 0;
clear_bit(XMIT_FULL, &ap->xmit_flags);
done = 1;
}
ap->optr = ap->olim;
if (ap->tpkt != NULL) {
kfree_skb(ap->tpkt);
- ap->tpkt = NULL;
+ ap->tpkt = 0;
clear_bit(XMIT_FULL, &ap->xmit_flags);
done = 1;
}
/* queue the frame to be processed */
skb->cb[0] = ap->state;
skb_queue_tail(&ap->rqueue, skb);
- ap->rpkt = NULL;
+ ap->rpkt = 0;
ap->state = 0;
return;
struct ppp *ppp;
if (pf != 0) {
- file->private_data = NULL;
+ file->private_data = 0;
if (pf->kind == INTERFACE) {
ppp = PF_TO_PPP(pf);
if (file == ppp->owner)
struct ppp_file *pf = file->private_data;
DECLARE_WAITQUEUE(wait, current);
ssize_t ret;
- struct sk_buff *skb = NULL;
+ struct sk_buff *skb = 0;
ret = count;
/* check if we should pass this packet */
/* the filter instructions are constructed assuming
a four-byte PPP header on each packet */
- *skb_push(skb, 2) = 1;
+ {
+ u_int16_t *p = (u_int16_t *) skb_push(skb, 2);
+
+ *p = htons(4); /* indicate outbound in DLT_LINUX_SLL */;
+ }
if (ppp->pass_filter
&& sk_run_filter(skb, ppp->pass_filter,
ppp->pass_len) == 0) {
list = &ppp->channels;
if (list_empty(list)) {
/* nowhere to send the packet, just drop it */
- ppp->xmit_pending = NULL;
+ ppp->xmit_pending = 0;
kfree_skb(skb);
return;
}
spin_lock_bh(&pch->downl);
if (pch->chan) {
if (pch->chan->ops->start_xmit(pch->chan, skb))
- ppp->xmit_pending = NULL;
+ ppp->xmit_pending = 0;
} else {
/* channel got unregistered */
kfree_skb(skb);
- ppp->xmit_pending = NULL;
+ ppp->xmit_pending = 0;
}
spin_unlock_bh(&pch->downl);
return;
return;
#endif /* CONFIG_PPP_MULTILINK */
- ppp->xmit_pending = NULL;
+ ppp->xmit_pending = 0;
kfree_skb(skb);
}
/* check if the packet passes the pass and active filters */
/* the filter instructions are constructed assuming
a four-byte PPP header on each packet */
- *skb_push(skb, 2) = 0;
+ {
+ u_int16_t *p = (u_int16_t *) skb_push(skb, 2);
+
+ *p = 0; /* indicate inbound in DLT_LINUX_SLL */
+ }
if (ppp->pass_filter
&& sk_run_filter(skb, ppp->pass_filter,
ppp->pass_len) == 0) {
if (pch == 0)
return; /* should never happen */
- chan->ppp = NULL;
+ chan->ppp = 0;
/*
* This ensures that we have returned from any calls into the
*/
down_write(&pch->chan_sem);
spin_lock_bh(&pch->downl);
- pch->chan = NULL;
+ pch->chan = 0;
spin_unlock_bh(&pch->downl);
up_write(&pch->chan_sem);
ppp_disconnect_channel(pch);
ppp->xstate = 0;
xcomp = ppp->xcomp;
xstate = ppp->xc_state;
- ppp->xc_state = NULL;
+ ppp->xc_state = 0;
ppp->rstate = 0;
rcomp = ppp->rcomp;
rstate = ppp->rc_state;
- ppp->rc_state = NULL;
+ ppp->rc_state = 0;
ppp_unlock(ppp);
if (xstate) {
if (ce->comp->compress_proto == proto)
return ce;
}
- return NULL;
+ return 0;
}
/* Register a compressor */
find_compressor(int type)
{
struct compressor_entry *ce;
- struct compressor *cp = NULL;
+ struct compressor *cp = 0;
spin_lock(&compressor_list_lock);
ce = find_comp_entry(type);
down(&all_ppp_sem);
ppp_lock(ppp);
dev = ppp->dev;
- ppp->dev = NULL;
+ ppp->dev = 0;
ppp_unlock(ppp);
/* This will call dev_close() for us. */
if (dev) {
ppp_ccp_closed(ppp);
if (ppp->vj) {
slhc_free(ppp->vj);
- ppp->vj = NULL;
+ ppp->vj = 0;
}
skb_queue_purge(&ppp->file.xq);
skb_queue_purge(&ppp->file.rq);
}
if (ppp->active_filter) {
kfree(ppp->active_filter);
- ppp->active_filter = NULL;
+ ppp->active_filter = 0;
}
#endif /* CONFIG_PPP_FILTER */
if (pch->file.index == unit)
return pch;
}
- return NULL;
+ return 0;
}
/*
write_lock_irq(&disc_data_lock);
ap = tty->disc_data;
- tty->disc_data = NULL;
+ tty->disc_data = 0;
write_unlock_irq(&disc_data_lock);
if (ap == 0)
return;
tty_stuffed = 1;
} else {
kfree_skb(ap->tpkt);
- ap->tpkt = NULL;
+ ap->tpkt = 0;
clear_bit(XMIT_FULL, &ap->xmit_flags);
done = 1;
}
flush:
if (ap->tpkt != 0) {
kfree_skb(ap->tpkt);
- ap->tpkt = NULL;
+ ap->tpkt = 0;
clear_bit(XMIT_FULL, &ap->xmit_flags);
done = 1;
}
spin_lock_bh(&ap->xmit_lock);
if (ap->tpkt != NULL) {
kfree_skb(ap->tpkt);
- ap->tpkt = NULL;
+ ap->tpkt = 0;
clear_bit(XMIT_FULL, &ap->xmit_flags);
done = 1;
}
#define PPPOE_HASH_BITS 4
#define PPPOE_HASH_SIZE (1<<PPPOE_HASH_BITS)
-static struct ppp_channel_ops pppoe_chan_ops;
-
static int pppoe_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
static int pppoe_xmit(struct ppp_channel *chan, struct sk_buff *skb);
static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb);
skb ? session_id : session_id | 0x40, frame_id);
if (skb) {
dev_kfree_skb(skb);
- skb = NULL;
+ skb = 0;
}
good_frame:
dev->last_rx = jiffies;
stats->rx_bytes+=dlen;
stats->rx_packets++;
- lp->rx_skb[ns] = NULL;
+ lp->rx_skb[ns] = 0;
lp->rx_session_id[ns] |= 0x40;
return 0;
if (ns < NPIDS) {
if ((skb = lp->rx_skb[ns])) {
dev_kfree_skb(skb);
- lp->rx_skb[ns] = NULL;
+ lp->rx_skb[ns] = 0;
}
lp->rx_session_id[ns] |= 0x40;
}
return 0;
}
\f
-/*
- * wait_for_buffer
- *
- * This routine waits for the SEEQ chip to assert that the FIFO is ready
- * by checking for a window interrupt, and then clearing it. This has to
- * occur in the interrupt handler!
- */
-inline void wait_for_buffer(struct net_device * dev)
-{
- int ioaddr = dev->base_addr;
- unsigned long tmp;
- int status;
-
- tmp = jiffies + HZ;
- while ( ( ((status=inw(SEEQ_STATUS)) & SEEQSTAT_WINDOW_INT) != SEEQSTAT_WINDOW_INT) && time_before(jiffies, tmp))
- cpu_relax();
-
- if ( (status & SEEQSTAT_WINDOW_INT) == SEEQSTAT_WINDOW_INT)
- outw( SEEQCMD_WINDOW_INT_ACK | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
-}
-\f
/* The typical workload of the driver:
Handle the network interface interrupts. */
static irqreturn_t seeq8005_interrupt(int irq, void *dev_id, struct pt_regs * regs)
}
+/*
+ * wait_for_buffer
+ *
+ * This routine waits for the SEEQ chip to assert that the FIFO is ready
+ * by checking for a window interrupt, and then clearing it. This has to
+ * occur in the interrupt handler!
+ */
+inline void wait_for_buffer(struct net_device * dev)
+{
+ int ioaddr = dev->base_addr;
+ unsigned long tmp;
+ int status;
+
+ tmp = jiffies + HZ;
+ while ( ( ((status=inw(SEEQ_STATUS)) & SEEQSTAT_WINDOW_INT) != SEEQSTAT_WINDOW_INT) && time_before(jiffies, tmp))
+ cpu_relax();
+
+ if ( (status & SEEQSTAT_WINDOW_INT) == SEEQSTAT_WINDOW_INT)
+ outw( SEEQCMD_WINDOW_INT_ACK | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
+}
+
#ifdef MODULE
static struct net_device *dev_seeq;
{ "NS 83851 PHY", 0x2000, 0x5C20, MIX },
{ "Realtek RTL8201 PHY", 0x0000, 0x8200, LAN },
{ "VIA 6103 PHY", 0x0101, 0x8f20, LAN },
- {NULL,},
+ {0,},
};
struct mii_phy {
sis_priv->tx_ring[i].bufptr, skb->len,
PCI_DMA_TODEVICE);
dev_kfree_skb_irq(skb);
- sis_priv->tx_skbuff[i] = NULL;
+ sis_priv->tx_skbuff[i] = 0;
sis_priv->tx_ring[i].cmdsts = 0;
sis_priv->tx_ring[i].bufptr = 0;
sis_priv->stats.tx_dropped++;
sis_priv->rx_ring[i].bufptr,
RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
dev_kfree_skb(skb);
- sis_priv->rx_skbuff[i] = NULL;
+ sis_priv->rx_skbuff[i] = 0;
}
}
for (i = 0; i < NUM_TX_DESC; i++) {
sis_priv->tx_ring[i].bufptr, skb->len,
PCI_DMA_TODEVICE);
dev_kfree_skb(skb);
- sis_priv->tx_skbuff[i] = NULL;
+ sis_priv->tx_skbuff[i] = 0;
}
}
for (i = 0 ; i < 6 ; i++, p++)
*p = canonical[*p] ;
}
- slot = NULL;
+ slot = 0 ;
for (i = 0, tb = smc->hw.fp.mc.table ; i < FPMAX_MULTICAST ; i++, tb++){
if (!tb->n) { /* not used */
if (!del && !slot) /* if !del save first free */
{
struct s_smt_tx_queue *queue ;
struct s_smt_fp_txd volatile *t1 ;
- struct s_smt_fp_txd volatile *t2 = NULL ;
+ struct s_smt_fp_txd volatile *t2=0 ;
SMbuf *mb ;
u_long tbctrl ;
int i ;
{
struct smt_para *pa ;
const struct s_p_tab *pt ;
- struct fddi_mib_m *mib_m = NULL;
- struct fddi_mib_p *mib_p = NULL;
+ struct fddi_mib_m *mib_m = 0 ;
+ struct fddi_mib_p *mib_p = 0 ;
int len ;
int plen ;
char *from ;
/*
* check special paras
*/
- swap = NULL;
+ swap = 0 ;
switch (para) {
case SMT_P10F0 :
case SMT_P10F1 :
char c ;
char *mib_addr ;
struct fddi_mib *mib ;
- struct fddi_mib_m *mib_m = NULL;
- struct fddi_mib_a *mib_a = NULL;
- struct fddi_mib_p *mib_p = NULL;
+ struct fddi_mib_m *mib_m = 0 ;
+ struct fddi_mib_a *mib_a = 0 ;
+ struct fddi_mib_p *mib_p = 0 ;
int mac ;
int path ;
int port ;
const struct s_p_tab *pt ;
for (pt = p_tab ; pt->p_num && pt->p_num != para ; pt++)
;
- return(pt->p_num ? pt : NULL) ;
+ return(pt->p_num ? pt : 0) ;
}
static int smt_mib_phys(struct s_smc *smc)
char *p ;
int len ;
int plen ;
- void *found = NULL;
+ void *found = 0 ;
SK_UNUSED(smc) ;
len -= plen ;
if (len < 0) {
DB_SMT("SMT : sm_to_para - length error %d\n",plen,0) ;
- return NULL;
+ return(0) ;
}
if ((plen & 3) && (para != SMT_P_ECHODATA)) {
DB_SMT("SMT : sm_to_para - odd length %d\n",plen,0) ;
- return NULL;
+ return(0) ;
}
if (found)
return(found) ;
}
- return NULL;
+ return(0) ;
}
#if 0
*/
/* Attention: don't initialize mib pointer here! */
/* It must be initialized during phase 2 */
- smc->y[port].mib = NULL;
+ smc->y[port].mib = 0 ;
mib->fddiSMTPORTIndexes[port] = port+INDEX_PORT ;
pm->fddiPORTIndex = port+INDEX_PORT ;
{ "SBACOMMAND",16, 0 } ,
{ "SBAAVAILABLE",17, 1, 0, 100 } ,
#endif
- { NULL }
+ { 0 }
} ;
/* Define maximum string size for values and keybuffer */
void smt_timer_init(struct s_smc *smc)
{
- smc->t.st_queue = NULL;
+ smc->t.st_queue = 0 ;
smc->t.st_fast.tm_active = FALSE ;
- smc->t.st_fast.tm_next = NULL;
+ smc->t.st_fast.tm_next = 0 ;
hwt_init(smc) ;
}
timer->tm_active = TRUE ;
if (!smc->t.st_queue) {
smc->t.st_queue = timer ;
- timer->tm_next = NULL;
+ timer->tm_next = 0 ;
timer->tm_delta = time ;
hwt_start(smc,time) ;
return ;
done = 1 ;
}
}
- *last = NULL;
+ *last = 0 ;
next = smc->t.st_queue ;
smc->t.st_queue = tm ;
if (evc->evc_code == code && evc->evc_index == index)
return(evc) ;
}
- return NULL;
+ return(0) ;
}
#define THRESHOLD_2 (2*TICKS_PER_SECOND)
if (!sl || sl->magic != SLIP_MAGIC || sl->tty != tty)
return;
- tty->disc_data = NULL;
+ tty->disc_data = 0;
sl->tty = NULL;
if (!sl->leased)
sl->line = 0;
static int __init bigmac_probe(void)
{
struct sbus_bus *sbus;
- struct sbus_dev *sdev = NULL;
+ struct sbus_dev *sdev = 0;
static int called;
int cards = 0, v;
{"D-Link DFE-530TXS FAST Ethernet Adapter"},
{"D-Link DL10050-based FAST Ethernet Adapter"},
{"Sundance Technology Alta"},
- {NULL,}, /* 0 terminated list. */
+ {0,}, /* 0 terminated list. */
};
/* This driver was written to use PCI memory space, however x86-oriented
((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
np->rx_ring[i].status = 0;
np->rx_ring[i].frag[0].length = 0;
- np->rx_skbuff[i] = NULL;
+ np->rx_skbuff[i] = 0;
}
/* Fill in the Rx buffers. Handle allocation failure gracefully. */
np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
for (i = 0; i < TX_RING_SIZE; i++) {
- np->tx_skbuff[i] = NULL;
+ np->tx_skbuff[i] = 0;
np->tx_ring[i].status = 0;
}
return;
dev_kfree_skb_irq (skb);
else
dev_kfree_skb (skb);
- np->tx_skbuff[i] = NULL;
+ np->tx_skbuff[i] = 0;
np->stats.tx_dropped++;
}
}
np->tx_ring[entry].frag[0].addr,
skb->len, PCI_DMA_TODEVICE);
dev_kfree_skb_irq (np->tx_skbuff[entry]);
- np->tx_skbuff[entry] = NULL;
+ np->tx_skbuff[entry] = 0;
np->tx_ring[entry].frag[0].addr = 0;
np->tx_ring[entry].frag[0].length = 0;
}
np->tx_ring[entry].frag[0].addr,
skb->len, PCI_DMA_TODEVICE);
dev_kfree_skb_irq (np->tx_skbuff[entry]);
- np->tx_skbuff[entry] = NULL;
+ np->tx_skbuff[entry] = 0;
np->tx_ring[entry].frag[0].addr = 0;
np->tx_ring[entry].frag[0].length = 0;
}
np->rx_ring[i].frag[0].addr, np->rx_buf_sz,
PCI_DMA_FROMDEVICE);
dev_kfree_skb(skb);
- np->rx_skbuff[i] = NULL;
+ np->rx_skbuff[i] = 0;
}
}
for (i = 0; i < TX_RING_SIZE; i++) {
np->tx_ring[i].frag[0].addr, skb->len,
PCI_DMA_TODEVICE);
dev_kfree_skb(skb);
- np->tx_skbuff[i] = NULL;
+ np->tx_skbuff[i] = 0;
}
}
/* Let the chip settle down a bit, it seems that helps
* for sleep mode on some models
*/
- msleep(10);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(HZ/100);
/* Make sure we aren't polling PHY status change. We
* don't currently use that feature though
* dont wait a bit here, looks like the chip takes
* some time to really shut down
*/
- msleep(10);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(HZ/100);
}
writel(0, gp->regs + MAC_TXCFG);
lp->tx_new = TX_NEXT(entry);
}
-struct net_device *last_dev;
+struct net_device *last_dev = 0;
static int lance_open(struct net_device *dev)
{
static int __init sparc_lance_probe(void)
{
struct sbus_bus *bus;
- struct sbus_dev *sdev = NULL;
- struct sbus_dma *ledma = NULL;
+ struct sbus_dev *sdev = 0;
+ struct sbus_dma *ledma = 0;
static int called;
int cards = 0, v;
for_each_sbusdev (sdev, bus) {
if (strcmp(sdev->prom_name, "le") == 0) {
cards++;
- if ((v = sparc_lance_init(sdev, NULL, NULL)))
+ if ((v = sparc_lance_init(sdev, 0, 0)))
return v;
continue;
}
cards++;
ledma = find_ledma(sdev);
if ((v = sparc_lance_init(sdev->child,
- ledma, NULL)))
+ ledma, 0)))
return v;
continue;
}
if (strcmp(sdev->prom_name, "lebuffer") == 0){
cards++;
if ((v = sparc_lance_init(sdev->child,
- NULL, sdev)))
+ 0, sdev)))
return v;
continue;
}
{
struct net_device *dev = NULL;
struct sbus_bus *bus;
- struct sbus_dev *sdev = NULL;
+ struct sbus_dev *sdev = 0;
static int called;
int cards = 0, v;
/* restore 5701 hardware bug workaround flag */
tp->tg3_flags = flags_save;
- /* Unfortunately, we have to delay before the PCI read back.
- * Some 575X chips even will not respond to a PCI cfg access
- * when the reset command is given to the chip.
- *
- * How do these hardware designers expect things to work
- * properly if the PCI write is posted for a long period
- * of time? It is always necessary to have some method by
- * which a register read back can occur to push the write
- * out which does the reset.
- *
- * For most tg3 variants the trick below was working.
- * Ho hum...
- */
- udelay(120);
-
/* Flush PCI posted writes. The normal MMIO registers
* are inaccessible at this time so this is the only
- * way to make this reliably (actually, this is no longer
- * the case, see above). I tried to use indirect
+ * way to make this reliably. I tried to use indirect
* register read/write but this upset some 5701 variants.
*/
pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
/* Define this to enable Link beat monitoring */
#undef MONITOR
-/* Turn on debugging. See Documentation/networking/tlan.txt for details */
+/* Turn on debugging. See linux/Documentation/networking/tlan.txt for details */
static int debug;
static int bbuf;
const char *media[] = {
"10BaseT-HD ", "10BaseT-FD ","100baseTx-HD ",
- "100baseTx-FD", "100baseT4", NULL
+ "100baseTx-FD", "100baseT4", 0
};
int media_map[] = { 0x0020, 0x0040, 0x0080, 0x0100, 0x0200,};
config SMCTR
tristate "SMC ISA/MCA adapter support"
- depends on TR && (ISA || MCA_LEGACY) && (BROKEN || !64BIT)
+ depends on TR && (ISA || MCA_LEGACY)
---help---
This is support for the ISA and MCA SMC Token Ring cards,
specifically SMC TokenCard Elite (8115T) and SMC TokenCard Elite/A
}
}
/* Lite-On boards have the address byte-swapped. */
- if ((dev->dev_addr[0] == 0xA0 || dev->dev_addr[0] == 0xC0 || dev->dev_addr[0] == 0x02)
+ if ((dev->dev_addr[0] == 0xA0 || dev->dev_addr[0] == 0xC0)
&& dev->dev_addr[1] == 0x00)
for (i = 0; i < 6; i+=2) {
char tmp = dev->dev_addr[i];
spin_lock(&card->lock);
status = inl(card->io_port+CSR5);
-#ifdef DEBUG
+#if DEBUG
print_binary(status);
printk("tx status 0x%08x 0x%08x \n",card->tx_buffer[0],card->tx_buffer[4]);
printk("rx status 0x%08x 0x%08x \n",card->rx_buffer[0],card->rx_buffer[4]);
xircom_init_ring(dev);
/* Clear the tx ring */
for (i = 0; i < TX_RING_SIZE; i++) {
- tp->tx_skbuff[i] = NULL;
+ tp->tx_skbuff[i] = 0;
tp->tx_ring[i].status = 0;
}
/* The Tx buffer descriptor is filled in as needed, but we
do need to clear the ownership bit. */
for (i = 0; i < TX_RING_SIZE; i++) {
- tp->tx_skbuff[i] = NULL;
+ tp->tx_skbuff[i] = 0;
tp->tx_ring[i].status = 0;
tp->tx_ring[i].buffer2 = virt_to_bus(&tp->tx_ring[i+1]);
#ifdef CARDBUS
/* Free the original skb. */
dev_kfree_skb_irq(tp->tx_skbuff[entry]);
- tp->tx_skbuff[entry] = NULL;
+ tp->tx_skbuff[entry] = 0;
}
#ifndef final_version
/* Free all the skbuffs in the Rx queue. */
for (i = 0; i < RX_RING_SIZE; i++) {
struct sk_buff *skb = tp->rx_skbuff[i];
- tp->rx_skbuff[i] = NULL;
+ tp->rx_skbuff[i] = 0;
tp->rx_ring[i].status = 0; /* Not owned by Xircom chip. */
tp->rx_ring[i].length = 0;
tp->rx_ring[i].buffer1 = 0xBADF00D0; /* An invalid address. */
for (i = 0; i < TX_RING_SIZE; i++) {
if (tp->tx_skbuff[i])
dev_kfree_skb(tp->tx_skbuff[i]);
- tp->tx_skbuff[i] = NULL;
+ tp->tx_skbuff[i] = 0;
}
tp->open = 0;
if (entry != 0) {
/* Avoid a chip errata by prefixing a dummy entry. */
- tp->tx_skbuff[entry] = NULL;
+ tp->tx_skbuff[entry] = 0;
tp->tx_ring[entry].length =
(entry == TX_RING_SIZE - 1) ? Tx1RingWrap : 0;
tp->tx_ring[entry].buffer1 = 0;
entry = tp->cur_tx++ % TX_RING_SIZE;
}
- tp->tx_skbuff[entry] = NULL;
+ tp->tx_skbuff[entry] = 0;
/* Put the setup frame on the Tx list. */
if (entry == TX_RING_SIZE - 1)
tx_flags |= Tx1RingWrap; /* Wrap ring. */
}
#endif /* USE_MMIO */
dev->base_addr = ioaddr;
- rp = netdev_priv(dev);
- rp->quirks = quirks;
rhine_power_init(dev);
dev->irq = pdev->irq;
+ rp = netdev_priv(dev);
spin_lock_init(&rp->lock);
rp->pdev = pdev;
+ rp->quirks = quirks;
rp->mii_if.dev = dev;
rp->mii_if.mdio_read = mdio_read;
rp->mii_if.mdio_write = mdio_write;
rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
next += sizeof(struct rx_desc);
rp->rx_ring[i].next_desc = cpu_to_le32(next);
- rp->rx_skbuff[i] = NULL;
+ rp->rx_skbuff[i] = 0;
}
/* Mark the last entry as wrapping the ring. */
rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);
rp->rx_buf_sz, PCI_DMA_FROMDEVICE);
dev_kfree_skb(rp->rx_skbuff[i]);
}
- rp->rx_skbuff[i] = NULL;
+ rp->rx_skbuff[i] = 0;
}
}
rp->dirty_tx = rp->cur_tx = 0;
next = rp->tx_ring_dma;
for (i = 0; i < TX_RING_SIZE; i++) {
- rp->tx_skbuff[i] = NULL;
+ rp->tx_skbuff[i] = 0;
rp->tx_ring[i].tx_status = 0;
rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
next += sizeof(struct tx_desc);
}
dev_kfree_skb(rp->tx_skbuff[i]);
}
- rp->tx_skbuff[i] = NULL;
- rp->tx_buf[i] = NULL;
+ rp->tx_skbuff[i] = 0;
+ rp->tx_buf[i] = 0;
}
}
struct velocity_info *vptr = dev->priv;
struct mac_regs * regs = vptr->mac_regs;
unsigned long flags;
- struct mii_ioctl_data *miidata = if_mii(ifr);
+ struct mii_ioctl_data *miidata = (struct mii_ioctl_data *) &(ifr->ifr_data);
int err;
switch (cmd) {
register u32 _crc;
_crc = crc;
- __asm__ __volatile__ (
+ __asm __volatile (
"xorl %%ebx, %%ebx\n"
"movl %2, %%esi\n"
"movl %3, %%ecx\n"
config ARLAN
tristate "Aironet Arlan 655 & IC2200 DS support"
- depends on NET_RADIO && ISA && !64BIT
+ depends on NET_RADIO && ISA
---help---
Aironet makes Arlan, a class of wireless LAN adapters. These use the
www.Telxon.com chip, which is also used on several similar cards.
config AIRO
tristate "Cisco/Aironet 34X/35X/4500/4800 ISA and PCI cards"
- depends on NET_RADIO && ISA && (PCI || BROKEN)
+ depends on NET_RADIO && (ISA || PCI)
---help---
This is the standard Linux driver to support Cisco/Aironet ISA and
PCI 802.11 wireless cards.
static void wifi_setup(struct net_device *dev)
{
- dev->hard_header = NULL;
- dev->rebuild_header = NULL;
- dev->hard_header_cache = NULL;
- dev->header_cache_update= NULL;
+ dev->hard_header = 0;
+ dev->rebuild_header = 0;
+ dev->hard_header_cache = 0;
+ dev->header_cache_update= 0;
dev->hard_header_parse = wll_header_parse;
dev->hard_start_xmit = &airo_start_xmit11;
}
ai = dev->priv;
- ai->wifidev = NULL;
+ ai->wifidev = 0;
ai->flags = 0;
if (pci && (pci->device == 0x5000 || pci->device == 0xa504)) {
printk(KERN_DEBUG "airo: Found an MPI350 card\n");
struct net_device *init_airo_card( unsigned short irq, int port, int is_pcmcia )
{
- return _init_airo_card ( irq, port, is_pcmcia, NULL);
+ return _init_airo_card ( irq, port, is_pcmcia, 0);
}
EXPORT_SYMBOL(init_airo_card);
.release = proc_close
};
-static struct proc_dir_entry *airo_entry;
+static struct proc_dir_entry *airo_entry = 0;
struct proc_data {
int release_buffer;
(data->wbuffer[1] == ' ' || data->wbuffer[1] == '\n')) {
index = data->wbuffer[0] - '0';
if (data->wbuffer[1] == '\n') {
- set_wep_key(ai, index, NULL, 0, 1, 1);
+ set_wep_key(ai, index, 0, 0, 1, 1);
return;
}
j = 2;
}
data->writelen = 0;
data->maxwritelen = 0;
- data->wbuffer = NULL;
- data->on_close = NULL;
+ data->wbuffer = 0;
+ data->on_close = 0;
if (file->f_mode & FMODE_WRITE) {
if (!(file->f_mode & FMODE_READ)) {
static struct net_device_list {
struct net_device *dev;
struct net_device_list *next;
-} *airo_devices;
+} *airo_devices = 0;
/* Since the card doesn't automatically switch to the right WEP mode,
we will make it do it. If the card isn't associated, every secs we
break;
case AUTH_SHAREDKEY:
if (apriv->keyindex < auto_wep) {
- set_wep_key(apriv, apriv->keyindex, NULL, 0, 0, 0);
+ set_wep_key(apriv, apriv->keyindex, 0, 0, 0, 0);
apriv->config.authType = AUTH_SHAREDKEY;
apriv->keyindex++;
} else {
/* Drop to ENCRYPT */
apriv->keyindex = 0;
- set_wep_key(apriv, apriv->defindex, NULL, 0, 0, 0);
+ set_wep_key(apriv, apriv->defindex, 0, 0, 0, 0);
apriv->config.authType = AUTH_ENCRYPT;
}
break;
/* Do we want to just set the transmit key index ? */
int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
if ((index >= 0) && (index < ((cap_rid.softCap & 0x80)?4:1))) {
- set_wep_key(local, index, NULL, 0, 1, 1);
+ set_wep_key(local, index, 0, 0, 1, 1);
} else
/* Don't complain if only change the mode */
if(!dwrq->flags & IW_ENCODE_MODE) {
if (card->vaddr)
iounmap(card->vaddr);
- card->vaddr = NULL;
+ card->vaddr = 0;
macio_release_resource(mdev, 0);
static char arlan_drive_info[ARLAN_STR_SIZE] = "A655\n\0";
static int arlan_sysctl_info(ctl_table * ctl, int write, struct file *filp,
- void __user *buffer, size_t * lenp, loff_t *ppos)
+ void __user *buffer, size_t * lenp)
{
int i;
int retv, pos, devnum;
*lenp = pos;
if (!write)
- retv = proc_dostring(ctl, write, filp, buffer, lenp, ppos);
+ retv = proc_dostring(ctl, write, filp, buffer, lenp);
else
{
*lenp = 0;
static int arlan_sysctl_info161719(ctl_table * ctl, int write, struct file *filp,
- void __user *buffer, size_t * lenp, loff_t *ppos)
+ void __user *buffer, size_t * lenp)
{
int i;
int retv, pos, devnum;
final:
*lenp = pos;
- retv = proc_dostring(ctl, write, filp, buffer, lenp, ppos);
+ retv = proc_dostring(ctl, write, filp, buffer, lenp);
return retv;
}
static int arlan_sysctl_infotxRing(ctl_table * ctl, int write, struct file *filp,
- void __user *buffer, size_t * lenp, loff_t *ppos)
+ void __user *buffer, size_t * lenp)
{
int i;
int retv, pos, devnum;
SARLBNpln(u_char, txBuffer, 0x800);
final:
*lenp = pos;
- retv = proc_dostring(ctl, write, filp, buffer, lenp, ppos);
+ retv = proc_dostring(ctl, write, filp, buffer, lenp);
return retv;
}
static int arlan_sysctl_inforxRing(ctl_table * ctl, int write, struct file *filp,
- void __user *buffer, size_t * lenp, loff_t *ppos)
+ void __user *buffer, size_t * lenp)
{
int i;
int retv, pos, devnum;
SARLBNpln(u_char, rxBuffer, 0x800);
final:
*lenp = pos;
- retv = proc_dostring(ctl, write, filp, buffer, lenp, ppos);
+ retv = proc_dostring(ctl, write, filp, buffer, lenp);
return retv;
}
static int arlan_sysctl_info18(ctl_table * ctl, int write, struct file *filp,
- void __user *buffer, size_t * lenp, loff_t *ppos)
+ void __user *buffer, size_t * lenp)
{
int i;
int retv, pos, devnum;
final:
*lenp = pos;
- retv = proc_dostring(ctl, write, filp, buffer, lenp, ppos);
+ retv = proc_dostring(ctl, write, filp, buffer, lenp);
return retv;
}
static char conf_reset_result[200];
static int arlan_configure(ctl_table * ctl, int write, struct file *filp,
- void __user *buffer, size_t * lenp, loff_t *ppos)
+ void __user *buffer, size_t * lenp)
{
int pos = 0;
int devnum = ctl->procname[6] - '0';
return -1;
*lenp = pos;
- return proc_dostring(ctl, write, filp, buffer, lenp, ppos);
+ return proc_dostring(ctl, write, filp, buffer, lenp);
}
static int arlan_sysctl_reset(ctl_table * ctl, int write, struct file *filp,
- void __user *buffer, size_t * lenp, loff_t *ppos)
+ void __user *buffer, size_t * lenp)
{
int pos = 0;
int devnum = ctl->procname[5] - '0';
} else
return -1;
*lenp = pos + 3;
- return proc_dostring(ctl, write, filp, buffer, lenp, ppos);
+ return proc_dostring(ctl, write, filp, buffer, lenp);
}
extern int arlan_entry_debug;
extern int arlan_exit_debug;
extern int testMemory;
+extern const char* arlan_version;
extern int arlan_command(struct net_device * dev, int command);
#define SIDUNKNOWN -1
memcpy(header.addr3, priv->CurrentBSSID, 6);
if (priv->wep_is_on) {
- auth.alg = cpu_to_le16(C80211_MGMT_AAN_SHAREDKEY);
+ auth.alg = C80211_MGMT_AAN_SHAREDKEY;
/* no WEP for authentication frames with TrSeqNo 1 */
if (priv->CurrentAuthentTransactionSeqNum != 1)
header.frame_ctl |= cpu_to_le16(IEEE802_11_FCTL_WEP);
} else {
- auth.alg = cpu_to_le16(C80211_MGMT_AAN_OPENSYSTEM);
+ auth.alg = C80211_MGMT_AAN_OPENSYSTEM;
}
auth.status = 0;
*
*/
+#define __KERNEL_SYSCALLS__
+
#include <linux/version.h>
#include <linux/module.h>
#include <linux/types.h>
{
struct iw_range *range = (struct iw_range *) extra;
islpci_private *priv = netdev_priv(ndev);
- u8 *data;
+ char *data;
int i, m, rvalue;
struct obj_frequencies *freq;
union oid_res_t r;
i = 0;
while ((i < IW_MAX_BITRATES) && (*data != 0)) {
/* the result must be in bps. The card gives us 500Kbps */
- range->bitrate[i] = *data * 500000;
+ range->bitrate[i] = (__s32) (*data >> 1);
+ range->bitrate[i] *= 1000000;
i++;
data++;
}
return mgt_set_request(priv, DOT11_OID_PROFILES, 0, &profile);
}
- ret = mgt_get_request(priv, DOT11_OID_SUPPORTEDRATES, 0, NULL, &r);
- if (ret) {
- kfree(r.ptr);
+ if ((ret =
+ mgt_get_request(priv, DOT11_OID_SUPPORTEDRATES, 0, NULL, &r)))
return ret;
- }
rate = (u32) (vwrq->value / 500000);
data = r.ptr;
}
if (!data[i]) {
- kfree(r.ptr);
return -EINVAL;
}
vwrq->value = r.u * 500000;
/* request the device for the enabled rates */
- rvalue = mgt_get_request(priv, DOT11_OID_RATES, 0, NULL, &r);
- if (rvalue) {
- kfree(r.ptr);
+ if ((rvalue = mgt_get_request(priv, DOT11_OID_RATES, 0, NULL, &r)))
return rvalue;
- }
data = r.ptr;
vwrq->fixed = (data[0] != 0) && (data[1] == 0);
kfree(r.ptr);
{
islpci_private *priv = netdev_priv(ndev);
struct islpci_mgmtframe *response = NULL;
- int ret = -EIO;
+ int ret = -EIO, response_op = PIMFOR_OP_ERROR;
printk("%s: get_oid 0x%08X\n", ndev->name, priv->priv_oid);
data->length = 0;
islpci_mgt_transaction(priv->ndev, PIMFOR_OP_GET,
priv->priv_oid, extra, 256,
&response);
+ response_op = response->header->operation;
printk("%s: ret: %i\n", ndev->name, ret);
+ printk("%s: response_op: %i\n", ndev->name, response_op);
if (ret || !response
|| response->header->operation == PIMFOR_OP_ERROR) {
if (response) {
priv->priv_oid, extra, data->length,
&response);
printk("%s: ret: %i\n", ndev->name, ret);
- if (ret || !response
- || response->header->operation == PIMFOR_OP_ERROR) {
- if (response) {
- islpci_mgt_release(response);
- }
- printk("%s: EIO\n", ndev->name);
- ret = -EIO;
- }
if (!ret) {
response_op = response->header->operation;
printk("%s: response_op: %i\n", ndev->name,
response_op);
islpci_mgt_release(response);
}
+ if (ret || response_op == PIMFOR_OP_ERROR) {
+ printk("%s: EIO\n", ndev->name);
+ ret = -EIO;
+ }
}
return (ret ? ret : -EINPROGRESS);
#include "oid_mgt.h"
#define ISL3877_IMAGE_FILE "isl3877"
-#define ISL3886_IMAGE_FILE "isl3886"
#define ISL3890_IMAGE_FILE "isl3890"
static int prism54_bring_down(islpci_private *);
mdelay(50);
{
- const struct firmware *fw_entry = NULL;
+ const struct firmware *fw_entry = 0;
long fw_len;
const u32 *fw_ptr;
void *device = priv->device_base;
int powerstate = ISL38XX_PSM_POWERSAVE_STATE;
- /* lock the interrupt handler */
- spin_lock(&priv->slock);
-
/* received an interrupt request on a shared IRQ line
* first check whether the device is in sleep mode */
reg = readl(device + ISL38XX_CTRL_STAT_REG);
#if VERBOSE > SHOW_ERROR_MESSAGES
DEBUG(SHOW_TRACING, "Assuming someone else called the IRQ\n");
#endif
- spin_unlock(&priv->slock);
return IRQ_NONE;
}
+ if (islpci_get_state(priv) != PRV_STATE_SLEEP)
+ powerstate = ISL38XX_PSM_ACTIVE_STATE;
+
+ /* lock the interrupt handler */
+ spin_lock(&priv->slock);
/* check whether there is any source of interrupt on the device */
reg = readl(device + ISL38XX_INT_IDENT_REG);
reg &= ISL38XX_INT_SOURCES;
if (reg != 0) {
- if (islpci_get_state(priv) != PRV_STATE_SLEEP)
- powerstate = ISL38XX_PSM_ACTIVE_STATE;
-
/* reset the request bits in the Identification register */
isl38xx_w32_flush(device, reg, ISL38XX_INT_ACK_REG);
isl38xx_handle_wakeup(priv->control_block,
&powerstate, priv->device_base);
}
- } else {
-#if VERBOSE > SHOW_ERROR_MESSAGES
- DEBUG(SHOW_TRACING, "Assuming someone else called the IRQ\n");
-#endif
- spin_unlock(&priv->slock);
- return IRQ_NONE;
}
/* sleep -> ready */
if (priv->device_base)
iounmap(priv->device_base);
- priv->device_base = NULL;
+ priv->device_base = 0;
/* free consistent DMA area... */
if (priv->driver_mem_address)
priv->device_host_address);
/* clear some dangling pointers */
- priv->driver_mem_address = NULL;
+ priv->driver_mem_address = 0;
priv->device_host_address = 0;
priv->device_psm_buffer = 0;
- priv->control_block = NULL;
+ priv->control_block = 0;
/* clean up mgmt rx buffers */
for (counter = 0; counter < ISL38XX_CB_MGMT_QSIZE; counter++) {
if (priv->data_low_rx[counter])
dev_kfree_skb(priv->data_low_rx[counter]);
- priv->data_low_rx[counter] = NULL;
+ priv->data_low_rx[counter] = 0;
}
/* Free the acces control list and the WPA list */
/* select the firmware file depending on the device id */
switch (pdev->device) {
- case 0x3877:
- strcpy(priv->firmware, ISL3877_IMAGE_FILE);
+ case PCIDEVICE_ISL3890:
+ case PCIDEVICE_3COM6001:
+ strcpy(priv->firmware, ISL3890_IMAGE_FILE);
break;
-
- case 0x3886:
- strcpy(priv->firmware, ISL3886_IMAGE_FILE);
+ case PCIDEVICE_ISL3877:
+ strcpy(priv->firmware, ISL3877_IMAGE_FILE);
break;
default:
do_islpci_free_memory:
islpci_free_memory(priv);
do_free_netdev:
- pci_set_drvdata(pdev, NULL);
+ pci_set_drvdata(pdev, 0);
free_netdev(ndev);
- priv = NULL;
+ priv = 0;
return NULL;
}
MODULE_DESCRIPTION("The Prism54 802.11 Wireless LAN adapter");
MODULE_LICENSE("GPL");
-static int init_pcitm = 0;
-module_param(init_pcitm, int, 0);
-
/* In this order: vendor, device, subvendor, subdevice, class, class_mask,
* driver_data
* If you have an update for this please contact prism54-devel@prism54.org
* The latest list can be found at http://prism54.org/supported_cards.php */
static const struct pci_device_id prism54_id_tbl[] = {
- /* Intersil PRISM Duette/Prism GT Wireless LAN adapter */
+ /* 3COM 3CRWE154G72 Wireless LAN adapter */
{
- 0x1260, 0x3890,
- PCI_ANY_ID, PCI_ANY_ID,
+ PCIVENDOR_3COM, PCIDEVICE_3COM6001,
+ PCIVENDOR_3COM, PCIDEVICE_3COM6001,
0, 0, 0
},
- /* 3COM 3CRWE154G72 Wireless LAN adapter */
+ /* D-Link Air Plus Xtreme G A1 - DWL-g650 A1 */
{
- 0x10b7, 0x6001,
- PCI_ANY_ID, PCI_ANY_ID,
+ PCIVENDOR_INTERSIL, PCIDEVICE_ISL3890,
+ PCIVENDOR_DLINK, 0x3202UL,
+ 0, 0, 0
+ },
+
+ /* I-O Data WN-G54/CB - WN-G54/CB */
+ {
+ PCIVENDOR_INTERSIL, PCIDEVICE_ISL3890,
+ PCIVENDOR_IODATA, 0xd019UL,
+ 0, 0, 0
+ },
+
+ /* Netgear WG511 */
+ {
+ PCIVENDOR_INTERSIL, PCIDEVICE_ISL3890,
+ PCIVENDOR_NETGEAR, 0x4800UL,
+ 0, 0, 0
+ },
+
+ /* Tekram Technology clones, Allnet, Netcomm, Zyxel */
+ {
+ PCIVENDOR_INTERSIL, PCIDEVICE_ISL3890,
+ PCIVENDOR_TTL, 0x1605UL,
+ 0, 0, 0
+ },
+
+ /* SMC2802W */
+ {
+ PCIVENDOR_INTERSIL, PCIDEVICE_ISL3890,
+ PCIVENDOR_SMC, 0x2802UL,
+ 0, 0, 0
+ },
+
+ /* SMC2835W */
+ {
+ PCIVENDOR_INTERSIL, PCIDEVICE_ISL3890,
+ PCIVENDOR_SMC, 0x2835UL,
+ 0, 0, 0
+ },
+
+ /* Corega CG-WLCB54GT */
+ {
+ PCIVENDOR_INTERSIL, PCIDEVICE_ISL3890,
+ PCIVENDOR_ATI, 0xc104UL,
+ 0, 0, 0
+ },
+
+ /* I4 Z-Com XG-600 */
+ {
+ PCIVENDOR_INTERSIL, PCIDEVICE_ISL3890,
+ PCIVENDOR_I4, 0x0014UL,
+ 0, 0, 0
+ },
+
+ /* I4 Z-Com XG-900 and clones Macer, Ovislink, Planex, Peabird, */
+ /* Sitecom, Xterasys */
+ {
+ PCIVENDOR_INTERSIL, PCIDEVICE_ISL3890,
+ PCIVENDOR_I4, 0x0020UL,
+ 0, 0, 0
+ },
+
+ /* SMC 2802W V2 */
+ {
+ PCIVENDOR_INTERSIL, PCIDEVICE_ISL3890,
+ PCIVENDOR_ACCTON, 0xee03UL,
+ 0, 0, 0
+ },
+
+ /* SMC 2835W V2 */
+ {
+ PCIVENDOR_INTERSIL, PCIDEVICE_ISL3890,
+ PCIVENDOR_SMC, 0xa835UL,
0, 0, 0
},
/* Intersil PRISM Indigo Wireless LAN adapter */
{
- 0x1260, 0x3877,
+ PCIVENDOR_INTERSIL, PCIDEVICE_ISL3877,
PCI_ANY_ID, PCI_ANY_ID,
0, 0, 0
},
- /* Intersil PRISM Javelin/Xbow Wireless LAN adapter */
+ /* Intersil PRISM Duette/Prism GT Wireless LAN adapter */
+ /* Default */
{
- 0x1260, 0x3886,
+ PCIVENDOR_INTERSIL, PCIDEVICE_ISL3890,
PCI_ANY_ID, PCI_ANY_ID,
0, 0, 0
},
/* .enable_wake ; we don't support this yet */
};
+static void
+prism54_get_card_model(struct net_device *ndev)
+{
+ islpci_private *priv;
+ char *modelp;
+ int notwork = 0;
+
+ priv = netdev_priv(ndev);
+ switch (priv->pdev->subsystem_device) {
+ case PCIDEVICE_ISL3877:
+ modelp = "PRISM Indigo";
+ break;
+ case PCIDEVICE_ISL3886:
+ modelp = "PRISM Javelin / Xbow";
+ break;
+ case PCIDEVICE_3COM6001:
+ modelp = "3COM 3CRWE154G72";
+ break;
+ case 0x3202UL:
+ modelp = "D-Link DWL-g650 A1";
+ break;
+ case 0xd019UL:
+ modelp = "WN-G54/CB";
+ break;
+ case 0x4800UL:
+ modelp = "Netgear WG511";
+ break;
+ case 0x2802UL:
+ modelp = "SMC2802W";
+ break;
+ case 0xee03UL:
+ modelp = "SMC2802W V2";
+ notwork = 1;
+ break;
+ case 0x2835UL:
+ modelp = "SMC2835W";
+ break;
+ case 0xa835UL:
+ modelp = "SMC2835W V2";
+ notwork = 1;
+ break;
+ case 0xc104UL:
+ modelp = "CG-WLCB54GT";
+ break;
+ case 0x1605UL:
+ modelp = "Tekram Technology clone";
+ break;
+ /* Let's leave this one out for now since it seems bogus/wrong
+ * Even if the manufacturer did use 0x0000UL it may not be correct
+ * by their part, therefore deserving no name ;) */
+ /* case 0x0000UL:
+ * modelp = "SparkLAN WL-850F";
+ * break;*/
+
+ /* We have two reported for the one below :( */
+ case 0x0014UL:
+ modelp = "I4 Z-Com XG-600 and clones";
+ break;
+ case 0x0020UL:
+ modelp = "I4 Z-Com XG-900 and clones";
+ break;
+/* Default it */
+/*
+ case PCIDEVICE_ISL3890:
+ modelp = "PRISM Duette/GT";
+ break;
+*/
+ default:
+ modelp = "PRISM Duette/GT";
+ }
+ printk(KERN_DEBUG "%s: %s driver detected card model: %s\n",
+ ndev->name, DRV_NAME, modelp);
+ if ( notwork ) {
+ printk(KERN_DEBUG "%s: %s Warning - This may not work\n",
+ ndev->name, DRV_NAME);
+ }
+ return;
+}
+
/******************************************************************************
Module initialization functions
******************************************************************************/
*
* Writing zero to both these two registers will disable both timeouts and
* *can* solve problems caused by devices that are slow to respond.
- * Make this configurable - MSW
*/
- if ( init_pcitm >= 0 ) {
- pci_write_config_byte(pdev, 0x40, (u8)init_pcitm);
- pci_write_config_byte(pdev, 0x41, (u8)init_pcitm);
- } else {
- printk(KERN_INFO "PCI TRDY/RETRY unchanged\n");
- }
+ /* I am taking these out, we should not be poking around in the
+ * programmable timers - MSW
+ */
+/* Do not zero the programmable timers
+ pci_write_config_byte(pdev, 0x40, 0);
+ pci_write_config_byte(pdev, 0x41, 0);
+*/
/* request the pci device I/O regions */
rvalue = pci_request_regions(pdev, DRV_NAME);
/* firmware upload is triggered in islpci_open */
+ /* Pretty card model discovery output */
+ prism54_get_card_model(ndev);
+
return 0;
do_unregister_netdev:
unregister_netdev(ndev);
islpci_free_memory(priv);
- pci_set_drvdata(pdev, NULL);
+ pci_set_drvdata(pdev, 0);
free_netdev(ndev);
- priv = NULL;
+ priv = 0;
do_pci_release_regions:
pci_release_regions(pdev);
do_pci_disable_device:
prism54_remove(struct pci_dev *pdev)
{
struct net_device *ndev = pci_get_drvdata(pdev);
- islpci_private *priv = ndev ? netdev_priv(ndev) : NULL;
+ islpci_private *priv = ndev ? netdev_priv(ndev) : 0;
BUG_ON(!priv);
if (!__in_cleanup_module) {
/* free the PCI memory and unmap the remapped page */
islpci_free_memory(priv);
- pci_set_drvdata(pdev, NULL);
+ pci_set_drvdata(pdev, 0);
free_netdev(ndev);
- priv = NULL;
+ priv = 0;
pci_release_regions(pdev);
prism54_suspend(struct pci_dev *pdev, u32 state)
{
struct net_device *ndev = pci_get_drvdata(pdev);
- islpci_private *priv = ndev ? netdev_priv(ndev) : NULL;
+ islpci_private *priv = ndev ? netdev_priv(ndev) : 0;
BUG_ON(!priv);
printk(KERN_NOTICE "%s: got suspend request (state %d)\n",
prism54_resume(struct pci_dev *pdev)
{
struct net_device *ndev = pci_get_drvdata(pdev);
- islpci_private *priv = ndev ? netdev_priv(ndev) : NULL;
+ islpci_private *priv = ndev ? netdev_priv(ndev) : 0;
BUG_ON(!priv);
printk(KERN_NOTICE "%s: got resume request\n", ndev->name);
int err;
DEFINE_WAIT(wait);
- *recvframe = NULL;
-
if (down_interruptible(&priv->mgmt_sem))
return -ERESTARTSYS;
/* General driver definitions */
+#define PCIVENDOR_INTERSIL 0x1260UL
+#define PCIVENDOR_3COM 0x10b7UL
+#define PCIVENDOR_DLINK 0x1186UL
+#define PCIVENDOR_I4 0x17cfUL
+#define PCIVENDOR_IODATA 0x10fcUL
+#define PCIVENDOR_NETGEAR 0x1385UL
+#define PCIVENDOR_SMC 0x10b8UL
+#define PCIVENDOR_ACCTON 0x1113UL
+#define PCIVENDOR_ATI 0x1259UL
+#define PCIVENDOR_TTL 0x16a5UL
+
+#define PCIDEVICE_ISL3877 0x3877UL
+#define PCIDEVICE_ISL3886 0x3886UL
+#define PCIDEVICE_ISL3890 0x3890UL
+#define PCIDEVICE_3COM6001 0x6001UL
#define PCIDEVICE_LATENCY_TIMER_MIN 0x40
#define PCIDEVICE_LATENCY_TIMER_VAL 0x50
OID_UNKNOWN(OID_INL_MEMORY, 0xFF020002),
OID_U32_C(OID_INL_MODE, 0xFF020003),
OID_UNKNOWN(OID_INL_COMPONENT_NR, 0xFF020004),
- OID_STRUCT(OID_INL_VERSION, 0xFF020005, u8[8], OID_TYPE_RAW),
+ OID_UNKNOWN(OID_INL_VERSION, 0xFF020005),
OID_UNKNOWN(OID_INL_INTERFACE_ID, 0xFF020006),
OID_UNKNOWN(OID_INL_COMPONENT_ID, 0xFF020007),
OID_U32_C(OID_INL_CONFIG, 0xFF020008),
mgt_set_request(islpci_private *priv, enum oid_num_t n, int extra, void *data)
{
int ret = 0;
- struct islpci_mgmtframe *response = NULL;
+ struct islpci_mgmtframe *response;
int response_op = PIMFOR_OP_ERROR;
int dlen;
void *cache, *_data = data;
BUG_ON(OID_NUM_LAST <= n);
BUG_ON(extra > isl_oid[n].range);
- res->ptr = NULL;
-
if (!priv->mib)
/* memory has been freed */
return -1;
DOT11_OID_DEFKEYID,
DOT11_OID_DOT1XENABLE,
OID_INL_DOT11D_CONFORMANCE,
- /* Do not initialize this - fw < 1.0.4.3 rejects it
OID_INL_OUTPUTPOWER,
- */
};
/* update the MAC addr. */
static int
mgt_update_addr(islpci_private *priv)
{
- struct islpci_mgmtframe *res = NULL;
+ struct islpci_mgmtframe *res;
int ret;
ret = islpci_mgt_transaction(priv->ndev, PIMFOR_OP_GET,
FullTxStatus | IsGigabit | HasMulticastBug | HasMACAddrBug | DontUseEeprom},
{"Symbios SYM83C885", { 0x07011000, 0xffffffff},
PCI_IOTYPE, YELLOWFIN_SIZE, HasMII | DontUseEeprom },
- {NULL,},
+ {0,},
};
static struct pci_device_id yellowfin_pci_tbl[] = {
#ifdef NO_TXSTATS
/* In this mode the Tx ring needs only a single descriptor. */
for (i = 0; i < TX_RING_SIZE; i++) {
- yp->tx_skbuff[i] = NULL;
+ yp->tx_skbuff[i] = 0;
yp->tx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
yp->tx_ring[i].branch_addr = cpu_to_le32(yp->tx_ring_dma +
((i+1)%TX_RING_SIZE)*sizeof(struct yellowfin_desc));
pci_unmap_single(yp->pci_dev, yp->tx_ring[entry].addr,
skb->len, PCI_DMA_TODEVICE);
dev_kfree_skb_irq(skb);
- yp->tx_skbuff[entry] = NULL;
+ yp->tx_skbuff[entry] = 0;
}
if (yp->tx_full
&& yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE - 4) {
if (yp->rx_skbuff[i]) {
dev_kfree_skb(yp->rx_skbuff[i]);
}
- yp->rx_skbuff[i] = NULL;
+ yp->rx_skbuff[i] = 0;
}
for (i = 0; i < TX_RING_SIZE; i++) {
if (yp->tx_skbuff[i])
dev_kfree_skb(yp->tx_skbuff[i]);
- yp->tx_skbuff[i] = NULL;
+ yp->tx_skbuff[i] = 0;
}
#ifdef YF_PROTOTYPE /* Support for prototype hardware errata. */
ssize_t oprofilefs_str_to_user(char const * str, char __user * buf, size_t count, loff_t * offset)
{
- return simple_read_from_buffer(buf, count, offset, str, strlen(str));
+ size_t len = strlen(str);
+
+ if (!count)
+ return 0;
+
+ if (*offset > len)
+ return 0;
+
+ if (count > len - *offset)
+ count = len - *offset;
+
+ if (copy_to_user(buf, str + *offset, count))
+ return -EFAULT;
+
+ *offset += count;
+
+ return count;
}
ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user * buf, size_t count, loff_t * offset)
{
char tmpbuf[TMPBUFSIZE];
- size_t maxlen = snprintf(tmpbuf, TMPBUFSIZE, "%lu\n", val);
+ size_t maxlen;
+
+ if (!count)
+ return 0;
+
+ spin_lock(&oprofilefs_lock);
+ maxlen = snprintf(tmpbuf, TMPBUFSIZE, "%lu\n", val);
+ spin_unlock(&oprofilefs_lock);
if (maxlen > TMPBUFSIZE)
maxlen = TMPBUFSIZE;
- return simple_read_from_buffer(buf, count, offset, tmpbuf, maxlen);
+
+ if (*offset > maxlen)
+ return 0;
+
+ if (count > maxlen - *offset)
+ count = maxlen - *offset;
+
+ if (copy_to_user(buf, tmpbuf + *offset, count))
+ return -EFAULT;
+
+ *offset += count;
+
+ return count;
}
struct dino_device *dino_dev; // Dino specific control struct
const char *version = "unknown";
const int name_len = 32;
- char hw_path[64];
char *name;
int is_cujo = 0;
struct pci_bus *bus;
-
+
name = kmalloc(name_len, GFP_KERNEL);
- if(name) {
- print_pa_hwpath(dev, hw_path);
- snprintf(name, name_len, "Dino [%s]", hw_path);
- }
+ if(name)
+ snprintf(name, name_len, "Dino %s", dev->dev.bus_id);
else
name = "Dino";
sg_dma_len(startsg) = 0;
dma_offset = (unsigned long) pide & ~IOVP_MASK;
n_mappings++;
-#if defined(ZX1_SUPPORT)
- /* Pluto IOMMU IO Virt Address is not zero based */
- sg_dma_address(dma_sg) = pide | ioc->ibase;
-#else
- /* SBA, ccio, and dino are zero based.
- * Trying to save a few CPU cycles for most users.
- */
sg_dma_address(dma_sg) = pide;
-#endif
pdirp = &(ioc->pdir_base[pide >> IOVP_SHIFT]);
prefetchw(pdirp);
}
#include <asm/byteorder.h> /* get in-line asm for swab */
#include <asm/pdc.h>
-#include <asm/pdcpat.h>
#include <asm/page.h>
#include <asm/segment.h>
#include <asm/system.h>
-#include <asm/io.h> /* read/write functions */
+#include <asm/io.h> /* gsc_read/write functions */
#ifdef CONFIG_SUPERIO
#include <asm/superio.h>
#endif
#endif
-#define IOSAPIC_REG_SELECT 0x00
+#define READ_U8(addr) gsc_readb(addr)
+#define READ_U16(addr) le16_to_cpu(gsc_readw((u16 *) (addr)))
+#define READ_U32(addr) le32_to_cpu(gsc_readl((u32 *) (addr)))
+#define READ_REG16(addr) gsc_readw((u16 *) (addr))
+#define READ_REG32(addr) gsc_readl((u32 *) (addr))
+#define WRITE_U8(value, addr) gsc_writeb(value, addr)
+#define WRITE_U16(value, addr) gsc_writew(cpu_to_le16(value), (u16 *) (addr))
+#define WRITE_U32(value, addr) gsc_writel(cpu_to_le32(value), (u32 *) (addr))
+#define WRITE_REG16(value, addr) gsc_writew(value, (u16 *) (addr))
+#define WRITE_REG32(value, addr) gsc_writel(value, (u32 *) (addr))
+
+
+#define IOSAPIC_REG_SELECT 0
#define IOSAPIC_REG_WINDOW 0x10
#define IOSAPIC_REG_EOI 0x40
#define IOSAPIC_IRDT_ENTRY(idx) (0x10+(idx)*2)
#define IOSAPIC_IRDT_ENTRY_HI(idx) (0x11+(idx)*2)
-static inline unsigned int iosapic_read(unsigned long iosapic, unsigned int reg)
-{
- writel(reg, iosapic + IOSAPIC_REG_SELECT);
- return readl(iosapic + IOSAPIC_REG_WINDOW);
-}
-
-static inline void iosapic_write(unsigned long iosapic, unsigned int reg, u32 val)
-{
- writel(reg, iosapic + IOSAPIC_REG_SELECT);
- writel(val, iosapic + IOSAPIC_REG_WINDOW);
-}
-
/*
+** FIXME: revisit which GFP flags we should really be using.
** GFP_KERNEL includes __GFP_WAIT flag and that may not
** be acceptable. Since this is boot time, we shouldn't have
** to wait ever and this code should (will?) never get called
#define IOSAPIC_UNLOCK(lck) spin_unlock_irqrestore(lck, irqflags)
-#define IOSAPIC_VERSION_MASK 0x000000ff
-#define IOSAPIC_VERSION(ver) ((int) (ver & IOSAPIC_VERSION_MASK))
+#define IOSAPIC_VERSION_MASK 0x000000ff
+#define IOSAPIC_VERSION_SHIFT 0x0
+#define IOSAPIC_VERSION(ver) \
+ (int) ((ver & IOSAPIC_VERSION_MASK) >> IOSAPIC_VERSION_SHIFT)
#define IOSAPIC_MAX_ENTRY_MASK 0x00ff0000
+
#define IOSAPIC_MAX_ENTRY_SHIFT 0x10
-#define IOSAPIC_IRDT_MAX_ENTRY(ver) \
- (int) (((ver) & IOSAPIC_MAX_ENTRY_MASK) >> IOSAPIC_MAX_ENTRY_SHIFT)
+#define IOSAPIC_IRDT_MAX_ENTRY(ver) \
+ (int) ((ver&IOSAPIC_MAX_ENTRY_MASK) >> IOSAPIC_MAX_ENTRY_SHIFT)
/* bits in the "low" I/O Sapic IRdT entry */
#define IOSAPIC_IRDT_ENABLE 0x10000
#define IOSAPIC_IRDT_ID_EID_SHIFT 0x10
+
+#define IOSAPIC_EOI(eoi_addr, eoi_data) gsc_writel(eoi_data, eoi_addr)
+
static struct iosapic_info *iosapic_list;
static spinlock_t iosapic_lock;
static int iosapic_count;
struct irt_entry *p = table;
int i;
- printk(MODULE_NAME " Interrupt Routing Table (cell %ld)\n", cell_num);
- printk(MODULE_NAME " start = 0x%p num_entries %ld entry_size %d\n",
+ printk(KERN_DEBUG MODULE_NAME " Interrupt Routing Table (cell %ld)\n", cell_num);
+ printk(KERN_DEBUG MODULE_NAME " start = 0x%p num_entries %ld entry_size %d\n",
table,
num_entries,
(int) sizeof(struct irt_entry));
for (i = 0 ; i < num_entries ; i++, p++) {
- printk(MODULE_NAME " %02x %02x %02x %02x %02x %02x %02x %02x %08x%08x\n",
+ printk(KERN_DEBUG MODULE_NAME " %02x %02x %02x %02x %02x %02x %02x %02x %08x%08x\n",
p->entry_type, p->entry_length, p->interrupt_type,
p->polarity_trigger, p->src_bus_irq_devno, p->src_bus_id,
p->src_seg_id, p->dest_iosapic_intin,
static irqreturn_t
iosapic_interrupt(int irq, void *dev_id, struct pt_regs * regs)
{
- struct vector_info *vi = (struct vector_info *) dev_id;
+ struct vector_info *vi = (struct vector_info *)dev_id;
extern void do_irq(struct irqaction *a, int i, struct pt_regs *p);
int irq_num = vi->iosapic->isi_region->data.irqbase + vi->irqline;
- DBG("iosapic_interrupt(): irq %d line %d eoi 0x%p 0x%x\n",
- irq, vi->irqline, vi->eoi_addr, vi->eoi_data);
-
- /* Do NOT need to mask/unmask IRQ. processor is already masked. */
+ DBG("iosapic_interrupt(): irq %d line %d eoi %p\n",
+ irq, vi->irqline, vi->eoi_addr);
+/* FIXME: Need to mask/unmask? processor IRQ is already masked... */
do_irq(&vi->iosapic->isi_region->action[vi->irqline], irq_num, regs);
/*
- ** PARISC only supports PCI devices below I/O SAPIC.
** PCI only supports level triggered in order to share IRQ lines.
- ** ergo I/O SAPIC must always issue EOI on parisc.
- **
- ** i386/ia64 support ISA devices and have to deal with
- ** edge-triggered interrupts too.
+ ** I/O SAPIC must always issue EOI.
*/
- __raw_writel(vi->eoi_data, vi->eoi_addr);
+ IOSAPIC_EOI(vi->eoi_addr, vi->eoi_data);
+
return IRQ_HANDLED;
}
ASSERT(tmp == 0);
vi->eoi_addr = (u32 *) (isi->isi_hpa + IOSAPIC_REG_EOI);
- vi->eoi_data = cpu_to_le32(vi->txn_data);
+ vi->eoi_data = cpu_to_le32(vi->irqline);
+
ASSERT(NULL != isi->isi_region);
DBG_IRT("iosapic_fixup_irq() %d:%d %x %x line %d irq %d\n",
struct iosapic_info *isp = vi->iosapic;
u8 idx = vi->irqline;
- *dp0 = iosapic_read(isp->isi_hpa, IOSAPIC_IRDT_ENTRY(idx));
- *dp1 = iosapic_read(isp->isi_hpa, IOSAPIC_IRDT_ENTRY_HI(idx));
+ /* point the window register to the lower word */
+ WRITE_U32(IOSAPIC_IRDT_ENTRY(idx), isp->isi_hpa+IOSAPIC_REG_SELECT);
+ *dp0 = READ_U32(isp->isi_hpa+IOSAPIC_REG_WINDOW);
+
+ /* point the window register to the higher word */
+ WRITE_U32(IOSAPIC_IRDT_ENTRY_HI(idx), isp->isi_hpa+IOSAPIC_REG_SELECT);
+ *dp1 = READ_U32(isp->isi_hpa+IOSAPIC_REG_WINDOW);
}
ASSERT(NULL != isp);
ASSERT(0 != isp->isi_hpa);
- DBG_IRT("iosapic_wr_irt_entry(): irq %d hpa %p 0x%x 0x%x\n",
+ DBG_IRT("iosapic_wr_irt_entry(): irq %d hpa %p WINDOW %p 0x%x 0x%x\n",
vi->irqline,
- isp->isi_hpa,
+ isp->isi_hpa, isp->isi_hpa+IOSAPIC_REG_WINDOW,
dp0, dp1);
- iosapic_write(isp->isi_hpa, IOSAPIC_IRDT_ENTRY(vi->irqline), dp0);
+ /* point the window register to the lower word */
+ WRITE_U32(IOSAPIC_IRDT_ENTRY(vi->irqline), isp->isi_hpa+IOSAPIC_REG_SELECT);
+ WRITE_U32( dp0, isp->isi_hpa+IOSAPIC_REG_WINDOW);
/* Read the window register to flush the writes down to HW */
- dp0 = readl(isp->isi_hpa+IOSAPIC_REG_WINDOW);
+ dp0 = READ_U32(isp->isi_hpa+IOSAPIC_REG_WINDOW);
- iosapic_write(isp->isi_hpa, IOSAPIC_IRDT_ENTRY_HI(vi->irqline), dp1);
+ /* point the window register to the higher word */
+ WRITE_U32(IOSAPIC_IRDT_ENTRY_HI(vi->irqline), isp->isi_hpa+IOSAPIC_REG_SELECT);
+ WRITE_U32( dp1, isp->isi_hpa+IOSAPIC_REG_WINDOW);
/* Read the window register to flush the writes down to HW */
- dp1 = readl(isp->isi_hpa+IOSAPIC_REG_WINDOW);
+ dp1 = READ_U32(isp->isi_hpa+IOSAPIC_REG_WINDOW);
}
iosapic_set_irt_data(vi, &d0, &d1);
iosapic_wr_irt_entry(vi, d0, d1);
+
#ifdef DEBUG_IOSAPIC_IRT
{
u32 *t = (u32 *) ((ulong) vi->eoi_addr & ~0xffUL);
printk("iosapic_enable_irq(): regs %p", vi->eoi_addr);
- for ( ; t < vi->eoi_addr; t++)
- printk(" %x", readl(t));
+ while (t < vi->eoi_addr) printk(" %x", READ_U32(t++));
printk("\n");
}
struct iosapic_info *isp = vi->iosapic;
for (d0=0x10; d0<0x1e; d0++) {
- d1 = iosapic_read(isp->isi_hpa, d0);
+ /* point the window register to the lower word */
+ WRITE_U32(d0, isp->isi_hpa+IOSAPIC_REG_SELECT);
+
+ /* read the word */
+ d1 = READ_U32(isp->isi_hpa+IOSAPIC_REG_WINDOW);
printk(" %x", d1);
}
}
#endif
/*
- ** Issueing I/O SAPIC an EOI causes an interrupt IFF IRQ line is
- ** asserted. IRQ generally should not be asserted when a driver
- ** enables their IRQ. It can lead to "interesting" race conditions
- ** in the driver initialization sequence.
+ ** KLUGE: IRQ should not be asserted while drivers are enabling their IRQ.
+ ** PCI supports level triggered in order to share IRQ lines.
+ **
+ ** Issuing I/O SAPIC an EOI causes an interrupt iff IRQ line is
+ ** asserted.
*/
- __raw_writel(vi->eoi_data, vi->eoi_addr);
+ IOSAPIC_EOI(vi->eoi_addr, vi->eoi_data);
}
ASSERT(isi);
ASSERT(isi->isi_hpa);
- return iosapic_read(isi->isi_hpa, IOSAPIC_REG_VERSION);
+ /* point window to the version register */
+ WRITE_U32(IOSAPIC_REG_VERSION, isi->isi_hpa+IOSAPIC_REG_SELECT);
+
+ /* now read the version register */
+ return (READ_U32(isi->isi_hpa+IOSAPIC_REG_WINDOW));
}
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/init.h> /* for __init and __devinit */
+/* #define PCI_DEBUG enable ASSERT */
#include <linux/pci.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
#include <asm/irq.h> /* for struct irq_region support */
#include <asm/pdc.h>
-#include <asm/pdcpat.h>
#include <asm/page.h>
#include <asm/segment.h>
#include <asm/system.h>
#define DBG_PAT(x...)
#endif
-#ifdef DEBUG_LBA
-#undef ASSERT
-#define ASSERT(expr) \
- if(!(expr)) { \
- printk("\n%s:%d: Assertion " #expr " failed!\n", \
- __FILE__, __LINE__); \
- panic(#expr); \
- }
-#else
-#define ASSERT(expr)
-#endif
-
-
/*
** Config accessor functions only pass in the 8-bit bus number and not
** the 8-bit "PCI Segment" number. Each LBA will be assigned a PCI bus
#define LBA_HINT_CFG 0x0310
#define LBA_HINT_BASE 0x0380 /* 14 registers at every 8 bytes. */
-#define LBA_BUS_MODE 0x0620
-
/* ERROR regs are needed for config cycle kluges */
#define LBA_ERROR_CONFIG 0x0680
#define LBA_SMART_MODE 0x20
#define LBA_IOSAPIC_BASE 0x800 /* Offset of IRQ logic */
/* non-postable I/O port space, densely packed */
-#ifdef CONFIG_PARISC64
+#ifdef __LP64__
#define LBA_ASTRO_PORT_BASE (0xfffffffffee00000UL)
#else
#define LBA_ASTRO_PORT_BASE (0xfee00000UL)
#endif
-#define ELROY_HVERS 0x782
-#define MERCURY_HVERS 0x783
-#define QUICKSILVER_HVERS 0x784
-
-static inline int IS_ELROY(struct parisc_device *d)
-{
- return (d->id.hversion == ELROY_HVERS);
-}
-
-static inline int IS_MERCURY(struct parisc_device *d)
-{
- return (d->id.hversion == MERCURY_HVERS);
-}
-
-static inline int IS_QUICKSILVER(struct parisc_device *d)
-{
- return (d->id.hversion == QUICKSILVER_HVERS);
-}
-
/*
** lba_device: Per instance Elroy data structure
spinlock_t lba_lock;
void *iosapic_obj;
-#ifdef CONFIG_PARISC64
+#ifdef __LP64__
unsigned long iop_base; /* PA_VIEW - for IO port accessor funcs */
#endif
{
u8 first_bus = d->hba.hba_bus->secondary;
u8 last_sub_bus = d->hba.hba_bus->subordinate;
+#if 0
+/* FIXME - see below in this function */
+ u8 dev = PCI_SLOT(dfn);
+ u8 func = PCI_FUNC(dfn);
+#endif
ASSERT(bus >= first_bus);
ASSERT(bus <= last_sub_bus);
return(FALSE);
}
+#if 0
+/*
+** FIXME: Need to implement code to fill the devices bitmap based
+** on contents of the local pci_bus tree "data base".
+** pci_register_ops() walks the bus for us and builds the tree.
+** For now, always do the config cycle.
+*/
+ bus -= first_bus;
+
+ return (((d->devices[bus][dev]) >> func) & 0x1);
+#else
return TRUE;
+#endif
}
return(data);
}
-#ifdef CONFIG_PARISC64
-#define pat_cfg_addr(bus, devfn, addr) (((bus) << 16) | ((devfn) << 8) | (addr))
-
-static int pat_cfg_read(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 *data)
-{
- int tok = pat_cfg_addr(bus->number, devfn, pos);
- u32 tmp;
- int ret = pdc_pat_io_pci_cfg_read(tok, size, &tmp);
-
- DBG_CFG("%s(%d:%d.%d+0x%02x) -> 0x%x %d\n", __FUNCTION__, bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn), pos, tmp, ret);
-
- switch (size) {
- case 1: *data = (u8) tmp; return (tmp == (u8) ~0);
- case 2: *data = (u16) tmp; return (tmp == (u16) ~0);
- case 4: *data = (u32) tmp; return (tmp == (u32) ~0);
- }
- *data = ~0;
- return (ret);
-}
-
-static int pat_cfg_write(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 data)
-{
- int tok = pat_cfg_addr(bus->number, devfn, pos);
- int ret = pdc_pat_io_pci_cfg_write(tok, size, data);
-
- DBG_CFG("%s(%d:%d.%d+0x%02x, 0x%lx/%d)\n", __FUNCTION__, bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn), pos, data, size);
- return (ret);
-}
-
-static struct pci_ops pat_cfg_ops = {
- .read = pat_cfg_read,
- .write = pat_cfg_write,
-};
-#else
-/* keep the compiler from complaining about undeclared variables */
-#define pat_cfg_ops lba_cfg_ops
-#endif
static int lba_cfg_read(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 *data)
{
}
DBG_CFG("%s(%x+%2x) = 0x%x (c)\n", __FUNCTION__, tok, pos, data);
-
/* Basic Algorithm */
LBA_CFG_TR4_ADDR_SETUP(d, tok | pos);
switch(size) {
}
-#ifdef CONFIG_PARISC64
+#ifdef __LP64__
/*
** Determine if a device is already configured.
}
}
}
-#else
-#define lba_claim_dev_resources(dev)
#endif
lba_dump_res(&iomem_resource, 2);
}
-#ifdef CONFIG_PARISC64
+#ifdef __LP64__
if (ldev->hba.gmmio_space.flags) {
err = request_resource(&iomem_resource, &(ldev->hba.gmmio_space));
if (err < 0) {
bus->bridge_ctl &= ~(status & PCI_STATUS_FAST_BACK);
#endif
+#ifdef __LP64__
if (is_pdc_pat()) {
/* Claim resources for PDC's devices */
lba_claim_dev_resources(dev);
}
+#endif
/*
** P2PB's have no IRQs. ignore them.
};
-#ifdef CONFIG_PARISC64
+#ifdef __LP64__
#define PIOP_TO_GMMIO(lba, addr) \
((lba)->iop_base + (((addr)&0xFFFC)<<10) + ((addr)&3))
}
}
}
-#else
-/* keep compiler from complaining about missing declarations */
-#define lba_pat_port_ops lba_astro_port_ops
-#define lba_pat_resources(pa_dev, lba_dev)
-#endif /* CONFIG_PARISC64 */
+#endif /* __LP64__ */
static void
unsigned long rsize;
int lba_num;
-#ifdef CONFIG_PARISC64
+#ifdef __LP64__
/*
** Sign extend all BAR values on "legacy" platforms.
** "Sprockets" PDC (Forte/Allegro) initializes everything
printk("\n");
#endif /* DEBUG_LBA_PAT */
-#ifdef CONFIG_PARISC64
+#ifdef __LP64__
/*
* FIXME add support for PDC_PAT_IO "Get slot status" - OLAR support
* Only N-Class and up can really make use of Get slot status.
** have work to do.
*/
static int __init
-lba_driver_probe(struct parisc_device *dev)
+lba_driver_callback(struct parisc_device *dev)
{
struct lba_device *lba_dev;
struct pci_bus *lba_bus;
/* Read HW Rev First */
func_class = READ_REG32(dev->hpa + LBA_FCLASS);
-
- if (IS_ELROY(dev)) {
- func_class &= 0xf;
- switch (func_class) {
- case 0: version = "TR1.0"; break;
- case 1: version = "TR2.0"; break;
- case 2: version = "TR2.1"; break;
- case 3: version = "TR2.2"; break;
- case 4: version = "TR3.0"; break;
- case 5: version = "TR4.0"; break;
- default: version = "TR4+";
- }
- printk(KERN_INFO "%s version %s (0x%x) found at 0x%lx\n",
- MODULE_NAME, version, func_class & 0xf, dev->hpa);
-
- /* Just in case we find some prototypes... */
- } else if (IS_MERCURY(dev) || IS_QUICKSILVER(dev)) {
- func_class &= 0xff;
- version = kmalloc(6, GFP_KERNEL);
- sprintf(version,"TR%d.%d",(func_class >> 4),(func_class & 0xf));
- /* We could use one printk for both and have it outside,
- * but for the mask for func_class.
- */
- printk(KERN_INFO "%s version %s (0x%x) found at 0x%lx\n",
- MODULE_NAME, version, func_class & 0xff, dev->hpa);
+ func_class &= 0xf;
+
+ switch (func_class) {
+ case 0: version = "TR1.0"; break;
+ case 1: version = "TR2.0"; break;
+ case 2: version = "TR2.1"; break;
+ case 3: version = "TR2.2"; break;
+ case 4: version = "TR3.0"; break;
+ case 5: version = "TR4.0"; break;
+ default: version = "TR4+";
}
+ printk(KERN_INFO "%s version %s (0x%x) found at 0x%lx\n",
+ MODULE_NAME, version, func_class & 0xf, dev->hpa);
+
+ /* Just in case we find some prototypes... */
if (func_class < 2) {
- printk(KERN_WARNING "Can't support LBA older than TR2.1"
- " - continuing under adversity.\n");
+ printk(KERN_WARNING "Can't support LBA older than TR2.1 "
+ "- continuing under adversity.\n");
}
/*
/* ---------- Third : setup I/O Port and MMIO resources --------- */
+#ifdef __LP64__
if (is_pdc_pat()) {
/* PDC PAT firmware uses PIOP region of GMMIO space. */
pci_port = &lba_pat_port_ops;
+
/* Go ask PDC PAT what resources this LBA has */
lba_pat_resources(dev, lba_dev);
- } else {
+ } else
+#endif
+ {
/* Sprockets PDC uses NPIOP region */
pci_port = &lba_astro_port_ops;
dev->dev.platform_data = lba_dev;
lba_bus = lba_dev->hba.hba_bus =
pci_scan_bus_parented(&dev->dev, lba_dev->hba.bus_num.start,
- is_pdc_pat() ? &pat_cfg_ops : &lba_cfg_ops,
- NULL);
+ &lba_cfg_ops, NULL);
+#ifdef __LP64__
if (is_pdc_pat()) {
/* assign resources to un-initialized devices */
DBG_PAT("LBA pci_bus_assign_resources()\n");
lba_dump_res(&lba_dev->hba.lmmio_space, 2);
#endif
}
+#endif
/*
** Once PCI register ops has walked the bus, access to config
}
static struct parisc_device_id lba_tbl[] = {
- { HPHW_BRIDGE, HVERSION_REV_ANY_ID, ELROY_HVERS, 0xa },
- { HPHW_BRIDGE, HVERSION_REV_ANY_ID, MERCURY_HVERS, 0xa },
- { HPHW_BRIDGE, HVERSION_REV_ANY_ID, QUICKSILVER_HVERS, 0xa },
+ { HPHW_BRIDGE, HVERSION_REV_ANY_ID, 0x782, 0xa },
{ 0, }
};
static struct parisc_driver lba_driver = {
.name = MODULE_NAME,
.id_table = lba_tbl,
- .probe = lba_driver_probe,
+ .probe = lba_driver_callback,
};
/*
static int led_proc_write(struct file *file, const char *buf,
unsigned long count, void *data)
{
- char *cur, lbuf[count + 1];
+ char *cur, lbuf[count];
int d;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
- memset(lbuf, 0, count + 1);
+ memset(lbuf, 0, count);
if (copy_from_user(lbuf, buf, count))
return -EFAULT;
break;
case LED_HASLCD:
- if (*cur && cur[strlen(cur)-1] == '\n')
+ while (*cur && cur[strlen(cur)-1] == '\n')
cur[strlen(cur)-1] = 0;
if (*cur == 0)
cur = lcd_text_default;
#include <linux/mm.h>
#include <linux/string.h>
+#undef PCI_DEBUG /* for ASSERT */
#include <linux/pci.h>
+#undef PCI_DEBUG
#include <asm/byteorder.h>
#include <asm/io.h>
#include <linux/proc_fs.h>
#include <asm/runway.h> /* for proc_runway_root */
#include <asm/pdc.h> /* for PDC_MODEL_* */
-#include <asm/pdcpat.h> /* for is_pdc_pat() */
#include <asm/parisc-device.h>
-
-/* declared in arch/parisc/kernel/setup.c */
-extern struct proc_dir_entry * proc_mckinley_root;
-
#define MODULE_NAME "SBA"
#ifdef CONFIG_PROC_FS
** Don't even think about messing with it unless you have
** plenty of 710's to sacrifice to the computer gods. :^)
*/
-#undef DEBUG_SBA_ASSERT
#undef DEBUG_SBA_INIT
#undef DEBUG_SBA_RUN
#undef DEBUG_SBA_RUN_SG
#undef DEBUG_LARGE_SG_ENTRIES
#undef DEBUG_DMB_TRAP
+#define SBA_INLINE __inline__
+
#ifdef DEBUG_SBA_INIT
#define DBG_INIT(x...) printk(x)
#else
#define DBG_RES(x...)
#endif
-#ifdef DEBUG_SBA_ASSERT
-#undef ASSERT
-#define ASSERT(expr) \
- if(!(expr)) { \
- printk("\n%s:%d: Assertion " #expr " failed!\n", \
- __FILE__, __LINE__); \
- panic(#expr); \
- }
-#else
-#define ASSERT(expr)
-#endif
-
-
-#if defined(__LP64__) && !defined(CONFIG_PDC_NARROW)
-/* "low end" PA8800 machines use ZX1 chipset */
-#define ZX1_SUPPORT
-#endif
-
-#define SBA_INLINE __inline__
-
-
/*
** The number of pdir entries to "free" before issueing
** a read to PCOM register to flush out PCOM writes.
#define REOG_MERCED_PORT 0x805
#define REOG_ROPES_PORT 0x783
-#define PLUTO_MCKINLEY_PORT 0x880
-#define PLUTO_ROPES_PORT 0x784
-
#define SBA_FUNC_ID 0x0000 /* function id */
#define SBA_FCLASS 0x0008 /* function class, bist, header, rev... */
#define IS_IKE(id) \
(((id)->hversion == IKE_MERCED_PORT) || ((id)->hversion == IKE_ROPES_PORT))
-#define IS_PLUTO(id) \
-(((id)->hversion == PLUTO_MCKINLEY_PORT) || ((id)->hversion == PLUTO_ROPES_PORT))
-
#define SBA_FUNC_SIZE 4096 /* SBA configuration function reg set */
#define ASTRO_IOC_OFFSET 0x20000
/* Ike's IOC's occupy functions 2 and 3 (not 0 and 1) */
#define IKE_IOC_OFFSET(p) ((p+2)*SBA_FUNC_SIZE)
-#define PLUTO_IOC_OFFSET 0x1000
-
#define IOC_CTRL 0x8 /* IOC_CTRL offset */
#define IOC_CTRL_TC (1 << 0) /* TOC Enable */
#define IOC_CTRL_CE (1 << 1) /* Coalesce Enable */
#define IOC_CTRL_RM (1 << 8) /* Real Mode */
#define IOC_CTRL_NC (1 << 9) /* Non Coherent Mode */
-#define MAX_IOC 2 /* per Ike. Pluto/Astro only have 1. */
+#define MAX_IOC 2 /* per Ike. Astro only has 1 */
/*
#define IOC_TCNFG 0x318
#define IOC_PDIR_BASE 0x320
-/* AGP GART driver looks for this */
-#define SBA_IOMMU_COOKIE 0x0000badbadc0ffeeUL
-
+#define IOC_IOVA_SPACE_BASE 0 /* IOVA ranges start at 0 */
/*
** IOC supports 4/8/16/64KB page sizes (see TCNFG register)
** page since the Virtual Coherence Index has to be generated
** and updated for each page.
**
-** PAGE_SIZE could be greater than IOVP_SIZE. But not the inverse.
+** IOVP_SIZE could only be greater than PAGE_SIZE if we are
+** confident the drivers really only touch the next physical
+** page iff that driver instance owns it.
*/
#define IOVP_SIZE PAGE_SIZE
#define IOVP_SHIFT PAGE_SHIFT
unsigned long ioc_hpa; /* I/O MMU base address */
char *res_map; /* resource map, bit == pdir entry */
u64 *pdir_base; /* physical base address */
- unsigned long ibase; /* pdir IOV Space base - shared w/lba_pci */
- unsigned long imask; /* pdir IOV Space mask - shared w/lba_pci */
-#ifdef ZX1_SUPPORT
- unsigned long iovp_mask; /* help convert IOVA to IOVP */
-#endif
+
unsigned long *res_hint; /* next avail IOVP - circular search */
spinlock_t res_lock;
+ unsigned long hint_mask_pdir; /* bits used for DMA hints */
unsigned int res_bitshift; /* from the LEFT! */
unsigned int res_size; /* size of resource map in bytes */
-#if SBA_HINT_SUPPORT
-/* FIXME : DMA HINTs not used */
- unsigned long hint_mask_pdir; /* bits used for DMA hints */
unsigned int hint_shift_pdir;
-#endif
#if DELAYED_RESOURCE_CNT > 0
int saved_cnt;
struct sba_dma_pair {
/* STUFF We don't need in performance path */
unsigned int pdir_size; /* in bytes, determined by IOV Space size */
+ unsigned long ibase; /* pdir IOV Space base - shared w/lba_pci */
+ unsigned long imask; /* pdir IOV Space mask - shared w/lba_pci */
};
struct sba_device {
/* Looks nice and keeps the compiler happy */
#define SBA_DEV(d) ((struct sba_device *) (d))
-#if SBA_AGP_SUPPORT
-static int reserve_sba_gart = 1;
-#endif
#define ROUNDUP(x,y) ((x + ((y)-1)) & ~((y)-1))
sba_dump_tlb(unsigned long hpa)
{
DBG_INIT("IO TLB at 0x%lx\n", hpa);
- DBG_INIT("IOC_IBASE : 0x%Lx\n", READ_REG64(hpa+IOC_IBASE));
- DBG_INIT("IOC_IMASK : 0x%Lx\n", READ_REG64(hpa+IOC_IMASK));
- DBG_INIT("IOC_TCNFG : 0x%Lx\n", READ_REG64(hpa+IOC_TCNFG));
- DBG_INIT("IOC_PDIR_BASE: 0x%Lx\n", READ_REG64(hpa+IOC_PDIR_BASE));
+ DBG_INIT("IOC_IBASE : %Lx\n", READ_REG64(hpa+IOC_IBASE));
+ DBG_INIT("IOC_IMASK : %Lx\n", READ_REG64(hpa+IOC_IMASK));
+ DBG_INIT("IOC_TCNFG : %Lx\n", READ_REG64(hpa+IOC_TCNFG));
+ DBG_INIT("IOC_PDIR_BASE: %Lx\n", READ_REG64(hpa+IOC_PDIR_BASE));
DBG_INIT("\n");
}
-#else
-#define sba_dump_ranges(x)
-#define sba_dump_tlb(x)
#endif
#define PAGES_PER_RANGE 1 /* could increase this to 4 or 8 if needed */
/* Convert from IOVP to IOVA and vice versa. */
+#define SBA_IOVA(ioc,iovp,offset,hint_reg) ((iovp) | (offset) | ((hint_reg)<<(ioc->hint_shift_pdir)))
+#define SBA_IOVP(ioc,iova) ((iova) & ioc->hint_mask_pdir)
-#ifdef ZX1_SUPPORT
-/* Pluto (aka ZX1) boxes need to set or clear the ibase bits appropriately */
-#define SBA_IOVA(ioc,iovp,offset,hint_reg) ((ioc->ibase) | (iovp) | (offset))
-#define SBA_IOVP(ioc,iova) ((iova) & (ioc)->iovp_mask)
-#else
-/* only support Astro and ancestors. Saves a few cycles in key places */
-#define SBA_IOVA(ioc,iovp,offset,hint_reg) ((iovp) | (offset))
-#define SBA_IOVP(ioc,iova) (iova)
-#endif
-
+/* FIXME : review these macros to verify correctness and usage */
#define PDIR_INDEX(iovp) ((iovp)>>IOVP_SHIFT)
+#define MKIOVP(dma_hint,pide) (dma_addr_t)((long)(dma_hint) | ((long)(pide) << IOVP_SHIFT))
+#define MKIOVA(iovp,offset) (dma_addr_t)((long)iovp | (long)offset)
#define RESMAP_MASK(n) (~0UL << (BITS_PER_LONG - (n)))
#define RESMAP_IDX_MASK (sizeof(unsigned long) - 1)
*
***************************************************************/
-#if SBA_HINT_SUPPORT
#define SBA_DMA_HINT(ioc, val) ((val) << (ioc)->hint_shift_pdir)
-#endif
+
typedef unsigned long space_t;
#define KERNEL_SPACE 0
*
* Given a virtual address (vba, arg2) and space id, (sid, arg1)
* sba_io_pdir_entry() loads the I/O PDIR entry pointed to by
- * pdir_ptr (arg0).
- * Using the bass-ackwards HP bit numbering, Each IO Pdir entry
- * for Astro/Ike looks like:
- *
+ * pdir_ptr (arg0). Each IO Pdir entry consists of 8 bytes as
+ * shown below (MSB == bit 0):
*
* 0 19 51 55 63
* +-+---------------------+----------------------------------+----+--------+
* |V| U | PPN[43:12] | U | VI |
* +-+---------------------+----------------------------------+----+--------+
*
- * Pluto is basically identical, supports fewer physical address bits:
- *
- * 0 23 51 55 63
- * +-+------------------------+-------------------------------+----+--------+
- * |V| U | PPN[39:12] | U | VI |
- * +-+------------------------+-------------------------------+----+--------+
- *
- * V == Valid Bit (Most Significant Bit is bit 0)
+ * V == Valid Bit
* U == Unused
* PPN == Physical Page Number
* VI == Virtual Index (aka Coherent Index)
*
- * LPA instruction output is put into PPN field.
- * LCI (Load Coherence Index) instruction provides the "VI" bits.
+ * The physical address fields are filled with the results of the LPA
+ * instruction. The virtual index field is filled with the results of
+ * the LCI (Load Coherence Index) instruction. The 8 bits used for
+ * the virtual index are bits 12:19 of the value returned by LCI.
*
- * We pre-swap the bytes since PCX-W is Big Endian and the
- * IOMMU uses little endian for the pdir.
+ * We need to pre-swap the bytes since PCX-W is Big Endian.
*/
ASSERT(sid == KERNEL_SPACE);
pa = virt_to_phys(vba);
- pa &= IOVP_MASK;
+ pa &= ~4095ULL; /* clear out offset bits */
mtsp(sid,1);
asm("lci 0(%%sr1, %1), %0" : "=r" (ci) : "r" (vba));
} while (byte_cnt > 0);
}
- WRITE_REG( SBA_IOVA(ioc, iovp, 0, 0), ioc->ioc_hpa+IOC_PCOM);
+ WRITE_REG(iovp, ioc->ioc_hpa+IOC_PCOM);
}
/**
pide = sba_alloc_range(ioc, size);
iovp = (dma_addr_t) pide << IOVP_SHIFT;
- DBG_RUN("%s() 0x%p -> 0x%lx\n",
+ DBG_RUN("%s() 0x%p -> 0x%lx",
__FUNCTION__, addr, (long) iovp | offset);
pdir_start = &(ioc->pdir_base[pide]);
ASSERT(((u8 *)pdir_start)[7] == 0); /* verify availability */
sba_io_pdir_entry(pdir_start, KERNEL_SPACE, (unsigned long) addr, 0);
- DBG_RUN(" pdir 0x%p %02x%02x%02x%02x%02x%02x%02x%02x\n",
+ DBG_RUN(" pdir 0x%p %02x%02x%02x%02x%02x%02x%02x%02x\n",
pdir_start,
(u8) (((u8 *) pdir_start)[7]),
(u8) (((u8 *) pdir_start)[6]),
ioc->usingle_pages += size >> IOVP_SHIFT;
#endif
- sba_mark_invalid(ioc, iova, size);
-
#if DELAYED_RESOURCE_CNT > 0
- /* Delaying when we re-use a IO Pdir entry reduces the number
- * of MMIO reads needed to flush writes to the PCOM register.
- */
d = &(ioc->saved[ioc->saved_cnt]);
d->iova = iova;
d->size = size;
if (++(ioc->saved_cnt) >= DELAYED_RESOURCE_CNT) {
int cnt = ioc->saved_cnt;
while (cnt--) {
+ sba_mark_invalid(ioc, d->iova, d->size);
sba_free_range(ioc, d->iova, d->size);
d--;
}
READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */
}
#else /* DELAYED_RESOURCE_CNT == 0 */
+ sba_mark_invalid(ioc, iova, size);
sba_free_range(ioc, iova, size);
READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */
#endif /* DELAYED_RESOURCE_CNT == 0 */
return (void *) pdir_base;
}
-static void
-sba_ioc_init_pluto(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
-{
- /* lba_set_iregs() is in arch/parisc/kernel/lba_pci.c */
- extern void lba_set_iregs(struct parisc_device *, u32, u32);
-
- u32 iova_space_mask;
- u32 iova_space_size;
- int iov_order, tcnfg;
- struct parisc_device *lba;
-#if SBA_AGP_SUPPORT
- int agp_found = 0;
-#endif
- /*
- ** Firmware programs the base and size of a "safe IOVA space"
- ** (one that doesn't overlap memory or LMMIO space) in the
- ** IBASE and IMASK registers.
- */
- ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE);
- iova_space_size = ~(READ_REG(ioc->ioc_hpa + IOC_IMASK) & 0xFFFFFFFFUL) + 1;
-
- if ((ioc->ibase < 0xfed00000UL) && ((ioc->ibase + iova_space_size) > 0xfee00000UL)) {
- printk("WARNING: IOV space overlaps local config and interrupt message, truncating\n");
- iova_space_size /= 2;
- }
-
- /*
- ** iov_order is always based on a 1GB IOVA space since we want to
- ** turn on the other half for AGP GART.
- */
- iov_order = get_order(iova_space_size >> (IOVP_SHIFT - PAGE_SHIFT));
- ioc->pdir_size = (iova_space_size / IOVP_SIZE) * sizeof(u64);
-
- DBG_INIT("%s() hpa 0x%lx IOV %dMB (%d bits)\n",
- __FUNCTION__, ioc->ioc_hpa, iova_space_size >> 20,
- iov_order + PAGE_SHIFT);
-
- ioc->pdir_base = (void *) __get_free_pages(GFP_KERNEL,
- get_order(ioc->pdir_size));
- if (!ioc->pdir_base)
- panic("Couldn't allocate I/O Page Table\n");
-
- memset(ioc->pdir_base, 0, ioc->pdir_size);
-
- DBG_INIT("%s() pdir %p size %x\n",
- __FUNCTION__, ioc->pdir_base, ioc->pdir_size);
-
-#if SBA_HINT_SUPPORT
- ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;
- ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT));
-
- DBG_INIT(" hint_shift_pdir %x hint_mask_pdir %lx\n",
- ioc->hint_shift_pdir, ioc->hint_mask_pdir);
-#endif
-
- ASSERT((((unsigned long) ioc->pdir_base) & PAGE_MASK) == (unsigned long) ioc->pdir_base);
- WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);
-
- /* build IMASK for IOC and Elroy */
- iova_space_mask = 0xffffffff;
- iova_space_mask <<= (iov_order + PAGE_SHIFT);
- ioc->imask = iova_space_mask;
-#ifdef ZX1_SUPPORT
- ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1);
-#endif
- sba_dump_tlb(ioc->ioc_hpa);
-
- /*
- ** setup Mercury IBASE/IMASK registers as well.
- */
- for (lba = sba->child; lba; lba = lba->sibling) {
- int rope_num = (lba->hpa >> 13) & 0xf;
- if (rope_num >> 3 == ioc_num)
- lba_set_iregs(lba, ioc->ibase, ioc->imask);
- }
-
- WRITE_REG(ioc->imask, ioc->ioc_hpa + IOC_IMASK);
-
-#ifdef __LP64__
- /*
- ** Setting the upper bits makes checking for bypass addresses
- ** a little faster later on.
- */
- ioc->imask |= 0xFFFFFFFF00000000UL;
-#endif
-
- /* Set I/O PDIR Page size to system page size */
- switch (PAGE_SHIFT) {
- case 12: tcnfg = 0; break; /* 4K */
- case 13: tcnfg = 1; break; /* 8K */
- case 14: tcnfg = 2; break; /* 16K */
- case 16: tcnfg = 3; break; /* 64K */
- default:
- panic(__FILE__ "Unsupported system page size %d",
- 1 << PAGE_SHIFT);
- break;
- }
- WRITE_REG(tcnfg, ioc->ioc_hpa + IOC_TCNFG);
-
- /*
- ** Program the IOC's ibase and enable IOVA translation
- ** Bit zero == enable bit.
- */
- WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa + IOC_IBASE);
-
- /*
- ** Clear I/O TLB of any possible entries.
- ** (Yes. This is a bit paranoid...but so what)
- */
- WRITE_REG(ioc->ibase | 31, ioc->ioc_hpa + IOC_PCOM);
-
-#if SBA_AGP_SUPPORT
- /*
- ** If an AGP device is present, only use half of the IOV space
- ** for PCI DMA. Unfortunately we can't know ahead of time
- ** whether GART support will actually be used, for now we
- ** can just key on any AGP device found in the system.
- ** We program the next pdir index after we stop w/ a key for
- ** the GART code to handshake on.
- */
- device=NULL;
- for (lba = sba->child; lba; lba = lba->sibling) {
- if (IS_QUICKSILVER(lba))
- break;
- }
-
- if (lba) {
- DBG_INIT("%s: Reserving half of IOVA space for AGP GART support\n", __FUNCTION__);
- ioc->pdir_size /= 2;
- ((u64 *)ioc->pdir_base)[PDIR_INDEX(iova_space_size/2)] = SBA_IOMMU_COOKIE;
- } else {
- DBG_INIT("%s: No GART needed - no AGP controller found\n", __FUNCTION__);
- }
-#endif /* 0 */
-
-}
static void
sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
__FUNCTION__, ioc->ioc_hpa, (int) (physmem>>20),
iova_space_size>>20, iov_order + PAGE_SHIFT, pdir_size);
- ioc->pdir_base = sba_alloc_pdir(pdir_size);
-
- DBG_INIT("%s() pdir %p size %x\n",
- __FUNCTION__, ioc->pdir_base, pdir_size);
-
-#if SBA_HINT_SUPPORT
/* FIXME : DMA HINTs not used */
ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;
ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT));
- DBG_INIT(" hint_shift_pdir %x hint_mask_pdir %lx\n",
- ioc->hint_shift_pdir, ioc->hint_mask_pdir);
-#endif
+ ioc->pdir_base = sba_alloc_pdir(pdir_size);
+
+ DBG_INIT("%s() pdir %p size %x hint_shift_pdir %x hint_mask_pdir %lx\n",
+ __FUNCTION__, ioc->pdir_base, pdir_size,
+ ioc->hint_shift_pdir, ioc->hint_mask_pdir);
ASSERT((((unsigned long) ioc->pdir_base) & PAGE_MASK) == (unsigned long) ioc->pdir_base);
WRITE_REG64(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);
** On C3000 w/512MB mem, HP-UX 10.20 reports:
** ibase=0, imask=0xFE000000, size=0x2000000.
*/
- ioc->ibase = 0;
+ ioc->ibase = IOC_IOVA_SPACE_BASE | 1; /* bit 0 == enable bit */
ioc->imask = iova_space_mask; /* save it */
-#ifdef ZX1_SUPPORT
- ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1);
-#endif
DBG_INIT("%s() IOV base 0x%lx mask 0x%0lx\n",
__FUNCTION__, ioc->ibase, ioc->imask);
/*
** Program the IOC's ibase and enable IOVA translation
*/
- WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa+IOC_IBASE);
+ WRITE_REG(ioc->ibase, ioc->ioc_hpa+IOC_IBASE);
WRITE_REG(ioc->imask, ioc->ioc_hpa+IOC_IMASK);
/* Set I/O PDIR Page size to 4K */
*/
WRITE_REG(0 | 31, ioc->ioc_hpa+IOC_PCOM);
- ioc->ibase = 0; /* used by SBA_IOVA and related macros */
-
DBG_INIT("%s() DONE\n", __FUNCTION__);
}
*/
}
- if (!IS_PLUTO(sba_dev->iodc)) {
- ioc_ctl = READ_REG(sba_dev->sba_hpa+IOC_CTRL);
- DBG_INIT("%s() hpa 0x%lx ioc_ctl 0x%Lx ->",
- __FUNCTION__, sba_dev->sba_hpa, ioc_ctl);
- ioc_ctl &= ~(IOC_CTRL_RM | IOC_CTRL_NC | IOC_CTRL_CE);
- ioc_ctl |= IOC_CTRL_TC; /* Astro: firmware enables this */
+ ioc_ctl = READ_REG(sba_dev->sba_hpa+IOC_CTRL);
+ DBG_INIT("%s() hpa 0x%lx ioc_ctl 0x%Lx ->",
+ __FUNCTION__, sba_dev->sba_hpa, ioc_ctl);
+ ioc_ctl &= ~(IOC_CTRL_RM | IOC_CTRL_NC | IOC_CTRL_CE);
+ ioc_ctl |= IOC_CTRL_TC; /* Astro: firmware enables this */
- WRITE_REG(ioc_ctl, sba_dev->sba_hpa+IOC_CTRL);
+ WRITE_REG(ioc_ctl, sba_dev->sba_hpa+IOC_CTRL);
#ifdef DEBUG_SBA_INIT
- ioc_ctl = READ_REG64(sba_dev->sba_hpa+IOC_CTRL);
- DBG_INIT(" 0x%Lx\n", ioc_ctl);
+ ioc_ctl = READ_REG64(sba_dev->sba_hpa+IOC_CTRL);
+ DBG_INIT(" 0x%Lx\n", ioc_ctl);
#endif
- } /* if !PLUTO */
if (IS_ASTRO(sba_dev->iodc)) {
/* PAT_PDC (L-class) also reports the same goofy base */
sba_dev->ioc[0].ioc_hpa = ASTRO_IOC_OFFSET;
num_ioc = 1;
- } else if (IS_PLUTO(sba_dev->iodc)) {
- /* We use a negative value for IOC HPA so it gets
- * corrected when we add it with IKE's IOC offset.
- * Doesnt look clean, but fewer code.
- */
- sba_dev->ioc[0].ioc_hpa = -PLUTO_IOC_OFFSET;
- num_ioc = 1;
} else {
sba_dev->ioc[0].ioc_hpa = sba_dev->ioc[1].ioc_hpa = 0;
num_ioc = 2;
/* flush out the writes */
READ_REG(sba_dev->ioc[i].ioc_hpa + ROPE7_CTL);
- if (IS_PLUTO(sba_dev->iodc)) {
- sba_ioc_init_pluto(sba_dev->dev, &(sba_dev->ioc[i]), i);
- } else {
- sba_ioc_init(sba_dev->dev, &(sba_dev->ioc[i]), i);
- }
+ sba_ioc_init(sba_dev->dev, &(sba_dev->ioc[i]), i);
}
}
{ HPHW_BCPORT, HVERSION_REV_ANY_ID, IKE_MERCED_PORT, 0xc },
{ HPHW_BCPORT, HVERSION_REV_ANY_ID, REO_MERCED_PORT, 0xc },
{ HPHW_BCPORT, HVERSION_REV_ANY_ID, REOG_MERCED_PORT, 0xc },
- { HPHW_IOA, HVERSION_REV_ANY_ID, PLUTO_MCKINLEY_PORT, 0xc },
/* These two entries commented out because we don't find them in a
* buswalk yet. If/when we do, they would cause us to think we had
* many more SBAs then we really do.
* { HPHW_BCPORT, HVERSION_REV_ANY_ID, ASTRO_ROPES_PORT, 0xc },
* { HPHW_BCPORT, HVERSION_REV_ANY_ID, IKE_ROPES_PORT, 0xc },
- */
-/* We shall also comment out Pluto Ropes Port since bus walk doesnt
- * report it yet.
- * { HPHW_BCPORT, HVERSION_REV_ANY_ID, PLUTO_ROPES_PORT, 0xc },
*/
{ 0, }
};
int i;
char *version;
+#ifdef DEBUG_SBA_INIT
sba_dump_ranges(dev->hpa);
+#endif
/* Read HW Rev First */
func_class = READ_REG(dev->hpa + SBA_FCLASS);
version = astro_rev;
} else if (IS_IKE(&dev->id)) {
- static char ike_rev[] = "Ike rev ?";
+ static char ike_rev[]="Ike rev ?";
+
ike_rev[8] = '0' + (char) (func_class & 0xff);
version = ike_rev;
- } else if (IS_PLUTO(&dev->id)) {
- static char pluto_rev[]="Pluto ?.?";
- pluto_rev[6] = '0' + (char) ((func_class & 0xf0) >> 4);
- pluto_rev[8] = '0' + (char) (func_class & 0x0f);
- version = pluto_rev;
} else {
- static char reo_rev[] = "REO rev ?";
+ static char reo_rev[]="REO rev ?";
+
reo_rev[8] = '0' + (char) (func_class & 0xff);
version = reo_rev;
}
if (!global_ioc_cnt) {
global_ioc_cnt = count_parisc_driver(&sba_driver);
- /* Astro and Pluto have one IOC per SBA */
- if ((!IS_ASTRO(&dev->id)) || (!IS_PLUTO(&dev->id)))
+ /* Only Astro has one IOC per SBA */
+ if (!IS_ASTRO(&dev->id))
global_ioc_cnt *= 2;
}
printk(KERN_INFO "%s found %s at 0x%lx\n",
MODULE_NAME, version, dev->hpa);
+#ifdef DEBUG_SBA_INIT
+ sba_dump_tlb(dev->hpa);
+#endif
+
sba_dev = kmalloc(sizeof(struct sba_device), GFP_KERNEL);
if (NULL == sba_dev) {
printk(KERN_ERR MODULE_NAME " - couldn't alloc sba_device\n");
create_proc_info_entry("Astro", 0, proc_runway_root, sba_proc_info);
} else if (IS_IKE(&dev->id)) {
create_proc_info_entry("Ike", 0, proc_runway_root, sba_proc_info);
- } else if (IS_PLUTO(&dev->id)) {
- create_proc_info_entry("Pluto", 0, proc_mckinley_root, sba_proc_info);
} else {
create_proc_info_entry("Reo", 0, proc_runway_root, sba_proc_info);
}
{
#ifdef CONFIG_SERIAL_8250
int retval;
-#ifdef CONFIG_SERIAL_8250_CONSOLE
extern void serial8250_console_init(void); /* drivers/serial/8250.c */
-#endif
-
+
if (!sio_dev.irq_region)
return; /* superio not present */
return;
}
-#ifdef CONFIG_SERIAL_8250_CONSOLE
serial8250_console_init();
-#endif
-
+
serial[1].iobase = sio_dev.sp2_base;
serial[1].irq = sio_dev.irq_region->data.irqbase + SP2_IRQ;
retval = early_serial_setup(&serial[1]);
#define PARPORT_MAX_SPINTIME_VALUE 1000
static int do_active_device(ctl_table *table, int write, struct file *filp,
- void __user *result, size_t *lenp, loff_t *ppos)
+ void __user *result, size_t *lenp)
{
struct parport *port = (struct parport *)table->extra1;
char buffer[256];
if (write) /* can't happen anyway */
return -EACCES;
- if (*ppos) {
+ if (filp->f_pos) {
*lenp = 0;
return 0;
}
else
*lenp = len;
- *ppos += len;
+ filp->f_pos += len;
return copy_to_user(result, buffer, len) ? -EFAULT : 0;
}
#ifdef CONFIG_PARPORT_1284
static int do_autoprobe(ctl_table *table, int write, struct file *filp,
- void __user *result, size_t *lenp, loff_t *ppos)
+ void __user *result, size_t *lenp)
{
struct parport_device_info *info = table->extra2;
const char *str;
if (write) /* permissions stop this */
return -EACCES;
- if (*ppos) {
+ if (filp->f_pos) {
*lenp = 0;
return 0;
}
else
*lenp = len;
- *ppos += len;
+ filp->f_pos += len;
return copy_to_user (result, buffer, len) ? -EFAULT : 0;
}
static int do_hardware_base_addr (ctl_table *table, int write,
struct file *filp, void __user *result,
- size_t *lenp, loff_t *ppos)
+ size_t *lenp)
{
struct parport *port = (struct parport *)table->extra1;
char buffer[20];
int len = 0;
- if (*ppos) {
+ if (filp->f_pos) {
*lenp = 0;
return 0;
}
else
*lenp = len;
- *ppos += len;
+ filp->f_pos += len;
return copy_to_user(result, buffer, len) ? -EFAULT : 0;
}
static int do_hardware_irq (ctl_table *table, int write,
struct file *filp, void __user *result,
- size_t *lenp, loff_t *ppos)
+ size_t *lenp)
{
struct parport *port = (struct parport *)table->extra1;
char buffer[20];
int len = 0;
- if (*ppos) {
+ if (filp->f_pos) {
*lenp = 0;
return 0;
}
else
*lenp = len;
- *ppos += len;
+ filp->f_pos += len;
return copy_to_user(result, buffer, len) ? -EFAULT : 0;
}
static int do_hardware_dma (ctl_table *table, int write,
struct file *filp, void __user *result,
- size_t *lenp, loff_t *ppos)
+ size_t *lenp)
{
struct parport *port = (struct parport *)table->extra1;
char buffer[20];
int len = 0;
- if (*ppos) {
+ if (filp->f_pos) {
*lenp = 0;
return 0;
}
else
*lenp = len;
- *ppos += len;
+ filp->f_pos += len;
return copy_to_user(result, buffer, len) ? -EFAULT : 0;
}
static int do_hardware_modes (ctl_table *table, int write,
struct file *filp, void __user *result,
- size_t *lenp, loff_t *ppos)
+ size_t *lenp)
{
struct parport *port = (struct parport *)table->extra1;
char buffer[40];
int len = 0;
- if (*ppos) {
+ if (filp->f_pos) {
*lenp = 0;
return 0;
}
else
*lenp = len;
- *ppos += len;
+ filp->f_pos += len;
return copy_to_user(result, buffer, len) ? -EFAULT : 0;
}
#
# PCI configuration
#
-config PCI_MSI
- bool "Message Signaled Interrupts (MSI and MSI-X)"
+config PCI_USE_VECTOR
+ bool "Vector-based interrupt indexing (MSI)"
depends on (X86_LOCAL_APIC && X86_IO_APIC) || IA64
default n
help
- This allows device drivers to enable MSI (Message Signaled
- Interrupts). Message Signaled Interrupts enable a device to
- generate an interrupt using an inbound Memory Write on its
- PCI bus instead of asserting a device IRQ pin.
+ This replaces the current existing IRQ-based index interrupt scheme
+ with the vector-based index scheme. The advantages of vector base
+ over IRQ base are listed below:
+ 1) Support MSI implementation.
+ 2) Support future IOxAPIC hotplug
+
+ Note that this allows the device drivers to enable MSI, Message
+ Signaled Interrupt, on all MSI capable device functions detected.
+ Message Signal Interrupt enables an MSI-capable hardware device to
+ send an inbound Memory Write on its PCI bus instead of asserting
+ IRQ signal on device IRQ pin.
If you don't know what to do here, say N.
obj-$(CONFIG_PPC64) += setup-bus.o
obj-$(CONFIG_MIPS) += setup-bus.o setup-irq.o
obj-$(CONFIG_X86_VISWS) += setup-irq.o
-obj-$(CONFIG_PCI_MSI) += msi.o
+obj-$(CONFIG_PCI_USE_VECTOR) += msi.o
# Cardbus & CompactPCI use setup-bus
obj-$(CONFIG_HOTPLUG) += setup-bus.o
case PCI_CAP_ID_MSI:
{
int pos;
- u32 mask_bits;
+ unsigned int mask_bits;
pos = entry->mask_base;
- pci_read_config_dword(entry->dev, pos, &mask_bits);
+ entry->dev->bus->ops->read(entry->dev->bus, entry->dev->devfn,
+ pos, 4, &mask_bits);
mask_bits &= ~(1);
mask_bits |= flag;
- pci_write_config_dword(entry->dev, pos, mask_bits);
+ entry->dev->bus->ops->write(entry->dev->bus, entry->dev->devfn,
+ pos, 4, mask_bits);
break;
}
case PCI_CAP_ID_MSIX:
if (!(pos = pci_find_capability(entry->dev, PCI_CAP_ID_MSI)))
return;
- pci_read_config_dword(entry->dev, msi_lower_address_reg(pos),
+ entry->dev->bus->ops->read(entry->dev->bus, entry->dev->devfn,
+ msi_lower_address_reg(pos), 4,
&address.lo_address.value);
address.lo_address.value &= MSI_ADDRESS_DEST_ID_MASK;
address.lo_address.value |= (cpu_mask_to_apicid(cpu_mask) <<
MSI_TARGET_CPU_SHIFT);
entry->msi_attrib.current_cpu = cpu_mask_to_apicid(cpu_mask);
- pci_write_config_dword(entry->dev, msi_lower_address_reg(pos),
+ entry->dev->bus->ops->write(entry->dev->bus, entry->dev->devfn,
+ msi_lower_address_reg(pos), 4,
address.lo_address.value);
break;
}
static unsigned int startup_msi_irq_wo_maskbit(unsigned int vector)
{
- struct msi_desc *entry;
- unsigned long flags;
-
- spin_lock_irqsave(&msi_lock, flags);
- entry = msi_desc[vector];
- if (!entry || !entry->dev) {
- spin_unlock_irqrestore(&msi_lock, flags);
- return 0;
- }
- entry->msi_attrib.state = 1; /* Mark it active */
- spin_unlock_irqrestore(&msi_lock, flags);
-
return 0; /* never anything pending */
}
-static void release_msi(unsigned int vector);
+static void pci_disable_msi(unsigned int vector);
static void shutdown_msi_irq(unsigned int vector)
{
- release_msi(vector);
+ pci_disable_msi(vector);
}
#define shutdown_msi_irq_wo_maskbit shutdown_msi_irq
static unsigned int startup_msi_irq_w_maskbit(unsigned int vector)
{
- struct msi_desc *entry;
- unsigned long flags;
-
- spin_lock_irqsave(&msi_lock, flags);
- entry = msi_desc[vector];
- if (!entry || !entry->dev) {
- spin_unlock_irqrestore(&msi_lock, flags);
- return 0;
- }
- entry->msi_attrib.state = 1; /* Mark it active */
- spin_unlock_irqrestore(&msi_lock, flags);
-
unmask_MSI_irq(vector);
return 0; /* never anything pending */
}
* which implement the MSI-X Capability Structure.
*/
static struct hw_interrupt_type msix_irq_type = {
- .typename = "PCI-MSI-X",
+ .typename = "PCI MSI-X",
.startup = startup_msi_irq_w_maskbit,
.shutdown = shutdown_msi_irq_w_maskbit,
.enable = enable_msi_irq_w_maskbit,
* Mask-and-Pending Bits.
*/
static struct hw_interrupt_type msi_irq_w_maskbit_type = {
- .typename = "PCI-MSI",
+ .typename = "PCI MSI",
.startup = startup_msi_irq_w_maskbit,
.shutdown = shutdown_msi_irq_w_maskbit,
.enable = enable_msi_irq_w_maskbit,
* Mask-and-Pending Bits.
*/
static struct hw_interrupt_type msi_irq_wo_maskbit_type = {
- .typename = "PCI-MSI",
+ .typename = "PCI MSI",
.startup = startup_msi_irq_wo_maskbit,
.shutdown = shutdown_msi_irq_wo_maskbit,
.enable = enable_msi_irq_wo_maskbit,
msi_address->lo_address.value |= (MSI_TARGET_CPU << MSI_TARGET_CPU_SHIFT);
}
-static int msi_free_vector(struct pci_dev* dev, int vector, int reassign);
static int assign_msi_vector(void)
{
static int new_vector_avail = 1;
spin_lock_irqsave(&msi_lock, flags);
if (!new_vector_avail) {
- int free_vector = 0;
-
/*
* vector_irq[] = -1 indicates that this specific vector is:
* - assigned for MSI (since MSI have no associated IRQ) or
for (vector = FIRST_DEVICE_VECTOR; vector < NR_IRQS; vector++) {
if (vector_irq[vector] != 0)
continue;
- free_vector = vector;
- if (!msi_desc[vector])
- break;
- else
- continue;
- }
- if (!free_vector) {
+ vector_irq[vector] = -1;
+ nr_released_vectors--;
spin_unlock_irqrestore(&msi_lock, flags);
- return -EBUSY;
+ return vector;
}
- vector_irq[free_vector] = -1;
- nr_released_vectors--;
spin_unlock_irqrestore(&msi_lock, flags);
- if (msi_desc[free_vector] != NULL) {
- struct pci_dev *dev;
- int tail;
-
- /* free all linked vectors before re-assign */
- do {
- spin_lock_irqsave(&msi_lock, flags);
- dev = msi_desc[free_vector]->dev;
- tail = msi_desc[free_vector]->link.tail;
- spin_unlock_irqrestore(&msi_lock, flags);
- msi_free_vector(dev, tail, 1);
- } while (free_vector != tail);
- }
-
- return free_vector;
+ return -EBUSY;
}
vector = assign_irq_vector(AUTO_ASSIGN);
last_alloc_vector = vector;
printk(KERN_INFO "WARNING: MSI INIT FAILURE\n");
return status;
}
- last_alloc_vector = assign_irq_vector(AUTO_ASSIGN);
- if (last_alloc_vector < 0) {
- pci_msi_enable = 0;
- printk(KERN_INFO "WARNING: ALL VECTORS ARE BUSY\n");
- status = -EBUSY;
- return status;
- }
- vector_irq[last_alloc_vector] = 0;
- nr_released_vectors++;
printk(KERN_INFO "MSI INIT SUCCESS\n");
return status;
static void enable_msi_mode(struct pci_dev *dev, int pos, int type)
{
- u16 control;
+ u32 control;
- pci_read_config_word(dev, msi_control_reg(pos), &control);
+ dev->bus->ops->read(dev->bus, dev->devfn,
+ msi_control_reg(pos), 2, &control);
if (type == PCI_CAP_ID_MSI) {
/* Set enabled bits to single MSI & enable MSI_enable bit */
msi_enable(control, 1);
- pci_write_config_word(dev, msi_control_reg(pos), control);
+ dev->bus->ops->write(dev->bus, dev->devfn,
+ msi_control_reg(pos), 2, control);
} else {
msix_enable(control);
- pci_write_config_word(dev, msi_control_reg(pos), control);
+ dev->bus->ops->write(dev->bus, dev->devfn,
+ msi_control_reg(pos), 2, control);
}
if (pci_find_capability(dev, PCI_CAP_ID_EXP)) {
/* PCI Express Endpoint device detected */
- u16 cmd;
- pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ u32 cmd;
+ dev->bus->ops->read(dev->bus, dev->devfn, PCI_COMMAND, 2, &cmd);
cmd |= PCI_COMMAND_INTX_DISABLE;
- pci_write_config_word(dev, PCI_COMMAND, cmd);
+ dev->bus->ops->write(dev->bus, dev->devfn, PCI_COMMAND, 2, cmd);
}
}
static void disable_msi_mode(struct pci_dev *dev, int pos, int type)
{
- u16 control;
+ u32 control;
- pci_read_config_word(dev, msi_control_reg(pos), &control);
+ dev->bus->ops->read(dev->bus, dev->devfn,
+ msi_control_reg(pos), 2, &control);
if (type == PCI_CAP_ID_MSI) {
/* Set enabled bits to single MSI & enable MSI_enable bit */
msi_disable(control);
- pci_write_config_word(dev, msi_control_reg(pos), control);
+ dev->bus->ops->write(dev->bus, dev->devfn,
+ msi_control_reg(pos), 2, control);
} else {
msix_disable(control);
- pci_write_config_word(dev, msi_control_reg(pos), control);
+ dev->bus->ops->write(dev->bus, dev->devfn,
+ msi_control_reg(pos), 2, control);
}
if (pci_find_capability(dev, PCI_CAP_ID_EXP)) {
/* PCI Express Endpoint device detected */
- u16 cmd;
- pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ u32 cmd;
+ dev->bus->ops->read(dev->bus, dev->devfn, PCI_COMMAND, 2, &cmd);
cmd &= ~PCI_COMMAND_INTX_DISABLE;
- pci_write_config_word(dev, PCI_COMMAND, cmd);
+ dev->bus->ops->write(dev->bus, dev->devfn, PCI_COMMAND, 2, cmd);
}
}
-static int msi_lookup_vector(struct pci_dev *dev, int type)
+static int msi_lookup_vector(struct pci_dev *dev)
{
int vector;
unsigned long flags;
spin_lock_irqsave(&msi_lock, flags);
for (vector = FIRST_DEVICE_VECTOR; vector < NR_IRQS; vector++) {
if (!msi_desc[vector] || msi_desc[vector]->dev != dev ||
- msi_desc[vector]->msi_attrib.type != type ||
+ msi_desc[vector]->msi_attrib.entry_nr ||
msi_desc[vector]->msi_attrib.default_vector != dev->irq)
- continue;
+ continue; /* not entry 0, skip */
spin_unlock_irqrestore(&msi_lock, flags);
- /* This pre-assigned MSI vector for this device
+ /* This pre-assigned entry-0 MSI vector for this device
already exits. Override dev->irq with this vector */
dev->irq = vector;
return 0;
if (!dev)
return;
- if (pci_find_capability(dev, PCI_CAP_ID_MSIX) > 0)
+ if (pci_find_capability(dev, PCI_CAP_ID_MSIX) > 0) {
+ nr_reserved_vectors++;
nr_msix_devices++;
- else if (pci_find_capability(dev, PCI_CAP_ID_MSI) > 0)
+ } else if (pci_find_capability(dev, PCI_CAP_ID_MSI) > 0)
nr_reserved_vectors++;
}
struct msg_address address;
struct msg_data data;
int pos, vector;
- u16 control;
+ u32 control;
pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
- pci_read_config_word(dev, msi_control_reg(pos), &control);
+ if (!pos)
+ return -EINVAL;
+
+ dev->bus->ops->read(dev->bus, dev->devfn, msi_control_reg(pos),
+ 2, &control);
+ if (control & PCI_MSI_FLAGS_ENABLE)
+ return 0;
+
+ if (!msi_lookup_vector(dev)) {
+ /* Lookup Success */
+ enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
+ return 0;
+ }
/* MSI Entry Initialization */
if (!(entry = alloc_msi_entry()))
return -ENOMEM;
kmem_cache_free(msi_cachep, entry);
return -EBUSY;
}
- entry->link.head = vector;
- entry->link.tail = vector;
entry->msi_attrib.type = PCI_CAP_ID_MSI;
- entry->msi_attrib.state = 0; /* Mark it not active */
entry->msi_attrib.entry_nr = 0;
entry->msi_attrib.maskbit = is_mask_bit_support(control);
- entry->msi_attrib.default_vector = dev->irq; /* Save IOAPIC IRQ */
- dev->irq = vector;
+ entry->msi_attrib.default_vector = dev->irq;
+ dev->irq = vector; /* save default pre-assigned ioapic vector */
entry->dev = dev;
if (is_mask_bit_support(control)) {
entry->mask_base = msi_mask_bits_reg(pos,
msi_data_init(&data, vector);
entry->msi_attrib.current_cpu = ((address.lo_address.u.dest_id >>
MSI_TARGET_CPU_SHIFT) & MSI_TARGET_CPU_MASK);
- pci_write_config_dword(dev, msi_lower_address_reg(pos),
- address.lo_address.value);
+ dev->bus->ops->write(dev->bus, dev->devfn, msi_lower_address_reg(pos),
+ 4, address.lo_address.value);
if (is_64bit_address(control)) {
- pci_write_config_dword(dev,
- msi_upper_address_reg(pos), address.hi_address);
- pci_write_config_word(dev,
- msi_data_reg(pos, 1), *((u32*)&data));
+ dev->bus->ops->write(dev->bus, dev->devfn,
+ msi_upper_address_reg(pos), 4, address.hi_address);
+ dev->bus->ops->write(dev->bus, dev->devfn,
+ msi_data_reg(pos, 1), 2, *((u32*)&data));
} else
- pci_write_config_word(dev,
- msi_data_reg(pos, 0), *((u32*)&data));
+ dev->bus->ops->write(dev->bus, dev->devfn,
+ msi_data_reg(pos, 0), 2, *((u32*)&data));
if (entry->msi_attrib.maskbit) {
unsigned int maskbits, temp;
/* All MSIs are unmasked by default, Mask them all */
- pci_read_config_dword(dev,
- msi_mask_bits_reg(pos, is_64bit_address(control)),
+ dev->bus->ops->read(dev->bus, dev->devfn,
+ msi_mask_bits_reg(pos, is_64bit_address(control)), 4,
&maskbits);
temp = (1 << multi_msi_capable(control));
temp = ((temp - 1) & ~temp);
maskbits |= temp;
- pci_write_config_dword(dev,
- msi_mask_bits_reg(pos, is_64bit_address(control)),
+ dev->bus->ops->write(dev->bus, dev->devfn,
+ msi_mask_bits_reg(pos, is_64bit_address(control)), 4,
maskbits);
}
attach_msi_entry(entry, vector);
* @dev: pointer to the pci_dev data structure of MSI-X device function
*
* Setup the MSI-X capability structure of device funtion with a
- * single MSI-X vector. A return of zero indicates the successful setup of
- * requested MSI-X entries with allocated vectors or non-zero for otherwise.
+ * single MSI-X vector. A return of zero indicates the successful setup
+ * of an entry zero with the new MSI-X vector or non-zero for otherwise.
+ * To request for additional MSI-X vectors, the device drivers are
+ * required to utilize the following supported APIs:
+ * 1) msi_alloc_vectors(...) for requesting one or more MSI-X vectors
+ * 2) msi_free_vectors(...) for releasing one or more MSI-X vectors
+ * back to PCI subsystem before calling free_irq(...)
**/
-static int msix_capability_init(struct pci_dev *dev,
- struct msix_entry *entries, int nvec)
+static int msix_capability_init(struct pci_dev *dev)
{
- struct msi_desc *head = NULL, *tail = NULL, *entry = NULL;
+ struct msi_desc *entry;
struct msg_address address;
struct msg_data data;
- int vector, pos, i, j, nr_entries, temp = 0;
+ int vector = 0, pos, dev_msi_cap, i;
u32 phys_addr, table_offset;
- u16 control;
+ u32 control;
u8 bir;
void *base;
pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
+ if (!pos)
+ return -EINVAL;
+
/* Request & Map MSI-X table region */
- pci_read_config_word(dev, msi_control_reg(pos), &control);
- nr_entries = multi_msix_capable(control);
- pci_read_config_dword(dev, msix_table_offset_reg(pos),
- &table_offset);
+ dev->bus->ops->read(dev->bus, dev->devfn, msi_control_reg(pos), 2,
+ &control);
+ if (control & PCI_MSIX_FLAGS_ENABLE)
+ return 0;
+
+ if (!msi_lookup_vector(dev)) {
+ /* Lookup Success */
+ enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
+ return 0;
+ }
+
+ dev_msi_cap = multi_msix_capable(control);
+ dev->bus->ops->read(dev->bus, dev->devfn,
+ msix_table_offset_reg(pos), 4, &table_offset);
bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
phys_addr = pci_resource_start (dev, bir);
phys_addr += (u32)(table_offset & ~PCI_MSIX_FLAGS_BIRMASK);
if (!request_mem_region(phys_addr,
- nr_entries * PCI_MSIX_ENTRY_SIZE,
- "MSI-X vector table"))
+ dev_msi_cap * PCI_MSIX_ENTRY_SIZE,
+ "MSI-X iomap Failure"))
return -ENOMEM;
- base = ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
- if (base == NULL) {
- release_mem_region(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
- return -ENOMEM;
- }
- /* MSI-X Table Initialization */
- for (i = 0; i < nvec; i++) {
- entry = alloc_msi_entry();
- if (!entry)
- break;
- if ((vector = get_msi_vector(dev)) < 0)
- break;
+ base = ioremap_nocache(phys_addr, dev_msi_cap * PCI_MSIX_ENTRY_SIZE);
+ if (base == NULL)
+ goto free_region;
+ /* MSI Entry Initialization */
+ entry = alloc_msi_entry();
+ if (!entry)
+ goto free_iomap;
+ if ((vector = get_msi_vector(dev)) < 0)
+ goto free_entry;
- j = entries[i].entry;
- entries[i].vector = vector;
- entry->msi_attrib.type = PCI_CAP_ID_MSIX;
- entry->msi_attrib.state = 0; /* Mark it not active */
- entry->msi_attrib.entry_nr = j;
- entry->msi_attrib.maskbit = 1;
- entry->msi_attrib.default_vector = dev->irq;
- entry->dev = dev;
- entry->mask_base = (unsigned long)base;
- if (!head) {
- entry->link.head = vector;
- entry->link.tail = vector;
- head = entry;
- } else {
- entry->link.head = temp;
- entry->link.tail = tail->link.tail;
- tail->link.tail = vector;
- head->link.head = vector;
- }
- temp = vector;
- tail = entry;
- /* Replace with MSI-X handler */
- irq_handler_init(PCI_CAP_ID_MSIX, vector, 1);
- /* Configure MSI-X capability structure */
- msi_address_init(&address);
- msi_data_init(&data, vector);
- entry->msi_attrib.current_cpu =
- ((address.lo_address.u.dest_id >>
- MSI_TARGET_CPU_SHIFT) & MSI_TARGET_CPU_MASK);
- writel(address.lo_address.value,
- base + j * PCI_MSIX_ENTRY_SIZE +
+ entry->msi_attrib.type = PCI_CAP_ID_MSIX;
+ entry->msi_attrib.entry_nr = 0;
+ entry->msi_attrib.maskbit = 1;
+ entry->msi_attrib.default_vector = dev->irq;
+ dev->irq = vector; /* save default pre-assigned ioapic vector */
+ entry->dev = dev;
+ entry->mask_base = (unsigned long)base;
+ /* Replace with MSI handler */
+ irq_handler_init(PCI_CAP_ID_MSIX, vector, 1);
+ /* Configure MSI-X capability structure */
+ msi_address_init(&address);
+ msi_data_init(&data, vector);
+ entry->msi_attrib.current_cpu = ((address.lo_address.u.dest_id >>
+ MSI_TARGET_CPU_SHIFT) & MSI_TARGET_CPU_MASK);
+ writel(address.lo_address.value, base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
+ writel(address.hi_address, base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
+ writel(*(u32*)&data, base + PCI_MSIX_ENTRY_DATA_OFFSET);
+ /* Zero-initialize all remaining entries (1 .. dev_msi_cap-1) */
+ for (i = 1; i < dev_msi_cap; i++) {
+ writel(0, base + i * PCI_MSIX_ENTRY_SIZE +
PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
- writel(address.hi_address,
- base + j * PCI_MSIX_ENTRY_SIZE +
+ writel(0, base + i * PCI_MSIX_ENTRY_SIZE +
PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
- writel(*(u32*)&data,
- base + j * PCI_MSIX_ENTRY_SIZE +
+ writel(0, base + i * PCI_MSIX_ENTRY_SIZE +
PCI_MSIX_ENTRY_DATA_OFFSET);
- attach_msi_entry(entry, vector);
}
- if (i != nvec) {
- i--;
- for (; i >= 0; i--) {
- vector = (entries + i)->vector;
- msi_free_vector(dev, vector, 0);
- (entries + i)->vector = 0;
- }
- return -EBUSY;
- }
- /* Set MSI-X enabled bits */
+ attach_msi_entry(entry, vector);
+ /* Set MSI enabled bits */
enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
return 0;
+
+free_entry:
+ kmem_cache_free(msi_cachep, entry);
+free_iomap:
+ iounmap(base);
+free_region:
+ release_mem_region(phys_addr, dev_msi_cap * PCI_MSIX_ENTRY_SIZE);
+
+ return ((vector < 0) ? -EBUSY : -ENOMEM);
}
/**
- * pci_enable_msi - configure device's MSI capability structure
- * @dev: pointer to the pci_dev data structure of MSI device function
+ * pci_enable_msi - configure device's MSI(X) capability structure
+ * @dev: pointer to the pci_dev data structure of MSI(X) device function
*
- * Setup the MSI capability structure of device function with
- * a single MSI vector upon its software driver call to request for
- * MSI mode enabled on its hardware device function. A return of zero
- * indicates the successful setup of an entry zero with the new MSI
+ * Setup the MSI/MSI-X capability structure of device function with
+ * a single MSI(X) vector upon its software driver call to request for
+ * MSI(X) mode enabled on its hardware device function. A return of zero
+ * indicates the successful setup of an entry zero with the new MSI(X)
* vector or non-zero for otherwise.
**/
int pci_enable_msi(struct pci_dev* dev)
{
- int pos, temp = dev->irq, status = -EINVAL;
- u16 control;
+ int status = -EINVAL;
if (!pci_msi_enable || !dev)
return status;
- if ((status = msi_init()) < 0)
- return status;
-
- if (!(pos = pci_find_capability(dev, PCI_CAP_ID_MSI)))
- return -EINVAL;
-
- pci_read_config_word(dev, msi_control_reg(pos), &control);
- if (control & PCI_MSI_FLAGS_ENABLE)
- return 0; /* Already in MSI mode */
-
- if (!msi_lookup_vector(dev, PCI_CAP_ID_MSI)) {
- /* Lookup Sucess */
- unsigned long flags;
+ if (msi_init() < 0)
+ return -ENOMEM;
- spin_lock_irqsave(&msi_lock, flags);
- if (!vector_irq[dev->irq]) {
- msi_desc[dev->irq]->msi_attrib.state = 0;
- vector_irq[dev->irq] = -1;
- nr_released_vectors--;
- spin_unlock_irqrestore(&msi_lock, flags);
- enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
- return 0;
- }
- spin_unlock_irqrestore(&msi_lock, flags);
- dev->irq = temp;
- }
- /* Check whether driver already requested for MSI-X vectors */
- if ((pos = pci_find_capability(dev, PCI_CAP_ID_MSIX)) > 0 &&
- !msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
- printk(KERN_INFO "Can't enable MSI. Device already had MSI-X vectors assigned\n");
- dev->irq = temp;
- return -EINVAL;
- }
- status = msi_capability_init(dev);
- if (!status) {
- if (!pos)
- nr_reserved_vectors--; /* Only MSI capable */
- else if (nr_msix_devices > 0)
- nr_msix_devices--; /* Both MSI and MSI-X capable,
- but choose enabling MSI */
- }
+ if ((status = msix_capability_init(dev)) == -EINVAL)
+ status = msi_capability_init(dev);
+ if (!status)
+ nr_reserved_vectors--;
return status;
}
-void pci_disable_msi(struct pci_dev* dev)
+static int msi_free_vector(struct pci_dev* dev, int vector);
+static void pci_disable_msi(unsigned int vector)
{
+ int head, tail, type, default_vector;
struct msi_desc *entry;
- int pos, default_vector;
- u16 control;
+ struct pci_dev *dev;
unsigned long flags;
- if (!dev || !(pos = pci_find_capability(dev, PCI_CAP_ID_MSI)))
- return;
-
- pci_read_config_word(dev, msi_control_reg(pos), &control);
- if (!(control & PCI_MSI_FLAGS_ENABLE))
- return;
-
spin_lock_irqsave(&msi_lock, flags);
- entry = msi_desc[dev->irq];
- if (!entry || !entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) {
+ entry = msi_desc[vector];
+ if (!entry || !entry->dev) {
spin_unlock_irqrestore(&msi_lock, flags);
return;
}
- if (entry->msi_attrib.state) {
- spin_unlock_irqrestore(&msi_lock, flags);
- printk(KERN_DEBUG "Driver[%d:%d:%d] unloaded wo doing free_irq on vector->%d\n",
- dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
- dev->irq);
- BUG_ON(entry->msi_attrib.state > 0);
- } else {
- vector_irq[dev->irq] = 0; /* free it */
- nr_released_vectors++;
- default_vector = entry->msi_attrib.default_vector;
- spin_unlock_irqrestore(&msi_lock, flags);
- /* Restore dev->irq to its default pin-assertion vector */
- dev->irq = default_vector;
- disable_msi_mode(dev, pci_find_capability(dev, PCI_CAP_ID_MSI),
- PCI_CAP_ID_MSI);
+ dev = entry->dev;
+ type = entry->msi_attrib.type;
+ head = entry->link.head;
+ tail = entry->link.tail;
+ default_vector = entry->msi_attrib.default_vector;
+ spin_unlock_irqrestore(&msi_lock, flags);
+
+ disable_msi_mode(dev, pci_find_capability(dev, type), type);
+ /* Restore dev->irq to its default pin-assertion vector */
+ dev->irq = default_vector;
+ if (type == PCI_CAP_ID_MSIX && head != tail) {
+ /* Bad driver, which did not call msi_free_vectors before exit.
+ We must do a cleanup here */
+ while (1) {
+ spin_lock_irqsave(&msi_lock, flags);
+ entry = msi_desc[vector];
+ head = entry->link.head;
+ tail = entry->link.tail;
+ spin_unlock_irqrestore(&msi_lock, flags);
+ if (tail == head)
+ break;
+ if (msi_free_vector(dev, entry->link.tail))
+ break;
+ }
}
}
-static void release_msi(unsigned int vector)
+static int msi_alloc_vector(struct pci_dev* dev, int head)
{
struct msi_desc *entry;
+ struct msg_address address;
+ struct msg_data data;
+ int i, offset, pos, dev_msi_cap, vector;
+ u32 low_address, control;
+ unsigned long base = 0L;
unsigned long flags;
spin_lock_irqsave(&msi_lock, flags);
- entry = msi_desc[vector];
- if (entry && entry->dev)
- entry->msi_attrib.state = 0; /* Mark it not active */
+ entry = msi_desc[dev->irq];
+ if (!entry) {
+ spin_unlock_irqrestore(&msi_lock, flags);
+ return -EINVAL;
+ }
+ base = entry->mask_base;
spin_unlock_irqrestore(&msi_lock, flags);
+
+ pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
+ dev->bus->ops->read(dev->bus, dev->devfn, msi_control_reg(pos),
+ 2, &control);
+ dev_msi_cap = multi_msix_capable(control);
+ for (i = 1; i < dev_msi_cap; i++) {
+ if (!(low_address = readl(base + i * PCI_MSIX_ENTRY_SIZE)))
+ break;
+ }
+ if (i >= dev_msi_cap)
+ return -EINVAL;
+
+ /* MSI Entry Initialization */
+ if (!(entry = alloc_msi_entry()))
+ return -ENOMEM;
+
+ if ((vector = get_new_vector()) < 0) {
+ kmem_cache_free(msi_cachep, entry);
+ return vector;
+ }
+ entry->msi_attrib.type = PCI_CAP_ID_MSIX;
+ entry->msi_attrib.entry_nr = i;
+ entry->msi_attrib.maskbit = 1;
+ entry->dev = dev;
+ entry->link.head = head;
+ entry->mask_base = base;
+ irq_handler_init(PCI_CAP_ID_MSIX, vector, 1);
+ /* Configure MSI-X capability structure */
+ msi_address_init(&address);
+ msi_data_init(&data, vector);
+ entry->msi_attrib.current_cpu = ((address.lo_address.u.dest_id >>
+ MSI_TARGET_CPU_SHIFT) & MSI_TARGET_CPU_MASK);
+ offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
+ writel(address.lo_address.value, base + offset +
+ PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
+ writel(address.hi_address, base + offset +
+ PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
+ writel(*(u32*)&data, base + offset + PCI_MSIX_ENTRY_DATA_OFFSET);
+ writel(1, base + offset + PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);
+ attach_msi_entry(entry, vector);
+
+ return vector;
}
-static int msi_free_vector(struct pci_dev* dev, int vector, int reassign)
+static int msi_free_vector(struct pci_dev* dev, int vector)
{
struct msi_desc *entry;
- int head, entry_nr, type;
+ int entry_nr, type;
unsigned long base = 0L;
unsigned long flags;
}
type = entry->msi_attrib.type;
entry_nr = entry->msi_attrib.entry_nr;
- head = entry->link.head;
base = entry->mask_base;
- msi_desc[entry->link.head]->link.tail = entry->link.tail;
- msi_desc[entry->link.tail]->link.head = entry->link.head;
- entry->dev = NULL;
- if (!reassign) {
- vector_irq[vector] = 0;
- nr_released_vectors++;
+ if (entry->link.tail != entry->link.head) {
+ msi_desc[entry->link.head]->link.tail = entry->link.tail;
+ if (entry->link.tail)
+ msi_desc[entry->link.tail]->link.head = entry->link.head;
}
+ entry->dev = NULL;
+ vector_irq[vector] = 0;
+ nr_released_vectors++;
msi_desc[vector] = NULL;
spin_unlock_irqrestore(&msi_lock, flags);
kmem_cache_free(msi_cachep, entry);
-
if (type == PCI_CAP_ID_MSIX) {
- if (!reassign)
- writel(1, base +
- entry_nr * PCI_MSIX_ENTRY_SIZE +
- PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);
-
- if (head == vector) {
- /*
- * Detect last MSI-X vector to be released.
- * Release the MSI-X memory-mapped table.
- */
- int pos, nr_entries;
- u32 phys_addr, table_offset;
- u16 control;
- u8 bir;
-
- pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
- pci_read_config_word(dev, msi_control_reg(pos),
- &control);
- nr_entries = multi_msix_capable(control);
- pci_read_config_dword(dev, msix_table_offset_reg(pos),
- &table_offset);
- bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
- phys_addr = pci_resource_start (dev, bir);
- phys_addr += (u32)(table_offset &
- ~PCI_MSIX_FLAGS_BIRMASK);
- iounmap((void*)base);
- release_mem_region(phys_addr,
- nr_entries * PCI_MSIX_ENTRY_SIZE);
- }
- }
+ int offset;
- return 0;
-}
-
-static int reroute_msix_table(int head, struct msix_entry *entries, int *nvec)
-{
- int vector = head, tail = 0;
- int i = 0, j = 0, nr_entries = 0;
- unsigned long base = 0L;
- unsigned long flags;
-
- spin_lock_irqsave(&msi_lock, flags);
- while (head != tail) {
- nr_entries++;
- tail = msi_desc[vector]->link.tail;
- if (entries[0].entry == msi_desc[vector]->msi_attrib.entry_nr)
- j = vector;
- vector = tail;
+ offset = entry_nr * PCI_MSIX_ENTRY_SIZE;
+ writel(1, base + offset + PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);
+ writel(0, base + offset + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
}
- if (*nvec > nr_entries) {
- spin_unlock_irqrestore(&msi_lock, flags);
- *nvec = nr_entries;
- return -EINVAL;
- }
- vector = ((j > 0) ? j : head);
- for (i = 0; i < *nvec; i++) {
- j = msi_desc[vector]->msi_attrib.entry_nr;
- msi_desc[vector]->msi_attrib.state = 0; /* Mark it not active */
- vector_irq[vector] = -1; /* Mark it busy */
- nr_released_vectors--;
- entries[i].vector = vector;
- if (j != (entries + i)->entry) {
- base = msi_desc[vector]->mask_base;
- msi_desc[vector]->msi_attrib.entry_nr =
- (entries + i)->entry;
- writel( readl(base + j * PCI_MSIX_ENTRY_SIZE +
- PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET), base +
- (entries + i)->entry * PCI_MSIX_ENTRY_SIZE +
- PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
- writel( readl(base + j * PCI_MSIX_ENTRY_SIZE +
- PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET), base +
- (entries + i)->entry * PCI_MSIX_ENTRY_SIZE +
- PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
- writel( (readl(base + j * PCI_MSIX_ENTRY_SIZE +
- PCI_MSIX_ENTRY_DATA_OFFSET) & 0xff00) | vector,
- base + (entries+i)->entry*PCI_MSIX_ENTRY_SIZE +
- PCI_MSIX_ENTRY_DATA_OFFSET);
- }
- vector = msi_desc[vector]->link.tail;
- }
- spin_unlock_irqrestore(&msi_lock, flags);
return 0;
}
/**
- * pci_enable_msix - configure device's MSI-X capability structure
+ * msi_alloc_vectors - allocate additional MSI-X vectors
* @dev: pointer to the pci_dev data structure of MSI-X device function
- * @data: pointer to an array of MSI-X entries
+ * @vector: pointer to an array of new allocated MSI-X vectors
* @nvec: number of MSI-X vectors requested for allocation by device driver
*
- * Setup the MSI-X capability structure of device function with the number
- * of requested vectors upon its software driver call to request for
- * MSI-X mode enabled on its hardware device function. A return of zero
- * indicates the successful configuration of MSI-X capability structure
- * with new allocated MSI-X vectors. A return of < 0 indicates a failure.
- * Or a return of > 0 indicates that driver request is exceeding the number
- * of vectors available. Driver should use the returned value to re-send
- * its request.
+ * Allocate additional MSI-X vectors requested by device driver. A
+ * return of zero indicates the successful setup of MSI-X capability
+ * structure with new allocated MSI-X vectors or non-zero for otherwise.
**/
-int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
+int msi_alloc_vectors(struct pci_dev* dev, int *vector, int nvec)
{
- int status, pos, nr_entries, free_vectors;
- int i, j, temp;
- u16 control;
+ struct msi_desc *entry;
+ int i, head, pos, vec, free_vectors, alloc_vectors;
+ int *vectors = (int *)vector;
+ u32 control;
unsigned long flags;
- if (!pci_msi_enable || !dev || !entries)
+ if (!pci_msi_enable || !dev)
return -EINVAL;
- if ((status = msi_init()) < 0)
- return status;
-
if (!(pos = pci_find_capability(dev, PCI_CAP_ID_MSIX)))
return -EINVAL;
- pci_read_config_word(dev, msi_control_reg(pos), &control);
- if (control & PCI_MSIX_FLAGS_ENABLE)
- return -EINVAL; /* Already in MSI-X mode */
-
- nr_entries = multi_msix_capable(control);
- if (nvec > nr_entries)
+ dev->bus->ops->read(dev->bus, dev->devfn, msi_control_reg(pos), 2, &control);
+ if (nvec > multi_msix_capable(control))
return -EINVAL;
- /* Check for any invalid entries */
- for (i = 0; i < nvec; i++) {
- if (entries[i].entry >= nr_entries)
- return -EINVAL; /* invalid entry */
- for (j = i + 1; j < nvec; j++) {
- if (entries[i].entry == entries[j].entry)
- return -EINVAL; /* duplicate entry */
- }
- }
- temp = dev->irq;
- if (!msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
- /* Lookup Sucess */
- nr_entries = nvec;
- /* Reroute MSI-X table */
- if (reroute_msix_table(dev->irq, entries, &nr_entries)) {
- /* #requested > #previous-assigned */
- dev->irq = temp;
- return nr_entries;
- }
- dev->irq = temp;
- enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
- return 0;
- }
- /* Check whether driver already requested for MSI vector */
- if (pci_find_capability(dev, PCI_CAP_ID_MSI) > 0 &&
- !msi_lookup_vector(dev, PCI_CAP_ID_MSI)) {
- printk(KERN_INFO "Can't enable MSI-X. Device already had MSI vector assigned\n");
- dev->irq = temp;
+ spin_lock_irqsave(&msi_lock, flags);
+ entry = msi_desc[dev->irq];
+ if (!entry || entry->dev != dev || /* legal call */
+ entry->msi_attrib.type != PCI_CAP_ID_MSIX || /* must be MSI-X */
+ entry->link.head != entry->link.tail) { /* already multi */
+ spin_unlock_irqrestore(&msi_lock, flags);
return -EINVAL;
}
-
- spin_lock_irqsave(&msi_lock, flags);
/*
* msi_lock is provided to ensure that enough vectors resources are
* available before granting.
free_vectors /= nr_msix_devices;
spin_unlock_irqrestore(&msi_lock, flags);
- if (nvec > free_vectors) {
- if (free_vectors > 0)
- return free_vectors;
- else
- return -EBUSY;
- }
+ if (nvec > free_vectors)
+ return -EBUSY;
- status = msix_capability_init(dev, entries, nvec);
- if (!status && nr_msix_devices > 0)
+ alloc_vectors = 0;
+ head = dev->irq;
+ for (i = 0; i < nvec; i++) {
+ if ((vec = msi_alloc_vector(dev, head)) < 0)
+ break;
+ *(vectors + i) = vec;
+ head = vec;
+ alloc_vectors++;
+ }
+ if (alloc_vectors != nvec) {
+ for (i = 0; i < alloc_vectors; i++) {
+ vec = *(vectors + i);
+ msi_free_vector(dev, vec);
+ }
+ spin_lock_irqsave(&msi_lock, flags);
+ msi_desc[dev->irq]->link.tail = msi_desc[dev->irq]->link.head;
+ spin_unlock_irqrestore(&msi_lock, flags);
+ return -EBUSY;
+ }
+ if (nr_msix_devices > 0)
nr_msix_devices--;
- return status;
+ return 0;
}
-void pci_disable_msix(struct pci_dev* dev)
+/**
+ * msi_free_vectors - reclaim MSI-X vectors to unused state
+ * @dev: pointer to the pci_dev data structure of MSI-X device function
+ * @vector: pointer to an array of released MSI-X vectors
+ * @nvec: number of MSI-X vectors requested for release by device driver
+ *
+ * Reclaim MSI-X vectors released by device driver to unused state,
+ * which may be used later on. A return of zero indicates the
+ * success or non-zero for otherwise. Device driver should call this
+ * before calling function free_irq.
+ **/
+int msi_free_vectors(struct pci_dev* dev, int *vector, int nvec)
{
- int pos, temp;
- u16 control;
-
- if (!dev || !(pos = pci_find_capability(dev, PCI_CAP_ID_MSIX)))
- return;
-
- pci_read_config_word(dev, msi_control_reg(pos), &control);
- if (!(control & PCI_MSIX_FLAGS_ENABLE))
- return;
+ struct msi_desc *entry;
+ int i;
+ unsigned long flags;
- temp = dev->irq;
- if (!msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
- int state, vector, head, tail = 0, warning = 0;
- unsigned long flags;
+ if (!pci_msi_enable)
+ return -EINVAL;
- vector = head = dev->irq;
- spin_lock_irqsave(&msi_lock, flags);
- while (head != tail) {
- state = msi_desc[vector]->msi_attrib.state;
- if (state)
- warning = 1;
- else {
- vector_irq[vector] = 0; /* free it */
- nr_released_vectors++;
- }
- tail = msi_desc[vector]->link.tail;
- vector = tail;
- }
+ spin_lock_irqsave(&msi_lock, flags);
+ entry = msi_desc[dev->irq];
+ if (!entry || entry->dev != dev ||
+ entry->msi_attrib.type != PCI_CAP_ID_MSIX ||
+ entry->link.head == entry->link.tail) { /* Nothing to free */
spin_unlock_irqrestore(&msi_lock, flags);
- if (warning) {
- dev->irq = temp;
- printk(KERN_DEBUG "Driver[%d:%d:%d] unloaded wo doing free_irq on all vectors\n",
- dev->bus->number, PCI_SLOT(dev->devfn),
- PCI_FUNC(dev->devfn));
- BUG_ON(warning > 0);
- } else {
- dev->irq = temp;
- disable_msi_mode(dev,
- pci_find_capability(dev, PCI_CAP_ID_MSIX),
- PCI_CAP_ID_MSIX);
+ return -EINVAL;
+ }
+ spin_unlock_irqrestore(&msi_lock, flags);
- }
+ for (i = 0; i < nvec; i++) {
+ if (*(vector + i) == dev->irq)
+ continue;/* Don't free entry 0 if mistaken by driver */
+ msi_free_vector(dev, *(vector + i));
}
+
+ return 0;
}
/**
**/
void msi_remove_pci_irq_vectors(struct pci_dev* dev)
{
- int state, pos, temp;
+ struct msi_desc *entry;
+ int type, temp;
unsigned long flags;
if (!pci_msi_enable || !dev)
return;
- temp = dev->irq; /* Save IOAPIC IRQ */
- if ((pos = pci_find_capability(dev, PCI_CAP_ID_MSI)) > 0 &&
- !msi_lookup_vector(dev, PCI_CAP_ID_MSI)) {
- spin_lock_irqsave(&msi_lock, flags);
- state = msi_desc[dev->irq]->msi_attrib.state;
+ if (!pci_find_capability(dev, PCI_CAP_ID_MSI)) {
+ if (!pci_find_capability(dev, PCI_CAP_ID_MSIX))
+ return;
+ }
+ temp = dev->irq;
+ if (msi_lookup_vector(dev))
+ return;
+
+ spin_lock_irqsave(&msi_lock, flags);
+ entry = msi_desc[dev->irq];
+ if (!entry || entry->dev != dev) {
spin_unlock_irqrestore(&msi_lock, flags);
- if (state) {
- printk(KERN_DEBUG "Driver[%d:%d:%d] unloaded wo doing free_irq on vector->%d\n",
- dev->bus->number, PCI_SLOT(dev->devfn),
- PCI_FUNC(dev->devfn), dev->irq);
- BUG_ON(state > 0);
- } else /* Release MSI vector assigned to this device */
- msi_free_vector(dev, dev->irq, 0);
- dev->irq = temp; /* Restore IOAPIC IRQ */
+ return;
}
- if ((pos = pci_find_capability(dev, PCI_CAP_ID_MSIX)) > 0 &&
- !msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
- int vector, head, tail = 0, warning = 0;
- unsigned long base = 0L;
+ type = entry->msi_attrib.type;
+ spin_unlock_irqrestore(&msi_lock, flags);
- vector = head = dev->irq;
- while (head != tail) {
+ msi_free_vector(dev, dev->irq);
+ if (type == PCI_CAP_ID_MSIX) {
+ int i, pos, dev_msi_cap;
+ u32 phys_addr, table_offset;
+ u32 control;
+ u8 bir;
+
+ pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
+ dev->bus->ops->read(dev->bus, dev->devfn, msi_control_reg(pos), 2, &control);
+ dev_msi_cap = multi_msix_capable(control);
+ dev->bus->ops->read(dev->bus, dev->devfn,
+ msix_table_offset_reg(pos), 4, &table_offset);
+ bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
+ phys_addr = pci_resource_start (dev, bir);
+ phys_addr += (u32)(table_offset & ~PCI_MSIX_FLAGS_BIRMASK);
+ for (i = FIRST_DEVICE_VECTOR; i < NR_IRQS; i++) {
spin_lock_irqsave(&msi_lock, flags);
- state = msi_desc[vector]->msi_attrib.state;
- tail = msi_desc[vector]->link.tail;
- base = msi_desc[vector]->mask_base;
+ if (!msi_desc[i] || msi_desc[i]->dev != dev) {
+ spin_unlock_irqrestore(&msi_lock, flags);
+ continue;
+ }
spin_unlock_irqrestore(&msi_lock, flags);
- if (state)
- warning = 1;
- else if (vector != head) /* Release MSI-X vector */
- msi_free_vector(dev, vector, 0);
- vector = tail;
- }
- msi_free_vector(dev, vector, 0);
- if (warning) {
- /* Force to release the MSI-X memory-mapped table */
- u32 phys_addr, table_offset;
- u16 control;
- u8 bir;
-
- pci_read_config_word(dev, msi_control_reg(pos),
- &control);
- pci_read_config_dword(dev, msix_table_offset_reg(pos),
- &table_offset);
- bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
- phys_addr = pci_resource_start (dev, bir);
- phys_addr += (u32)(table_offset &
- ~PCI_MSIX_FLAGS_BIRMASK);
- iounmap((void*)base);
- release_mem_region(phys_addr, PCI_MSIX_ENTRY_SIZE *
- multi_msix_capable(control));
- printk(KERN_DEBUG "Driver[%d:%d:%d] unloaded wo doing free_irq on all vectors\n",
- dev->bus->number, PCI_SLOT(dev->devfn),
- PCI_FUNC(dev->devfn));
- BUG_ON(warning > 0);
+ msi_free_vector(dev, i);
}
- dev->irq = temp; /* Restore IOAPIC IRQ */
+ writel(1, entry->mask_base + PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);
+ iounmap((void*)entry->mask_base);
+ release_mem_region(phys_addr, dev_msi_cap * PCI_MSIX_ENTRY_SIZE);
}
+ dev->irq = temp;
+ nr_reserved_vectors++;
}
EXPORT_SYMBOL(pci_enable_msi);
-EXPORT_SYMBOL(pci_disable_msi);
-EXPORT_SYMBOL(pci_enable_msix);
-EXPORT_SYMBOL(pci_disable_msix);
+EXPORT_SYMBOL(msi_alloc_vectors);
+EXPORT_SYMBOL(msi_free_vectors);
struct {
__u8 type : 5; /* {0: unused, 5h:MSI, 11h:MSI-X} */
__u8 maskbit : 1; /* mask-pending bit supported ? */
- __u8 state : 1; /* {0: free, 1: busy} */
- __u8 reserved: 1; /* reserved */
+ __u8 reserved: 2; /* reserved */
__u8 entry_nr; /* specific enabled entry */
__u8 default_vector; /* default pre-assigned vector */
__u8 current_cpu; /* current destination cpu */
case 0x8070: /* P4G8X Deluxe */
asus_hides_smbus = 1;
}
- if (dev->device == PCI_DEVICE_ID_INTEL_82855GM_HB)
- switch (dev->subsystem_device) {
- case 0x1751: /* M2N notebook */
- asus_hides_smbus = 1;
- }
} else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_HP)) {
if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
switch(dev->subsystem_device) {
{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82850_HB, asus_hides_smbus_hostbridge },
{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_7205_0, asus_hides_smbus_hostbridge },
{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82855PM_HB, asus_hides_smbus_hostbridge },
- { PCI_FIXUP_HEADER, PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82855GM_HB, asus_hides_smbus_hostbridge },
{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, asus_hides_smbus_lpc },
{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, asus_hides_smbus_lpc },
{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc },
#include "cirrus.h"
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Driver for the Cirrus PD6729 PCI-PCMCIA bridge");
-MODULE_AUTHOR("Jun Komuro <komurojun@mbn.nifty.com>");
#define MAX_SOCKETS 2
-/*
- * simple helper functions
- * External clock time, in nanoseconds. 120 ns = 8.33 MHz
- */
+/* simple helper functions */
+/* External clock time, in nanoseconds. 120 ns = 8.33 MHz */
#define to_cycles(ns) ((ns)/120)
static spinlock_t port_lock = SPIN_LOCK_UNLOCKED;
*value |= SS_DETECT;
}
- /*
- * IO cards have a different meaning of bits 0,1
- * Also notice the inverse-logic on the bits
- */
+ /* IO cards have a different meaning of bits 0,1 */
+ /* Also notice the inverse-logic on the bits */
if (indirect_read(socket, I365_INTCTL) & I365_PC_IOCARD) {
/* IO card */
if (!(status & I365_CS_STSCHG))
state->io_irq = 0;
state->csc_mask = 0;
- /*
- * First the power status of the socket
- * PCTRL - Power Control Register
- */
+ /* First the power status of the socket */
+ /* PCTRL - Power Control Register */
reg = indirect_read(socket, I365_POWER);
if (reg & I365_PWR_AUTO)
state->Vpp = 120;
}
- /*
- * Now the IO card, RESET flags and IO interrupt
- * IGENC, Interrupt and General Control
- */
+ /* Now the IO card, RESET flags and IO interrupt */
+ /* IGENC, Interrupt and General Control */
reg = indirect_read(socket, I365_INTCTL);
if ((reg & I365_PC_RESET) == 0)
/* Set the IRQ number */
state->io_irq = socket->socket.pci_irq;
- /*
- * Card status change
- * CSCICR, Card Status Change Interrupt Configuration
- */
+ /* Card status change */
+ /* CSCICR, Card Status Change Interrupt Configuration */
reg = indirect_read(socket, I365_CSCINT);
if (reg & I365_CSC_DETECT)
printk(KERN_INFO "pd6729: Cirrus PD6729 PCI to PCMCIA Bridge at 0x%lx on irq %d\n",
pci_resource_start(dev, 0), dev->irq);
printk(KERN_INFO "pd6729: configured as a %d socket device.\n", MAX_SOCKETS);
- /*
- * Since we have no memory BARs some firmware we may not
- * have had PCI_COMMAND_MEM enabled, yet the device needs
- * it.
- */
+ /* Since we have no memory BARs some firmware we may not
+ have had PCI_COMMAND_MEM enabled, yet the device needs
+ it. */
pci_read_config_byte(dev, PCI_COMMAND, &configbyte);
if (!(configbyte & PCI_COMMAND_MEMORY)) {
printk(KERN_DEBUG "pd6729: Enabling PCI_COMMAND_MEMORY.\n");
file under either the MPL or the GPL.
======================================================================*/
+/*
+ * Please see linux/Documentation/arm/SA1100/PCMCIA for more information
+ * on the low-level kernel interface.
+ */
#include <linux/module.h>
#include <linux/init.h>
if (!pnp_bios_present())
return PNP_FUNCTION_NOT_SUPPORTED;
status = call_pnp_bios(PNP_GET_EVENT, 0, PNP_TS1, PNP_DS, 0, 0 ,0 ,0,
- event, sizeof(u16), NULL, 0);
+ event, sizeof(u16), 0, 0);
return status;
}
#endif
if (!pnp_bios_present())
return PNP_FUNCTION_NOT_SUPPORTED;
status = call_pnp_bios(PNP_GET_STATIC_ALLOCED_RES_INFO, 0, PNP_TS1, PNP_DS, 0, 0, 0, 0,
- info, 65536, NULL, 0);
+ info, 65536, 0, 0);
return status;
}
if (!pnp_bios_present())
return PNP_FUNCTION_NOT_SUPPORTED;
status = call_pnp_bios(PNP_GET_PNP_ISA_CONFIG_STRUC, 0, PNP_TS1, PNP_DS, 0, 0, 0, 0,
- data, sizeof(struct pnp_isa_config_struc), NULL, 0);
+ data, sizeof(struct pnp_isa_config_struc), 0, 0);
return status;
}
if (!pnp_bios_present())
return ESCD_FUNCTION_NOT_SUPPORTED;
status = call_pnp_bios(PNP_GET_ESCD_INFO, 0, PNP_TS1, 2, PNP_TS1, 4, PNP_TS1, PNP_DS,
- data, sizeof(struct escd_info_struc), NULL, 0);
+ data, sizeof(struct escd_info_struc), 0, 0);
return status;
}
DBF_EVENT(6, "TCHAR:read\n");
device = (struct tape_device *) filp->private_data;
+ /* Check position. */
+ if (ppos != &filp->f_pos) {
+ /*
+ * "A request was outside the capabilities of the device."
+ * This check uses internal knowledge about how pread and
+ * read work...
+ */
+ DBF_EVENT(6, "TCHAR:ppos wrong\n");
+ return -EOVERFLOW;
+ }
/*
* If the tape isn't terminated yet, do it now. And since we then
DBF_EVENT(6, "TCHAR:write\n");
device = (struct tape_device *) filp->private_data;
+ /* Check position */
+ if (ppos != &filp->f_pos) {
+ /* "A request was outside the capabilities of the device." */
+ DBF_EVENT(6, "TCHAR:ppos wrong\n");
+ return -EOVERFLOW;
+ }
/* Find out block size and number of blocks */
if (device->char_data.block_size != 0) {
if (count < device->char_data.block_size) {
rc = tape_open(device);
if (rc == 0) {
filp->private_data = device;
- return nonseekable_open(inode, filp);
+ return 0;
}
tape_put_device(device);
/*
* drivers/s390/cio/chsc.c
* S/390 common I/O routines -- channel subsystem call
- * $Revision: 1.115 $
+ * $Revision: 1.114 $
*
* Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
* IBM Corporation
return -ENOMEM;
memset(chp, 0, sizeof(struct channel_path));
+ chps[chpid] = chp;
+
/* fill in status, etc. */
chp->id = chpid;
chp->state = 1;
if (ret) {
printk(KERN_WARNING "%s: could not register %02x\n",
__func__, chpid);
- goto out_free;
+ return ret;
}
ret = device_create_file(&chp->dev, &dev_attr_status);
- if (ret) {
+ if (ret)
device_unregister(&chp->dev);
- goto out_free;
- } else
- chps[chpid] = chp;
- return ret;
-out_free:
- kfree(chp);
+
return ret;
}
extern int cio_get_options (struct subchannel *);
extern int cio_modify (struct subchannel *);
/* Use with care. */
-#ifdef CONFIG_CCW_CONSOLE
extern struct subchannel *cio_probe_console(void);
extern void cio_release_console(void);
extern int cio_is_console(int irq);
extern struct subchannel *cio_get_console_subchannel(void);
-#else
-#define cio_is_console(irq) 0
-#define cio_get_console_subchannel() NULL
-#endif
extern int cio_show_msg;
};
if (notify) {
/* Get device online again. */
- cdev->private->state = DEV_STATE_OFFLINE;
ccw_device_online(cdev);
wake_up(&cdev->private->wait_q);
return;
/*
* drivers/s390/cio/device_ops.c
*
- * $Revision: 1.50 $
+ * $Revision: 1.47 $
*
* Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
* IBM Corporation
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/device.h>
-#include <linux/delay.h>
#include <asm/ccwdev.h>
#include <asm/idals.h>
if ((ret == -EBUSY) || (ret == -EACCES)) {
/* Try again later. */
spin_unlock_irq(&sch->lock);
- msleep(10);
+ schedule_timeout(1);
spin_lock_irq(&sch->lock);
continue;
}
break;
/* Try again later. */
spin_unlock_irq(&sch->lock);
- msleep(10);
+ schedule_timeout(1);
spin_lock_irq(&sch->lock);
} while (1);
(irb->scsw.actl & SCSW_ACTL_SUSPENDED)))
ccw_device_path_notoper(cdev);
- if (!(irb->scsw.dstat & DEV_STAT_UNIT_CHECK) &&
- (irb->scsw.dstat & DEV_STAT_CHN_END)) {
+ if (!(irb->scsw.dstat & DEV_STAT_UNIT_CHECK)) {
cdev->private->irb.esw.esw0.erw.cons = 1;
cdev->private->flags.dosense = 0;
}
#include "ioasm.h"
#include "chsc.h"
-#define VERSION_QDIO_C "$Revision: 1.84 $"
+#define VERSION_QDIO_C "$Revision: 1.83 $"
/****************** MODULE PARAMETER VARIABLES ********************/
MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>");
static debug_info_t *qdio_dbf_sbal;
static debug_info_t *qdio_dbf_trace;
static debug_info_t *qdio_dbf_sense;
-#ifdef CONFIG_QDIO_DEBUG
+#ifdef QDIO_DBF_LIKE_HELL
static debug_info_t *qdio_dbf_slsb_out;
static debug_info_t *qdio_dbf_slsb_in;
-#endif /* CONFIG_QDIO_DEBUG */
+#endif /* QDIO_DBF_LIKE_HELL */
/* iQDIO stuff: */
static volatile struct qdio_q *tiq_list=NULL; /* volatile as it could change
qdio_is_outbound_q_done(struct qdio_q *q)
{
int no_used;
-#ifdef CONFIG_QDIO_DEBUG
char dbf_text[15];
-#endif
no_used=atomic_read(&q->number_of_buffers_used);
-#ifdef CONFIG_QDIO_DEBUG
if (no_used) {
sprintf(dbf_text,"oqisnt%02x",no_used);
QDIO_DBF_TEXT4(0,trace,dbf_text);
QDIO_DBF_TEXT4(0,trace,"oqisdone");
}
QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
-#endif /* CONFIG_QDIO_DEBUG */
return (no_used==0);
}
qdio_kick_outbound_q(struct qdio_q *q)
{
int result;
-#ifdef CONFIG_QDIO_DEBUG
char dbf_text[15];
QDIO_DBF_TEXT4(0,trace,"kickoutq");
QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
-#endif /* CONFIG_QDIO_DEBUG */
if (!q->siga_out)
return;
switch (result) {
case 0:
- /* went smooth this time, reset timestamp */
-#ifdef CONFIG_QDIO_DEBUG
+ /* went smooth this time, reset timestamp */
QDIO_DBF_TEXT3(0,trace,"cc2reslv");
sprintf(dbf_text,"%4x%2x%2x",q->irq,q->q_no,
atomic_read(&q->busy_siga_counter));
QDIO_DBF_TEXT3(0,trace,dbf_text);
q->timing.busy_start=0;
-#endif /* CONFIG_QDIO_DEBUG */
break;
case (2|QDIO_SIGA_ERROR_B_BIT_SET):
/* cc=2 and busy bit: */
- atomic_inc(&q->busy_siga_counter);
+ atomic_inc(&q->busy_siga_counter);
/* if the last siga was successful, save
* timestamp here */
break;
}
QDIO_DBF_TEXT2(0,trace,"cc2REPRT");
-#ifdef CONFIG_QDIO_DEBUG
sprintf(dbf_text,"%4x%2x%2x",q->irq,q->q_no,
atomic_read(&q->busy_siga_counter));
QDIO_DBF_TEXT3(0,trace,dbf_text);
-#endif /* CONFIG_QDIO_DEBUG */
/* else fallthrough and report error */
default:
/* for plain cc=1, 2 or 3: */
qdio_kick_outbound_handler(struct qdio_q *q)
{
int start, end, real_end, count;
-#ifdef CONFIG_QDIO_DEBUG
char dbf_text[15];
-#endif
start = q->first_element_to_kick;
/* last_move_ftc was just updated */
count = (end+QDIO_MAX_BUFFERS_PER_Q+1-start)&
(QDIO_MAX_BUFFERS_PER_Q-1);
-#ifdef CONFIG_QDIO_DEBUG
QDIO_DBF_TEXT4(0,trace,"kickouth");
QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
sprintf(dbf_text,"s=%2xc=%2x",start,count);
QDIO_DBF_TEXT4(0,trace,dbf_text);
-#endif /* CONFIG_QDIO_DEBUG */
if (q->state==QDIO_IRQ_STATE_ACTIVE)
q->handler(q->cdev,QDIO_STATUS_OUTBOUND_INT|
int f,f_mod_no;
volatile char *slsb;
int first_not_to_check;
-#ifdef CONFIG_QDIO_DEBUG
char dbf_text[15];
-#endif /* CONFIG_QDIO_DEBUG */
#ifdef QDIO_USE_PROCESSING_STATE
int last_position=-1;
#endif /* QDIO_USE_PROCESSING_STATE */
/* P_ERROR means frontier is reached, break and report error */
case SLSB_P_INPUT_ERROR:
-#ifdef CONFIG_QDIO_DEBUG
sprintf(dbf_text,"inperr%2x",f_mod_no);
QDIO_DBF_TEXT3(1,trace,dbf_text);
-#endif /* CONFIG_QDIO_DEBUG */
QDIO_DBF_HEX2(1,sbal,q->sbal[f_mod_no],256);
/* kind of process the buffer */
iqdio_is_inbound_q_done(struct qdio_q *q)
{
int no_used;
-#ifdef CONFIG_QDIO_DEBUG
char dbf_text[15];
-#endif
no_used=atomic_read(&q->number_of_buffers_used);
/* propagate the change from 82 to 80 through VM */
SYNC_MEMORY;
-#ifdef CONFIG_QDIO_DEBUG
if (no_used) {
sprintf(dbf_text,"iqisnt%02x",no_used);
QDIO_DBF_TEXT4(0,trace,dbf_text);
QDIO_DBF_TEXT4(0,trace,"iniqisdo");
}
QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
-#endif /* CONFIG_QDIO_DEBUG */
if (!no_used)
return 1;
qdio_is_inbound_q_done(struct qdio_q *q)
{
int no_used;
-#ifdef CONFIG_QDIO_DEBUG
char dbf_text[15];
-#endif
no_used=atomic_read(&q->number_of_buffers_used);
* has (probably) not moved (see qdio_inbound_processing)
*/
if (NOW>GET_SAVED_TIMESTAMP(q)+q->timing.threshold) {
-#ifdef CONFIG_QDIO_DEBUG
QDIO_DBF_TEXT4(0,trace,"inqisdon");
QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
sprintf(dbf_text,"pf%02xcn%02x",q->first_to_check,no_used);
QDIO_DBF_TEXT4(0,trace,dbf_text);
-#endif /* CONFIG_QDIO_DEBUG */
return 1;
} else {
-#ifdef CONFIG_QDIO_DEBUG
QDIO_DBF_TEXT4(0,trace,"inqisntd");
QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
sprintf(dbf_text,"pf%02xcn%02x",q->first_to_check,no_used);
QDIO_DBF_TEXT4(0,trace,dbf_text);
-#endif /* CONFIG_QDIO_DEBUG */
return 0;
}
}
qdio_kick_inbound_handler(struct qdio_q *q)
{
int count, start, end, real_end, i;
-#ifdef CONFIG_QDIO_DEBUG
char dbf_text[15];
-#endif
QDIO_DBF_TEXT4(0,trace,"kickinh");
QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
i=(i+1)&(QDIO_MAX_BUFFERS_PER_Q-1);
}
-#ifdef CONFIG_QDIO_DEBUG
sprintf(dbf_text,"s=%2xc=%2x",start,count);
QDIO_DBF_TEXT4(0,trace,dbf_text);
-#endif /* CONFIG_QDIO_DEBUG */
if (likely(q->state==QDIO_IRQ_STATE_ACTIVE))
q->handler(q->cdev,
qdio_set_state(struct qdio_irq *irq_ptr, enum qdio_irq_states state)
{
int i;
-#ifdef CONFIG_QDIO_DEBUG
char dbf_text[15];
QDIO_DBF_TEXT5(0,trace,"newstate");
sprintf(dbf_text,"%4x%4x",irq_ptr->irq,state);
QDIO_DBF_TEXT5(0,trace,dbf_text);
-#endif /* CONFIG_QDIO_DEBUG */
irq_ptr->state=state;
for (i=0;i<irq_ptr->no_input_qs;i++)
int cstat,dstat;
char dbf_text[15];
-#ifdef CONFIG_QDIO_DEBUG
QDIO_DBF_TEXT4(0, trace, "qint");
sprintf(dbf_text, "%s", cdev->dev.bus_id);
QDIO_DBF_TEXT4(0, trace, dbf_text);
-#endif /* CONFIG_QDIO_DEBUG */
if (!intparm) {
QDIO_PRINT_ERR("got unsolicited interrupt in qdio " \
qdio_irq_check_sense(irq_ptr->irq, irb);
-#ifdef CONFIG_QDIO_DEBUG
sprintf(dbf_text, "state:%d", irq_ptr->state);
QDIO_DBF_TEXT4(0, trace, dbf_text);
-#endif /* CONFIG_QDIO_DEBUG */
cstat = irb->scsw.cstat;
dstat = irb->scsw.dstat;
int cc;
struct qdio_q *q;
struct qdio_irq *irq_ptr;
- void *ptr;
-#ifdef CONFIG_QDIO_DEBUG
char dbf_text[15]="SyncXXXX";
-#endif
+ void *ptr;
irq_ptr = cdev->private->qdio_data;
if (!irq_ptr)
return -ENODEV;
-#ifdef CONFIG_QDIO_DEBUG
*((int*)(&dbf_text[4])) = irq_ptr->irq;
QDIO_DBF_HEX4(0,trace,dbf_text,QDIO_DBF_TRACE_LEN);
*((int*)(&dbf_text[0]))=flags;
*((int*)(&dbf_text[4]))=queue_number;
QDIO_DBF_HEX4(0,trace,dbf_text,QDIO_DBF_TRACE_LEN);
-#endif /* CONFIG_QDIO_DEBUG */
if (flags&QDIO_FLAG_SYNC_INPUT) {
q=irq_ptr->input_qs[queue_number];
unsigned int count,struct qdio_buffer *buffers)
{
struct qdio_irq *irq_ptr;
-#ifdef CONFIG_QDIO_DEBUG
+
char dbf_text[20];
sprintf(dbf_text,"doQD%04x",cdev->private->irq);
- QDIO_DBF_TEXT3(0,trace,dbf_text);
-#endif /* CONFIG_QDIO_DEBUG */
+ QDIO_DBF_TEXT3(0,trace,dbf_text);
if ( (qidx>QDIO_MAX_BUFFERS_PER_Q) ||
(count>QDIO_MAX_BUFFERS_PER_Q) ||
if (!irq_ptr)
return -ENODEV;
-#ifdef CONFIG_QDIO_DEBUG
if (callflags&QDIO_FLAG_SYNC_INPUT)
QDIO_DBF_HEX3(0,trace,&irq_ptr->input_qs[queue_number],
sizeof(void*));
QDIO_DBF_TEXT3(0,trace,dbf_text);
sprintf(dbf_text,"qi%02xct%02x",qidx,count);
QDIO_DBF_TEXT3(0,trace,dbf_text);
-#endif /* CONFIG_QDIO_DEBUG */
if (irq_ptr->state!=QDIO_IRQ_STATE_ACTIVE)
return -EBUSY;
debug_unregister(qdio_dbf_sense);
if (qdio_dbf_trace)
debug_unregister(qdio_dbf_trace);
-#ifdef CONFIG_QDIO_DEBUG
+#ifdef QDIO_DBF_LIKE_HELL
if (qdio_dbf_slsb_out)
debug_unregister(qdio_dbf_slsb_out);
if (qdio_dbf_slsb_in)
debug_unregister(qdio_dbf_slsb_in);
-#endif /* CONFIG_QDIO_DEBUG */
+#endif /* QDIO_DBF_LIKE_HELL */
}
static int
debug_register_view(qdio_dbf_trace,&debug_hex_ascii_view);
debug_set_level(qdio_dbf_trace,QDIO_DBF_TRACE_LEVEL);
-#ifdef CONFIG_QDIO_DEBUG
+#ifdef QDIO_DBF_LIKE_HELL
qdio_dbf_slsb_out=debug_register(QDIO_DBF_SLSB_OUT_NAME,
QDIO_DBF_SLSB_OUT_INDEX,
QDIO_DBF_SLSB_OUT_NR_AREAS,
goto oom;
debug_register_view(qdio_dbf_slsb_in,&debug_hex_ascii_view);
debug_set_level(qdio_dbf_slsb_in,QDIO_DBF_SLSB_IN_LEVEL);
-#endif /* CONFIG_QDIO_DEBUG */
+#endif /* QDIO_DBF_LIKE_HELL */
return 0;
oom:
QDIO_PRINT_ERR("not enough memory for dbf.\n");
#ifndef _CIO_QDIO_H
#define _CIO_QDIO_H
-#define VERSION_CIO_QDIO_H "$Revision: 1.26 $"
+#define VERSION_CIO_QDIO_H "$Revision: 1.24 $"
-#ifdef CONFIG_QDIO_DEBUG
+//#define QDIO_DBF_LIKE_HELL
+
+#ifdef QDIO_DBF_LIKE_HELL
#define QDIO_VERBOSE_LEVEL 9
-#else /* CONFIG_QDIO_DEBUG */
+#else /* QDIO_DBF_LIKE_HELL */
#define QDIO_VERBOSE_LEVEL 5
-#endif /* CONFIG_QDIO_DEBUG */
+#endif /* QDIO_DBF_LIKE_HELL */
#define QDIO_USE_PROCESSING_STATE
#define QDIO_DBF_HEX0(ex,name,addr,len) QDIO_DBF_HEX(ex,name,0,addr,len)
#define QDIO_DBF_HEX1(ex,name,addr,len) QDIO_DBF_HEX(ex,name,1,addr,len)
#define QDIO_DBF_HEX2(ex,name,addr,len) QDIO_DBF_HEX(ex,name,2,addr,len)
-#ifdef CONFIG_QDIO_DEBUG
+#ifdef QDIO_DBF_LIKE_HELL
#define QDIO_DBF_HEX3(ex,name,addr,len) QDIO_DBF_HEX(ex,name,3,addr,len)
#define QDIO_DBF_HEX4(ex,name,addr,len) QDIO_DBF_HEX(ex,name,4,addr,len)
#define QDIO_DBF_HEX5(ex,name,addr,len) QDIO_DBF_HEX(ex,name,5,addr,len)
#define QDIO_DBF_HEX6(ex,name,addr,len) QDIO_DBF_HEX(ex,name,6,addr,len)
-#else /* CONFIG_QDIO_DEBUG */
+#else /* QDIO_DBF_LIKE_HELL */
#define QDIO_DBF_HEX3(ex,name,addr,len) do {} while (0)
#define QDIO_DBF_HEX4(ex,name,addr,len) do {} while (0)
#define QDIO_DBF_HEX5(ex,name,addr,len) do {} while (0)
#define QDIO_DBF_HEX6(ex,name,addr,len) do {} while (0)
-#endif /* CONFIG_QDIO_DEBUG */
+#endif /* QDIO_DBF_LIKE_HELL */
#define QDIO_DBF_TEXT0(ex,name,text) QDIO_DBF_TEXT(ex,name,0,text)
#define QDIO_DBF_TEXT1(ex,name,text) QDIO_DBF_TEXT(ex,name,1,text)
#define QDIO_DBF_TEXT2(ex,name,text) QDIO_DBF_TEXT(ex,name,2,text)
-#ifdef CONFIG_QDIO_DEBUG
+#ifdef QDIO_DBF_LIKE_HELL
#define QDIO_DBF_TEXT3(ex,name,text) QDIO_DBF_TEXT(ex,name,3,text)
#define QDIO_DBF_TEXT4(ex,name,text) QDIO_DBF_TEXT(ex,name,4,text)
#define QDIO_DBF_TEXT5(ex,name,text) QDIO_DBF_TEXT(ex,name,5,text)
#define QDIO_DBF_TEXT6(ex,name,text) QDIO_DBF_TEXT(ex,name,6,text)
-#else /* CONFIG_QDIO_DEBUG */
+#else /* QDIO_DBF_LIKE_HELL */
#define QDIO_DBF_TEXT3(ex,name,text) do {} while (0)
#define QDIO_DBF_TEXT4(ex,name,text) do {} while (0)
#define QDIO_DBF_TEXT5(ex,name,text) do {} while (0)
#define QDIO_DBF_TEXT6(ex,name,text) do {} while (0)
-#endif /* CONFIG_QDIO_DEBUG */
+#endif /* QDIO_DBF_LIKE_HELL */
#define QDIO_DBF_SETUP_NAME "qdio_setup"
#define QDIO_DBF_SETUP_LEN 8
#define QDIO_DBF_SETUP_INDEX 2
#define QDIO_DBF_SETUP_NR_AREAS 1
-#ifdef CONFIG_QDIO_DEBUG
+#ifdef QDIO_DBF_LIKE_HELL
#define QDIO_DBF_SETUP_LEVEL 6
-#else /* CONFIG_QDIO_DEBUG */
+#else /* QDIO_DBF_LIKE_HELL */
#define QDIO_DBF_SETUP_LEVEL 2
-#endif /* CONFIG_QDIO_DEBUG */
+#endif /* QDIO_DBF_LIKE_HELL */
#define QDIO_DBF_SBAL_NAME "qdio_labs" /* sbal */
#define QDIO_DBF_SBAL_LEN 256
#define QDIO_DBF_SBAL_INDEX 2
#define QDIO_DBF_SBAL_NR_AREAS 2
-#ifdef CONFIG_QDIO_DEBUG
+#ifdef QDIO_DBF_LIKE_HELL
#define QDIO_DBF_SBAL_LEVEL 6
-#else /* CONFIG_QDIO_DEBUG */
+#else /* QDIO_DBF_LIKE_HELL */
#define QDIO_DBF_SBAL_LEVEL 2
-#endif /* CONFIG_QDIO_DEBUG */
+#endif /* QDIO_DBF_LIKE_HELL */
#define QDIO_DBF_TRACE_NAME "qdio_trace"
#define QDIO_DBF_TRACE_LEN 8
#define QDIO_DBF_TRACE_NR_AREAS 2
-#ifdef CONFIG_QDIO_DEBUG
+#ifdef QDIO_DBF_LIKE_HELL
#define QDIO_DBF_TRACE_INDEX 4
#define QDIO_DBF_TRACE_LEVEL 4 /* -------- could be even more verbose here */
-#else /* CONFIG_QDIO_DEBUG */
+#else /* QDIO_DBF_LIKE_HELL */
#define QDIO_DBF_TRACE_INDEX 2
#define QDIO_DBF_TRACE_LEVEL 2
-#endif /* CONFIG_QDIO_DEBUG */
+#endif /* QDIO_DBF_LIKE_HELL */
#define QDIO_DBF_SENSE_NAME "qdio_sense"
#define QDIO_DBF_SENSE_LEN 64
#define QDIO_DBF_SENSE_INDEX 1
#define QDIO_DBF_SENSE_NR_AREAS 1
-#ifdef CONFIG_QDIO_DEBUG
+#ifdef QDIO_DBF_LIKE_HELL
#define QDIO_DBF_SENSE_LEVEL 6
-#else /* CONFIG_QDIO_DEBUG */
+#else /* QDIO_DBF_LIKE_HELL */
#define QDIO_DBF_SENSE_LEVEL 2
-#endif /* CONFIG_QDIO_DEBUG */
+#endif /* QDIO_DBF_LIKE_HELL */
-#ifdef CONFIG_QDIO_DEBUG
+#ifdef QDIO_DBF_LIKE_HELL
#define QDIO_TRACE_QTYPE QDIO_ZFCP_QFMT
#define QDIO_DBF_SLSB_OUT_NAME "qdio_slsb_out"
#define QDIO_DBF_SLSB_IN_INDEX 8
#define QDIO_DBF_SLSB_IN_NR_AREAS 1
#define QDIO_DBF_SLSB_IN_LEVEL 6
-#endif /* CONFIG_QDIO_DEBUG */
+#endif /* QDIO_DBF_LIKE_HELL */
#define QDIO_PRINTK_HEADER QDIO_NAME ": "
#define QDIO_GET_ADDR(x) ((__u32)(long)x)
#endif /* CONFIG_ARCH_S390X */
-#ifdef CONFIG_QDIO_DEBUG
+#ifdef QDIO_DBF_LIKE_HELL
#define set_slsb(x,y) \
if(q->queue_type==QDIO_TRACE_QTYPE) { \
if(q->is_input_q) { \
QDIO_DBF_HEX2(0,slsb_out,&q->slsb,QDIO_MAX_BUFFERS_PER_Q); \
} \
}
-#else /* CONFIG_QDIO_DEBUG */
+#else /* QDIO_DBF_LIKE_HELL */
#define set_slsb(x,y) qdio_set_slsb(x,y)
-#endif /* CONFIG_QDIO_DEBUG */
+#endif /* QDIO_DBF_LIKE_HELL */
struct qdio_q {
volatile struct slsb slsb;
/*
*
- * linux/drivers/s390/net/ctcdbug.c ($Revision: 1.4 $)
+ * linux/drivers/s390/net/ctcdbug.c ($Revision: 1.1 $)
*
- * CTC / ESCON network driver - s390 dbf exploit.
+ * Linux on zSeries OSA Express and HiperSockets support
*
* Copyright 2000,2003 IBM Corporation
*
* Author(s): Original Code written by
* Peter Tiedemann (ptiedem@de.ibm.com)
*
- * $Revision: 1.4 $ $Date: 2004/08/04 10:11:59 $
+ * $Revision: 1.1 $ $Date: 2004/07/02 16:31:22 $
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
/**
* Debug Facility Stuff
*/
-debug_info_t *ctc_dbf_setup = NULL;
-debug_info_t *ctc_dbf_data = NULL;
-debug_info_t *ctc_dbf_trace = NULL;
+debug_info_t *dbf_setup = NULL;
+debug_info_t *dbf_data = NULL;
+debug_info_t *dbf_trace = NULL;
-DEFINE_PER_CPU(char[256], ctc_dbf_txt_buf);
+DEFINE_PER_CPU(char[256], dbf_txt_buf);
void
-ctc_unregister_dbf_views(void)
+unregister_dbf_views(void)
{
- if (ctc_dbf_setup)
- debug_unregister(ctc_dbf_setup);
- if (ctc_dbf_data)
- debug_unregister(ctc_dbf_data);
- if (ctc_dbf_trace)
- debug_unregister(ctc_dbf_trace);
+ if (dbf_setup)
+ debug_unregister(dbf_setup);
+ if (dbf_data)
+ debug_unregister(dbf_data);
+ if (dbf_trace)
+ debug_unregister(dbf_trace);
}
int
-ctc_register_dbf_views(void)
+register_dbf_views(void)
{
- ctc_dbf_setup = debug_register(CTC_DBF_SETUP_NAME,
+ dbf_setup = debug_register(CTC_DBF_SETUP_NAME,
CTC_DBF_SETUP_INDEX,
CTC_DBF_SETUP_NR_AREAS,
CTC_DBF_SETUP_LEN);
- ctc_dbf_data = debug_register(CTC_DBF_DATA_NAME,
+ dbf_data = debug_register(CTC_DBF_DATA_NAME,
CTC_DBF_DATA_INDEX,
CTC_DBF_DATA_NR_AREAS,
CTC_DBF_DATA_LEN);
- ctc_dbf_trace = debug_register(CTC_DBF_TRACE_NAME,
+ dbf_trace = debug_register(CTC_DBF_TRACE_NAME,
CTC_DBF_TRACE_INDEX,
CTC_DBF_TRACE_NR_AREAS,
CTC_DBF_TRACE_LEN);
- if ((ctc_dbf_setup == NULL) || (ctc_dbf_data == NULL) ||
- (ctc_dbf_trace == NULL)) {
- ctc_unregister_dbf_views();
+ if ((dbf_setup == NULL) || (dbf_data == NULL) ||
+ (dbf_trace == NULL)) {
+ unregister_dbf_views();
return -ENOMEM;
}
- debug_register_view(ctc_dbf_setup, &debug_hex_ascii_view);
- debug_set_level(ctc_dbf_setup, CTC_DBF_SETUP_LEVEL);
+ debug_register_view(dbf_setup, &debug_hex_ascii_view);
+ debug_set_level(dbf_setup, CTC_DBF_SETUP_LEVEL);
- debug_register_view(ctc_dbf_data, &debug_hex_ascii_view);
- debug_set_level(ctc_dbf_data, CTC_DBF_DATA_LEVEL);
+ debug_register_view(dbf_data, &debug_hex_ascii_view);
+ debug_set_level(dbf_data, CTC_DBF_DATA_LEVEL);
- debug_register_view(ctc_dbf_trace, &debug_hex_ascii_view);
- debug_set_level(ctc_dbf_trace, CTC_DBF_TRACE_LEVEL);
+ debug_register_view(dbf_trace, &debug_hex_ascii_view);
+ debug_set_level(dbf_trace, CTC_DBF_TRACE_LEVEL);
return 0;
}
/*
*
- * linux/drivers/s390/net/ctcdbug.h ($Revision: 1.3 $)
+ * linux/drivers/s390/net/ctcdbug.h ($Revision: 1.1 $)
*
- * CTC / ESCON network driver - s390 dbf exploit.
+ * Linux on zSeries OSA Express and HiperSockets support
*
* Copyright 2000,2003 IBM Corporation
*
* Author(s): Original Code written by
* Peter Tiedemann (ptiedem@de.ibm.com)
*
- * $Revision: 1.3 $ $Date: 2004/07/28 12:27:54 $
+ * $Revision: 1.1 $ $Date: 2004/07/02 16:31:22 $
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
#define DBF_TEXT(name,level,text) \
do { \
- debug_text_event(ctc_dbf_##name,level,text); \
+ debug_text_event(dbf_##name,level,text); \
} while (0)
#define DBF_HEX(name,level,addr,len) \
do { \
- debug_event(ctc_dbf_##name,level,(void*)(addr),len); \
+ debug_event(dbf_##name,level,(void*)(addr),len); \
} while (0)
-extern DEFINE_PER_CPU(char[256], ctc_dbf_txt_buf);
-extern debug_info_t *ctc_dbf_setup;
-extern debug_info_t *ctc_dbf_data;
-extern debug_info_t *ctc_dbf_trace;
+extern DEFINE_PER_CPU(char[256], dbf_txt_buf);
+extern debug_info_t *dbf_setup;
+extern debug_info_t *dbf_data;
+extern debug_info_t *dbf_trace;
#define DBF_TEXT_(name,level,text...) \
do { \
- char* ctc_dbf_txt_buf = get_cpu_var(ctc_dbf_txt_buf); \
- sprintf(ctc_dbf_txt_buf, text); \
- debug_text_event(ctc_dbf_##name,level,ctc_dbf_txt_buf); \
- put_cpu_var(ctc_dbf_txt_buf); \
+ char* dbf_txt_buf = get_cpu_var(dbf_txt_buf); \
+ sprintf(dbf_txt_buf, text); \
+ debug_text_event(dbf_##name,level,dbf_txt_buf); \
+ put_cpu_var(dbf_txt_buf); \
} while (0)
#define DBF_SPRINTF(name,level,text...) \
do { \
- debug_sprintf_event(ctc_dbf_trace, level, ##text ); \
- debug_sprintf_event(ctc_dbf_trace, level, text ); \
+ debug_sprintf_event(dbf_trace, level, ##text ); \
+ debug_sprintf_event(dbf_trace, level, text ); \
} while (0)
-int ctc_register_dbf_views(void);
+int register_dbf_views(void);
-void ctc_unregister_dbf_views(void);
+void unregister_dbf_views(void);
/**
* some more debug stuff
/*
- * $Id: ctcmain.c,v 1.63 2004/07/28 12:27:54 ptiedem Exp $
+ * $Id: ctcmain.c,v 1.61 2004/07/02 16:31:22 ptiedem Exp $
*
* CTC / ESCON network driver
*
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * RELEASE-TAG: CTC/ESCON network driver $Revision: 1.63 $
+ * RELEASE-TAG: CTC/ESCON network driver $Revision: 1.61 $
*
*/
\f
print_banner(void)
{
static int printed = 0;
- char vbuf[] = "$Revision: 1.63 $";
+ char vbuf[] = "$Revision: 1.61 $";
char *version = vbuf;
if (printed)
struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
__u16 len = *((__u16 *) pskb->data);
- DBF_TEXT(trace, 4, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
skb_put(pskb, 2 + LL_HEADER_LENGTH);
skb_pull(pskb, 2);
pskb->dev = dev;
if (ch->protocol == CTC_PROTO_LINUX_TTY)
ctc_tty_netif_rx(skb);
else
- netif_rx_ni(skb);
+ netif_rx(skb);
/**
* Successful rx; reset logflags
*/
static void inline
ccw_check_return_code(struct channel *ch, int return_code, char *msg)
{
- DBF_TEXT(trace, 5, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
switch (return_code) {
case 0:
fsm_event(ch->fsm, CH_EVENT_IO_SUCCESS, ch);
static void inline
ccw_unit_check(struct channel *ch, unsigned char sense)
{
- DBF_TEXT(trace, 5, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
if (sense & SNS0_INTERVENTION_REQ) {
if (sense & 0x01) {
if (ch->protocol != CTC_PROTO_LINUX_TTY)
{
struct sk_buff *skb;
- DBF_TEXT(trace, 5, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
while ((skb = skb_dequeue(q))) {
atomic_dec(&skb->users);
static __inline__ int
ctc_checkalloc_buffer(struct channel *ch, int warn)
{
- DBF_TEXT(trace, 5, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
if ((ch->trans_skb == NULL) ||
(ch->flags & CHANNEL_FLAGS_BUFSIZE_CHANGED)) {
if (ch->trans_skb != NULL)
unsigned long duration;
struct timespec done_stamp = xtime;
- DBF_TEXT(trace, 4, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
duration =
(done_stamp.tv_sec - ch->prof.send_stamp.tv_sec) * 1000000 +
{
struct channel *ch = (struct channel *) arg;
- DBF_TEXT(trace, 4, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
fsm_deltimer(&ch->timer);
fsm_newstate(fi, CH_STATE_TXIDLE);
fsm_event(((struct ctc_priv *) ch->netdev->priv)->fsm, DEV_EVENT_TXUP,
int check_len;
int rc;
- DBF_TEXT(trace, 4, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
fsm_deltimer(&ch->timer);
if (len < 8) {
ctc_pr_debug("%s: got packet with length %d < 8\n",
struct channel *ch = (struct channel *) arg;
int rc;
- DBF_TEXT(trace, 4, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
if (fsm_getstate(fi) == CH_STATE_TXIDLE)
ctc_pr_debug("%s: remote side issued READ?, init ...\n", ch->id);
__u16 buflen;
int rc;
- DBF_TEXT(trace, 4, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
fsm_deltimer(&ch->timer);
buflen = *((__u16 *) ch->trans_skb->data);
#ifdef DEBUG
int rc;
unsigned long saveflags;
- DBF_TEXT(trace, 4, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
fsm_deltimer(&ch->timer);
fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
fsm_newstate(fi, CH_STATE_SETUPWAIT);
int rc;
struct net_device *dev;
- DBF_TEXT(trace, 4, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
if (ch == NULL) {
ctc_pr_warn("ch_action_start ch=NULL\n");
return;
int rc;
int oldstate;
- DBF_TEXT(trace, 3, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
fsm_deltimer(&ch->timer);
fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
if (event == CH_EVENT_STOP)
struct channel *ch = (struct channel *) arg;
struct net_device *dev = ch->netdev;
- DBF_TEXT(trace, 3, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
fsm_deltimer(&ch->timer);
fsm_newstate(fi, CH_STATE_STOPPED);
if (ch->trans_skb != NULL) {
struct channel *ch = (struct channel *) arg;
struct net_device *dev = ch->netdev;
- DBF_TEXT(trace, 3, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
fsm_deltimer(&ch->timer);
fsm_newstate(fi, CH_STATE_NOTOP);
if (CHANNEL_DIRECTION(ch->flags) == READ) {
struct channel *ch = (struct channel *) arg;
struct net_device *dev = ch->netdev;
- DBF_TEXT(setup, 3, __FUNCTION__);
+ DBF_TEXT(setup, 2, __FUNCTION__);
/**
* Special case: Got UC_RCRESET on setmode.
* This means that remote side isn't setup. In this case
struct channel *ch = (struct channel *) arg;
struct net_device *dev = ch->netdev;
- DBF_TEXT(trace, 3, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
fsm_deltimer(&ch->timer);
ctc_pr_debug("%s: %s channel restart\n", dev->name,
(CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
struct channel *ch = (struct channel *) arg;
struct net_device *dev = ch->netdev;
- DBF_TEXT(setup, 3, __FUNCTION__);
+ DBF_TEXT(setup, 2, __FUNCTION__);
if (event == CH_EVENT_TIMER) {
fsm_deltimer(&ch->timer);
ctc_pr_debug("%s: Timeout during RX init handshake\n", dev->name);
struct channel *ch = (struct channel *) arg;
struct net_device *dev = ch->netdev;
- DBF_TEXT(setup, 3, __FUNCTION__);
+ DBF_TEXT(setup, 2, __FUNCTION__);
fsm_newstate(fi, CH_STATE_RXERR);
ctc_pr_warn("%s: RX initialization failed\n", dev->name);
ctc_pr_warn("%s: RX <-> RX connection detected\n", dev->name);
struct channel *ch2;
struct net_device *dev = ch->netdev;
- DBF_TEXT(trace, 3, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
fsm_deltimer(&ch->timer);
ctc_pr_debug("%s: Got remote disconnect, re-initializing ...\n",
dev->name);
struct net_device *dev = ch->netdev;
unsigned long saveflags;
- DBF_TEXT(trace, 4, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
fsm_deltimer(&ch->timer);
if (ch->retry++ > 3) {
ctc_pr_debug("%s: TX retry failed, restarting channel\n",
struct channel *ch = (struct channel *) arg;
struct net_device *dev = ch->netdev;
- DBF_TEXT(trace, 3, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
fsm_deltimer(&ch->timer);
if (CHANNEL_DIRECTION(ch->flags) == READ) {
ctc_pr_debug("%s: RX I/O error\n", dev->name);
struct net_device *dev = ch->netdev;
struct ctc_priv *privptr = dev->priv;
- DBF_TEXT(trace, 4, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
ch_action_iofatal(fi, event, arg);
fsm_addtimer(&privptr->restart_timer, 1000, DEV_EVENT_RESTART, dev);
}
{
struct channel *ch = channels;
- DBF_TEXT(trace, 3, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
#ifdef DEBUG
ctc_pr_debug("ctc: %s(): searching for ch with id %s and type %d\n",
__func__, id, type);
struct net_device *dev;
struct ctc_priv *priv;
- DBF_TEXT(trace, 5, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
if (__ctc_check_irb_error(cdev, irb))
return;
struct ctc_priv *privptr = dev->priv;
int direction;
- DBF_TEXT(setup, 3, __FUNCTION__);
+ DBF_TEXT(setup, 2, __FUNCTION__);
fsm_deltimer(&privptr->restart_timer);
fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
for (direction = READ; direction <= WRITE; direction++) {
struct ctc_priv *privptr = dev->priv;
int direction;
- DBF_TEXT(trace, 3, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
for (direction = READ; direction <= WRITE; direction++) {
struct channel *ch = privptr->channel[direction];
struct net_device *dev = (struct net_device *)arg;
struct ctc_priv *privptr = dev->priv;
- DBF_TEXT(trace, 3, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
ctc_pr_debug("%s: Restarting\n", dev->name);
dev_action_stop(fi, event, arg);
fsm_event(privptr->fsm, DEV_EVENT_STOP, dev);
struct net_device *dev = (struct net_device *) arg;
struct ctc_priv *privptr = dev->priv;
- DBF_TEXT(trace, 3, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
switch (fsm_getstate(fi)) {
case DEV_STATE_STARTWAIT_RXTX:
if (event == DEV_EVENT_RXUP)
struct net_device *dev = (struct net_device *) arg;
struct ctc_priv *privptr = dev->priv;
- DBF_TEXT(trace, 3, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
switch (fsm_getstate(fi)) {
case DEV_STATE_RUNNING:
if (privptr->protocol == CTC_PROTO_LINUX_TTY)
struct ll_header header;
int rc = 0;
- DBF_TEXT(trace, 5, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
if (fsm_getstate(ch->fsm) != CH_STATE_TXIDLE) {
int l = skb->len + LL_HEADER_LENGTH;
static int
ctc_open(struct net_device * dev)
{
- DBF_TEXT(trace, 5, __FUNCTION__);
fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_START, dev);
return 0;
}
static int
ctc_close(struct net_device * dev)
{
- DBF_TEXT(trace, 5, __FUNCTION__);
fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_STOP, dev);
return 0;
}
int rc = 0;
struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
- DBF_TEXT(trace, 5, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
/**
* Some sanity checks ...
*/
{
struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
- DBF_TEXT(trace, 3, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
if ((new_mtu < 576) || (new_mtu > 65527) ||
(new_mtu > (privptr->channel[READ]->max_bufsize -
LL_HEADER_LENGTH - 2)))
struct net_device *ndev;
int bs1;
- DBF_TEXT(trace, 5, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
priv = dev->driver_data;
if (!priv)
return -ENODEV;
struct ctc_priv *priv;
int ll1;
- DBF_TEXT(trace, 5, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
priv = dev->driver_data;
if (!priv)
return -ENODEV;
char *sbuf;
char *p;
- DBF_TEXT(trace, 4, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
if (!priv)
return;
sbuf = (char *)kmalloc(2048, GFP_KERNEL);
if (!privptr)
return NULL;
- DBF_TEXT(setup, 3, __FUNCTION__);
+ DBF_TEXT(setup, 2, __FUNCTION__);
if (alloc_device) {
dev = kmalloc(sizeof (struct net_device), GFP_KERNEL);
if (!dev)
struct ctc_priv *priv;
int value;
- DBF_TEXT(trace, 3, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
pr_debug("%s() called\n", __FUNCTION__);
priv = dev->driver_data;
int rc;
pr_debug("%s() called\n", __FUNCTION__);
- DBF_TEXT(trace, 3, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
if (!get_device(&cgdev->dev))
return -ENODEV;
int ret;
pr_debug("%s() called\n", __FUNCTION__);
- DBF_TEXT(setup, 3, __FUNCTION__);
+ DBF_TEXT(setup, 2, __FUNCTION__);
privptr = cgdev->dev.driver_data;
if (!privptr)
struct ctc_priv *priv;
struct net_device *ndev;
- DBF_TEXT(trace, 3, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
pr_debug("%s() called\n", __FUNCTION__);
priv = cgdev->dev.driver_data;
struct ctc_priv *priv;
pr_debug("%s() called\n", __FUNCTION__);
- DBF_TEXT(trace, 3, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
priv = cgdev->dev.driver_data;
if (!priv)
{
unregister_cu3088_discipline(&ctc_group_driver);
ctc_tty_cleanup();
- ctc_unregister_dbf_views();
+ unregister_dbf_views();
ctc_pr_info("CTC driver unloaded\n");
}
print_banner();
- ret = ctc_register_dbf_views();
+ ret = register_dbf_views();
if (ret){
- ctc_pr_crit("ctc_init failed with ctc_register_dbf_views rc = %d\n", ret);
+ ctc_pr_crit("ctc_init failed with register_dbf_views rc = %d\n", ret);
return ret;
}
ctc_tty_init();
ret = register_cu3088_discipline(&ctc_group_driver);
if (ret) {
ctc_tty_cleanup();
- ctc_unregister_dbf_views();
+ unregister_dbf_views();
}
return ret;
}
/*
- * $Id: ctctty.c,v 1.26 2004/08/04 11:06:55 mschwide Exp $
+ * $Id: ctctty.c,v 1.21 2004/07/02 16:31:22 ptiedem Exp $
*
* CTC / ESCON network driver, tty interface.
*
#include <linux/tty.h>
#include <linux/serial_reg.h>
#include <linux/interrupt.h>
-#include <linux/delay.h>
#include <asm/uaccess.h>
#include <linux/devfs_fs_kernel.h>
#include "ctctty.h"
int len;
struct tty_struct *tty;
- DBF_TEXT(trace, 5, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
if ((tty = info->tty)) {
if (info->mcr & UART_MCR_RTS) {
c = TTY_FLIPBUF_SIZE - tty->flip.count;
int ret = 1;
struct tty_struct *tty;
- DBF_TEXT(trace, 5, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
if ((tty = info->tty)) {
if (info->mcr & UART_MCR_RTS) {
int c = TTY_FLIPBUF_SIZE - tty->flip.count;
{
int i;
- DBF_TEXT(trace, 4, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
if ((!driver) || ctc_tty_shuttingdown)
return;
for (i = 0; i < CTC_TTY_MAX_DEVICES; i++)
int i;
ctc_tty_info *info = NULL;
- DBF_TEXT(trace, 5, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
if (!skb)
return;
if ((!skb->dev) || (!driver) || ctc_tty_shuttingdown) {
int wake = 1;
int rc;
- DBF_TEXT(trace, 4, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
if (!info->netdev) {
if (skb)
kfree_skb(skb);
int skb_res;
struct sk_buff *skb;
- DBF_TEXT(trace, 4, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
if (ctc_tty_shuttingdown)
return;
skb_res = info->netdev->hard_header_len + sizeof(info->mcr) +
static void
ctc_tty_transmit_status(ctc_tty_info *info)
{
- DBF_TEXT(trace, 5, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
if (ctc_tty_shuttingdown)
return;
info->flags |= CTC_ASYNC_TX_LINESTAT;
unsigned int quot;
int i;
- DBF_TEXT(trace, 3, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
if (!info->tty || !info->tty->termios)
return;
cflag = info->tty->termios->c_cflag;
static int
ctc_tty_startup(ctc_tty_info * info)
{
- DBF_TEXT(trace, 3, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
if (info->flags & CTC_ASYNC_INITIALIZED)
return 0;
#ifdef CTC_DEBUG_MODEM_OPEN
static void
ctc_tty_shutdown(ctc_tty_info * info)
{
- DBF_TEXT(trace, 3, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
if (!(info->flags & CTC_ASYNC_INITIALIZED))
return;
#ifdef CTC_DEBUG_MODEM_OPEN
int total = 0;
ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
- DBF_TEXT(trace, 5, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
if (ctc_tty_shuttingdown)
goto ex;
if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_write"))
ctc_tty_info *info;
unsigned long flags;
- DBF_TEXT(trace, 4, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
if (!tty)
goto ex;
spin_lock_irqsave(&ctc_tty_lock, flags);
{
ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
- DBF_TEXT(trace, 4, __FUNCTION__);
if (ctc_tty_shuttingdown)
return;
if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_flush_chars"))
{
ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
- DBF_TEXT(trace, 4, __FUNCTION__);
if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_throttle"))
return;
info->mcr &= ~UART_MCR_RTS;
{
ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
- DBF_TEXT(trace, 4, __FUNCTION__);
if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_unthrottle"))
return;
info->mcr |= UART_MCR_RTS;
uint result;
ulong flags;
- DBF_TEXT(trace, 4, __FUNCTION__);
spin_lock_irqsave(&ctc_tty_lock, flags);
status = info->lsr;
spin_unlock_irqrestore(&ctc_tty_lock, flags);
uint result;
ulong flags;
- DBF_TEXT(trace, 4, __FUNCTION__);
if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_ioctl"))
return -ENODEV;
if (tty->flags & (1 << TTY_IO_ERROR))
{
ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
- DBF_TEXT(trace, 4, __FUNCTION__);
if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_ioctl"))
return -ENODEV;
if (tty->flags & (1 << TTY_IO_ERROR))
int error;
int retval;
- DBF_TEXT(trace, 4, __FUNCTION__);
if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_ioctl"))
return -ENODEV;
if (tty->flags & (1 << TTY_IO_ERROR))
{
ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
unsigned int cflag = tty->termios->c_cflag;
-
- DBF_TEXT(trace, 4, __FUNCTION__);
ctc_tty_change_speed(info);
/* Handle transition to B0 */
unsigned long flags;
int retval;
- DBF_TEXT(trace, 4, __FUNCTION__);
/*
* If the device is in the middle of being closed, then block
* until it's done, and then try again.
int retval,
line;
- DBF_TEXT(trace, 3, __FUNCTION__);
line = tty->index;
if (line < 0 || line > CTC_TTY_MAX_DEVICES)
return -ENODEV;
ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
ulong flags;
ulong timeout;
- DBF_TEXT(trace, 3, __FUNCTION__);
+
if (!info || ctc_tty_paranoia_check(info, tty->name, "ctc_tty_close"))
return;
spin_lock_irqsave(&ctc_tty_lock, flags);
*/
timeout = jiffies + HZ;
while (!(info->lsr & UART_LSR_TEMT)) {
+ set_current_state(TASK_INTERRUPTIBLE);
spin_unlock_irqrestore(&ctc_tty_lock, flags);
- msleep(500);
+ schedule_timeout(HZ/2);
spin_lock_irqsave(&ctc_tty_lock, flags);
if (time_after(jiffies,timeout))
break;
{
ctc_tty_info *info = (ctc_tty_info *)tty->driver_data;
unsigned long saveflags;
- DBF_TEXT(trace, 3, __FUNCTION__);
if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_hangup"))
return;
ctc_tty_shutdown(info);
unsigned long saveflags;
int again;
- DBF_TEXT(trace, 3, __FUNCTION__);
spin_lock_irqsave(&ctc_tty_lock, saveflags);
if ((!ctc_tty_shuttingdown) && info) {
again = ctc_tty_tint(info);
ctc_tty_info *info;
struct tty_driver *device;
- DBF_TEXT(trace, 2, __FUNCTION__);
driver = kmalloc(sizeof(ctc_tty_driver), GFP_KERNEL);
if (driver == NULL) {
printk(KERN_WARNING "Out of memory in ctc_tty_modem_init\n");
char *err;
char *p;
- DBF_TEXT(trace, 2, __FUNCTION__);
if ((!dev) || (!dev->name)) {
printk(KERN_WARNING
"ctc_tty_register_netdev called "
unsigned long saveflags;
ctc_tty_info *info = NULL;
- DBF_TEXT(trace, 2, __FUNCTION__);
spin_lock_irqsave(&ctc_tty_lock, saveflags);
for (i = 0; i < CTC_TTY_MAX_DEVICES; i++)
if (driver->info[i].netdev == dev) {
ctc_tty_cleanup(void) {
unsigned long saveflags;
- DBF_TEXT(trace, 2, __FUNCTION__);
spin_lock_irqsave(&ctc_tty_lock, saveflags);
ctc_tty_shuttingdown = 1;
spin_unlock_irqrestore(&ctc_tty_lock, saveflags);
/*
- * $Id: iucv.c,v 1.40 2004/08/04 12:29:33 cborntra Exp $
+ * $Id: iucv.c,v 1.38 2004/07/09 15:59:53 mschwide Exp $
*
* IUCV network driver
*
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * RELEASE-TAG: IUCV lowlevel driver $Revision: 1.40 $
+ * RELEASE-TAG: IUCV lowlevel driver $Revision: 1.38 $
*
*/
\f
#include <asm/io.h>
#include <asm/s390_ext.h>
#include <asm/ebcdic.h>
-#include <asm/smp.h>
#include <asm/ccwdev.h> //for root device stuff
/* FLAGS:
static void
iucv_banner(void)
{
- char vbuf[] = "$Revision: 1.40 $";
+ char vbuf[] = "$Revision: 1.38 $";
char *version = vbuf;
if ((version = strchr(version, ':'))) {
iucv_remove_handler(new_handler);
kfree(new_handler);
switch(rc) {
+ case -ENODEV:
+ err = "No CPU can be reserved";
+ break;
case 0x03:
err = "Directory error";
break;
*/
#include <linux/types.h>
-#include <asm/debug.h>
-
-/**
- * Debug Facility stuff
- */
-#define IUCV_DBF_SETUP_NAME "iucv_setup"
-#define IUCV_DBF_SETUP_LEN 32
-#define IUCV_DBF_SETUP_INDEX 1
-#define IUCV_DBF_SETUP_NR_AREAS 1
-#define IUCV_DBF_SETUP_LEVEL 3
-
-#define IUCV_DBF_DATA_NAME "iucv_data"
-#define IUCV_DBF_DATA_LEN 128
-#define IUCV_DBF_DATA_INDEX 1
-#define IUCV_DBF_DATA_NR_AREAS 1
-#define IUCV_DBF_DATA_LEVEL 2
-
-#define IUCV_DBF_TRACE_NAME "iucv_trace"
-#define IUCV_DBF_TRACE_LEN 16
-#define IUCV_DBF_TRACE_INDEX 2
-#define IUCV_DBF_TRACE_NR_AREAS 1
-#define IUCV_DBF_TRACE_LEVEL 3
-
-#define IUCV_DBF_TEXT(name,level,text) \
- do { \
- debug_text_event(iucv_dbf_##name,level,text); \
- } while (0)
-
-#define IUCV_DBF_HEX(name,level,addr,len) \
- do { \
- debug_event(iucv_dbf_##name,level,(void*)(addr),len); \
- } while (0)
-
-extern DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf);
-
-#define IUCV_DBF_TEXT_(name,level,text...) \
- do { \
- char* iucv_dbf_txt_buf = get_cpu_var(iucv_dbf_txt_buf); \
- sprintf(iucv_dbf_txt_buf, text); \
- debug_text_event(iucv_dbf_##name,level,iucv_dbf_txt_buf); \
- put_cpu_var(iucv_dbf_txt_buf); \
- } while (0)
-
-#define IUCV_DBF_SPRINTF(name,level,text...) \
- do { \
- debug_sprintf_event(iucv_dbf_trace, level, ##text ); \
- debug_sprintf_event(iucv_dbf_trace, level, text ); \
- } while (0)
-
-/**
- * some more debug stuff
- */
-#define IUCV_HEXDUMP16(importance,header,ptr) \
-PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
- "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
- *(((char*)ptr)),*(((char*)ptr)+1),*(((char*)ptr)+2), \
- *(((char*)ptr)+3),*(((char*)ptr)+4),*(((char*)ptr)+5), \
- *(((char*)ptr)+6),*(((char*)ptr)+7),*(((char*)ptr)+8), \
- *(((char*)ptr)+9),*(((char*)ptr)+10),*(((char*)ptr)+11), \
- *(((char*)ptr)+12),*(((char*)ptr)+13), \
- *(((char*)ptr)+14),*(((char*)ptr)+15)); \
-PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
- "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
- *(((char*)ptr)+16),*(((char*)ptr)+17), \
- *(((char*)ptr)+18),*(((char*)ptr)+19), \
- *(((char*)ptr)+20),*(((char*)ptr)+21), \
- *(((char*)ptr)+22),*(((char*)ptr)+23), \
- *(((char*)ptr)+24),*(((char*)ptr)+25), \
- *(((char*)ptr)+26),*(((char*)ptr)+27), \
- *(((char*)ptr)+28),*(((char*)ptr)+29), \
- *(((char*)ptr)+30),*(((char*)ptr)+31));
-
-static inline void
-iucv_hex_dump(unsigned char *buf, size_t len)
-{
- size_t i;
-
- for (i = 0; i < len; i++) {
- if (i && !(i % 16))
- printk("\n");
- printk("%02x ", *(buf + i));
- }
- printk("\n");
-}
-/**
- * end of debug stuff
- */
-
#define uchar unsigned char
#define ushort unsigned short
#define ulong unsigned long
* Frank Pavlic (pavlic@de.ibm.com) and
* Martin Schwidefsky <schwidefsky@de.ibm.com>
*
- * $Revision: 1.85 $ $Date: 2004/08/04 11:05:43 $
+ * $Revision: 1.83 $ $Date: 2004/06/30 12:48:14 $
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
#include <linux/inetdevice.h>
#include <linux/in.h>
#include <linux/igmp.h>
-#include <linux/delay.h>
#include <net/arp.h>
#include <net/ip.h>
/**
* initialization string for output
*/
-#define VERSION_LCS_C "$Revision: 1.85 $"
+#define VERSION_LCS_C "$Revision: 1.83 $"
static char version[] __initdata = "LCS driver ("VERSION_LCS_C "/" VERSION_LCS_H ")";
static char debug_buffer[255];
card->dev->name);
return 0;
}
- msleep(3000);
+ schedule_timeout(3 * HZ);
}
PRINT_ERR("Error in Reseting LCS card!\n");
return -EIO;
/*
- * $Id: netiucv.c,v 1.63 2004/07/27 13:36:05 mschwide Exp $
+ * $Id: netiucv.c,v 1.57 2004/06/30 09:26:40 braunu Exp $
*
* IUCV network driver
*
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * RELEASE-TAG: IUCV network driver $Revision: 1.63 $
+ * RELEASE-TAG: IUCV network driver $Revision: 1.57 $
*
*/
\f
MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
\f
-#define PRINTK_HEADER " iucv: " /* for debugging */
-
-static struct device_driver netiucv_driver = {
- .name = "netiucv",
- .bus = &iucv_bus,
-};
-
/**
* Per connection profiling data
*/
/**
* Linked list of all connection structs.
*/
-static struct iucv_connection *iucv_connections;
+static struct iucv_connection *connections;
/**
* Representation of event-data for the
* match exactly as specified in order to give connection_pending()
* control.
*/
-static __u8 netiucv_mask[] = {
+static __u8 mask[] = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};
\f
-/**
- * Debug Facility Stuff
- */
-static debug_info_t *iucv_dbf_setup = NULL;
-static debug_info_t *iucv_dbf_data = NULL;
-static debug_info_t *iucv_dbf_trace = NULL;
-
-DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf);
-
-static void
-iucv_unregister_dbf_views(void)
-{
- if (iucv_dbf_setup)
- debug_unregister(iucv_dbf_setup);
- if (iucv_dbf_data)
- debug_unregister(iucv_dbf_data);
- if (iucv_dbf_trace)
- debug_unregister(iucv_dbf_trace);
-}
-static int
-iucv_register_dbf_views(void)
-{
- iucv_dbf_setup = debug_register(IUCV_DBF_SETUP_NAME,
- IUCV_DBF_SETUP_INDEX,
- IUCV_DBF_SETUP_NR_AREAS,
- IUCV_DBF_SETUP_LEN);
- iucv_dbf_data = debug_register(IUCV_DBF_DATA_NAME,
- IUCV_DBF_DATA_INDEX,
- IUCV_DBF_DATA_NR_AREAS,
- IUCV_DBF_DATA_LEN);
- iucv_dbf_trace = debug_register(IUCV_DBF_TRACE_NAME,
- IUCV_DBF_TRACE_INDEX,
- IUCV_DBF_TRACE_NR_AREAS,
- IUCV_DBF_TRACE_LEN);
-
- if ((iucv_dbf_setup == NULL) || (iucv_dbf_data == NULL) ||
- (iucv_dbf_trace == NULL)) {
- iucv_unregister_dbf_views();
- return -ENOMEM;
- }
- debug_register_view(iucv_dbf_setup, &debug_hex_ascii_view);
- debug_set_level(iucv_dbf_setup, IUCV_DBF_SETUP_LEVEL);
-
- debug_register_view(iucv_dbf_data, &debug_hex_ascii_view);
- debug_set_level(iucv_dbf_data, IUCV_DBF_DATA_LEVEL);
-
- debug_register_view(iucv_dbf_trace, &debug_hex_ascii_view);
- debug_set_level(iucv_dbf_trace, IUCV_DBF_TRACE_LEVEL);
-
- return 0;
-}
-
/**
* Callback-wrappers, called from lowlevel iucv layer.
*****************************************************************************/
struct sk_buff *skb;
ll_header *header = (ll_header *)pskb->data;
- if (!header->next)
+ if (header->next == 0)
break;
skb_pull(pskb, NETIUCV_HDRLEN);
offset += header->next;
header->next -= NETIUCV_HDRLEN;
if (skb_tailroom(pskb) < header->next) {
- PRINT_WARN("%s: Illegal next field in iucv header: "
+ printk(KERN_WARNING
+ "%s: Illegal next field in iucv header: "
"%d > %d\n",
dev->name, header->next, skb_tailroom(pskb));
- IUCV_DBF_TEXT_(data, 2, "Illegal next field: %d > %d\n",
- header->next, skb_tailroom(pskb));
return;
}
skb_put(pskb, header->next);
pskb->mac.raw = pskb->data;
skb = dev_alloc_skb(pskb->len);
if (!skb) {
- PRINT_WARN("%s Out of memory in netiucv_unpack_skb\n",
+ printk(KERN_WARNING
+ "%s Out of memory in netiucv_unpack_skb\n",
dev->name);
- IUCV_DBF_TEXT(data, 2,
- "Out of memory in netiucv_unpack_skb\n");
privptr->stats.rx_dropped++;
return;
}
struct iucv_event *ev = (struct iucv_event *)arg;
struct iucv_connection *conn = ev->conn;
iucv_MessagePending *eib = (iucv_MessagePending *)ev->data;
- struct netiucv_priv *privptr =(struct netiucv_priv *)conn->netdev->priv;
+ struct netiucv_priv *privptr = (struct netiucv_priv *)conn->netdev->priv;
__u32 msglen = eib->ln1msg2.ipbfln1f;
int rc;
- IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
+ pr_debug("%s() called\n", __FUNCTION__);
if (!conn->netdev) {
/* FRITZ: How to tell iucv LL to drop the msg? */
- PRINT_WARN("Received data for unlinked connection\n");
- IUCV_DBF_TEXT(data, 2,
- "Received data for unlinked connection\n");
+ printk(KERN_WARNING
+ "Received data for unlinked connection\n");
return;
}
if (msglen > conn->max_buffsize) {
/* FRITZ: How to tell iucv LL to drop the msg? */
privptr->stats.rx_dropped++;
- PRINT_WARN("msglen %d > max_buffsize %d\n",
- msglen, conn->max_buffsize);
- IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n",
- msglen, conn->max_buffsize);
return;
}
conn->rx_buff->data = conn->rx_buff->tail = conn->rx_buff->head;
conn->rx_buff->len = 0;
rc = iucv_receive(conn->pathid, eib->ipmsgid, eib->iptrgcls,
conn->rx_buff->data, msglen, NULL, NULL, NULL);
- if (rc || msglen < 5) {
+ if (rc != 0 || msglen < 5) {
privptr->stats.rx_errors++;
- PRINT_WARN("iucv_receive returned %08x\n", rc);
- IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc);
+ printk(KERN_INFO "iucv_receive returned %08x\n", rc);
return;
}
netiucv_unpack_skb(conn, conn->rx_buff);
unsigned long saveflags;
ll_header header;
- IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
+ pr_debug("%s() called\n", __FUNCTION__);
if (conn && conn->netdev && conn->netdev->priv)
privptr = (struct netiucv_priv *)conn->netdev->priv;
conn->prof.tx_pending++;
if (conn->prof.tx_pending > conn->prof.tx_max_pending)
conn->prof.tx_max_pending = conn->prof.tx_pending;
- if (rc) {
+ if (rc != 0) {
conn->prof.tx_pending--;
fsm_newstate(fi, CONN_STATE_IDLE);
if (privptr)
privptr->stats.tx_errors += txpackets;
- PRINT_WARN("iucv_send returned %08x\n", rc);
- IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
+ printk(KERN_INFO "iucv_send returned %08x\n",
+ rc);
} else {
if (privptr) {
privptr->stats.tx_packets += txpackets;
__u16 msglimit;
__u8 udata[16];
- IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+ pr_debug("%s() called\n", __FUNCTION__);
rc = iucv_accept(eib->ippathid, NETIUCV_QUEUELEN_DEFAULT, udata, 0,
conn->handle, conn, NULL, &msglimit);
- if (rc) {
- PRINT_WARN("%s: IUCV accept failed with error %d\n",
+ if (rc != 0) {
+ printk(KERN_WARNING
+ "%s: IUCV accept failed with error %d\n",
netdev->name, rc);
- IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
return;
}
fsm_newstate(fi, CONN_STATE_IDLE);
iucv_ConnectionPending *eib = (iucv_ConnectionPending *)ev->data;
__u8 udata[16];
- IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+ pr_debug("%s() called\n", __FUNCTION__);
iucv_sever(eib->ippathid, udata);
if (eib->ippathid != conn->pathid) {
- PRINT_INFO("%s: IR Connection Pending; "
- "pathid %d does not match original pathid %d\n",
+ printk(KERN_INFO
+ "%s: IR Connection Pending; pathid %d does not match original pathid %d\n",
netdev->name, eib->ippathid, conn->pathid);
- IUCV_DBF_TEXT_(data, 2,
- "connreject: IR pathid %d, conn. pathid %d\n",
- eib->ippathid, conn->pathid);
iucv_sever(conn->pathid, udata);
}
}
struct net_device *netdev = conn->netdev;
struct netiucv_priv *privptr = (struct netiucv_priv *)netdev->priv;
- IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+ pr_debug("%s() called\n", __FUNCTION__);
fsm_deltimer(&conn->timer);
fsm_newstate(fi, CONN_STATE_IDLE);
if (eib->ippathid != conn->pathid) {
- PRINT_INFO("%s: IR Connection Complete; "
- "pathid %d does not match original pathid %d\n",
+ printk(KERN_INFO
+ "%s: IR Connection Complete; pathid %d does not match original pathid %d\n",
netdev->name, eib->ippathid, conn->pathid);
- IUCV_DBF_TEXT_(data, 2,
- "connack: IR pathid %d, conn. pathid %d\n",
- eib->ippathid, conn->pathid);
conn->pathid = eib->ippathid;
}
netdev->tx_queue_len = eib->ipmsglim;
struct iucv_connection *conn = (struct iucv_connection *)arg;
__u8 udata[16];
- IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+ pr_debug("%s() called\n", __FUNCTION__);
fsm_deltimer(&conn->timer);
iucv_sever(conn->pathid, udata);
struct netiucv_priv *privptr = (struct netiucv_priv *)netdev->priv;
__u8 udata[16];
- IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+ pr_debug("%s() called\n", __FUNCTION__);
fsm_deltimer(&conn->timer);
iucv_sever(conn->pathid, udata);
- PRINT_INFO("%s: Remote dropped connection\n", netdev->name);
- IUCV_DBF_TEXT(data, 2,
- "conn_action_connsever: Remote dropped connection\n");
+ printk(KERN_INFO "%s: Remote dropped connection\n",
+ netdev->name);
fsm_newstate(fi, CONN_STATE_STARTWAIT);
fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
}
__u16 msglimit;
int rc;
- IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+ pr_debug("%s() called\n", __FUNCTION__);
- if (!conn->handle) {
- IUCV_DBF_TEXT(trace, 5, "calling iucv_register_program\n");
+ if (conn->handle == 0) {
conn->handle =
- iucv_register_program(iucvMagic, conn->userid,
- netiucv_mask,
+ iucv_register_program(iucvMagic, conn->userid, mask,
&netiucv_ops, conn);
fsm_newstate(fi, CONN_STATE_STARTWAIT);
- if (!conn->handle) {
+ if (conn->handle <= 0) {
fsm_newstate(fi, CONN_STATE_REGERR);
- conn->handle = NULL;
- IUCV_DBF_TEXT(setup, 2,
- "NULL from iucv_register_program\n");
+ conn->handle = 0;
return;
}
- PRINT_DEBUG("%s('%s'): registered successfully\n",
+ pr_debug("%s('%s'): registered successfully\n",
conn->netdev->name, conn->userid);
}
- PRINT_DEBUG("%s('%s'): connecting ...\n",
+ pr_debug("%s('%s'): connecting ...\n",
conn->netdev->name, conn->userid);
/* We must set the state before calling iucv_connect because the callback
fsm_newstate(fi, CONN_STATE_SETUPWAIT);
rc = iucv_connect(&(conn->pathid), NETIUCV_QUEUELEN_DEFAULT, iucvMagic,
- conn->userid, iucv_host, 0, NULL, &msglimit,
- conn->handle, conn);
+ conn->userid, iucv_host, 0, NULL, &msglimit, conn->handle,
+ conn);
switch (rc) {
case 0:
conn->netdev->tx_queue_len = msglimit;
CONN_EVENT_TIMER, conn);
return;
case 11:
- PRINT_INFO("%s: User %s is currently not available.\n",
+ printk(KERN_NOTICE
+ "%s: User %s is currently not available.\n",
conn->netdev->name,
netiucv_printname(conn->userid));
fsm_newstate(fi, CONN_STATE_STARTWAIT);
return;
case 12:
- PRINT_INFO("%s: User %s is currently not ready.\n",
+ printk(KERN_NOTICE
+ "%s: User %s is currently not ready.\n",
conn->netdev->name,
netiucv_printname(conn->userid));
fsm_newstate(fi, CONN_STATE_STARTWAIT);
return;
case 13:
- PRINT_WARN("%s: Too many IUCV connections.\n",
+ printk(KERN_WARNING
+ "%s: Too many IUCV connections.\n",
conn->netdev->name);
fsm_newstate(fi, CONN_STATE_CONNERR);
break;
case 14:
- PRINT_WARN(
+ printk(KERN_WARNING
"%s: User %s has too many IUCV connections.\n",
conn->netdev->name,
netiucv_printname(conn->userid));
fsm_newstate(fi, CONN_STATE_CONNERR);
break;
case 15:
- PRINT_WARN(
+ printk(KERN_WARNING
"%s: No IUCV authorization in CP directory.\n",
conn->netdev->name);
fsm_newstate(fi, CONN_STATE_CONNERR);
break;
default:
- PRINT_WARN("%s: iucv_connect returned error %d\n",
+ printk(KERN_WARNING
+ "%s: iucv_connect returned error %d\n",
conn->netdev->name, rc);
fsm_newstate(fi, CONN_STATE_CONNERR);
break;
}
- IUCV_DBF_TEXT_(setup, 5, "iucv_connect rc is %d\n", rc);
- IUCV_DBF_TEXT(trace, 5, "calling iucv_unregister_program\n");
iucv_unregister_program(conn->handle);
- conn->handle = NULL;
+ conn->handle = 0;
}
static void
struct net_device *netdev = conn->netdev;
struct netiucv_priv *privptr = (struct netiucv_priv *)netdev->priv;
- IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+ pr_debug("%s() called\n", __FUNCTION__);
fsm_deltimer(&conn->timer);
fsm_newstate(fi, CONN_STATE_STOPPED);
netiucv_purge_skb_queue(&conn->collect_queue);
if (conn->handle)
- IUCV_DBF_TEXT(trace, 5, "calling iucv_unregister_program\n");
iucv_unregister_program(conn->handle);
- conn->handle = NULL;
+ conn->handle = 0;
netiucv_purge_skb_queue(&conn->commit_queue);
fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
}
struct iucv_connection *conn = ev->conn;
struct net_device *netdev = conn->netdev;
- PRINT_WARN("%s: Cannot connect without username\n",
+ printk(KERN_WARNING
+ "%s: Cannot connect without username\n",
netdev->name);
- IUCV_DBF_TEXT(data, 2, "conn_action_inval called\n");
}
static const fsm_node conn_fsm[] = {
struct netiucv_priv *privptr = dev->priv;
struct iucv_event ev;
- IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+ pr_debug("%s() called\n", __FUNCTION__);
ev.conn = privptr->conn;
fsm_newstate(fi, DEV_STATE_STARTWAIT);
struct netiucv_priv *privptr = dev->priv;
struct iucv_event ev;
- IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+ pr_debug("%s() called\n", __FUNCTION__);
ev.conn = privptr->conn;
struct net_device *dev = (struct net_device *)arg;
struct netiucv_priv *privptr = dev->priv;
- IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+ pr_debug("%s() called\n", __FUNCTION__);
switch (fsm_getstate(fi)) {
case DEV_STATE_STARTWAIT:
fsm_newstate(fi, DEV_STATE_RUNNING);
- PRINT_INFO("%s: connected with remote side %s\n",
+ printk(KERN_INFO
+ "%s: connected with remote side %s\n",
dev->name, privptr->conn->userid);
- IUCV_DBF_TEXT(setup, 3,
- "connection is up and running\n");
break;
case DEV_STATE_STOPWAIT:
- PRINT_INFO(
- "%s: got connection UP event during shutdown!\n",
+ printk(KERN_INFO
+ "%s: got connection UP event during shutdown!!\n",
dev->name);
- IUCV_DBF_TEXT(data, 2,
- "dev_action_connup: in DEV_STATE_STOPWAIT\n");
break;
}
}
static void
dev_action_conndown(fsm_instance *fi, int event, void *arg)
{
- IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+ pr_debug("%s() called\n", __FUNCTION__);
switch (fsm_getstate(fi)) {
case DEV_STATE_RUNNING:
break;
case DEV_STATE_STOPWAIT:
fsm_newstate(fi, DEV_STATE_STOPPED);
- IUCV_DBF_TEXT(setup, 3, "connection is down\n");
break;
}
}
spin_lock_irqsave(&conn->collect_lock, saveflags);
if (conn->collect_len + l >
- (conn->max_buffsize - NETIUCV_HDRLEN)) {
+ (conn->max_buffsize - NETIUCV_HDRLEN))
rc = -EBUSY;
- IUCV_DBF_TEXT(data, 2,
- "EBUSY from netiucv_transmit_skb\n");
- } else {
+ else {
atomic_inc(&skb->users);
skb_queue_tail(&conn->collect_queue, skb);
conn->collect_len += l;
nskb = alloc_skb(skb->len + NETIUCV_HDRLEN +
NETIUCV_HDRLEN, GFP_ATOMIC | GFP_DMA);
if (!nskb) {
- PRINT_WARN("%s: Could not allocate tx_skb\n",
+ printk(KERN_WARNING
+ "%s: Could not allocate tx_skb\n",
conn->netdev->name);
- IUCV_DBF_TEXT(data, 2, "alloc_skb failed\n");
rc = -ENOMEM;
return rc;
} else {
conn->prof.tx_pending++;
if (conn->prof.tx_pending > conn->prof.tx_max_pending)
conn->prof.tx_max_pending = conn->prof.tx_pending;
- if (rc) {
+ if (rc != 0) {
struct netiucv_priv *privptr;
fsm_newstate(conn->fsm, CONN_STATE_IDLE);
conn->prof.tx_pending--;
skb_pull(skb, NETIUCV_HDRLEN);
skb_trim(skb, skb->len - NETIUCV_HDRLEN);
}
- PRINT_WARN("iucv_send returned %08x\n", rc);
- IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
+ printk(KERN_INFO "iucv_send returned %08x\n",
+ rc);
} else {
if (copied)
dev_kfree_skb(skb);
*/
static int
netiucv_open(struct net_device *dev) {
- fsm_event(((struct netiucv_priv *)dev->priv)->fsm, DEV_EVENT_START,dev);
+ fsm_event(((struct netiucv_priv *)dev->priv)->fsm, DEV_EVENT_START, dev);
return 0;
}
int rc = 0;
struct netiucv_priv *privptr = dev->priv;
- IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
/**
* Some sanity checks ...
*/
if (skb == NULL) {
- PRINT_WARN("%s: NULL sk_buff passed\n", dev->name);
- IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n");
+ printk(KERN_WARNING "%s: NULL sk_buff passed\n", dev->name);
privptr->stats.tx_dropped++;
return 0;
}
- if (skb_headroom(skb) < NETIUCV_HDRLEN) {
- PRINT_WARN("%s: Got sk_buff with head room < %ld bytes\n",
+ if (skb_headroom(skb) < (NETIUCV_HDRLEN)) {
+ printk(KERN_WARNING
+ "%s: Got sk_buff with head room < %ld bytes\n",
dev->name, NETIUCV_HDRLEN);
- IUCV_DBF_TEXT(data, 2,
- "netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n");
dev_kfree_skb(skb);
privptr->stats.tx_dropped++;
return 0;
return 0;
}
- if (netiucv_test_and_set_busy(dev)) {
- IUCV_DBF_TEXT(data, 2, "EBUSY from netiucv_tx\n");
+ if (netiucv_test_and_set_busy(dev))
return -EBUSY;
- }
+
dev->trans_start = jiffies;
- if (netiucv_transmit_skb(privptr->conn, skb))
+ if (netiucv_transmit_skb(privptr->conn, skb) != 0)
rc = 1;
netiucv_clear_busy(dev);
return rc;
static struct net_device_stats *
netiucv_stats (struct net_device * dev)
{
- IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
return &((struct netiucv_priv *)dev->priv)->stats;
}
static int
netiucv_change_mtu (struct net_device * dev, int new_mtu)
{
- IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
- if ((new_mtu < 576) || (new_mtu > NETIUCV_MTU_MAX)) {
- IUCV_DBF_TEXT(setup, 2, "given MTU out of valid range\n");
+ if ((new_mtu < 576) || (new_mtu > NETIUCV_MTU_MAX))
return -EINVAL;
- }
dev->mtu = new_mtu;
return 0;
}
{
struct netiucv_priv *priv = dev->driver_data;
- IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
return sprintf(buf, "%s\n", netiucv_printname(priv->conn->userid));
}
char username[10];
int i;
- IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
if (count>9) {
- PRINT_WARN("netiucv: username too long (%d)!\n", (int)count);
- IUCV_DBF_TEXT_(setup, 2,
- "%d is length of username\n", (int)count);
+ printk(KERN_WARNING
+ "netiucv: username too long (%d)!\n", (int)count);
return -EINVAL;
}
/* trailing lf, grr */
break;
} else {
- PRINT_WARN("netiucv: Invalid char %c in username!\n",
- *p);
- IUCV_DBF_TEXT_(setup, 2,
- "username: invalid character %c\n",
- *p);
+ printk(KERN_WARNING
+ "netiucv: Invalid character in username!\n");
return -EINVAL;
}
}
username[i++] = ' ';
username[9] = '\0';
- if (memcmp(username, priv->conn->userid, 8)) {
+ if (memcmp(username, priv->conn->userid, 8) != 0) {
/* username changed */
if (ndev->flags & (IFF_UP | IFF_RUNNING)) {
- PRINT_WARN(
+ printk(KERN_WARNING
"netiucv: device %s active, connected to %s\n",
dev->bus_id, priv->conn->userid);
- PRINT_WARN("netiucv: user cannot be updated\n");
- IUCV_DBF_TEXT(setup, 2, "user_write: device active\n");
+ printk(KERN_WARNING
+ "netiucv: user cannot be updated\n");
return -EBUSY;
}
}
{
struct netiucv_priv *priv = dev->driver_data;
- IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
return sprintf(buf, "%d\n", priv->conn->max_buffsize);
}
char *e;
int bs1;
- IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
if (count >= 39)
return -EINVAL;
bs1 = simple_strtoul(buf, &e, 0);
if (e && (!isspace(*e))) {
- PRINT_WARN("netiucv: Invalid character in buffer!\n");
- IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %c\n", *e);
+ printk(KERN_WARNING
+ "netiucv: Invalid character in buffer!\n");
return -EINVAL;
}
if (bs1 > NETIUCV_BUFSIZE_MAX) {
- PRINT_WARN("netiucv: Given buffer size %d too large.\n",
- bs1);
- IUCV_DBF_TEXT_(setup, 2,
- "buffer_write: buffer size %d too large\n",
+ printk(KERN_WARNING
+ "netiucv: Given buffer size %d too large.\n",
bs1);
+
return -EINVAL;
}
if ((ndev->flags & IFF_RUNNING) &&
- (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2))) {
- PRINT_WARN("netiucv: Given buffer size %d too small.\n",
- bs1);
- IUCV_DBF_TEXT_(setup, 2,
- "buffer_write: buffer size %d too small\n",
- bs1);
+ (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2)))
return -EINVAL;
- }
- if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN)) {
- PRINT_WARN("netiucv: Given buffer size %d too small.\n",
- bs1);
- IUCV_DBF_TEXT_(setup, 2,
- "buffer_write: buffer size %d too small\n",
- bs1);
+ if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN))
return -EINVAL;
- }
priv->conn->max_buffsize = bs1;
if (!(ndev->flags & IFF_RUNNING))
dev_fsm_show (struct device *dev, char *buf)
{
struct netiucv_priv *priv = dev->driver_data;
-
- IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
+
return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm));
}
conn_fsm_show (struct device *dev, char *buf)
{
struct netiucv_priv *priv = dev->driver_data;
-
- IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
+
return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm));
}
maxmulti_show (struct device *dev, char *buf)
{
struct netiucv_priv *priv = dev->driver_data;
-
- IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
+
return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti);
}
maxmulti_write (struct device *dev, const char *buf, size_t count)
{
struct netiucv_priv *priv = dev->driver_data;
-
- IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
+
priv->conn->prof.maxmulti = 0;
return count;
}
maxcq_show (struct device *dev, char *buf)
{
struct netiucv_priv *priv = dev->driver_data;
-
- IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
+
return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue);
}
{
struct netiucv_priv *priv = dev->driver_data;
- IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
priv->conn->prof.maxcqueue = 0;
return count;
}
sdoio_show (struct device *dev, char *buf)
{
struct netiucv_priv *priv = dev->driver_data;
-
- IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
+
return sprintf(buf, "%ld\n", priv->conn->prof.doios_single);
}
{
struct netiucv_priv *priv = dev->driver_data;
- IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
priv->conn->prof.doios_single = 0;
return count;
}
mdoio_show (struct device *dev, char *buf)
{
struct netiucv_priv *priv = dev->driver_data;
-
- IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
+
return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi);
}
{
struct netiucv_priv *priv = dev->driver_data;
- IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
priv->conn->prof.doios_multi = 0;
return count;
}
txlen_show (struct device *dev, char *buf)
{
struct netiucv_priv *priv = dev->driver_data;
-
- IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
+
return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
}
{
struct netiucv_priv *priv = dev->driver_data;
- IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
priv->conn->prof.txlen = 0;
return count;
}
txtime_show (struct device *dev, char *buf)
{
struct netiucv_priv *priv = dev->driver_data;
-
- IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
+
return sprintf(buf, "%ld\n", priv->conn->prof.tx_time);
}
{
struct netiucv_priv *priv = dev->driver_data;
- IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
priv->conn->prof.tx_time = 0;
return count;
}
{
struct netiucv_priv *priv = dev->driver_data;
- IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending);
}
{
struct netiucv_priv *priv = dev->driver_data;
- IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
priv->conn->prof.tx_pending = 0;
return count;
}
{
struct netiucv_priv *priv = dev->driver_data;
- IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending);
}
{
struct netiucv_priv *priv = dev->driver_data;
- IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
priv->conn->prof.tx_max_pending = 0;
return count;
}
{
int ret;
- IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+ pr_debug("%s() called\n", __FUNCTION__);
+
ret = sysfs_create_group(&dev->kobj, &netiucv_attr_group);
if (ret)
return ret;
static inline void
netiucv_remove_files(struct device *dev)
{
- IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+ pr_debug("%s() called\n", __FUNCTION__);
sysfs_remove_group(&dev->kobj, &netiucv_stat_attr_group);
sysfs_remove_group(&dev->kobj, &netiucv_attr_group);
}
int ret;
- IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+ pr_debug("%s() called\n", __FUNCTION__);
if (dev) {
memset(dev, 0, sizeof(struct device));
* but legitime ...).
*/
dev->release = (void (*)(struct device *))kfree;
- dev->driver = &netiucv_driver;
} else
return -ENOMEM;
ret = netiucv_add_files(dev);
if (ret)
goto out_unreg;
- priv->dev = dev;
dev->driver_data = priv;
+ priv->dev = dev;
return 0;
out_unreg:
static void
netiucv_unregister_device(struct device *dev)
{
- IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+ pr_debug("%s() called\n", __FUNCTION__);
netiucv_remove_files(dev);
device_unregister(dev);
}
/**
* Allocate and initialize a new connection structure.
- * Add it to the list of netiucv connections;
+ * Add it to the list of connections;
*/
static struct iucv_connection *
netiucv_new_connection(struct net_device *dev, char *username)
{
- struct iucv_connection **clist = &iucv_connections;
+ struct iucv_connection **clist = &connections;
struct iucv_connection *conn =
(struct iucv_connection *)
kmalloc(sizeof(struct iucv_connection), GFP_KERNEL);
/**
* Release a connection structure and remove it from the
- * list of netiucv connections.
+ * list of connections.
*/
static void
netiucv_remove_connection(struct iucv_connection *conn)
{
- struct iucv_connection **clist = &iucv_connections;
+ struct iucv_connection **clist = &connections;
+
+ pr_debug("%s() called\n", __FUNCTION__);
- IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
if (conn == NULL)
return;
while (*clist) {
if (*clist == conn) {
*clist = conn->next;
- if (conn->handle) {
+ if (conn->handle != 0) {
iucv_unregister_program(conn->handle);
- conn->handle = NULL;
+ conn->handle = 0;
}
fsm_deltimer(&conn->timer);
kfree_fsm(conn->fsm);
{
struct netiucv_priv *privptr;
- IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+ pr_debug("%s() called\n", __FUNCTION__);
if (!dev)
return;
netiucv_remove_connection(privptr->conn);
if (privptr->fsm)
kfree_fsm(privptr->fsm);
- privptr->conn = NULL; privptr->fsm = NULL;
+ privptr->conn = 0; privptr->fsm = 0;
/* privptr gets freed by free_netdev() */
}
free_netdev(dev);
netiucv_setup_netdevice);
if (!dev)
return NULL;
- if (dev_alloc_name(dev, dev->name) < 0) {
- free_netdev(dev);
- return NULL;
- }
- privptr = (struct netiucv_priv *)dev->priv;
+ privptr = (struct netiucv_priv *)dev->priv;
privptr->fsm = init_fsm("netiucvdev", dev_state_names,
dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS,
dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
- if (!privptr->fsm) {
+ if (privptr->fsm == NULL) {
free_netdev(dev);
return NULL;
}
if (!privptr->conn) {
kfree_fsm(privptr->fsm);
free_netdev(dev);
- IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n");
return NULL;
}
fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
int i, ret;
struct net_device *dev;
- IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
if (count>9) {
- PRINT_WARN("netiucv: username too long (%d)!\n", (int)count);
- IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
+ printk(KERN_WARNING
+ "netiucv: username too long (%d)!\n", (int)count);
return -EINVAL;
}
/* trailing lf, grr */
break;
} else {
- PRINT_WARN("netiucv: Invalid character in username!\n");
- IUCV_DBF_TEXT_(setup, 2,
- "conn_write: invalid character %c\n", *p);
+ printk(KERN_WARNING
+ "netiucv: Invalid character in username!\n");
return -EINVAL;
}
}
username[9] = '\0';
dev = netiucv_init_netdevice(username);
if (!dev) {
- PRINT_WARN(
+ printk(KERN_WARNING
"netiucv: Could not allocate network device structure "
"for user '%s'\n", netiucv_printname(username));
- IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n");
return -ENODEV;
}
-
- if ((ret = netiucv_register_device(dev))) {
- IUCV_DBF_TEXT_(setup, 2,
- "ret %d from netiucv_register_device\n", ret);
+
+ if ((ret = register_netdev(dev))) {
goto out_free_ndev;
}
- /* sysfs magic */
- SET_NETDEV_DEV(dev,
- (struct device*)((struct netiucv_priv*)dev->priv)->dev);
-
- if ((ret = register_netdev(dev))) {
- netiucv_unregister_device((struct device*)
- ((struct netiucv_priv*)dev->priv)->dev);
+ if ((ret = netiucv_register_device(dev))) {
+ unregister_netdev(dev);
goto out_free_ndev;
}
- PRINT_INFO("%s: '%s'\n", dev->name, netiucv_printname(username));
+ /* sysfs magic */
+ SET_NETDEV_DEV(dev, (struct device*)((struct netiucv_priv*)dev->priv)->dev);
+ printk(KERN_INFO "%s: '%s'\n", dev->name, netiucv_printname(username));
return count;
out_free_ndev:
- PRINT_WARN("netiucv: Could not register '%s'\n", dev->name);
- IUCV_DBF_TEXT(setup, 2, "conn_write: could not register\n");
+ printk(KERN_WARNING
+ "netiucv: Could not register '%s'\n", dev->name);
netiucv_free_netdevice(dev);
return ret;
}
static ssize_t
remove_write (struct device_driver *drv, const char *buf, size_t count)
{
- struct iucv_connection **clist = &iucv_connections;
+ struct iucv_connection **clist = &connections;
struct net_device *ndev;
struct netiucv_priv *priv;
struct device *dev;
char *p;
int i;
- IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+ pr_debug("%s() called\n", __FUNCTION__);
if (count >= IFNAMSIZ)
count = IFNAMSIZ-1;
continue;
}
if (ndev->flags & (IFF_UP | IFF_RUNNING)) {
- PRINT_WARN(
+ printk(KERN_WARNING
"netiucv: net device %s active with peer %s\n",
ndev->name, priv->conn->userid);
- PRINT_WARN("netiucv: %s cannot be removed\n",
+ printk(KERN_WARNING
+ "netiucv: %s cannot be removed\n",
ndev->name);
- IUCV_DBF_TEXT(data, 2, "remove_write: still active\n");
return -EBUSY;
}
unregister_netdev(ndev);
netiucv_unregister_device(dev);
return count;
}
- PRINT_WARN("netiucv: net device %s unknown\n", name);
- IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n");
+ printk(KERN_WARNING
+ "netiucv: net device %s unknown\n", name);
return -EINVAL;
}
DRIVER_ATTR(remove, 0200, NULL, remove_write);
+static struct device_driver netiucv_driver = {
+ .name = "netiucv",
+ .bus = &iucv_bus,
+};
+
static void
netiucv_banner(void)
{
- char vbuf[] = "$Revision: 1.63 $";
+ char vbuf[] = "$Revision: 1.57 $";
char *version = vbuf;
if ((version = strchr(version, ':'))) {
*p = '\0';
} else
version = " ??? ";
- PRINT_INFO("NETIUCV driver Version%s initialized\n", version);
+ printk(KERN_INFO "NETIUCV driver Version%s initialized\n", version);
}
static void __exit
netiucv_exit(void)
{
- IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
- while (iucv_connections) {
- struct net_device *ndev = iucv_connections->netdev;
+ while (connections) {
+ struct net_device *ndev = connections->netdev;
struct netiucv_priv *priv = (struct netiucv_priv*)ndev->priv;
struct device *dev = priv->dev;
driver_remove_file(&netiucv_driver, &driver_attr_connection);
driver_remove_file(&netiucv_driver, &driver_attr_remove);
driver_unregister(&netiucv_driver);
- iucv_unregister_dbf_views();
- PRINT_INFO("NETIUCV driver unloaded\n");
+ printk(KERN_INFO "NETIUCV driver unloaded\n");
return;
}
{
int ret;
- ret = iucv_register_dbf_views();
- if (ret) {
- PRINT_WARN("netiucv_init failed, "
- "iucv_register_dbf_views rc = %d\n", ret);
- return ret;
- }
- IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
ret = driver_register(&netiucv_driver);
- if (ret) {
- PRINT_ERR("NETIUCV: failed to register driver.\n");
- IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", ret);
- iucv_unregister_dbf_views();
+ if (ret != 0) {
+ printk(KERN_ERR "NETIUCV: failed to register driver.\n");
return ret;
}
/* Add entry for specifying connections. */
ret = driver_create_file(&netiucv_driver, &driver_attr_connection);
- if (!ret) {
+ if (ret == 0) {
ret = driver_create_file(&netiucv_driver, &driver_attr_remove);
netiucv_banner();
} else {
- PRINT_ERR("NETIUCV: failed to add driver attribute.\n");
- IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_create_file\n", ret);
+ printk(KERN_ERR "NETIUCV: failed to add driver attribute.\n");
driver_unregister(&netiucv_driver);
- iucv_unregister_dbf_views();
}
return ret;
}
#include "qeth_mpc.h"
-#define VERSION_QETH_H "$Revision: 1.113 $"
+#define VERSION_QETH_H "$Revision: 1.111 $"
#ifdef CONFIG_QETH_IPV6
#define QETH_VERSION_IPV6 ":IPv6"
#define SENSE_RESETTING_EVENT_BYTE 1
#define SENSE_RESETTING_EVENT_FLAG 0x80
-#define atomic_swap(a,b) xchg((int *)a.counter, b)
-
/*
* Common IO related definitions
*/
struct qeth_card;
-enum qeth_out_q_states {
- QETH_OUT_Q_UNLOCKED,
- QETH_OUT_Q_LOCKED,
- QETH_OUT_Q_LOCKED_FLUSH,
-};
-
struct qeth_qdio_out_q {
struct qdio_buffer qdio_bufs[QDIO_MAX_BUFFERS_PER_Q];
struct qeth_qdio_out_buffer bufs[QDIO_MAX_BUFFERS_PER_Q];
int queue_no;
struct qeth_card *card;
- atomic_t state;
+ spinlock_t lock;
volatile int do_pack;
/*
* index of buffer to be filled by driver; state EMPTY or PACKING
#ifndef __QETH_FS_H__
#define __QETH_FS_H__
-#define VERSION_QETH_FS_H "$Revision: 1.9 $"
+#define VERSION_QETH_FS_H "$Revision: 1.8 $"
extern const char *VERSION_QETH_PROC_C;
extern const char *VERSION_QETH_SYS_C;
return "HSTR";
case QETH_LINK_TYPE_GBIT_ETH:
return "OSD_1000";
- case QETH_LINK_TYPE_10GBIT_ETH:
- return "OSD_10GIG";
case QETH_LINK_TYPE_LANE_ETH100:
return "OSD_FE_LANE";
case QETH_LINK_TYPE_LANE_TR:
/*
*
- * linux/drivers/s390/net/qeth_main.c ($Revision: 1.130 $)
+ * linux/drivers/s390/net/qeth_main.c ($Revision: 1.125 $)
*
* Linux on zSeries OSA Express and HiperSockets support
*
* Frank Pavlic (pavlic@de.ibm.com) and
* Thomas Spatzier <tspat@de.ibm.com>
*
- * $Revision: 1.130 $ $Date: 2004/08/05 11:21:50 $
+ * $Revision: 1.125 $ $Date: 2004/06/29 17:28:24 $
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
#include "qeth_mpc.h"
#include "qeth_fs.h"
-#define VERSION_QETH_C "$Revision: 1.130 $"
+#define VERSION_QETH_C "$Revision: 1.125 $"
static const char *version = "qeth S/390 OSA-Express driver";
/**
}
add_timer(&timer);
wait_event(reply->wait_q, reply->received);
- del_timer_sync(&timer);
+ del_timer(&timer);
rc = reply->rc;
qeth_put_reply(reply);
return rc;
QETH_DBF_TEXT(qerr,2,"unexeob");
QETH_DBF_TEXT_(qerr,2,"%s",CARD_BUS_ID(card));
QETH_DBF_HEX(misc,4,buffer,sizeof(*buffer));
- dev_kfree_skb_any(skb);
+ dev_kfree_skb_irq(skb);
card->stats.rx_errors++;
return NULL;
}
qeth_rebuild_skb(card, skb, hdr);
/* is device UP ? */
if (!(card->dev->flags & IFF_UP)){
- dev_kfree_skb_any(skb);
+ dev_kfree_skb_irq(skb);
continue;
}
skb->dev = card->dev;
static inline struct qeth_buffer_pool_entry *
qeth_get_buffer_pool_entry(struct qeth_card *card)
{
- struct qeth_buffer_pool_entry *entry;
+ struct qeth_buffer_pool_entry *entry, *tmp;
QETH_DBF_TEXT(trace, 6, "gtbfplen");
- if (!list_empty(&card->qdio.in_buf_pool.entry_list)) {
- entry = list_entry(card->qdio.in_buf_pool.entry_list.next,
- struct qeth_buffer_pool_entry, list);
+ entry = NULL;
+ list_for_each_entry_safe(entry, tmp,
+ &card->qdio.in_buf_pool.entry_list, list){
list_del_init(&entry->list);
- return entry;
+ break;
}
- return NULL;
+ return entry;
}
static inline void
buf->buffer->element[i].flags = 0;
while ((skb = skb_dequeue(&buf->skb_list))){
atomic_dec(&skb->users);
- dev_kfree_skb_any(skb);
+ dev_kfree_skb_irq(skb);
}
}
buf->next_element_to_fill = 0;
QETH_DBF_TEXT(trace, 2, "flushbuf");
QETH_DBF_TEXT_(trace, 2, " err%d", rc);
queue->card->stats.tx_errors += count;
- /* this must not happen under normal circumstances. if it
- * happens something is really wrong -> recover */
- qeth_schedule_recovery(queue->card);
+ /* ok, since do_QDIO went wrong the buffers have not been given
+ * to the hardware. they still belong to us, so we can clear
+ * them and reuse then, i.e. set back next_buf_to_fill*/
+ for (i = index; i < index + count; ++i) {
+ buf = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
+ qeth_clear_output_buffer(queue, buf);
+ }
+ queue->next_buf_to_fill = index;
return;
}
atomic_add(count, &queue->used_buffers);
}
/*
- * Switched to packing state if the number of used buffers on a queue
- * reaches a certain limit.
+ * switches between PACKING and non-PACKING state if needed.
+ * has to be called holding queue->lock
*/
-static inline void
-qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
+static inline int
+qeth_switch_packing_state(struct qeth_qdio_out_q *queue)
{
+ struct qeth_qdio_out_buffer *buffer;
+ int flush_count = 0;
+
+ QETH_DBF_TEXT(trace, 6, "swipack");
if (!queue->do_pack) {
if (atomic_read(&queue->used_buffers)
>= QETH_HIGH_WATERMARK_PACK){
#endif
queue->do_pack = 1;
}
- }
-}
-
-/*
- * Switches from packing to non-packing mode. If there is a packing
- * buffer on the queue this buffer will be prepared to be flushed.
- * In that case 1 is returned to inform the caller. If no buffer
- * has to be flushed, zero is returned.
- */
-static inline int
-qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
-{
- struct qeth_qdio_out_buffer *buffer;
- int flush_count = 0;
-
- if (queue->do_pack) {
+ } else {
if (atomic_read(&queue->used_buffers)
<= QETH_LOW_WATERMARK_PACK) {
/* switch PACKING -> non-PACKING */
return flush_count;
}
-/*
- * Called to flush a packing buffer if no more pci flags are on the queue.
- * Checks if there is a packing buffer and prepares it to be flushed.
- * In that case returns 1, otherwise zero.
- */
-static inline int
-qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue)
+static inline void
+qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue, int under_int)
{
struct qeth_qdio_out_buffer *buffer;
+ int index;
- buffer = &queue->bufs[queue->next_buf_to_fill];
+ index = queue->next_buf_to_fill;
+ buffer = &queue->bufs[index];
if((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
(buffer->next_element_to_fill > 0)){
/* it's a packing buffer */
atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
queue->next_buf_to_fill =
(queue->next_buf_to_fill + 1) % QDIO_MAX_BUFFERS_PER_Q;
- return 1;
- }
- return 0;
-}
-
-static inline void
-qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
-{
- int index;
- int flush_cnt = 0;
-
- /*
- * check if weed have to switch to non-packing mode or if
- * we have to get a pci flag out on the queue
- */
- if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
- !atomic_read(&queue->set_pci_flags_count)){
- if (atomic_swap(&queue->state, QETH_OUT_Q_LOCKED_FLUSH) ==
- QETH_OUT_Q_UNLOCKED) {
- /*
- * If we get in here, there was no action in
- * do_send_packet. So, we check if there is a
- * packing buffer to be flushed here.
- */
- /* TODO: try if we get a performance improvement
- * by calling netif_stop_queue here */
- /* save start index for flushing */
- index = queue->next_buf_to_fill;
- flush_cnt += qeth_switch_to_nonpacking_if_needed(queue);
- if (!flush_cnt &&
- !atomic_read(&queue->set_pci_flags_count))
- flush_cnt +=
- qeth_flush_buffers_on_no_pci(queue);
- /* were done with updating critical queue members */
- atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
- /* flushing can be done outside the lock */
- if (flush_cnt)
- qeth_flush_buffers(queue, 1, index, flush_cnt);
- }
+ qeth_flush_buffers(queue, under_int, index, 1);
}
}
qeth_clear_output_buffer(queue, buffer);
}
atomic_sub(count, &queue->used_buffers);
- /* check if we need to do something on this outbound queue */
- qeth_check_outbound_queue(queue);
netif_wake_queue(card->dev);
#ifdef CONFIG_QETH_PERF_STATS
card->qdio.out_qs[i]->do_pack = 0;
atomic_set(&card->qdio.out_qs[i]->used_buffers,0);
atomic_set(&card->qdio.out_qs[i]->set_pci_flags_count, 0);
- atomic_set(&card->qdio.out_qs[i]->state,
- QETH_OUT_Q_UNLOCKED);
+ spin_lock_init(&card->qdio.out_qs[i]->lock);
}
return 0;
}
card->perf_stats.outbound_start_time = qeth_get_micros();
#endif
/*
- * We only call netif_stop_queue in case of errors. Since we've
- * got our own synchronization on queues we can keep the stack's
- * queue running.
+ * dev_queue_xmit should ensure that we are called packet
+ * after packet
*/
- if ((rc = qeth_send_packet(card, skb)))
- netif_stop_queue(dev);
+ netif_stop_queue(dev);
+ if (!(rc = qeth_send_packet(card, skb)))
+ netif_wake_queue(dev);
#ifdef CONFIG_QETH_PERF_STATS
card->perf_stats.outbound_time += qeth_get_micros() -
QETH_DBF_TEXT(trace, 6, "dosndpfa");
- /* spin until we get the queue ... */
- while (atomic_compare_and_swap(QETH_OUT_Q_UNLOCKED,
- QETH_OUT_Q_LOCKED,
- &queue->state));
- /* ... now we've got the queue */
+ spin_lock(&queue->lock);
index = queue->next_buf_to_fill;
buffer = &queue->bufs[queue->next_buf_to_fill];
/*
*/
if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
card->stats.tx_dropped++;
- atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
+ spin_unlock(&queue->lock);
return -EBUSY;
}
queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
QDIO_MAX_BUFFERS_PER_Q;
- atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
qeth_fill_buffer(queue, buffer, (char *)hdr, skb);
qeth_flush_buffers(queue, 0, index, 1);
+ spin_unlock(&queue->lock);
return 0;
}
QETH_DBF_TEXT(trace, 6, "dosndpkt");
- /* spin until we get the queue ... */
- while (atomic_compare_and_swap(QETH_OUT_Q_UNLOCKED,
- QETH_OUT_Q_LOCKED,
- &queue->state));
+ spin_lock(&queue->lock);
start_index = queue->next_buf_to_fill;
buffer = &queue->bufs[queue->next_buf_to_fill];
/*
*/
if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY){
card->stats.tx_dropped++;
- atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
+ spin_unlock(&queue->lock);
return -EBUSY;
}
- /* check if we need to switch packing state of this queue */
- qeth_switch_to_packing_if_needed(queue);
if (queue->do_pack){
/* does packet fit in current buffer? */
if((QETH_MAX_BUFFER_ELEMENTS(card) -
/* we did a step forward, so check buffer state again */
if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY){
card->stats.tx_dropped++;
+ qeth_flush_buffers(queue, 0, start_index, 1);
+ spin_unlock(&queue->lock);
/* return EBUSY because we sent old packet, not
* the current one */
- rc = -EBUSY;
- goto out;
+ return -EBUSY;
}
}
}
queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
QDIO_MAX_BUFFERS_PER_Q;
}
- /*
- * queue->state will go from LOCKED -> UNLOCKED or from
- * LOCKED_FLUSH -> LOCKED if output_handler wanted to 'notify' us
- * (switch packing state or flush buffer to get another pci flag out).
- * In that case we will enter this loop
- */
- while (atomic_dec_return(&queue->state)){
- /* check if we can go back to non-packing state */
- flush_count += qeth_switch_to_nonpacking_if_needed(queue);
- /*
- * check if we need to flush a packing buffer to get a pci
- * flag out on the queue
- */
- if (!flush_count && !atomic_read(&queue->set_pci_flags_count))
- flush_count += qeth_flush_buffers_on_no_pci(queue);
- }
- /* at this point the queue is UNLOCKED again */
-out:
+ /* check if we need to switch packing state of this queue */
+ flush_count += qeth_switch_packing_state(queue);
+
if (flush_count)
qeth_flush_buffers(queue, 0, start_index, flush_count);
+ if (!atomic_read(&queue->set_pci_flags_count))
+ qeth_flush_buffers_on_no_pci(queue, 0);
+
+ spin_unlock(&queue->lock);
return rc;
}
switch(regnum){
case MII_BMCR: /* Basic mode control register */
rc = BMCR_FULLDPLX;
- if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH)&&
- (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH))
+ if(card->info.link_type != QETH_LINK_TYPE_GBIT_ETH)
rc |= BMCR_SPEED100;
break;
case MII_BMSR: /* Basic mode status register */
/*
*
- * linux/drivers/s390/net/qeth_sys.c ($Revision: 1.33 $)
+ * linux/drivers/s390/net/qeth_sys.c ($Revision: 1.32 $)
*
* Linux on zSeries OSA Express and HiperSockets support
* This file contains code related to sysfs.
#include "qeth_mpc.h"
#include "qeth_fs.h"
-const char *VERSION_QETH_SYS_C = "$Revision: 1.33 $";
+const char *VERSION_QETH_SYS_C = "$Revision: 1.32 $";
/*****************************************************************************/
/* */
(card->state != CARD_STATE_RECOVER))
return -EPERM;
- i = simple_strtoul(buf, &tmp, 10);
+ i = simple_strtoul(buf, &tmp, 16);
if ((i < 0) || (i > MAX_ADD_HHLEN)) {
PRINT_WARN("add_hhlen out of range\n");
return -EINVAL;
*/
/* this drivers version (do not edit !!! generated and updated by cvs) */
-#define ZFCP_AUX_REVISION "$Revision: 1.115 $"
+#define ZFCP_AUX_REVISION "$Revision: 1.114 $"
#include "zfcp_ext.h"
/* written against the module interface */
static int __init zfcp_module_init(void);
+int zfcp_reboot_handler(struct notifier_block *, unsigned long, void *);
+
/* FCP related */
static void zfcp_ns_gid_pn_handler(unsigned long);
/* initialise configuration rw lock */
rwlock_init(&zfcp_data.config_lock);
+ zfcp_data.reboot_notifier.notifier_call = zfcp_reboot_handler;
+ register_reboot_notifier(&zfcp_data.reboot_notifier);
+
/* save address of data structure managing the driver module */
zfcp_data.scsi_host_template.module = THIS_MODULE;
goto out;
out_ccw_register:
+ unregister_reboot_notifier(&zfcp_data.reboot_notifier);
misc_deregister(&zfcp_cfdc_misc);
out_misc_register:
#ifdef CONFIG_S390_SUPPORT
return retval;
}
+/*
+ * This function is called automatically by the kernel whenever a reboot or a
+ * shut-down is initiated and zfcp is still loaded
+ *
+ * locks: zfcp_data.config_sema is taken prior to shutting down the module
+ * and removing all structures
+ * returns: NOTIFY_DONE in all cases
+ */
+int
+zfcp_reboot_handler(struct notifier_block *notifier, unsigned long code,
+ void *ptr)
+{
+ zfcp_ccw_unregister();
+ return NOTIFY_DONE;
+}
+
+
/*
* function: zfcp_cfdc_dev_ioctl
*
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#define ZFCP_CCW_C_REVISION "$Revision: 1.56 $"
+#define ZFCP_CCW_C_REVISION "$Revision: 1.55 $"
#include "zfcp_ext.h"
static int zfcp_ccw_set_online(struct ccw_device *);
static int zfcp_ccw_set_offline(struct ccw_device *);
static int zfcp_ccw_notify(struct ccw_device *, int);
-static void zfcp_ccw_shutdown(struct device *);
static struct ccw_device_id zfcp_ccw_device_id[] = {
{CCW_DEVICE_DEVTYPE(ZFCP_CONTROL_UNIT_TYPE,
.set_online = zfcp_ccw_set_online,
.set_offline = zfcp_ccw_set_offline,
.notify = zfcp_ccw_notify,
- .driver = {
- .shutdown = zfcp_ccw_shutdown,
- },
};
MODULE_DEVICE_TABLE(ccw, zfcp_ccw_device_id);
ccw_driver_unregister(&zfcp_ccw_driver);
}
-/**
- * zfcp_ccw_shutdown - gets called on reboot/shutdown
- *
- * Makes sure that QDIO queues are down when the system gets stopped.
- */
-static void
-zfcp_ccw_shutdown(struct device *dev)
-{
- struct zfcp_adapter *adapter;
-
- adapter = dev_get_drvdata(dev);
- zfcp_erp_adapter_shutdown(adapter, 0);
- zfcp_erp_wait(adapter);
-}
-
#undef ZFCP_LOG_AREA
#define ZFCP_DEF_H
/* this drivers version (do not edit !!! generated and updated by cvs) */
-#define ZFCP_DEF_REVISION "$Revision: 1.81 $"
+#define ZFCP_DEF_REVISION "$Revision: 1.75 $"
/*************************** INCLUDES *****************************************/
#include <linux/miscdevice.h>
#include <linux/major.h>
#include <linux/blkdev.h>
-#include <linux/delay.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_cmnd.h>
#include <asm/qdio.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
+#include <linux/reboot.h>
#include <linux/mempool.h>
#include <linux/syscalls.h>
#include <linux/ioctl.h>
/********************* GENERAL DEFINES *********************************/
/* zfcp version number, it consists of major, minor, and patch-level number */
-#define ZFCP_VERSION "4.1.3"
+#define ZFCP_VERSION "4.0.0"
static inline void *
zfcp_sg_to_address(struct scatterlist *list)
lists */
struct semaphore config_sema; /* serialises configuration
changes */
+ struct notifier_block reboot_notifier; /* used to register cleanup
+ functions */
atomic_t loglevel; /* current loglevel */
char init_busid[BUS_ID_SIZE];
wwn_t init_wwpn;
if (ZFCP_LOG_CHECK(level)) { \
_zfcp_hex_dump(addr, count); \
}
+/*
+ * Not yet optimal but useful:
+ * Waits until the condition is met or the timeout occurs.
+ * The condition may be a function call. This allows to
+ * execute some additional instructions in addition
+ * to a simple condition check.
+ * The timeout is modified on exit and holds the remaining time.
+ * Thus it is zero if a timeout ocurred, i.e. the condition was
+ * not met in the specified interval.
+ */
+#define __ZFCP_WAIT_EVENT_TIMEOUT(timeout, condition) \
+do { \
+ set_current_state(TASK_UNINTERRUPTIBLE); \
+ while (!(condition) && timeout) \
+ timeout = schedule_timeout(timeout); \
+ current->state = TASK_RUNNING; \
+} while (0);
+
+#define ZFCP_WAIT_EVENT_TIMEOUT(waitqueue, timeout, condition) \
+do { \
+ wait_queue_t entry; \
+ init_waitqueue_entry(&entry, current); \
+ add_wait_queue(&waitqueue, &entry); \
+ __ZFCP_WAIT_EVENT_TIMEOUT(timeout, condition) \
+ remove_wait_queue(&waitqueue, &entry); \
+} while (0);
#define zfcp_get_busid_by_adapter(adapter) (adapter->ccw_device->dev.bus_id)
#define zfcp_get_busid_by_port(port) (zfcp_get_busid_by_adapter(port->adapter))
#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
/* this drivers version (do not edit !!! generated and updated by cvs) */
-#define ZFCP_ERP_REVISION "$Revision: 1.61 $"
+#define ZFCP_ERP_REVISION "$Revision: 1.56 $"
#include "zfcp_ext.h"
int retval = 0;
if (send_els->status != 0) {
- ZFCP_LOG_NORMAL("ELS request timed out, force physical port "
- "reopen of port 0x%016Lx on adapter %s\n",
+ ZFCP_LOG_NORMAL("ELS request timed out, physical port reopen "
+ "of port 0x%016Lx on adapter %s failed\n",
port->wwpn, zfcp_get_busid_by_port(port));
debug_text_event(port->adapter->erp_dbf, 3, "forcreop");
retval = zfcp_erp_port_forced_reopen(port, 0);
zfcp_erp_adapter_strategy(struct zfcp_erp_action *erp_action)
{
int retval;
+ unsigned long timeout;
struct zfcp_adapter *adapter = erp_action->adapter;
retval = zfcp_erp_adapter_strategy_close(erp_action);
ZFCP_LOG_INFO("Waiting to allow the adapter %s "
"to recover itself\n",
zfcp_get_busid_by_adapter(adapter));
- msleep(jiffies_to_msecs(ZFCP_TYPE2_RECOVERY_TIME));
+ /*
+ * SUGGESTION: substitute by
+ * timeout = ZFCP_TYPE2_RECOVERY_TIME;
+ * __ZFCP_WAIT_EVENT_TIMEOUT(timeout, 0);
+ */
+ timeout = ZFCP_TYPE2_RECOVERY_TIME;
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(timeout);
}
return retval;
failed_qdio_activate:
debug_text_event(adapter->erp_dbf, 3, "qdio_down1a");
while (qdio_shutdown(adapter->ccw_device,
- QDIO_FLAG_CLEANUP_USING_CLEAR) == -EINPROGRESS)
- msleep(1000);
+ QDIO_FLAG_CLEANUP_USING_CLEAR) == -EINPROGRESS) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(HZ);
+ }
debug_text_event(adapter->erp_dbf, 3, "qdio_down1b");
failed_qdio_establish:
debug_text_event(adapter->erp_dbf, 3, "qdio_down2a");
while (qdio_shutdown(adapter->ccw_device,
- QDIO_FLAG_CLEANUP_USING_CLEAR) == -EINPROGRESS)
- msleep(1000);
+ QDIO_FLAG_CLEANUP_USING_CLEAR) == -EINPROGRESS) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(HZ);
+ }
debug_text_event(adapter->erp_dbf, 3, "qdio_down2b");
/*
ZFCP_LOG_DEBUG("host connection still initialising... "
"waiting and retrying...\n");
/* sleep a little bit before retry */
- msleep(jiffies_to_msecs(ZFCP_EXCHANGE_CONFIG_DATA_SLEEP));
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(ZFCP_EXCHANGE_CONFIG_DATA_SLEEP);
}
} while ((retries--) &&
atomic_test_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
*/
/* this drivers version (do not edit !!! generated and updated by cvs) */
-#define ZFCP_FSF_C_REVISION "$Revision: 1.55 $"
+#define ZFCP_FSF_C_REVISION "$Revision: 1.49 $"
#include "zfcp_ext.h"
ZFCP_LOG_DEBUG("fsf req list of adapter %s not yet empty\n",
zfcp_get_busid_by_adapter(adapter));
/* wait for woken intiators to clean up their requests */
- msleep(jiffies_to_msecs(ZFCP_FSFREQ_CLEANUP_TIMEOUT));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(ZFCP_FSFREQ_CLEANUP_TIMEOUT);
}
/* consistency check */
{
int retval = 0;
unsigned long lock_flags;
- volatile struct qdio_buffer_element *sbale;
/* setup new FSF request */
retval = zfcp_fsf_req_create(erp_action->adapter,
goto out;
}
- sbale = zfcp_qdio_sbale_req(erp_action->fsf_req,
- erp_action->fsf_req->sbal_curr, 0);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
-
/* mark port as being closed */
atomic_set_mask(ZFCP_STATUS_PORT_PHYS_CLOSING,
&erp_action->port->status);
unsigned long *lock_flags)
{
int condition;
+ unsigned long timeout = ZFCP_SBAL_TIMEOUT;
struct zfcp_qdio_queue *req_queue = &adapter->request_queue;
if (unlikely(req_flags & ZFCP_WAIT_FOR_SBAL)) {
- wait_event_interruptible_timeout(adapter->request_wq,
- (condition =
- zfcp_fsf_req_create_sbal_check
- (lock_flags, req_queue, 1)),
- ZFCP_SBAL_TIMEOUT);
+ ZFCP_WAIT_EVENT_TIMEOUT(adapter->request_wq, timeout,
+ (condition =
+ (zfcp_fsf_req_create_sbal_check)
+ (lock_flags, req_queue, 1)));
if (!condition) {
return -EIO;
}
if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status)) {
write_unlock_irqrestore(&req_queue->queue_lock, *lock_flags);
- ret = -EIO;
goto failed_sbals;
}
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#define ZFCP_SYSFS_PORT_C_REVISION "$Revision: 1.41 $"
+#define ZFCP_SYSFS_PORT_C_REVISION "$Revision: 1.40 $"
#include "zfcp_ext.h"
struct zfcp_unit *unit;
fcp_lun_t fcp_lun;
char *endp;
- int retval = 0;
+ int retval = -EINVAL;
down(&zfcp_data.config_sema);
}
fcp_lun = simple_strtoull(buf, &endp, 0);
- if ((endp + 1) < (buf + count)) {
- retval = -EINVAL;
+ if ((endp + 1) < (buf + count))
goto out;
- }
write_lock_irq(&zfcp_data.config_lock);
unit = zfcp_get_unit_by_lun(port, fcp_lun);
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
-#include <linux/delay.h>
#include <asm/oplib.h>
#include <asm/ebus.h>
#define __KERNEL_SYSCALLS__
read_unlock(&tasklist_lock);
if (!found)
break;
- msleep(1000);
+ current->state = TASK_INTERRUPTIBLE;
+ schedule_timeout(HZ);
+ current->state = TASK_RUNNING;
}
kenvctrld_task = NULL;
}
wd_dev.initialized = 1;
}
- return(nonseekable_open(inode, f));
+ return(0);
}
static int wd_release(struct inode *inode, struct file *file)
return(-EINVAL);
}
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
if (count) {
wd_pingtimer(pTimer);
return 1;
if (!found)
break;
- msleep(1000);
+ current->state = TASK_INTERRUPTIBLE;
+ schedule_timeout(HZ);
}
kenvctrld_task = NULL;
}
static int riowd_open(struct inode *inode, struct file *filp)
{
- nonseekable_open(inode, filp);
return 0;
}
static ssize_t riowd_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
if (count) {
riowd_pingtimer();
return 1;
void __init sun4_dvma_init(void)
{
struct sbus_dma *dma;
+ struct sbus_dma *dchain;
struct resource r;
if(sun4_dma_physaddr) {
u32 current_time_ms;
TW_Device_Extension *tw_dev = twa_device_extension_list[iminor(inode)];
int retval = TW_IOCTL_ERROR_OS_EFAULT;
- void __user *argp = (void __user *)arg;
/* Only let one of these through at a time */
if (down_interruptible(&tw_dev->ioctl_sem)) {
}
/* First copy down the driver command */
- if (copy_from_user(&driver_command, argp, sizeof(TW_Ioctl_Driver_Command)))
+ if (copy_from_user(&driver_command, (void *)arg, sizeof(TW_Ioctl_Driver_Command)))
goto out2;
/* Check data buffer size */
tw_ioctl = (TW_Ioctl_Buf_Apache *)cpu_addr;
/* Now copy down the entire ioctl */
- if (copy_from_user(tw_ioctl, argp, driver_command.buffer_length + sizeof(TW_Ioctl_Buf_Apache) - 1))
+ if (copy_from_user(tw_ioctl, (void *)arg, driver_command.buffer_length + sizeof(TW_Ioctl_Buf_Apache) - 1))
goto out3;
/* See which ioctl we are doing */
twa_get_request_id(tw_dev, &request_id);
/* Flag internal command */
- tw_dev->srb[request_id] = NULL;
+ tw_dev->srb[request_id] = 0;
/* Flag chrdev ioctl */
tw_dev->chrdev_request_id = request_id;
}
/* Now copy the entire response to userspace */
- if (copy_to_user(argp, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length - 1) == 0)
+ if (copy_to_user((void *)arg, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length - 1) == 0)
retval = 0;
out3:
/* Now free ioctl buf memory */
/* clear all the negotiated parameters */
__shost_for_each_device(SDp, host)
- SDp->hostdata = NULL;
+ SDp->hostdata = 0;
/* clear all the slots and their pending commands */
for(i = 0; i < NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
config SCSI_BUSLOGIC
tristate "BusLogic SCSI support"
- depends on (PCI || ISA || MCA) && SCSI && (BROKEN || !SPARC64)
+ depends on (PCI || ISA || MCA) && SCSI
---help---
This is support for BusLogic MultiMaster and FlashPoint SCSI Host
Adapters. Consult the SCSI-HOWTO, available from
config SCSI_EATA
tristate "EATA ISA/EISA/PCI (DPT and generic EATA/DMA-compliant boards) support"
- depends on (ISA || EISA || PCI) && SCSI && (BROKEN || !SPARC64)
+ depends on (ISA || EISA || PCI) && SCSI
---help---
This driver supports all EATA/DMA-compliant SCSI host adapters. DPT
ISA and all EISA I/O addresses are probed looking for the "EATA"
config SCSI_GDTH
tristate "Intel/ICP (former GDT SCSI Disk Array) RAID Controller support"
- depends on (ISA || EISA || PCI) && SCSI && (BROKEN || !SPARC64)
+ depends on (ISA || EISA || PCI) && SCSI
---help---
Formerly called GDT SCSI Disk Array Controller Support.
* and see if we can do an information transfer,
* with failures we will restart.
*/
- hostdata->selecting = NULL;
+ hostdata->selecting = 0;
/* RvC: have to preset this to indicate a new command is being performed */
if (!NCR5380_select(instance, tmp,
to go to sleep */
}
- hostdata->selecting = NULL;/* clear this pointer, because we passed the
+ hostdata->selecting = 0; /* clear this pointer, because we passed the
waiting period */
if ((NCR5380_read(STATUS_REG) & (SR_SEL | SR_IO)) == (SR_SEL | SR_IO)) {
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
* only used for debugging.
*/
-#ifdef DBG
+#if DBG
#define FIB_COUNTER_INCREMENT(counter) (counter)++
#else
#define FIB_COUNTER_INCREMENT(counter)
#endif /* !PCMCIA */
static int registered_count=0;
-static struct Scsi_Host *aha152x_host[2];
+static struct Scsi_Host *aha152x_host[2] = {0, 0};
static Scsi_Host_Template aha152x_driver_template;
/*
void (*end)(struct Scsi_Host *);
int spio;
} states[] = {
- { "idle", NULL, NULL, NULL, 0},
- { "unknown", NULL, NULL, NULL, 0},
- { "seldo", NULL, seldo_run, NULL, 0},
- { "seldi", NULL, seldi_run, NULL, 0},
- { "selto", NULL, selto_run, NULL, 0},
- { "busfree", NULL, busfree_run, NULL, 0},
+ { "idle", 0, 0, 0, 0},
+ { "unknown", 0, 0, 0, 0},
+ { "seldo", 0, seldo_run, 0, 0},
+ { "seldi", 0, seldi_run, 0, 0},
+ { "selto", 0, selto_run, 0, 0},
+ { "busfree", 0, busfree_run, 0, 0},
{ "msgo", msgo_init, msgo_run, msgo_end, 1},
{ "cmd", cmd_init, cmd_run, cmd_end, 1},
- { "msgi", NULL, msgi_run, msgi_end, 1},
- { "status", NULL, status_run, NULL, 1},
+ { "msgi", 0, msgi_run, msgi_end, 1},
+ { "status", 0, status_run, 0, 1},
{ "datai", datai_init, datai_run, datai_end, 0},
{ "datao", datao_init, datao_run, datao_end, 0},
- { "parerr", NULL, parerr_run, NULL, 0},
- { "rsti", NULL, rsti_run, NULL, 0},
+ { "parerr", 0, parerr_run, 0, 0},
+ { "rsti", 0, rsti_run, 0, 0},
};
/* setup & interrupt */
if(aha152x_host[i] && aha152x_host[i]->irq==irqno)
return aha152x_host[i];
- return NULL;
+ return 0;
}
static irqreturn_t swintr(int irqno, void *dev_id, struct pt_regs *regs)
goto out_host_put;
}
- if( scsi_add_host(shpnt, NULL) ) {
+ if( scsi_add_host(shpnt, 0) ) {
free_irq(shpnt->irq, shpnt);
printk(KERN_ERR "aha152x%d: failed to add host.\n", shpnt->host_no);
goto out_host_put;
return shpnt;
out_host_put:
- aha152x_host[registered_count]=NULL;
+ aha152x_host[registered_count]=0;
scsi_host_put(shpnt);
- return NULL;
+ return 0;
}
void aha152x_release(struct Scsi_Host *shpnt)
}
}
- SCNEXT(SCpnt) = NULL;
+ SCNEXT(SCpnt) = 0;
SCSEM(SCpnt) = sem;
/* setup scratch area
}
#endif
- return aha152x_internal_queue(SCpnt, NULL, 0, done);
+ return aha152x_internal_queue(SCpnt, 0, 0, done);
}
DO_UNLOCK(flags);
kfree(SCpnt->host_scribble);
- SCpnt->host_scribble=NULL;
+ SCpnt->host_scribble=0;
return SUCCESS;
}
SCpnt->cmd_len = 0;
SCpnt->use_sg = 0;
- SCpnt->request_buffer = NULL;
+ SCpnt->request_buffer = 0;
SCpnt->request_bufflen = 0;
init_timer(&timer);
if (!HOSTDATA(shpnt)->commands)
SETPORT(PORTA, 0);
kfree(SCpnt->host_scribble);
- SCpnt->host_scribble=NULL;
+ SCpnt->host_scribble=0;
ret = SUCCESS;
} else {
next = SCNEXT(ptr);
} else {
printk(DEBUG_LEAD "queue corrupted at %p\n", CMDINFO(ptr), ptr);
- next = NULL;
+ next = 0;
}
if (!ptr->device->soft_reset) {
remove_SC(SCs, ptr);
HOSTDATA(shpnt)->commands--;
kfree(ptr->host_scribble);
- ptr->host_scribble=NULL;
+ ptr->host_scribble=0;
}
ptr = next;
"aha152x: unable to verify geometry for disk with >1GB.\n"
" Using default translation. Please verify yourself.\n"
" Perhaps you need to enable extended translation in the driver.\n"
- " See Documentation/scsi/aha152x.txt for details.\n");
+ " See /usr/src/linux/Documentation/scsi/aha152x.txt for details.\n");
}
} else {
info_array[0] = info[0];
printk(ERR_LEAD "there's already a completed command %p - will cause abort\n", CMDINFO(CURRENT_SC), DONE_SC);
DONE_SC = CURRENT_SC;
- CURRENT_SC = NULL;
+ CURRENT_SC = 0;
DONE_SC->result = error;
} else
printk(KERN_ERR "aha152x: done() called outside of command\n");
#endif
append_SC(&DISCONNECTED_SC, CURRENT_SC);
CURRENT_SC->SCp.phase |= 1 << 16;
- CURRENT_SC = NULL;
+ CURRENT_SC = 0;
} else {
done(shpnt, DID_ERROR << 16);
if(!(DONE_SC->SCp.Status & not_issued)) {
Scsi_Cmnd *ptr = DONE_SC;
- DONE_SC=NULL;
+ DONE_SC=0;
#if 0
DPRINTK(debug_eh, ERR_LEAD "requesting sense\n", CMDINFO(ptr));
#endif
ptr->request_bufflen = sizeof(ptr->sense_buffer);
DO_UNLOCK(flags);
- aha152x_internal_queue(ptr, NULL, check_condition, ptr->scsi_done);
+ aha152x_internal_queue(ptr, 0, check_condition, ptr->scsi_done);
DO_LOCK(flags);
#if 0
} else {
int lun=DONE_SC->device->lun & 0x7;
#endif
Scsi_Cmnd *ptr = DONE_SC;
- DONE_SC=NULL;
+ DONE_SC=0;
/* turn led off, when no commands are in the driver */
HOSTDATA(shpnt)->commands--;
if(ptr->scsi_done != reset_done) {
kfree(ptr->host_scribble);
- ptr->host_scribble=NULL;
+ ptr->host_scribble=0;
}
DO_UNLOCK(flags);
DO_LOCK(flags);
}
- DONE_SC=NULL;
+ DONE_SC=0;
#if defined(AHA152X_STAT)
} else {
HOSTDATA(shpnt)->busfree_without_done_command++;
append_SC(&ISSUE_SC, CURRENT_SC);
DO_UNLOCK(flags);
- CURRENT_SC = NULL;
+ CURRENT_SC = 0;
}
if(!DISCONNECTED_SC) {
remove_SC(&DISCONNECTED_SC, ptr);
kfree(ptr->host_scribble);
- ptr->host_scribble=NULL;
+ ptr->host_scribble=0;
ptr->result = DID_RESET << 16;
ptr->scsi_done(ptr);
printk(KERN_DEBUG "none\n");
printk(KERN_DEBUG "disconnected_SC:\n");
- for (ptr = DISCONNECTED_SC; ptr; ptr = SCDATA(ptr) ? SCNEXT(ptr) : NULL)
+ for (ptr = DISCONNECTED_SC; ptr; ptr = SCDATA(ptr) ? SCNEXT(ptr) : 0)
show_command(ptr);
disp_ports(shpnt);
if(thislength<0) {
DPRINTK(debug_procinfo, KERN_DEBUG "aha152x_proc_info: output too short\n");
- *start = NULL;
+ *start = 0;
return 0;
}
aha152x_config conf;
#endif
#ifdef __ISAPNP__
- struct pnp_dev *dev=NULL, *pnpdev[2] = {NULL, NULL};
+ struct pnp_dev *dev=0, *pnpdev[2] = {0, 0};
#endif
if ( setup_count ) {
#if defined(__ISAPNP__)
} else if( pnpdev[i] ) {
HOSTDATA(shpnt)->pnpdev=pnpdev[i];
- pnpdev[i]=NULL;
+ pnpdev[i]=0;
#endif
}
} else {
for(i=0; i<ARRAY_SIZE(setup); i++) {
aha152x_release(aha152x_host[i]);
- aha152x_host[i]=NULL;
+ aha152x_host[i]=0;
}
}
my_done = SCtmp->scsi_done;
if (SCtmp->host_scribble) {
kfree(SCtmp->host_scribble);
- SCtmp->host_scribble = NULL;
+ SCtmp->host_scribble = 0;
}
/* Fetch the sense data, and tuck it away, in the required slot. The
Adaptec automatically fetches it, and there is no guarantee that
struct ahd_devinfo *devinfo,
u_int lun, cam_status status,
char *message, int verbose_level);
-#ifdef AHD_TARGET_MODE
+#if AHD_TARGET_MODE
static void ahd_setup_target_msgin(struct ahd_softc *ahd,
struct ahd_devinfo *devinfo,
struct scb *scb);
ahd->msgin_index = 0;
}
}
-#ifdef AHD_TARGET_MODE
+#if AHD_TARGET_MODE
else {
if (bus_phase == P_MESGOUT) {
ahd->msg_type =
tstate = ahd->enabled_targets[i];
if (tstate != NULL) {
-#ifdef AHD_TARGET_MODE
+#if AHD_TARGET_MODE
int j;
for (j = 0; j < AHD_NUM_LUNS; j++) {
free(tstate, M_DEVBUF);
}
}
-#ifdef AHD_TARGET_MODE
+#if AHD_TARGET_MODE
if (ahd->black_hole != NULL) {
xpt_free_path(ahd->black_hole->path);
free(ahd->black_hole, M_DEVBUF);
ahd_outb(ahd, CLRSINT3, NTRAMPERR|OSRAMPERR);
ahd_outb(ahd, CLRINT, CLRSCSIINT);
-#ifdef NEEDS_MORE_TESTING
+#if NEEDS_MORE_TESTING
/*
* Always enable abort on incoming L_Qs if this feature is
* supported. We use this to catch invalid SCB references.
if (match != 0)
match = ((lun == slun) || (lun == CAM_LUN_WILDCARD));
if (match != 0) {
-#ifdef AHD_TARGET_MODE
+#if AHD_TARGET_MODE
int group;
group = XPT_FC_GROUP(scb->io_ctx->ccb_h.func_code);
/* Make sure the sequencer is in a safe location. */
ahd_clear_critical_section(ahd);
-#ifdef AHD_TARGET_MODE
+#if AHD_TARGET_MODE
if ((ahd->flags & AHD_TARGETROLE) != 0) {
ahd_run_tqinfifo(ahd, /*paused*/TRUE);
}
}
break;
-#ifdef AIC7XXX_NOT_YET
+#if AIC7XXX_NOT_YET
case TRACEPOINT2:
{
printk(INFO_LEAD "Tracepoint #2 reached.\n", p->host_no,
printk(KERN_INFO "aic7xxx: MMAPed I/O failed, reverting to "
"Programmed I/O.\n");
iounmap((void *) (((unsigned long) temp_p->maddr) & PAGE_MASK));
- temp_p->maddr = NULL;
+ temp_p->maddr = 0;
if(temp_p->base == 0)
{
printk("aic7xxx: <%s> at PCI %d/%d/%d\n",
temp_p->pause = hcntrl | PAUSE | INTEN;
temp_p->base = base;
temp_p->mbase = 0;
- temp_p->maddr = NULL;
+ temp_p->maddr = 0;
temp_p->pci_bus = 0;
temp_p->pci_device_fn = slot;
aic_outb(temp_p, hcntrl | PAUSE, HCNTRL);
u32 reply_size = 0;
u32 __user *user_msg = arg;
u32 __user * user_reply = NULL;
- void *sg_list[pHba->sg_tablesize];
+ ulong sg_list[pHba->sg_tablesize];
u32 sg_offset = 0;
u32 sg_count = 0;
int sg_index = 0;
u32 i = 0;
u32 rcode = 0;
- void *p = NULL;
+ ulong p = 0;
ulong flags = 0;
memset(&msg, 0, MAX_MESSAGE_SIZE*4);
}
sg_size = sg[i].flag_count & 0xffffff;
/* Allocate memory for the transfer */
- p = kmalloc(sg_size, GFP_KERNEL|ADDR32);
- if(!p) {
+ p = (ulong)kmalloc(sg_size, GFP_KERNEL|ADDR32);
+ if(p == 0) {
printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
pHba->name,sg_size,i,sg_count);
rcode = -ENOMEM;
/* Copy in the user's SG buffer if necessary */
if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
// TODO 64bit fix
- if (copy_from_user(p,(void __user *)sg[i].addr_bus, sg_size)) {
+ if (copy_from_user((void*)p,(void*)sg[i].addr_bus, sg_size)) {
printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
rcode = -EFAULT;
goto cleanup;
}
}
//TODO 64bit fix
- sg[i].addr_bus = (u32)virt_to_bus(p);
+ sg[i].addr_bus = (u32)virt_to_bus((void*)p);
}
}
if(! (sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
sg_size = sg[j].flag_count & 0xffffff;
// TODO 64bit fix
- if (copy_to_user((void __user *)sg[j].addr_bus,sg_list[j], sg_size)) {
- printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
+ if (copy_to_user((void*)sg[j].addr_bus,(void*)sg_list[j], sg_size)) {
+ printk(KERN_WARNING"%s: Could not copy %lx TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
rcode = -EFAULT;
goto cleanup;
}
while(sg_index) {
if(sg_list[--sg_index]) {
if (rcode != -ETIME && rcode != -EINTR)
- kfree(sg_list[sg_index]);
+ kfree((void*)(sg_list[sg_index]));
}
}
return rcode;
u32 base;
int i;
-#ifdef CHECKPAL
+#if CHECKPAL
u8 pal1, pal2, pal3;
#endif
if (EISAbases[i]) { /* Still a possibility ? */
base = 0x1c88 + (i * 0x1000);
-#ifdef CHECKPAL
+#if CHECKPAL
pal1 = inb((u16) base - 8);
pal2 = inb((u16) base - 7);
pal3 = inb((u16) base - 6);
}
/* Nothing found here so we take it from the list */
EISAbases[i] = 0;
-#ifdef CHECKPAL
+#if CHECKPAL
}
#endif
}
EISAbases[x] = 0;
}
}
-#ifdef CHECK_BLINK
+#if CHECK_BLINK
else if (check_blink_state(base)) {
printk("eata_pio: HBA is in BLINK state.\n" "Consult your HBAs manual to correct this.\n");
}
static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count);
static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp);
+static inline void dma_clear(struct NCR_ESP *esp);
static void dma_dump_state(struct NCR_ESP *esp);
static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length);
static void dma_init_write(struct NCR_ESP *esp, __u32 vaddr, int length);
* via PIO.
*/
-static inline void dma_clear(struct NCR_ESP *esp)
-{
- struct fastlane_dma_registers *dregs =
- (struct fastlane_dma_registers *) (esp->dregs);
- unsigned long *t;
-
- ctrl_data = (ctrl_data & FASTLANE_DMA_MASK);
- dregs->ctrl_reg = ctrl_data;
-
- t = (unsigned long *)(esp->edev);
-
- dregs->clear_strobe = 0;
- *t = 0 ;
-}
-
/***************************************************************** Detection */
int __init fastlane_esp_detect(Scsi_Host_Template *tpnt)
{
dregs->ctrl_reg = ctrl_data;
}
+static inline void dma_clear(struct NCR_ESP *esp)
+{
+ struct fastlane_dma_registers *dregs =
+ (struct fastlane_dma_registers *) (esp->dregs);
+ unsigned long *t;
+
+ ctrl_data = (ctrl_data & FASTLANE_DMA_MASK);
+ dregs->ctrl_reg = ctrl_data;
+
+ t = (unsigned long *)(esp->edev);
+
+ dregs->clear_strobe = 0;
+ *t = 0 ;
+}
+
static void dma_ints_off(struct NCR_ESP *esp)
{
static int fdomain_isa_detect( int *irq, int *iobase )
{
-#ifndef PCMCIA
int i, j;
int base = 0xdeadbeef;
int flag = 0;
*iobase = base;
return 1; /* success */
-#else
- return 0;
-#endif
}
/* PCI detection function: int fdomain_pci_bios_detect(int* irq, int*
if (!(overrides[current_override].NCR5380_map_name))
continue;
- ports = NULL;
+ ports = 0;
switch (overrides[current_override].board) {
case BOARD_NCR5380:
flags = FLAG_NO_PSEUDO_DMA;
.drives = LIST_HEAD_INIT(idescsi_driver.drives),
};
-static int ide_scsi_warned;
-
static int idescsi_ide_open(struct inode *inode, struct file *filp)
{
ide_drive_t *drive = inode->i_bdev->bd_disk->private_data;
drive->usage++;
- if (!ide_scsi_warned++) {
- printk(KERN_WARNING "ide-scsi: Warning this device driver is only intended for specialist devices.\n");
- printk(KERN_WARNING "ide-scsi: Do not use for cd burning, use /dev/hdX directly instead.\n");
- }
return 0;
}
unsigned int cmd, unsigned long arg)
{
struct block_device *bdev = inode->i_bdev;
- return generic_ide_ioctl(file, bdev, cmd, arg);
+ return generic_ide_ioctl(bdev, cmd, arg);
}
static struct block_device_operations idescsi_ops = {
return -ENODEV;
}
-static int imm_adjust_queue(struct scsi_device *device)
-{
- blk_queue_bounce_limit(device->request_queue, BLK_BOUNCE_HIGH);
- return 0;
-}
-
static struct scsi_host_template imm_template = {
.module = THIS_MODULE,
.proc_name = "imm",
.cmd_per_lun = 1,
.use_clustering = ENABLE_CLUSTERING,
.can_queue = 1,
- .slave_alloc = imm_adjust_queue,
};
/***************************************************************************
static char *setup_args[] = { "", "", "", "", "", "", "", "", "" };
/* filled in by 'insmod' */
-static char *setup_strings;
+static char *setup_strings = 0;
+#ifdef MODULE_PARM
MODULE_PARM(setup_strings, "s");
+#endif
static inline uchar read_3393(struct IN2000_hostdata *hostdata, uchar reg_num)
{
*/
cmd = (Scsi_Cmnd *) hostdata->input_Q;
- prev = NULL;
+ prev = 0;
while (cmd) {
if (!(hostdata->busy[cmd->device->id] & (1 << cmd->device->lun)))
break;
*/
tmp = (Scsi_Cmnd *) hostdata->input_Q;
- prev = NULL;
+ prev = 0;
while (tmp) {
if (tmp == cmd) {
if (prev)
*/
if (!done_setup && setup_strings)
- in2000_setup(setup_strings, NULL);
+ in2000_setup(setup_strings, 0);
detect_count = 0;
for (bios = 0; bios_tab[bios]; bios++) {
case ATA_PROT_DMA:
case ATA_PROT_ATAPI_DMA:
- case ATA_PROT_ATAPI:
/* check status of DMA engine */
host_stat = ata_bmdma_status(ap);
VPRINTK("BUS_DMA (host_stat 0x%X)\n", host_stat);
}
free_irq(host_set->irq, host_set);
- if (host_set->ops->host_stop)
- host_set->ops->host_stop(host_set);
if (host_set->mmio_base)
iounmap(host_set->mmio_base);
+ if (host_set->ops->host_stop)
+ host_set->ops->host_stop(host_set);
for (i = 0; i < host_set->n_ports; i++) {
ap = host_set->ports[i];
*/
if (cp == tp->nego_cp)
- tp->nego_cp = NULL;
+ tp->nego_cp = 0;
/*
** If auto-sense performed, change scsi status.
if (cp == lp->held_ccb) {
xpt_que_splice(&lp->skip_ccbq, &lp->wait_ccbq);
xpt_que_init(&lp->skip_ccbq);
- lp->held_ccb = NULL;
+ lp->held_ccb = 0;
}
}
} else {
script_ofs = dsp;
script_size = 0;
- script_base = NULL;
+ script_base = 0;
script_name = "mem";
}
if (!(cmd & 6)) {
cp = np->header.cp;
if (CCB_PHYS(cp, phys) != dsa)
- cp = NULL;
+ cp = 0;
} else {
cp = np->ccb;
while (cp && (CCB_PHYS (cp, phys) != dsa))
** try to find the interrupted script command,
** and the address at which to continue.
*/
- vdsp = NULL;
+ vdsp = 0;
nxtdsp = 0;
if (dsp > np->p_script &&
dsp <= np->p_script + sizeof(struct script)) {
u_char scntl3;
u_char chg, ofs, per, fak, wide;
u_char num = INB (nc_dsps);
- struct ccb *cp=NULL;
+ struct ccb *cp=0;
u_long dsa = INL (nc_dsa);
u_char target = INB (nc_sdid) & 0x0f;
struct tcb *tp = &np->target[target];
if (cp->magic) {
PRINT_LUN(np, tn, ln);
printk ("ccb free list corrupted (@%p)\n", cp);
- cp = NULL;
+ cp = 0;
}
else {
xpt_insque_tail(qp, &lp->wait_ccbq);
{
struct tcb *tp = &np->target[tn];
struct lcb *lp = tp->lp[ln];
- struct ccb *cp = NULL;
+ struct ccb *cp = 0;
/*
** Allocate memory for this CCB.
NCR_LOCK_NCB(np, flags);
ncr_exception(np);
done_list = np->done_list;
- np->done_list = NULL;
+ np->done_list = 0;
NCR_UNLOCK_NCB(np, flags);
if (DEBUG_FLAGS & DEBUG_TINY) printk ("]\n");
NCR_LOCK_NCB(np, flags);
ncr_timeout(np);
done_list = np->done_list;
- np->done_list = NULL;
+ np->done_list = 0;
NCR_UNLOCK_NCB(np, flags);
if (done_list) {
sts = ncr_reset_bus(np, cmd, 1);
done_list = np->done_list;
- np->done_list = NULL;
+ np->done_list = 0;
NCR_UNLOCK_NCB(np, flags);
ncr_flush_done_cmds(done_list);
sts = ncr_abort_command(np, cmd);
out:
done_list = np->done_list;
- np->done_list = NULL;
+ np->done_list = 0;
NCR_UNLOCK_NCB(np, flags);
ncr_flush_done_cmds(done_list);
#ifdef DEBUG_WAITING_LIST
printk("%s: cmd %lx inserted into waiting list\n", ncr_name(np), (u_long) cmd);
#endif
- cmd->next_wcmd = NULL;
+ cmd->next_wcmd = 0;
if (!(wcmd = np->waiting_list)) np->waiting_list = cmd;
else {
while ((wcmd->next_wcmd) != 0)
if (cmd == *pcmd) {
if (to_remove) {
*pcmd = (struct scsi_cmnd *) cmd->next_wcmd;
- cmd->next_wcmd = NULL;
+ cmd->next_wcmd = 0;
}
#ifdef DEBUG_WAITING_LIST
printk("%s: cmd %lx retrieved from waiting list\n", ncr_name(np), (u_long) cmd);
}
pcmd = (struct scsi_cmnd **) &(*pcmd)->next_wcmd;
}
- return NULL;
+ return 0;
}
static void process_waiting_list(struct ncb *np, int sts)
struct scsi_cmnd *waiting_list, *wcmd;
waiting_list = np->waiting_list;
- np->waiting_list = NULL;
+ np->waiting_list = 0;
#ifdef DEBUG_WAITING_LIST
if (waiting_list) printk("%s: waiting_list=%lx processing sts=%d\n", ncr_name(np), (u_long) waiting_list, sts);
#endif
while ((wcmd = waiting_list) != 0) {
waiting_list = (struct scsi_cmnd *) wcmd->next_wcmd;
- wcmd->next_wcmd = NULL;
+ wcmd->next_wcmd = 0;
if (sts == DID_OK) {
#ifdef DEBUG_WAITING_LIST
printk("%s: cmd %lx trying to requeue\n", ncr_name(np), (u_long) wcmd);
int length, int func)
{
struct host_data *host_data;
- struct ncb *ncb = NULL;
+ struct ncb *ncb = 0;
int retv;
#ifdef DEBUG_PROC_INFO
**==========================================================
*/
#ifdef MODULE
-char *ncr53c8xx; /* command line passed by insmod */
+char *ncr53c8xx = 0; /* command line passed by insmod */
MODULE_PARM(ncr53c8xx, "s");
#endif
int unit, struct ncr_device *device)
{
struct host_data *host_data;
- struct ncb *np = NULL;
- struct Scsi_Host *instance = NULL;
+ struct ncb *np = 0;
+ struct Scsi_Host *instance = 0;
u_long flags = 0;
int i;
thislength = pos - (buffer + offset);
if(thislength < 0) {
- *start = NULL;
+ *start = 0;
return 0;
}
goto out;
}
+ if (ppos != &filp->f_pos) {
+ /* "A request was outside the capabilities of the device." */
+ retval = (-ENXIO);
+ goto out;
+ }
+
if (STp->ready != ST_READY) {
if (STp->ready == ST_NO_TAPE)
retval = (-ENOMEDIUM);
goto out;
}
+ if (ppos != &filp->f_pos) {
+ /* "A request was outside the capabilities of the device." */
+ retval = (-ENXIO);
+ goto out;
+ }
+
if (STp->ready != ST_READY) {
if (STp->ready == ST_NO_TAPE)
retval = (-ENOMEDIUM);
int dev = TAPE_NR(inode);
int mode = TAPE_MODE(inode);
- nonseekable_open(inode, filp);
write_lock(&os_scsi_tapes_lock);
if (dev >= osst_max_dev || os_scsi_tapes == NULL ||
(STp = os_scsi_tapes[dev]) == NULL || !STp->device) {
qla1280_req_pkt(struct scsi_qla_host *ha)
{
struct device_reg *reg = ha->iobase;
- request_t *pkt = NULL;
+ request_t *pkt = 0;
int cnt;
uint32_t timer;
{
struct device_reg *reg = ha->iobase;
struct response *pkt;
- struct srb *sp = NULL;
+ struct srb *sp = 0;
uint16_t mailbox[MAILBOX_REGISTER_COUNT];
uint16_t *wptr;
uint32_t index;
if (index < MAX_OUTSTANDING_COMMANDS)
sp = ha->outstanding_cmds[index];
else
- sp = NULL;
+ sp = 0;
if (sp) {
/* Free outstanding command slot. */
- ha->outstanding_cmds[index] = NULL;
+ ha->outstanding_cmds[index] = 0;
/* Save ISP completion status */
CMD_RESULT(sp->cmd) = 0;
}
/* Free outstanding command slot. */
- ha->outstanding_cmds[handle] = NULL;
+ ha->outstanding_cmds[handle] = 0;
cmd = sp->cmd;
if (handle < MAX_OUTSTANDING_COMMANDS)
sp = ha->outstanding_cmds[handle];
else
- sp = NULL;
+ sp = 0;
if (sp) {
/* Free outstanding command slot. */
- ha->outstanding_cmds[handle] = NULL;
+ ha->outstanding_cmds[handle] = 0;
/* Bad payload or header */
if (pkt->entry_status & (BIT_3 + BIT_2)) {
sp = ha->outstanding_cmds[index];
if (sp) {
/* Free outstanding command slot. */
- ha->outstanding_cmds[index] = NULL;
+ ha->outstanding_cmds[index] = 0;
if (ha->actthreads)
ha->actthreads--;
/* Validate handle. */
if (pkt->handle < MAX_OUTSTANDING_COMMANDS) {
sp = ha->outstanding_cmds[pkt->handle];
- ha->outstanding_cmds[pkt->handle] = NULL;
+ ha->outstanding_cmds[pkt->handle] = 0;
} else
sp = NULL;
if (sp) {
/* Free outstanding command slot. */
- ha->outstanding_cmds[pkt->handle] = NULL;
+ ha->outstanding_cmds[pkt->handle] = 0;
if (ha->actthreads)
ha->actthreads--;
sp->lun_queue->out_cnt--;
CMD_ENTRY_STATUS(sp->cmd) = pkt->entry_status;
/* Free outstanding command slot. */
- ha->outstanding_cmds[pkt->handle1] = NULL;
+ ha->outstanding_cmds[pkt->handle1] = 0;
add_to_done_queue(ha, sp);
}
* If you do not delete the provisions above, a recipient may use your
* version of this file under either the OSL or the GPL.
*
- * 0.02
- * - Added support for CK804 SATA controller.
- *
- * 0.01
- * - Initial revision.
*/
#include <linux/config.h>
#include <linux/libata.h>
#define DRV_NAME "sata_nv"
-#define DRV_VERSION "0.02"
+#define DRV_VERSION "0.01"
#define NV_PORTS 2
#define NV_PIO_MASK 0x1f
#define NV_PORT1_SCR_REG_OFFSET 0x40
#define NV_INT_STATUS 0x10
-#define NV_INT_STATUS_CK804 0x440
#define NV_INT_STATUS_PDEV_INT 0x01
#define NV_INT_STATUS_PDEV_PM 0x02
#define NV_INT_STATUS_PDEV_ADDED 0x04
NV_INT_STATUS_SDEV_HOTPLUG)
#define NV_INT_ENABLE 0x11
-#define NV_INT_ENABLE_CK804 0x441
#define NV_INT_ENABLE_PDEV_MASK 0x01
#define NV_INT_ENABLE_PDEV_PM 0x02
#define NV_INT_ENABLE_PDEV_ADDED 0x04
#define NV_INT_CONFIG 0x12
#define NV_INT_CONFIG_METHD 0x01 // 0 = INT, 1 = SMI
-// For PCI config register 20
-#define NV_MCP_SATA_CFG_20 0x50
-#define NV_MCP_SATA_CFG_20_SATA_SPACE_EN 0x04
-
static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
irqreturn_t nv_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg);
static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
static void nv_host_stop (struct ata_host_set *host_set);
-static void nv_enable_hotplug(struct ata_probe_ent *probe_ent);
-static void nv_disable_hotplug(struct ata_host_set *host_set);
-static void nv_check_hotplug(struct ata_host_set *host_set);
-static void nv_enable_hotplug_ck804(struct ata_probe_ent *probe_ent);
-static void nv_disable_hotplug_ck804(struct ata_host_set *host_set);
-static void nv_check_hotplug_ck804(struct ata_host_set *host_set);
-
-enum nv_host_type
-{
- NFORCE2,
- NFORCE3,
- CK804
-};
static struct pci_device_id nv_pci_tbl[] = {
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, NFORCE2 },
+ PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, NFORCE3 },
+ PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, NFORCE3 },
+ PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
+ PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
+ PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
+ PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
+ PCI_ANY_ID, PCI_ANY_ID, },
{ 0, } /* terminate list */
};
-#define NV_HOST_FLAGS_SCR_MMIO 0x00000001
-
-struct nv_host_desc
-{
- enum nv_host_type host_type;
- unsigned long host_flags;
- void (*enable_hotplug)(struct ata_probe_ent *probe_ent);
- void (*disable_hotplug)(struct ata_host_set *host_set);
- void (*check_hotplug)(struct ata_host_set *host_set);
-
-};
-static struct nv_host_desc nv_device_tbl[] = {
- {
- .host_type = NFORCE2,
- .host_flags = 0x00000000,
- .enable_hotplug = nv_enable_hotplug,
- .disable_hotplug= nv_disable_hotplug,
- .check_hotplug = nv_check_hotplug,
- },
- {
- .host_type = NFORCE3,
- .host_flags = 0x00000000,
- .enable_hotplug = nv_enable_hotplug,
- .disable_hotplug= nv_disable_hotplug,
- .check_hotplug = nv_check_hotplug,
- },
- { .host_type = CK804,
- .host_flags = NV_HOST_FLAGS_SCR_MMIO,
- .enable_hotplug = nv_enable_hotplug_ck804,
- .disable_hotplug= nv_disable_hotplug_ck804,
- .check_hotplug = nv_check_hotplug_ck804,
- },
-};
-
-struct nv_host
-{
- struct nv_host_desc *host_desc;
-};
-
static struct pci_driver nv_pci_driver = {
.name = DRV_NAME,
.id_table = nv_pci_tbl,
irqreturn_t nv_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
{
struct ata_host_set *host_set = dev_instance;
- struct nv_host *host = host_set->private_data;
unsigned int i;
unsigned int handled = 0;
unsigned long flags;
+ u8 intr_status;
+ u8 intr_enable;
spin_lock_irqsave(&host_set->lock, flags);
handled += ata_host_intr(ap, qc);
}
- }
+ intr_status = inb(ap->ioaddr.scr_addr + NV_INT_STATUS);
+ intr_enable = inb(ap->ioaddr.scr_addr + NV_INT_ENABLE);
+
+ // Clear interrupt status.
+ outb(0xff, ap->ioaddr.scr_addr + NV_INT_STATUS);
+
+ if (intr_status & NV_INT_STATUS_HOTPLUG) {
+ if (intr_status & NV_INT_STATUS_PDEV_ADDED) {
+ printk(KERN_WARNING "ata%u: "
+ "Primary device added\n", ap->id);
+ }
+
+ if (intr_status & NV_INT_STATUS_PDEV_REMOVED) {
+ printk(KERN_WARNING "ata%u: "
+ "Primary device removed\n", ap->id);
+ }
- if (host->host_desc->check_hotplug)
- host->host_desc->check_hotplug(host_set);
+ if (intr_status & NV_INT_STATUS_SDEV_ADDED) {
+ printk(KERN_WARNING "ata%u: "
+ "Secondary device added\n", ap->id);
+ }
+
+ if (intr_status & NV_INT_STATUS_SDEV_REMOVED) {
+ printk(KERN_WARNING "ata%u: "
+ "Secondary device removed\n", ap->id);
+ }
+ }
+ }
spin_unlock_irqrestore(&host_set->lock, flags);
static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg)
{
- struct ata_host_set *host_set = ap->host_set;
- struct nv_host *host = host_set->private_data;
-
if (sc_reg > SCR_CONTROL)
return 0xffffffffU;
- if (host->host_desc->host_flags & NV_HOST_FLAGS_SCR_MMIO)
- return readl(ap->ioaddr.scr_addr + (sc_reg * 4));
- else
- return inl(ap->ioaddr.scr_addr + (sc_reg * 4));
+ return inl(ap->ioaddr.scr_addr + (sc_reg * 4));
}
static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
{
- struct ata_host_set *host_set = ap->host_set;
- struct nv_host *host = host_set->private_data;
-
if (sc_reg > SCR_CONTROL)
return;
- if (host->host_desc->host_flags & NV_HOST_FLAGS_SCR_MMIO)
- writel(val, ap->ioaddr.scr_addr + (sc_reg * 4));
- else
- outl(val, ap->ioaddr.scr_addr + (sc_reg * 4));
+ outl(val, ap->ioaddr.scr_addr + (sc_reg * 4));
}
static void nv_host_stop (struct ata_host_set *host_set)
{
- struct nv_host *host = host_set->private_data;
+ int i;
- // Disable hotplug event interrupts.
- if (host->host_desc->disable_hotplug)
- host->host_desc->disable_hotplug(host_set);
+ for (i=0; i<host_set->n_ports; i++) {
+ u8 intr_mask;
- kfree(host);
+ // Disable hotplug event interrupts.
+ intr_mask = inb(host_set->ports[i]->ioaddr.scr_addr +
+ NV_INT_ENABLE);
+ intr_mask &= ~(NV_INT_ENABLE_HOTPLUG);
+ outb(intr_mask, host_set->ports[i]->ioaddr.scr_addr +
+ NV_INT_ENABLE);
+ }
}
static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
static int printed_version = 0;
- struct nv_host *host;
struct ata_probe_ent *probe_ent = NULL;
+ int i;
int rc;
if (!printed_version++)
goto err_out_regions;
}
- host = kmalloc(sizeof(struct nv_host), GFP_KERNEL);
- if (!host) {
- rc = -ENOMEM;
- goto err_out_free_ent;
- }
-
- host->host_desc = &nv_device_tbl[ent->driver_data];
-
memset(probe_ent, 0, sizeof(*probe_ent));
INIT_LIST_HEAD(&probe_ent->node);
ATA_FLAG_SATA_RESET |
ATA_FLAG_SRST |
ATA_FLAG_NO_LEGACY;
-
probe_ent->port_ops = &nv_ops;
probe_ent->n_ports = NV_PORTS;
probe_ent->irq = pdev->irq;
pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
probe_ent->port[0].bmdma_addr =
pci_resource_start(pdev, 4) | NV_PORT0_BMDMA_REG_OFFSET;
+ probe_ent->port[0].scr_addr =
+ pci_resource_start(pdev, 5) | NV_PORT0_SCR_REG_OFFSET;
probe_ent->port[1].cmd_addr = pci_resource_start(pdev, 2);
ata_std_ports(&probe_ent->port[1]);
pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
probe_ent->port[1].bmdma_addr =
pci_resource_start(pdev, 4) | NV_PORT1_BMDMA_REG_OFFSET;
+ probe_ent->port[1].scr_addr =
+ pci_resource_start(pdev, 5) | NV_PORT1_SCR_REG_OFFSET;
- probe_ent->private_data = host;
-
- if (host->host_desc->host_flags & NV_HOST_FLAGS_SCR_MMIO) {
- unsigned long base;
+ pci_set_master(pdev);
- probe_ent->mmio_base = ioremap(pci_resource_start(pdev, 5),
- pci_resource_len(pdev, 5));
- if (probe_ent->mmio_base == NULL)
- goto err_out_free_ent;
+ rc = ata_device_add(probe_ent);
+ if (rc != NV_PORTS)
+ goto err_out_regions;
- base = (unsigned long)probe_ent->mmio_base;
+ // Enable hotplug event interrupts.
+ for (i=0; i<probe_ent->n_ports; i++) {
+ u8 intr_mask;
- probe_ent->port[0].scr_addr =
- base + NV_PORT0_SCR_REG_OFFSET;
- probe_ent->port[1].scr_addr =
- base + NV_PORT1_SCR_REG_OFFSET;
- } else {
+ outb(NV_INT_STATUS_HOTPLUG, probe_ent->port[i].scr_addr +
+ NV_INT_STATUS);
- probe_ent->port[0].scr_addr =
- pci_resource_start(pdev, 5) | NV_PORT0_SCR_REG_OFFSET;
- probe_ent->port[1].scr_addr =
- pci_resource_start(pdev, 5) | NV_PORT1_SCR_REG_OFFSET;
+ intr_mask = inb(probe_ent->port[i].scr_addr + NV_INT_ENABLE);
+ intr_mask |= NV_INT_ENABLE_HOTPLUG;
+ outb(intr_mask, probe_ent->port[i].scr_addr + NV_INT_ENABLE);
}
- pci_set_master(pdev);
-
- // Enable hotplug event interrupts.
- if (host->host_desc->enable_hotplug)
- host->host_desc->enable_hotplug(probe_ent);
-
- rc = ata_device_add(probe_ent);
- if (rc != NV_PORTS)
- goto err_out_free_ent;
-
kfree(probe_ent);
return 0;
-err_out_free_ent:
- kfree(probe_ent);
-
err_out_regions:
pci_release_regions(pdev);
return rc;
}
-static void nv_enable_hotplug(struct ata_probe_ent *probe_ent)
-{
- u8 intr_mask;
-
- outb(NV_INT_STATUS_HOTPLUG,
- (unsigned long)probe_ent->mmio_base + NV_INT_STATUS);
-
- intr_mask = inb((unsigned long)probe_ent->mmio_base + NV_INT_ENABLE);
- intr_mask |= NV_INT_ENABLE_HOTPLUG;
-
- outb(intr_mask, (unsigned long)probe_ent->mmio_base + NV_INT_ENABLE);
-}
-
-static void nv_disable_hotplug(struct ata_host_set *host_set)
-{
- u8 intr_mask;
-
- intr_mask = inb((unsigned long)host_set->mmio_base + NV_INT_ENABLE);
-
- intr_mask &= ~(NV_INT_ENABLE_HOTPLUG);
-
- outb(intr_mask, (unsigned long)host_set->mmio_base + NV_INT_ENABLE);
-}
-
-static void nv_check_hotplug(struct ata_host_set *host_set)
-{
- u8 intr_status;
-
- intr_status = inb((unsigned long)host_set->mmio_base + NV_INT_STATUS);
-
- // Clear interrupt status.
- outb(0xff, (unsigned long)host_set->mmio_base + NV_INT_STATUS);
-
- if (intr_status & NV_INT_STATUS_HOTPLUG) {
- if (intr_status & NV_INT_STATUS_PDEV_ADDED)
- printk(KERN_WARNING "nv_sata: "
- "Primary device added\n");
-
- if (intr_status & NV_INT_STATUS_PDEV_REMOVED)
- printk(KERN_WARNING "nv_sata: "
- "Primary device removed\n");
-
- if (intr_status & NV_INT_STATUS_SDEV_ADDED)
- printk(KERN_WARNING "nv_sata: "
- "Secondary device added\n");
-
- if (intr_status & NV_INT_STATUS_SDEV_REMOVED)
- printk(KERN_WARNING "nv_sata: "
- "Secondary device removed\n");
- }
-}
-
-static void nv_enable_hotplug_ck804(struct ata_probe_ent *probe_ent)
-{
- u8 intr_mask;
- u8 regval;
-
- pci_read_config_byte(probe_ent->pdev, NV_MCP_SATA_CFG_20, ®val);
- regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
- pci_write_config_byte(probe_ent->pdev, NV_MCP_SATA_CFG_20, regval);
-
- writeb(NV_INT_STATUS_HOTPLUG, probe_ent->mmio_base + NV_INT_STATUS_CK804);
-
- intr_mask = readb(probe_ent->mmio_base + NV_INT_ENABLE_CK804);
- intr_mask |= NV_INT_ENABLE_HOTPLUG;
-
- writeb(intr_mask, probe_ent->mmio_base + NV_INT_ENABLE_CK804);
-}
-
-static void nv_disable_hotplug_ck804(struct ata_host_set *host_set)
-{
- u8 intr_mask;
- u8 regval;
-
- intr_mask = readb(host_set->mmio_base + NV_INT_ENABLE_CK804);
-
- intr_mask &= ~(NV_INT_ENABLE_HOTPLUG);
-
- writeb(intr_mask, host_set->mmio_base + NV_INT_ENABLE_CK804);
-
- pci_read_config_byte(host_set->pdev, NV_MCP_SATA_CFG_20, ®val);
- regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
- pci_write_config_byte(host_set->pdev, NV_MCP_SATA_CFG_20, regval);
-}
-
-static void nv_check_hotplug_ck804(struct ata_host_set *host_set)
-{
- u8 intr_status;
-
- intr_status = readb(host_set->mmio_base + NV_INT_STATUS_CK804);
-
- // Clear interrupt status.
- writeb(0xff, host_set->mmio_base + NV_INT_STATUS_CK804);
-
- if (intr_status & NV_INT_STATUS_HOTPLUG) {
- if (intr_status & NV_INT_STATUS_PDEV_ADDED)
- printk(KERN_WARNING "nv_sata: "
- "Primary device added\n");
-
- if (intr_status & NV_INT_STATUS_PDEV_REMOVED)
- printk(KERN_WARNING "nv_sata: "
- "Primary device removed\n");
-
- if (intr_status & NV_INT_STATUS_SDEV_ADDED)
- printk(KERN_WARNING "nv_sata: "
- "Secondary device added\n");
-
- if (intr_status & NV_INT_STATUS_SDEV_REMOVED)
- printk(KERN_WARNING "nv_sata: "
- "Secondary device removed\n");
- }
-}
-
static int __init nv_init(void)
{
return pci_module_init(&nv_pci_driver);
case SCSI_IOCTL_GET_BUS_NUMBER:
return scsi_ioctl(sdp, cmd, p);
default:
- error = scsi_cmd_ioctl(filp, disk, cmd, p);
+ error = scsi_cmd_ioctl(disk, cmd, p);
if (error != -ENOTTY)
return error;
}
int res;
int retval;
- nonseekable_open(inode, filp);
SCSI_LOG_TIMEOUT(3, printk("sg_open: dev=%d, flags=0x%x\n", dev, flags));
sdp = sg_get_dev(dev);
if ((!sdp) || (!sdp->device))
return -ENXIO;
SCSI_LOG_TIMEOUT(3, printk("sg_read: %s, count=%d\n",
sdp->disk->disk_name, (int) count));
+ if (ppos != &filp->f_pos) ; /* FIXME: Hmm. Seek to the right place, or fail? */
if ((k = verify_area(VERIFY_WRITE, buf, count)))
return k;
if (sfp->force_packid && (count >= SZ_SG_HEADER)) {
if (!((filp->f_flags & O_NONBLOCK) ||
scsi_block_when_processing_errors(sdp->device)))
return -ENXIO;
+ if (ppos != &filp->f_pos) ; /* FIXME: Hmm. Seek to the right place, or fail? */
if ((k = verify_area(VERIFY_READ, buf, count)))
return k; /* protects following copy_from_user()s + get_user()s */
case SCSI_IOCTL_GET_BUS_NUMBER:
return scsi_ioctl(sdev, cmd, (void __user *)arg);
}
- return cdrom_ioctl(file, &cd->cdi, inode, cmd, arg);
+ return cdrom_ioctl(&cd->cdi, inode, cmd, arg);
}
static int sr_block_media_changed(struct gendisk *disk)
""
};
+ /* Set read only initially */
+ set_disk_ro(cd->disk, 1);
+
/* allocate a request for the TEST_UNIT_READY */
SRpnt = scsi_allocate_request(cd->device, GFP_KERNEL);
if (!SRpnt) {
if ((cd->cdi.mask & (CDC_DVD_RAM | CDC_MRW_W | CDC_RAM)) !=
(CDC_DVD_RAM | CDC_MRW_W | CDC_RAM)) {
cd->device->writeable = 1;
+ set_disk_ro(cd->disk, 0);
}
scsi_release_request(SRpnt);
int dev = TAPE_NR(inode);
char *name;
- nonseekable_open(inode, filp);
write_lock(&st_dev_arr_lock);
if (dev >= st_dev_max || scsi_tapes == NULL ||
((STp = scsi_tapes[dev]) == NULL)) {
}
\f
/* The checks common to both reading and writing */
-static ssize_t rw_checks(Scsi_Tape *STp, struct file *filp, size_t count)
+static ssize_t rw_checks(Scsi_Tape *STp, struct file *filp, size_t count, loff_t *ppos)
{
ssize_t retval = 0;
goto out;
}
+ if (ppos != &filp->f_pos) {
+ /* "A request was outside the capabilities of the device." */
+ retval = (-ENXIO);
+ goto out;
+ }
+
if (STp->ready != ST_READY) {
if (STp->ready == ST_NO_TAPE)
retval = (-ENOMEDIUM);
if (down_interruptible(&STp->lock))
return -ERESTARTSYS;
- retval = rw_checks(STp, filp, count);
+ retval = rw_checks(STp, filp, count, ppos);
if (retval || count == 0)
goto out;
if (down_interruptible(&STp->lock))
return -ERESTARTSYS;
- retval = rw_checks(STp, filp, count);
+ retval = rw_checks(STp, filp, count, ppos);
if (retval || count == 0)
goto out;
case SCSI_IOCTL_GET_BUS_NUMBER:
break;
default:
- i = scsi_cmd_ioctl(file, STp->disk, cmd_in, p);
+ i = scsi_cmd_ioctl(STp->disk, cmd_in, p);
if (i != -ENOTTY)
return i;
break;
for (i = 0; i < st_dev_max; i++) {
tpnt = scsi_tapes[i];
if (tpnt != NULL && tpnt->device == SDp) {
- scsi_tapes[i] = NULL;
+ scsi_tapes[i] = 0;
st_nr_dev--;
write_unlock(&st_dev_arr_lock);
devfs_unregister_tape(tpnt->disk->number);
return &sym_fw1;
#endif
else
- return NULL;
+ return 0;
}
/*
}
/* Revert everything */
- SYM_UCMD_PTR(cmd)->eh_wait = NULL;
+ SYM_UCMD_PTR(cmd)->eh_wait = 0;
cmd->scsi_done = ep->old_done;
/* Wake up the eh thread if it wants to sleep */
/* On error, restore everything and cross fingers :) */
if (sts) {
- SYM_UCMD_PTR(cmd)->eh_wait = NULL;
+ SYM_UCMD_PTR(cmd)->eh_wait = 0;
cmd->scsi_done = ep->old_done;
to_do = SYM_EH_DO_IGNORE;
}
char **start, off_t offset, int length, int func)
{
struct host_data *host_data;
- struct sym_hcb *np = NULL;
+ struct sym_hcb *np = 0;
int retv;
host_data = (struct host_data *) host->hostdata;
static struct sym_driver_setup
sym_driver_safe_setup __initdata = SYM_LINUX_DRIVER_SAFE_SETUP;
#ifdef MODULE
-char *sym53c8xx; /* command line passed by insmod */
+char *sym53c8xx = 0; /* command line passed by insmod */
MODULE_PARM(sym53c8xx, "s");
#endif
static __inline m_addr_t sym_m_get_dma_mem_cluster(m_pool_p mp, m_vtob_p vbp)
{
- void *vaddr = NULL;
+ void *vaddr = 0;
dma_addr_t baddr = 0;
vaddr = pci_alloc_consistent(mp->dev_dmat,SYM_MEM_CLUSTER_SIZE, &baddr);
} else {
script_ofs = dsp;
script_size = 0;
- script_base = NULL;
+ script_base = 0;
script_name = "mem";
}
return chip;
}
- return NULL;
+ return 0;
}
#if SYM_CONF_DMA_ADDRESSING_MODE == 2
* try to find the interrupted script command,
* and the address at which to continue.
*/
- vdsp = NULL;
+ vdsp = 0;
nxtdsp = 0;
if (dsp > np->scripta_ba &&
dsp <= np->scripta_ba + np->scripta_sz) {
* we are not in race.
*/
i = 0;
- cp = NULL;
+ cp = 0;
FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
if (cp->host_status != HS_BUSY &&
* abort for this target.
*/
i = 0;
- cp = NULL;
+ cp = 0;
FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
if (cp->host_status != HS_DISCONNECT)
else if (dp_scr == SCRIPTA_BA (np, pm1_data))
pm = &cp->phys.pm1;
else
- pm = NULL;
+ pm = 0;
if (pm) {
dp_scr = scr_to_cpu(pm->ret);
* used for negotiation, clear this info in the tcb.
*/
if (cp == tp->nego_cp)
- tp->nego_cp = NULL;
+ tp->nego_cp = 0;
#ifdef SYM_CONF_IARB_SUPPORT
/*
/*
* Make this CCB available.
*/
- cp->cam_ccb = NULL;
+ cp->cam_ccb = 0;
cp->host_status = HS_IDLE;
sym_remque(&cp->link_ccbq);
sym_insque_head(&cp->link_ccbq, &np->free_ccbq);
*/
static ccb_p sym_alloc_ccb(hcb_p np)
{
- ccb_p cp = NULL;
+ ccb_p cp = 0;
int hcode;
/*
* queue to the controller.
*/
if (np->actccbs >= SYM_CONF_MAX_START)
- return NULL;
+ return 0;
/*
* Allocate memory for this CCB.
sym_mfree_dma(cp->sns_bbuf,SYM_SNS_BBUF_LEN,"SNS_BBUF");
sym_mfree_dma(cp, sizeof(*cp), "CCB");
}
- return NULL;
+ return 0;
}
/*
* allocation for not probed LUNs.
*/
if (!sym_is_bit(tp->lun_map, ln))
- return NULL;
+ return 0;
/*
* Initialize the target control block if not yet.
lp->cb_tags = sym_calloc(SYM_CONF_MAX_TASK, "CB_TAGS");
if (!lp->cb_tags) {
sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK*4, "ITLQ_TBL");
- lp->itlq_tbl = NULL;
+ lp->itlq_tbl = 0;
goto fail;
}
/*
* Look up our CCB control block.
*/
- cp = NULL;
+ cp = 0;
FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
ccb_p cp2 = sym_que_entry(qp, struct sym_ccb, link_ccbq);
if (cp2->cam_ccb == ccb) {
* LUN(s) > 0.
*/
#if SYM_CONF_MAX_LUN <= 1
-#define sym_lp(np, tp, lun) (!lun) ? (tp)->lun0p : NULL
+#define sym_lp(np, tp, lun) (!lun) ? (tp)->lun0p : 0
#else
#define sym_lp(np, tp, lun) \
- (!lun) ? (tp)->lun0p : (tp)->lunmp ? (tp)->lunmp[(lun)] : NULL
+ (!lun) ? (tp)->lun0p : (tp)->lunmp ? (tp)->lunmp[(lun)] : 0
#endif
/*
m_link_p h = mp->h;
if (size > SYM_MEM_CLUSTER_SIZE)
- return NULL;
+ return 0;
while (size > s) {
s <<= 1;
if (s == SYM_MEM_CLUSTER_SIZE) {
h[j].next = (m_link_p) M_GET_MEM_CLUSTER();
if (h[j].next)
- h[j].next->next = NULL;
+ h[j].next->next = 0;
break;
}
++j;
j -= 1;
s >>= 1;
h[j].next = (m_link_p) (a+s);
- h[j].next->next = NULL;
+ h[j].next->next = 0;
}
}
#ifdef DEBUG
#ifdef SYM_MEM_FREE_UNUSED
static struct sym_m_pool mp0 =
- {NULL, ___mp0_get_mem_cluster, ___mp0_free_mem_cluster};
+ {0, ___mp0_get_mem_cluster, ___mp0_free_mem_cluster};
#else
static struct sym_m_pool mp0 =
- {NULL, ___mp0_get_mem_cluster};
+ {0, ___mp0_get_mem_cluster};
#endif
/*
/* Create a new memory DMAable pool (when fetch failed) */
static m_pool_p ___cre_dma_pool(m_pool_ident_t dev_dmat)
{
- m_pool_p mp = NULL;
+ m_pool_p mp = 0;
mp = __sym_calloc(&mp0, sizeof(*mp), "MPOOL");
if (mp) {
}
if (mp)
__sym_mfree(&mp0, mp, sizeof(*mp), "MPOOL");
- return NULL;
+ return 0;
}
#ifdef SYM_MEM_FREE_UNUSED
void *__sym_calloc_dma_unlocked(m_pool_ident_t dev_dmat, int size, char *name)
{
m_pool_p mp;
- void *m = NULL;
+ void *m = 0;
mp = ___get_dma_pool(dev_dmat);
if (!mp)
{
m_pool_p mp;
int hc = VTOB_HASH_CODE(m);
- m_vtob_p vp = NULL;
+ m_vtob_p vp = 0;
m_addr_t a = ((m_addr_t) m) & ~SYM_MEM_CLUSTER_MASK;
mp = ___get_dma_pool(dev_dmat);
if (elem != head)
__sym_que_del(head, elem->flink);
else
- elem = NULL;
+ elem = 0;
return elem;
}
u_char *gpcntl)
{
OUTB (nc_gpcntl, *gpcntl & 0xfe);
- S24C16_do_bit(np, NULL, write_bit, gpreg);
+ S24C16_do_bit(np, 0, write_bit, gpreg);
OUTB (nc_gpcntl, *gpcntl);
}
int x;
for (x = 0; x < 8; x++)
- S24C16_do_bit(np, NULL, (write_data >> (7 - x)) & 0x01, gpreg);
+ S24C16_do_bit(np, 0, (write_data >> (7 - x)) & 0x01, gpreg);
S24C16_read_ack(np, ack_data, gpreg, gpcntl);
}
if (elem != head)
__xpt_que_del(head, elem->flink);
else
- elem = NULL;
+ elem = 0;
return elem;
}
m_link_s *h = mp->h;
if (size > (PAGE_SIZE << MEMO_PAGE_ORDER))
- return NULL;
+ return 0;
while (size > s) {
s <<= 1;
if (s == (PAGE_SIZE << MEMO_PAGE_ORDER)) {
h[j].next = (m_link_s *) M_GETP();
if (h[j].next)
- h[j].next->next = NULL;
+ h[j].next->next = 0;
break;
}
++j;
j -= 1;
s >>= 1;
h[j].next = (m_link_s *) (a+s);
- h[j].next->next = NULL;
+ h[j].next->next = 0;
}
}
#ifdef DEBUG
--mp->nump;
}
-static m_pool_s mp0 = {NULL, ___mp0_getp, ___mp0_freep};
+static m_pool_s mp0 = {0, ___mp0_getp, ___mp0_freep};
/*
* DMAable pools.
{
u_long flags;
struct m_pool *mp;
- void *m = NULL;
+ void *m = 0;
NCR_LOCK_DRIVER(flags);
mp = ___get_dma_pool(bush);
u_long flags;
m_pool_s *mp;
int hc = VTOB_HASH_CODE(m);
- m_vtob_s *vp = NULL;
+ m_vtob_s *vp = 0;
m_addr_t a = ((m_addr_t) m) & ~MEMO_CLUSTER_MASK;
NCR_LOCK_DRIVER(flags);
pdev = pACB->pdev;
pci_read_config_word(pdev, PCI_STATUS, &pstat);
printk ("DC390: Register dump: PCI Status: %04x\n", pstat);
- printk ("DC390: In case of driver trouble read Documentation/scsi/tmscsim.txt\n");
+ printk ("DC390: In case of driver trouble read linux/Documentation/scsi/tmscsim.txt\n");
}
return SCSI_ABORT_NOT_RUNNING;
if (config.mscp[mscp_index].SCint != SCpnt) panic("Bad abort");
- config.mscp[mscp_index].SCint = NULL;
+ config.mscp[mscp_index].SCint = 0;
done = config.mscp[mscp_index].done;
- config.mscp[mscp_index].done = NULL;
+ config.mscp[mscp_index].done = 0;
SCpnt->result = DID_ABORT << 16;
/* Take the host lock to guard against scsi layer re-entry */
{
config.mscp[i].SCint->result = DID_RESET << 16;
config.mscp[i].done(config.mscp[i].SCint);
- config.mscp[i].done = NULL;
+ config.mscp[i].done = 0;
}
- config.mscp[i].SCint = NULL;
+ config.mscp[i].SCint = 0;
}
#endif
if (icm_status == 3) {
void (*done)(Scsi_Cmnd *) = mscp->done;
if (done) {
- mscp->done = NULL;
+ mscp->done = 0;
mscp->SCint->result = DID_ABORT << 16;
done(mscp->SCint);
}
once we call done, we may get another command queued before this
interrupt service routine can return. */
done = mscp->done;
- mscp->done = NULL;
+ mscp->done = 0;
/* Let the higher levels know that we're done */
switch (mscp->adapter_status)
SCtmp->result = status | mscp->target_status;
- SCtmp->host_scribble = NULL;
+ SCtmp->host_scribble = 0;
/* Free up mscp block for next command */
#if ULTRASTOR_MAX_CMDS == 1
static int __devinit pci_xircom_init(struct pci_dev *dev)
{
- msleep(100);
+ __set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(HZ/10);
return 0;
}
# The new 8250/16550 serial drivers
config SERIAL_8250
tristate "8250/16550 and compatible serial support"
- depends on (BROKEN || !SPARC64)
select SERIAL_CORE
---help---
This selects whether you want to include the driver for the standard
help
Select the is option to use SMC2 as a serial port
-config SERIAL_SGI_L1_CONSOLE
- bool "SGI Altix L1 serial console support"
- depends on IA64_GENERIC || IA64_SGI_SN2
- select SERIAL_CORE
- help
- If you have an SGI Altix and you would like to use the system
- controller serial port as your console (you want this!),
- say Y. Otherwise, say N.
-
-config SERIAL_MPC52xx
- tristate "Freescale MPC52xx family PSC serial support"
- depends on PPC_MPC52xx
- select SERIAL_CORE
- help
- This drivers support the MPC52xx PSC serial ports. If you would
- like to use them, you must answer Y or M to this option. Not that
- for use as console, it must be included in kernel and not as a
- module.
-
-config SERIAL_MPC52xx_CONSOLE
- bool "Console on a Freescale MPC52xx family PSC serial port"
- depends on SERIAL_MPC52xx=y
- select SERIAL_CORE_CONSOLE
- help
- Select this options if you'd like to use one of the PSC serial port
- of the Freescale MPC52xx family as a console.
-
-config SERIAL_MPC52xx_CONSOLE_BAUD
- int "Freescale MPC52xx family PSC serial port baud"
- depends on SERIAL_MPC52xx_CONSOLE=y
- default "9600"
- help
- Select the MPC52xx console baud rate.
- This value is only used if the bootloader doesn't pass in the
- console baudrate
-
endmenu
+
obj-$(CONFIG_SERIAL_DZ) += dz.o
obj-$(CONFIG_SERIAL_SH_SCI) += sh-sci.o
obj-$(CONFIG_SERIAL_BAST_SIO) += bast_sio.o
-obj-$(CONFIG_SERIAL_SGI_L1_CONSOLE) += sn_console.o
obj-$(CONFIG_SERIAL_CPM) += cpm_uart/
-obj-$(CONFIG_SERIAL_MPC52xx) += mpc52xx_uart.o
/**************************************************************/
static int cpm_uart_tx_pump(struct uart_port *port);
-static void cpm_uart_init_smc(struct uart_cpm_port *pinfo);
-static void cpm_uart_init_scc(struct uart_cpm_port *pinfo);
-static void cpm_uart_initbd(struct uart_cpm_port *pinfo);
+static void cpm_uart_init_smc(struct uart_cpm_port *pinfo, int bits, u16 cval);
+static void cpm_uart_init_scc(struct uart_cpm_port *pinfo, int sbits, u16 sval);
/**************************************************************/
pr_debug("CPM uart[%d]:start tx\n", port->line);
+ /* if in the middle of discarding return */
+ if (IS_DISCARDING(pinfo))
+ return;
+
if (IS_SMC(pinfo)) {
if (smcp->smc_smcm & SMCM_TX)
return;
static int cpm_uart_startup(struct uart_port *port)
{
int retval;
- struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
pr_debug("CPM uart[%d]:startup\n", port->line);
if (retval)
return retval;
- /* Startup rx-int */
- if (IS_SMC(pinfo)) {
- pinfo->smcp->smc_smcm |= SMCM_RX;
- pinfo->smcp->smc_smcmr |= SMCMR_REN;
- } else {
- pinfo->sccp->scc_sccm |= UART_SCCM_RX;
- }
-
return 0;
}
}
/* Shut them really down and reinit buffer descriptors */
- cpm_line_cr_cmd(line, CPM_CR_STOP_TX);
- cpm_uart_initbd(pinfo);
+ cpm_line_cr_cmd(line, CPM_CR_INIT_TRX);
}
}
{
int baud;
unsigned long flags;
- u16 cval, scval, prev_mode;
+ u16 cval, scval;
int bits, sbits;
struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
- volatile smc_t *smcp = pinfo->smcp;
- volatile scc_t *sccp = pinfo->sccp;
+ int line = pinfo - cpm_uart_ports;
+ volatile cbd_t *bdp;
pr_debug("CPM uart[%d]:set_termios\n", port->line);
+ spin_lock_irqsave(&port->lock, flags);
+ /* disable uart interrupts */
+ if (IS_SMC(pinfo))
+ pinfo->smcp->smc_smcm &= ~(SMCM_RX | SMCM_TX);
+ else
+ pinfo->sccp->scc_sccm &= ~(UART_SCCM_TX | UART_SCCM_RX);
+ pinfo->flags |= FLAG_DISCARDING;
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ /* if previous configuration exists wait for tx to finish */
+ if (pinfo->baud != 0 && pinfo->bits != 0) {
+
+ /* point to the last txed bd */
+ bdp = pinfo->tx_cur;
+ if (bdp == pinfo->tx_bd_base)
+ bdp = pinfo->tx_bd_base + (pinfo->tx_nrfifos - 1);
+ else
+ bdp--;
+
+ /* wait for it to be transmitted */
+ while ((bdp->cbd_sc & BD_SC_READY) != 0)
+ schedule();
+
+ /* and delay for the hw fifo to drain */
+ udelay((3 * 1000000 * pinfo->bits) / pinfo->baud);
+ }
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ /* Send the CPM an initialize command. */
+ cpm_line_cr_cmd(line, CPM_CR_STOP_TX);
+
+ /* Stop uart */
+ if (IS_SMC(pinfo))
+ pinfo->smcp->smc_smcmr &= ~(SMCMR_REN | SMCMR_TEN);
+ else
+ pinfo->sccp->scc_gsmrl &= ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT);
+
+ /* Send the CPM an initialize command. */
+ cpm_line_cr_cmd(line, CPM_CR_INIT_TRX);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
/* Character length programmed into the mode register is the
spin_lock_irqsave(&port->lock, flags);
+ cpm_set_brg(pinfo->brg - 1, baud);
+
/* Start bit has not been added (so don't, because we would just
* subtract it later), and we need to add one for the number of
* stops bits (there is always at least one).
*/
bits++;
- if (IS_SMC(pinfo)) {
- /* Set the mode register. We want to keep a copy of the
- * enables, because we want to put them back if they were
- * present.
- */
- prev_mode = smcp->smc_smcmr;
- smcp->smc_smcmr = smcr_mk_clen(bits) | cval | SMCMR_SM_UART;
- smcp->smc_smcmr |= (prev_mode & (SMCMR_REN | SMCMR_TEN));
- } else {
- sccp->scc_psmr = (sbits << 12) | scval;
- }
- cpm_set_brg(pinfo->brg - 1, baud);
+ /* re-init */
+ if (IS_SMC(pinfo))
+ cpm_uart_init_smc(pinfo, bits, cval);
+ else
+ cpm_uart_init_scc(pinfo, sbits, scval);
+
+ pinfo->baud = baud;
+ pinfo->bits = bits;
+
+ pinfo->flags &= ~FLAG_DISCARDING;
spin_unlock_irqrestore(&port->lock, flags);
}
return 1;
}
-/*
- * init buffer descriptors
- */
-static void cpm_uart_initbd(struct uart_cpm_port *pinfo)
+static void cpm_uart_init_scc(struct uart_cpm_port *pinfo, int bits, u16 scval)
{
- int i;
+ int line = pinfo - cpm_uart_ports;
+ volatile scc_t *scp;
+ volatile scc_uart_t *sup;
u8 *mem_addr;
volatile cbd_t *bdp;
+ int i;
- pr_debug("CPM uart[%d]:initbd\n", pinfo->port.line);
+ pr_debug("CPM uart[%d]:init_scc\n", pinfo->port.line);
+
+ scp = pinfo->sccp;
+ sup = pinfo->sccup;
/* Set the physical address of the host memory
* buffers in the buffer descriptors, and the
* virtual address for us to work with.
*/
+ pinfo->rx_cur = pinfo->rx_bd_base;
mem_addr = pinfo->mem_addr;
- bdp = pinfo->rx_cur = pinfo->rx_bd_base;
- for (i = 0; i < (pinfo->rx_nrfifos - 1); i++, bdp++) {
+ for (bdp = pinfo->rx_bd_base, i = 0; i < pinfo->rx_nrfifos; i++, bdp++) {
bdp->cbd_bufaddr = virt_to_bus(mem_addr);
- bdp->cbd_sc = BD_SC_EMPTY | BD_SC_INTRPT;
+ bdp->cbd_sc = BD_SC_EMPTY | BD_SC_INTRPT | (i < (pinfo->rx_nrfifos - 1) ? 0 : BD_SC_WRAP);
mem_addr += pinfo->rx_fifosize;
}
-
- bdp->cbd_bufaddr = virt_to_bus(mem_addr);
- bdp->cbd_sc = BD_SC_WRAP | BD_SC_EMPTY | BD_SC_INTRPT;
/* Set the physical address of the host memory
* buffers in the buffer descriptors, and the
* virtual address for us to work with.
*/
mem_addr = pinfo->mem_addr + L1_CACHE_ALIGN(pinfo->rx_nrfifos * pinfo->rx_fifosize);
- bdp = pinfo->tx_cur = pinfo->tx_bd_base;
- for (i = 0; i < (pinfo->tx_nrfifos - 1); i++, bdp++) {
+ pinfo->tx_cur = pinfo->tx_bd_base;
+ for (bdp = pinfo->tx_bd_base, i = 0; i < pinfo->tx_nrfifos; i++, bdp++) {
bdp->cbd_bufaddr = virt_to_bus(mem_addr);
- bdp->cbd_sc = BD_SC_INTRPT;
+ bdp->cbd_sc = BD_SC_INTRPT | (i < (pinfo->tx_nrfifos - 1) ? 0 : BD_SC_WRAP);
mem_addr += pinfo->tx_fifosize;
+ bdp++;
}
-
- bdp->cbd_bufaddr = virt_to_bus(mem_addr);
- bdp->cbd_sc = BD_SC_WRAP | BD_SC_INTRPT;
-}
-
-static void cpm_uart_init_scc(struct uart_cpm_port *pinfo)
-{
- int line = pinfo - cpm_uart_ports;
- volatile scc_t *scp;
- volatile scc_uart_t *sup;
-
- pr_debug("CPM uart[%d]:init_scc\n", pinfo->port.line);
-
- scp = pinfo->sccp;
- sup = pinfo->sccup;
/* Store address */
pinfo->sccup->scc_genscc.scc_rbase = (unsigned char *)pinfo->rx_bd_base - DPRAM_BASE;
(SCC_GSMRL_MODE_UART | SCC_GSMRL_TDCR_16 | SCC_GSMRL_RDCR_16);
/* Enable rx interrupts and clear all pending events. */
- scp->scc_sccm = 0;
+ scp->scc_sccm = UART_SCCM_RX;
scp->scc_scce = 0xffff;
scp->scc_dsr = 0x7e7e;
- scp->scc_psmr = 0x3000;
+ scp->scc_psmr = (bits << 12) | scval;
scp->scc_gsmrl |= (SCC_GSMRL_ENR | SCC_GSMRL_ENT);
}
-static void cpm_uart_init_smc(struct uart_cpm_port *pinfo)
+static void cpm_uart_init_smc(struct uart_cpm_port *pinfo, int bits, u16 cval)
{
int line = pinfo - cpm_uart_ports;
volatile smc_t *sp;
volatile smc_uart_t *up;
+ volatile u8 *mem_addr;
+ volatile cbd_t *bdp;
+ int i;
pr_debug("CPM uart[%d]:init_smc\n", pinfo->port.line);
sp = pinfo->smcp;
up = pinfo->smcup;
+ /* Set the physical address of the host memory
+ * buffers in the buffer descriptors, and the
+ * virtual address for us to work with.
+ */
+ mem_addr = pinfo->mem_addr;
+ pinfo->rx_cur = pinfo->rx_bd_base;
+ for (bdp = pinfo->rx_bd_base, i = 0; i < pinfo->rx_nrfifos; i++, bdp++) {
+ bdp->cbd_bufaddr = virt_to_bus(mem_addr);
+ bdp->cbd_sc = BD_SC_EMPTY | BD_SC_INTRPT | (i < (pinfo->rx_nrfifos - 1) ? 0 : BD_SC_WRAP);
+ mem_addr += pinfo->rx_fifosize;
+ }
+
+ /* Set the physical address of the host memory
+ * buffers in the buffer descriptors, and the
+ * virtual address for us to work with.
+ */
+ mem_addr = pinfo->mem_addr + L1_CACHE_ALIGN(pinfo->rx_nrfifos * pinfo->rx_fifosize);
+ pinfo->tx_cur = pinfo->tx_bd_base;
+ for (bdp = pinfo->tx_bd_base, i = 0; i < pinfo->tx_nrfifos; i++, bdp++) {
+ bdp->cbd_bufaddr = virt_to_bus(mem_addr);
+ bdp->cbd_sc = BD_SC_INTRPT | (i < (pinfo->tx_nrfifos - 1) ? 0 : BD_SC_WRAP);
+ mem_addr += pinfo->tx_fifosize;
+ }
+
/* Store address */
pinfo->smcup->smc_rbase = (u_char *)pinfo->rx_bd_base - DPRAM_BASE;
pinfo->smcup->smc_tbase = (u_char *)pinfo->tx_bd_base - DPRAM_BASE;
cpm_line_cr_cmd(line, CPM_CR_INIT_TRX);
- /* Set UART mode, 8 bit, no parity, one stop.
- * Enable receive and transmit.
- */
- sp->smc_smcmr = smcr_mk_clen(9) | SMCMR_SM_UART;
+ /* Set UART mode, according to the parameters */
+ sp->smc_smcmr = smcr_mk_clen(bits) | cval | SMCMR_SM_UART;
/* Enable only rx interrupts clear all pending events. */
- sp->smc_smcm = 0;
+ sp->smc_smcm = SMCM_RX;
sp->smc_smce = 0xff;
sp->smc_smcmr |= (SMCMR_REN | SMCMR_TEN);
if (pinfo->set_lineif)
pinfo->set_lineif(pinfo);
- if (IS_SMC(pinfo)) {
- pinfo->smcp->smc_smcm &= ~(SMCM_RX | SMCM_TX);
- pinfo->smcp->smc_smcmr &= ~(SMCMR_REN | SMCMR_TEN);
- } else {
- pinfo->sccp->scc_sccm &= ~(UART_SCCM_TX | UART_SCCM_RX);
- pinfo->sccp->scc_gsmrl &= ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT);
- }
-
ret = cpm_uart_allocbuf(pinfo, 0);
-
if (ret)
return ret;
- cpm_uart_initbd(pinfo);
-
return 0;
}
volatile cbd_t *bdp, *bdbase;
volatile unsigned char *cp;
+ if (IS_DISCARDING(pinfo))
+ return;
+
/* Get the address of the host memory buffer.
*/
bdp = pinfo->tx_cur;
if (pinfo->set_lineif)
pinfo->set_lineif(pinfo);
- if (IS_SMC(pinfo)) {
- pinfo->smcp->smc_smcm &= ~(SMCM_RX | SMCM_TX);
- pinfo->smcp->smc_smcmr &= ~(SMCMR_REN | SMCMR_TEN);
- } else {
- pinfo->sccp->scc_sccm &= ~(UART_SCCM_TX | UART_SCCM_RX);
- pinfo->sccp->scc_gsmrl &= ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT);
- }
-
ret = cpm_uart_allocbuf(pinfo, 1);
-
if (ret)
return ret;
- cpm_uart_initbd(pinfo);
-
- if (IS_SMC(pinfo))
- cpm_uart_init_smc(pinfo);
- else
- cpm_uart_init_scc(pinfo);
-
uart_set_options(port, co, baud, parity, bits, flow);
return 0;
{
int dpmemsz, memsz;
u8 *dp_mem;
- uint dp_offset;
+ uint dp_addr;
u8 *mem_addr;
- dma_addr_t dma_addr = 0;
+ dma_addr_t dma_addr;
pr_debug("CPM uart[%d]:allocbuf\n", pinfo->port.line);
dpmemsz = sizeof(cbd_t) * (pinfo->rx_nrfifos + pinfo->tx_nrfifos);
- dp_offset = cpm_dpalloc(dpmemsz, 8);
- if (IS_DPERR(dp_offset)) {
+ dp_mem = m8xx_cpm_dpalloc(dpmemsz);
+ if (dp_mem == NULL) {
printk(KERN_ERR
"cpm_uart_cpm1.c: could not allocate buffer descriptors\n");
return -ENOMEM;
}
- dp_mem = cpm_dpram_addr(dp_offset);
+ dp_addr = m8xx_cpm_dpram_offset(dp_mem);
memsz = L1_CACHE_ALIGN(pinfo->rx_nrfifos * pinfo->rx_fifosize) +
L1_CACHE_ALIGN(pinfo->tx_nrfifos * pinfo->tx_fifosize);
GFP_KERNEL);
if (mem_addr == NULL) {
- cpm_dpfree(dp_offset);
+ m8xx_cpm_dpfree(dp_mem);
printk(KERN_ERR
"cpm_uart_cpm1.c: could not allocate coherent memory\n");
return -ENOMEM;
}
- pinfo->dp_addr = dp_offset;
+ pinfo->dp_addr = dp_addr;
pinfo->mem_addr = mem_addr;
pinfo->dma_addr = dma_addr;
pinfo->tx_fifosize), pinfo->mem_addr,
pinfo->dma_addr);
- cpm_dpfree(pinfo->dp_addr);
+ m8xx_cpm_dpfree(m8xx_cpm_dpram_addr(pinfo->dp_addr));
}
/* Setup any dynamic params in the uart desc */
static inline void cpm_set_brg(int brg, int baud)
{
- cpm_setbrg(brg, baud);
+ m8xx_cpm_setbrg(brg, baud);
}
static inline void cpm_set_scc_fcr(volatile scc_uart_t * sup)
{
int dpmemsz, memsz;
u8 *dp_mem;
- uint dp_offset;
+ uint dp_addr;
u8 *mem_addr;
dma_addr_t dma_addr = 0;
pr_debug("CPM uart[%d]:allocbuf\n", pinfo->port.line);
dpmemsz = sizeof(cbd_t) * (pinfo->rx_nrfifos + pinfo->tx_nrfifos);
- dp_offset = cpm_dpalloc(dpmemsz, 8);
- if (IS_DPERR(dp_offset)) {
+ dp_mem = cpm2_dpalloc(dpmemsz, 8);
+ if (dp_mem == NULL) {
printk(KERN_ERR
- "cpm_uart_cpm.c: could not allocate buffer descriptors\n");
+ "cpm_uart_cpm1.c: could not allocate buffer descriptors\n");
return -ENOMEM;
}
- dp_mem = cpm_dpram_addr(dp_offset);
+ dp_addr = cpm2_dpram_offset(dp_mem);
memsz = L1_CACHE_ALIGN(pinfo->rx_nrfifos * pinfo->rx_fifosize) +
L1_CACHE_ALIGN(pinfo->tx_nrfifos * pinfo->tx_fifosize);
GFP_KERNEL);
if (mem_addr == NULL) {
- cpm_dpfree(dp_offset);
+ cpm2_dpfree(dp_mem);
printk(KERN_ERR
- "cpm_uart_cpm.c: could not allocate coherent memory\n");
+ "cpm_uart_cpm1.c: could not allocate coherent memory\n");
return -ENOMEM;
}
- pinfo->dp_addr = dp_offset;
+ pinfo->dp_addr = dp_addr;
pinfo->mem_addr = mem_addr;
pinfo->dma_addr = dma_addr;
pinfo->tx_fifosize), pinfo->mem_addr,
pinfo->dma_addr);
- cpm_dpfree(pinfo->dp_addr);
+ cpm2_dpfree(cpm2_dpram_addr(pinfo->dp_addr));
}
/* Setup any dynamic params in the uart desc */
static inline void cpm_set_brg(int brg, int baud)
{
- cpm_setbrg(brg, baud);
+ cpm2_setbrg(brg, baud);
}
static inline void cpm_set_scc_fcr(volatile scc_uart_t * sup)
+++ /dev/null
-/*
- * drivers/serial/mpc52xx_uart.c
- *
- * Driver for the PSC of the Freescale MPC52xx PSCs configured as UARTs.
- *
- * FIXME According to the usermanual the status bits in the status register
- * are only updated when the peripherals access the FIFO and not when the
- * CPU access them. So since we use this bits to know when we stop writing
- * and reading, they may not be updated in-time and a race condition may
- * exists. But I haven't be able to prove this and I don't care. But if
- * any problem arises, it might worth checking. The TX/RX FIFO Stats
- * registers should be used in addition.
- * Update: Actually, they seem updated ... At least the bits we use.
- *
- *
- * Maintainer : Sylvain Munaut <tnt@246tNt.com>
- *
- * Some of the code has been inspired/copied from the 2.4 code written
- * by Dale Farnsworth <dfarnsworth@mvista.com>.
- *
- * Copyright (C) 2004 Sylvain Munaut <tnt@246tNt.com>
- * Copyright (C) 2003 MontaVista, Software, Inc.
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-/* OCP Usage :
- *
- * This drivers uses the OCP model. To load the serial driver for one of the
- * PSCs, just add this to the core_ocp table :
- *
- * {
- * .vendor = OCP_VENDOR_FREESCALE,
- * .function = OCP_FUNC_PSC_UART,
- * .index = 0,
- * .paddr = MPC52xx_PSC1,
- * .irq = MPC52xx_PSC1_IRQ,
- * .pm = OCP_CPM_NA,
- * },
- *
- * This is for PSC1, replace the paddr and irq according to the PSC you want to
- * use. The driver all necessary registers to place the PSC in uart mode without
- * DCD. However, the pin multiplexing aren't changed and should be set either
- * by the bootloader or in the platform init code.
- * The index field must be equal to the PSC index ( e.g. 0 for PSC1, 1 for PSC2,
- * and so on). So the PSC1 is mapped to /dev/ttyS0, PSC2 to /dev/ttyS1 and so
- * on. But be warned, it's an ABSOLUTE REQUIREMENT ! This is needed mainly for
- * the console code : without this 1:1 mapping, at early boot time, when we are
- * parsing the kernel args console=ttyS?, we wouldn't know wich PSC it will be
- * mapped to because OCP stuff is not yet initialized.
- */
-
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/tty.h>
-#include <linux/serial.h>
-#include <linux/sysrq.h>
-#include <linux/console.h>
-
-#include <asm/delay.h>
-#include <asm/io.h>
-#include <asm/ocp.h>
-
-#include <asm/mpc52xx.h>
-#include <asm/mpc52xx_psc.h>
-
-#if defined(CONFIG_SERIAL_MPC52xx_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
-#include <linux/serial_core.h>
-
-
-
-#define ISR_PASS_LIMIT 256 /* Max number of iteration in the interrupt */
-
-
-static struct uart_port mpc52xx_uart_ports[MPC52xx_PSC_MAXNUM];
- /* Rem: - We use the read_status_mask as a shadow of
- * psc->mpc52xx_psc_imr
- * - It's important that is array is all zero on start as we
- * use it to know if it's initialized or not ! If it's not sure
- * it's cleared, then a memset(...,0,...) should be added to
- * the console_init
- */
-
-#define PSC(port) ((struct mpc52xx_psc *)((port)->membase))
-
-
-/* Forward declaration of the interruption handling routine */
-static irqreturn_t mpc52xx_uart_int(int irq,void *dev_id,struct pt_regs *regs);
-
-
-/* Simple macro to test if a port is console or not. This one is taken
- * for serial_core.c and maybe should be moved to serial_core.h ? */
-#ifdef CONFIG_SERIAL_CORE_CONSOLE
-#define uart_console(port) ((port)->cons && (port)->cons->index == (port)->line)
-#else
-#define uart_console(port) (0)
-#endif
-
-
-/* ======================================================================== */
-/* UART operations */
-/* ======================================================================== */
-
-static unsigned int
-mpc52xx_uart_tx_empty(struct uart_port *port)
-{
- int status = in_be16(&PSC(port)->mpc52xx_psc_status);
- return (status & MPC52xx_PSC_SR_TXEMP) ? TIOCSER_TEMT : 0;
-}
-
-static void
-mpc52xx_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
-{
- /* Not implemented */
-}
-
-static unsigned int
-mpc52xx_uart_get_mctrl(struct uart_port *port)
-{
- /* Not implemented */
- return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
-}
-
-static void
-mpc52xx_uart_stop_tx(struct uart_port *port, unsigned int tty_stop)
-{
- /* port->lock taken by caller */
- port->read_status_mask &= ~MPC52xx_PSC_IMR_TXRDY;
- out_be16(&PSC(port)->mpc52xx_psc_imr,port->read_status_mask);
-}
-
-static void
-mpc52xx_uart_start_tx(struct uart_port *port, unsigned int tty_start)
-{
- /* port->lock taken by caller */
- port->read_status_mask |= MPC52xx_PSC_IMR_TXRDY;
- out_be16(&PSC(port)->mpc52xx_psc_imr,port->read_status_mask);
-}
-
-static void
-mpc52xx_uart_send_xchar(struct uart_port *port, char ch)
-{
- unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
-
- port->x_char = ch;
- if (ch) {
- /* Make sure tx interrupts are on */
- /* Truly necessary ??? They should be anyway */
- port->read_status_mask |= MPC52xx_PSC_IMR_TXRDY;
- out_be16(&PSC(port)->mpc52xx_psc_imr,port->read_status_mask);
- }
-
- spin_unlock_irqrestore(&port->lock, flags);
-}
-
-static void
-mpc52xx_uart_stop_rx(struct uart_port *port)
-{
- /* port->lock taken by caller */
- port->read_status_mask &= ~MPC52xx_PSC_IMR_RXRDY;
- out_be16(&PSC(port)->mpc52xx_psc_imr,port->read_status_mask);
-}
-
-static void
-mpc52xx_uart_enable_ms(struct uart_port *port)
-{
- /* Not implemented */
-}
-
-static void
-mpc52xx_uart_break_ctl(struct uart_port *port, int ctl)
-{
- unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
-
- if ( ctl == -1 )
- out_8(&PSC(port)->command,MPC52xx_PSC_START_BRK);
- else
- out_8(&PSC(port)->command,MPC52xx_PSC_STOP_BRK);
-
- spin_unlock_irqrestore(&port->lock, flags);
-}
-
-static int
-mpc52xx_uart_startup(struct uart_port *port)
-{
- struct mpc52xx_psc *psc = PSC(port);
-
- /* Reset/activate the port, clear and enable interrupts */
- out_8(&psc->command,MPC52xx_PSC_RST_RX);
- out_8(&psc->command,MPC52xx_PSC_RST_TX);
-
- out_be32(&psc->sicr,0); /* UART mode DCD ignored */
-
- out_be16(&psc->mpc52xx_psc_clock_select, 0xdd00); /* /16 prescaler on */
-
- out_8(&psc->rfcntl, 0x00);
- out_be16(&psc->rfalarm, 0x1ff);
- out_8(&psc->tfcntl, 0x07);
- out_be16(&psc->tfalarm, 0x80);
-
- port->read_status_mask |= MPC52xx_PSC_IMR_RXRDY | MPC52xx_PSC_IMR_TXRDY;
- out_be16(&psc->mpc52xx_psc_imr,port->read_status_mask);
-
- out_8(&psc->command,MPC52xx_PSC_TX_ENABLE);
- out_8(&psc->command,MPC52xx_PSC_RX_ENABLE);
-
- return 0;
-}
-
-static void
-mpc52xx_uart_shutdown(struct uart_port *port)
-{
- struct mpc52xx_psc *psc = PSC(port);
-
- /* Shut down the port, interrupt and all */
- out_8(&psc->command,MPC52xx_PSC_RST_RX);
- out_8(&psc->command,MPC52xx_PSC_RST_TX);
-
- port->read_status_mask = 0;
- out_be16(&psc->mpc52xx_psc_imr,port->read_status_mask);
-}
-
-static void
-mpc52xx_uart_set_termios(struct uart_port *port, struct termios *new,
- struct termios *old)
-{
- struct mpc52xx_psc *psc = PSC(port);
- unsigned long flags;
- unsigned char mr1, mr2;
- unsigned short ctr;
- unsigned int j, baud, quot;
-
- /* Prepare what we're gonna write */
- mr1 = 0;
-
- switch (new->c_cflag & CSIZE) {
- case CS5: mr1 |= MPC52xx_PSC_MODE_5_BITS;
- break;
- case CS6: mr1 |= MPC52xx_PSC_MODE_6_BITS;
- break;
- case CS7: mr1 |= MPC52xx_PSC_MODE_7_BITS;
- break;
- case CS8:
- default: mr1 |= MPC52xx_PSC_MODE_8_BITS;
- }
-
- if (new->c_cflag & PARENB) {
- mr1 |= (new->c_cflag & PARODD) ?
- MPC52xx_PSC_MODE_PARODD : MPC52xx_PSC_MODE_PAREVEN;
- } else
- mr1 |= MPC52xx_PSC_MODE_PARNONE;
-
-
- mr2 = 0;
-
- if (new->c_cflag & CSTOPB)
- mr2 |= MPC52xx_PSC_MODE_TWO_STOP;
- else
- mr2 |= ((new->c_cflag & CSIZE) == CS5) ?
- MPC52xx_PSC_MODE_ONE_STOP_5_BITS :
- MPC52xx_PSC_MODE_ONE_STOP;
-
-
- baud = uart_get_baud_rate(port, new, old, 0, port->uartclk/16);
- quot = uart_get_divisor(port, baud);
- ctr = quot & 0xffff;
-
- /* Get the lock */
- spin_lock_irqsave(&port->lock, flags);
-
- /* Update the per-port timeout */
- uart_update_timeout(port, new->c_cflag, baud);
-
- /* Do our best to flush TX & RX, so we don't loose anything */
- /* But we don't wait indefinitly ! */
- j = 5000000; /* Maximum wait */
- /* FIXME Can't receive chars since set_termios might be called at early
- * boot for the console, all stuff is not yet ready to receive at that
- * time and that just makes the kernel oops */
- /* while (j-- && mpc52xx_uart_int_rx_chars(port)); */
- while (!(in_be16(&psc->mpc52xx_psc_status) & MPC52xx_PSC_SR_TXEMP) &&
- --j)
- udelay(1);
-
- if (!j)
- printk( KERN_ERR "mpc52xx_uart.c: "
- "Unable to flush RX & TX fifos in-time in set_termios."
- "Some chars may have been lost.\n" );
-
- /* Reset the TX & RX */
- out_8(&psc->command,MPC52xx_PSC_RST_RX);
- out_8(&psc->command,MPC52xx_PSC_RST_TX);
-
- /* Send new mode settings */
- out_8(&psc->command,MPC52xx_PSC_SEL_MODE_REG_1);
- out_8(&psc->mode,mr1);
- out_8(&psc->mode,mr2);
- out_8(&psc->ctur,ctr >> 8);
- out_8(&psc->ctlr,ctr & 0xff);
-
- /* Reenable TX & RX */
- out_8(&psc->command,MPC52xx_PSC_TX_ENABLE);
- out_8(&psc->command,MPC52xx_PSC_RX_ENABLE);
-
- /* We're all set, release the lock */
- spin_unlock_irqrestore(&port->lock, flags);
-}
-
-static const char *
-mpc52xx_uart_type(struct uart_port *port)
-{
- return port->type == PORT_MPC52xx ? "MPC52xx PSC" : NULL;
-}
-
-static void
-mpc52xx_uart_release_port(struct uart_port *port)
-{
- if (port->flags & UPF_IOREMAP) { /* remapped by us ? */
- iounmap(port->membase);
- port->membase = NULL;
- }
-}
-
-static int
-mpc52xx_uart_request_port(struct uart_port *port)
-{
- if (port->flags & UPF_IOREMAP) /* Need to remap ? */
- port->membase = ioremap(port->mapbase, sizeof(struct mpc52xx_psc));
-
- return port->membase != NULL ? 0 : -EBUSY;
-}
-
-static void
-mpc52xx_uart_config_port(struct uart_port *port, int flags)
-{
- if ( (flags & UART_CONFIG_TYPE) &&
- (mpc52xx_uart_request_port(port) == 0) )
- port->type = PORT_MPC52xx;
-}
-
-static int
-mpc52xx_uart_verify_port(struct uart_port *port, struct serial_struct *ser)
-{
- if ( ser->type != PORT_UNKNOWN && ser->type != PORT_MPC52xx )
- return -EINVAL;
-
- if ( (ser->irq != port->irq) ||
- (ser->io_type != SERIAL_IO_MEM) ||
- (ser->baud_base != port->uartclk) ||
- // FIXME Should check addresses/irq as well ?
- (ser->hub6 != 0 ) )
- return -EINVAL;
-
- return 0;
-}
-
-
-static struct uart_ops mpc52xx_uart_ops = {
- .tx_empty = mpc52xx_uart_tx_empty,
- .set_mctrl = mpc52xx_uart_set_mctrl,
- .get_mctrl = mpc52xx_uart_get_mctrl,
- .stop_tx = mpc52xx_uart_stop_tx,
- .start_tx = mpc52xx_uart_start_tx,
- .send_xchar = mpc52xx_uart_send_xchar,
- .stop_rx = mpc52xx_uart_stop_rx,
- .enable_ms = mpc52xx_uart_enable_ms,
- .break_ctl = mpc52xx_uart_break_ctl,
- .startup = mpc52xx_uart_startup,
- .shutdown = mpc52xx_uart_shutdown,
- .set_termios = mpc52xx_uart_set_termios,
-/* .pm = mpc52xx_uart_pm, Not supported yet */
-/* .set_wake = mpc52xx_uart_set_wake, Not supported yet */
- .type = mpc52xx_uart_type,
- .release_port = mpc52xx_uart_release_port,
- .request_port = mpc52xx_uart_request_port,
- .config_port = mpc52xx_uart_config_port,
- .verify_port = mpc52xx_uart_verify_port
-};
-
-
-/* ======================================================================== */
-/* Interrupt handling */
-/* ======================================================================== */
-
-static inline int
-mpc52xx_uart_int_rx_chars(struct uart_port *port, struct pt_regs *regs)
-{
- struct tty_struct *tty = port->info->tty;
- unsigned char ch;
- unsigned short status;
-
- /* While we can read, do so ! */
- while ( (status = in_be16(&PSC(port)->mpc52xx_psc_status)) &
- MPC52xx_PSC_SR_RXRDY) {
-
- /* If we are full, just stop reading */
- if (tty->flip.count >= TTY_FLIPBUF_SIZE)
- break;
-
- /* Get the char */
- ch = in_8(&PSC(port)->mpc52xx_psc_buffer_8);
-
- /* Handle sysreq char */
-#ifdef SUPPORT_SYSRQ
- if (uart_handle_sysrq_char(port, ch, regs)) {
- port->sysrq = 0;
- continue;
- }
-#endif
-
- /* Store it */
- *tty->flip.char_buf_ptr = ch;
- *tty->flip.flag_buf_ptr = 0;
- port->icount.rx++;
-
- if ( status & (MPC52xx_PSC_SR_PE |
- MPC52xx_PSC_SR_FE |
- MPC52xx_PSC_SR_RB |
- MPC52xx_PSC_SR_OE) ) {
-
- if (status & MPC52xx_PSC_SR_RB) {
- *tty->flip.flag_buf_ptr = TTY_BREAK;
- uart_handle_break(port);
- } else if (status & MPC52xx_PSC_SR_PE)
- *tty->flip.flag_buf_ptr = TTY_PARITY;
- else if (status & MPC52xx_PSC_SR_FE)
- *tty->flip.flag_buf_ptr = TTY_FRAME;
- if (status & MPC52xx_PSC_SR_OE) {
- /*
- * Overrun is special, since it's
- * reported immediately, and doesn't
- * affect the current character
- */
- if (tty->flip.count < (TTY_FLIPBUF_SIZE-1)) {
- tty->flip.flag_buf_ptr++;
- tty->flip.char_buf_ptr++;
- tty->flip.count++;
- }
- *tty->flip.flag_buf_ptr = TTY_OVERRUN;
- }
-
- /* Clear error condition */
- out_8(&PSC(port)->command,MPC52xx_PSC_RST_ERR_STAT);
-
- }
-
- tty->flip.char_buf_ptr++;
- tty->flip.flag_buf_ptr++;
- tty->flip.count++;
-
- }
-
- tty_flip_buffer_push(tty);
-
- return in_be16(&PSC(port)->mpc52xx_psc_status) & MPC52xx_PSC_SR_RXRDY;
-}
-
-static inline int
-mpc52xx_uart_int_tx_chars(struct uart_port *port)
-{
- struct circ_buf *xmit = &port->info->xmit;
-
- /* Process out of band chars */
- if (port->x_char) {
- out_8(&PSC(port)->mpc52xx_psc_buffer_8, port->x_char);
- port->icount.tx++;
- port->x_char = 0;
- return 1;
- }
-
- /* Nothing to do ? */
- if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
- mpc52xx_uart_stop_tx(port,0);
- return 0;
- }
-
- /* Send chars */
- while (in_be16(&PSC(port)->mpc52xx_psc_status) & MPC52xx_PSC_SR_TXRDY) {
- out_8(&PSC(port)->mpc52xx_psc_buffer_8, xmit->buf[xmit->tail]);
- xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
- port->icount.tx++;
- if (uart_circ_empty(xmit))
- break;
- }
-
- /* Wake up */
- if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
- uart_write_wakeup(port);
-
- /* Maybe we're done after all */
- if (uart_circ_empty(xmit)) {
- mpc52xx_uart_stop_tx(port,0);
- return 0;
- }
-
- return 1;
-}
-
-static irqreturn_t
-mpc52xx_uart_int(int irq, void *dev_id, struct pt_regs *regs)
-{
- struct uart_port *port = (struct uart_port *) dev_id;
- unsigned long pass = ISR_PASS_LIMIT;
- unsigned int keepgoing;
- unsigned short status;
-
- if ( irq != port->irq ) {
- printk( KERN_WARNING
- "mpc52xx_uart_int : " \
- "Received wrong int %d. Waiting for %d\n",
- irq, port->irq);
- return IRQ_NONE;
- }
-
- spin_lock(&port->lock);
-
- /* While we have stuff to do, we continue */
- do {
- /* If we don't find anything to do, we stop */
- keepgoing = 0;
-
- /* Read status */
- status = in_be16(&PSC(port)->mpc52xx_psc_isr);
- status &= port->read_status_mask;
-
- /* Do we need to receive chars ? */
- /* For this RX interrupts must be on and some chars waiting */
- if ( status & MPC52xx_PSC_IMR_RXRDY )
- keepgoing |= mpc52xx_uart_int_rx_chars(port, regs);
-
- /* Do we need to send chars ? */
- /* For this, TX must be ready and TX interrupt enabled */
- if ( status & MPC52xx_PSC_IMR_TXRDY )
- keepgoing |= mpc52xx_uart_int_tx_chars(port);
-
- /* Limit number of iteration */
- if ( !(--pass) )
- keepgoing = 0;
-
- } while (keepgoing);
-
- spin_unlock(&port->lock);
-
- return IRQ_HANDLED;
-}
-
-
-/* ======================================================================== */
-/* Console ( if applicable ) */
-/* ======================================================================== */
-
-#ifdef CONFIG_SERIAL_MPC52xx_CONSOLE
-
-static void __init
-mpc52xx_console_get_options(struct uart_port *port,
- int *baud, int *parity, int *bits, int *flow)
-{
- struct mpc52xx_psc *psc = PSC(port);
- unsigned char mr1;
-
- /* Read the mode registers */
- out_8(&psc->command,MPC52xx_PSC_SEL_MODE_REG_1);
- mr1 = in_8(&psc->mode);
-
- /* CT{U,L}R are write-only ! */
- *baud = __res.bi_baudrate ?
- __res.bi_baudrate : CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD;
-
- /* Parse them */
- switch (mr1 & MPC52xx_PSC_MODE_BITS_MASK) {
- case MPC52xx_PSC_MODE_5_BITS: *bits = 5; break;
- case MPC52xx_PSC_MODE_6_BITS: *bits = 6; break;
- case MPC52xx_PSC_MODE_7_BITS: *bits = 7; break;
- case MPC52xx_PSC_MODE_8_BITS:
- default: *bits = 8;
- }
-
- if (mr1 & MPC52xx_PSC_MODE_PARNONE)
- *parity = 'n';
- else
- *parity = mr1 & MPC52xx_PSC_MODE_PARODD ? 'o' : 'e';
-}
-
-static void
-mpc52xx_console_write(struct console *co, const char *s, unsigned int count)
-{
- struct uart_port *port = &mpc52xx_uart_ports[co->index];
- struct mpc52xx_psc *psc = PSC(port);
- unsigned int i, j;
-
- /* Disable interrupts */
- out_be16(&psc->mpc52xx_psc_imr, 0);
-
- /* Wait the TX buffer to be empty */
- j = 5000000; /* Maximum wait */
- while (!(in_be16(&psc->mpc52xx_psc_status) & MPC52xx_PSC_SR_TXEMP) &&
- --j)
- udelay(1);
-
- /* Write all the chars */
- for ( i=0 ; i<count ; i++ ) {
-
- /* Send the char */
- out_8(&psc->mpc52xx_psc_buffer_8, *s);
-
- /* Line return handling */
- if ( *s++ == '\n' )
- out_8(&psc->mpc52xx_psc_buffer_8, '\r');
-
- /* Wait the TX buffer to be empty */
- j = 20000; /* Maximum wait */
- while (!(in_be16(&psc->mpc52xx_psc_status) &
- MPC52xx_PSC_SR_TXEMP) && --j)
- udelay(1);
- }
-
- /* Restore interrupt state */
- out_be16(&psc->mpc52xx_psc_imr, port->read_status_mask);
-}
-
-static int __init
-mpc52xx_console_setup(struct console *co, char *options)
-{
- struct uart_port *port = &mpc52xx_uart_ports[co->index];
-
- int baud = 9600;
- int bits = 8;
- int parity = 'n';
- int flow = 'n';
-
- if (co->index < 0 || co->index >= MPC52xx_PSC_MAXNUM)
- return -EINVAL;
-
- /* Basic port init. Needed since we use some uart_??? func before
- * real init for early access */
- port->lock = SPIN_LOCK_UNLOCKED;
- port->uartclk = __res.bi_ipbfreq / 2; /* Look at CTLR doc */
- port->ops = &mpc52xx_uart_ops;
- port->mapbase = MPC52xx_PSCx(co->index);
-
- /* We ioremap ourself */
- port->membase = ioremap(port->mapbase, sizeof(struct mpc52xx_psc));
- if (port->membase == NULL) {
- release_mem_region(port->mapbase, sizeof(struct mpc52xx_psc));
- return -EBUSY;
- }
-
- /* Setup the port parameters accoding to options */
- if (options)
- uart_parse_options(options, &baud, &parity, &bits, &flow);
- else
- mpc52xx_console_get_options(port, &baud, &parity, &bits, &flow);
-
- return uart_set_options(port, co, baud, parity, bits, flow);
-}
-
-
-extern struct uart_driver mpc52xx_uart_driver;
-
-static struct console mpc52xx_console = {
- .name = "ttyS",
- .write = mpc52xx_console_write,
- .device = uart_console_device,
- .setup = mpc52xx_console_setup,
- .flags = CON_PRINTBUFFER,
- .index = -1, /* Specified on the cmdline (e.g. console=ttyS0 ) */
- .data = &mpc52xx_uart_driver,
-};
-
-
-static int __init
-mpc52xx_console_init(void)
-{
- register_console(&mpc52xx_console);
- return 0;
-}
-
-console_initcall(mpc52xx_console_init);
-
-#define MPC52xx_PSC_CONSOLE &mpc52xx_console
-#else
-#define MPC52xx_PSC_CONSOLE NULL
-#endif
-
-
-/* ======================================================================== */
-/* UART Driver */
-/* ======================================================================== */
-
-static struct uart_driver mpc52xx_uart_driver = {
- .owner = THIS_MODULE,
- .driver_name = "mpc52xx_psc_uart",
- .dev_name = "ttyS",
- .devfs_name = "ttyS",
- .major = TTY_MAJOR,
- .minor = 64,
- .nr = MPC52xx_PSC_MAXNUM,
- .cons = MPC52xx_PSC_CONSOLE,
-};
-
-
-/* ======================================================================== */
-/* OCP Driver */
-/* ======================================================================== */
-
-static int __devinit
-mpc52xx_uart_probe(struct ocp_device *ocp)
-{
- struct uart_port *port = NULL;
- int idx, ret;
-
- /* Get the corresponding port struct */
- idx = ocp->def->index;
- if (idx < 0 || idx >= MPC52xx_PSC_MAXNUM)
- return -EINVAL;
-
- port = &mpc52xx_uart_ports[idx];
-
- /* Init the port structure */
- port->lock = SPIN_LOCK_UNLOCKED;
- port->mapbase = ocp->def->paddr;
- port->irq = ocp->def->irq;
- port->uartclk = __res.bi_ipbfreq / 2; /* Look at CTLR doc */
- port->fifosize = 255; /* Should be 512 ! But it can't be */
- /* stored in a unsigned char */
- port->iotype = UPIO_MEM;
- port->flags = UPF_BOOT_AUTOCONF |
- ( uart_console(port) ? 0 : UPF_IOREMAP );
- port->line = idx;
- port->ops = &mpc52xx_uart_ops;
- port->read_status_mask = 0;
-
- /* Requests the mem & irqs */
- /* Unlike other serial drivers, we reserve the resources here, so we
- * can detect early if multiple drivers uses the same PSC. Special
- * care must be taken with the console PSC
- */
- ret = request_irq(
- port->irq, mpc52xx_uart_int,
- SA_INTERRUPT | SA_SAMPLE_RANDOM, "mpc52xx_psc_uart", port);
- if (ret)
- goto error;
-
- ret = request_mem_region(port->mapbase, sizeof(struct mpc52xx_psc),
- "mpc52xx_psc_uart") != NULL ? 0 : -EBUSY;
- if (ret)
- goto free_irq;
-
- /* Add the port to the uart sub-system */
- ret = uart_add_one_port(&mpc52xx_uart_driver, port);
- if (ret)
- goto release_mem;
-
- ocp_set_drvdata(ocp, (void*)port);
-
- return 0;
-
-
-free_irq:
- free_irq(port->irq, mpc52xx_uart_int);
-
-release_mem:
- release_mem_region(port->mapbase, sizeof(struct mpc52xx_psc));
-
-error:
- if (uart_console(port))
- printk( "mpc52xx_uart.c: Error during resource alloction for "
- "the console port !!! Check that the console PSC is "
- "not used by another OCP driver !!!\n" );
-
- return ret;
-}
-
-static void
-mpc52xx_uart_remove(struct ocp_device *ocp)
-{
- struct uart_port *port = (struct uart_port *) ocp_get_drvdata(ocp);
-
- ocp_set_drvdata(ocp, NULL);
-
- if (port) {
- uart_remove_one_port(&mpc52xx_uart_driver, port);
- release_mem_region(port->mapbase, sizeof(struct mpc52xx_psc));
- free_irq(port->irq, mpc52xx_uart_int);
- }
-}
-
-#ifdef CONFIG_PM
-static int
-mpc52xx_uart_suspend(struct ocp_device *ocp, u32 state)
-{
- struct uart_port *port = (struct uart_port *) ocp_get_drvdata(ocp);
-
- uart_suspend_port(&mpc52xx_uart_driver, port);
-
- return 0;
-}
-
-static int
-mpc52xx_uart_resume(struct ocp_device *ocp)
-{
- struct uart_port *port = (struct uart_port *) ocp_get_drvdata(ocp);
-
- uart_resume_port(&mpc52xx_uart_driver, port);
-
- return 0;
-}
-#endif
-
-static struct ocp_device_id mpc52xx_uart_ids[] __devinitdata = {
- { .vendor = OCP_VENDOR_FREESCALE, .function = OCP_FUNC_PSC_UART },
- { .vendor = OCP_VENDOR_INVALID /* Terminating entry */ }
-};
-
-MODULE_DEVICE_TABLE(ocp, mpc52xx_uart_ids);
-
-static struct ocp_driver mpc52xx_uart_ocp_driver = {
- .name = "mpc52xx_psc_uart",
- .id_table = mpc52xx_uart_ids,
- .probe = mpc52xx_uart_probe,
- .remove = mpc52xx_uart_remove,
-#ifdef CONFIG_PM
- .suspend = mpc52xx_uart_suspend,
- .resume = mpc52xx_uart_resume,
-#endif
-};
-
-
-/* ======================================================================== */
-/* Module */
-/* ======================================================================== */
-
-static int __init
-mpc52xx_uart_init(void)
-{
- int ret;
-
- printk(KERN_INFO "Serial: MPC52xx PSC driver\n");
-
- ret = uart_register_driver(&mpc52xx_uart_driver);
- if (ret)
- return ret;
-
- ret = ocp_register_driver(&mpc52xx_uart_ocp_driver);
-
- return ret;
-}
-
-static void __exit
-mpc52xx_uart_exit(void)
-{
- ocp_unregister_driver(&mpc52xx_uart_ocp_driver);
- uart_unregister_driver(&mpc52xx_uart_driver);
-}
-
-
-module_init(mpc52xx_uart_init);
-module_exit(mpc52xx_uart_exit);
-
-MODULE_AUTHOR("Sylvain Munaut <tnt@246tNt.com>");
-MODULE_DESCRIPTION("Freescale MPC52xx PSC UART");
-MODULE_LICENSE("GPL");
if (tty->flip.count >= TTY_FLIPBUF_SIZE)
drop = 1;
if (ZS_IS_ASLEEP(uap))
- return NULL;
+ return 0;
if (!ZS_IS_OPEN(uap))
goto retry;
}
ioremap(np->addrs[np->n_addrs - 1].address, 0x1000);
if (uap->rx_dma_regs == NULL) {
iounmap((void *)uap->tx_dma_regs);
- uap->tx_dma_regs = NULL;
uap->flags &= ~PMACZILOG_FLAG_HAS_DMA;
goto no_dma;
}
uap->port.ops = &pmz_pops;
uap->port.type = PORT_PMAC_ZILOG;
uap->port.flags = 0;
+ spin_lock_init(&uap->port.lock);
/* Setup some valid baud rate information in the register
* shadows so we don't write crap there before baud rate is
{
struct device_node *np;
- np = uap->node;
- iounmap((void *)uap->rx_dma_regs);
- iounmap((void *)uap->tx_dma_regs);
iounmap((void *)uap->control_reg);
+ np = uap->node;
uap->node = NULL;
of_node_put(np);
- memset(uap, 0, sizeof(struct uart_pmac_port));
}
/*
* Register this driver with the serial core
*/
rc = uart_register_driver(&pmz_uart_reg);
- if (rc)
+ if (rc != 0)
return rc;
/*
struct uart_pmac_port *uport = &pmz_ports[i];
/* NULL node may happen on wallstreet */
if (uport->node != NULL)
- rc = uart_add_one_port(&pmz_uart_reg, &uport->port);
- if (rc)
- goto err_out;
+ uart_add_one_port(&pmz_uart_reg, &uport->port);
}
return 0;
-err_out:
- while (i-- > 0) {
- struct uart_pmac_port *uport = &pmz_ports[i];
- uart_remove_one_port(&pmz_uart_reg, &uport->port);
- }
- uart_unregister_driver(&pmz_uart_reg);
- return rc;
}
static struct of_match pmz_match[] =
static int __init init_pmz(void)
{
- int rc, i;
printk(KERN_INFO "%s\n", version);
/*
/*
* Now we register with the serial layer
*/
- rc = pmz_register();
- if (rc) {
- printk(KERN_ERR
- "pmac_zilog: Error registering serial device, disabling pmac_zilog.\n"
- "pmac_zilog: Did another serial driver already claim the minors?\n");
- /* effectively "pmz_unprobe()" */
- for (i=0; i < pmz_ports_count; i++)
- pmz_dispose_port(&pmz_ports[i]);
- return rc;
- }
+ pmz_register();
/*
* Then we register the macio driver itself
+++ /dev/null
-/*
- * C-Brick Serial Port (and console) driver for SGI Altix machines.
- *
- * This driver is NOT suitable for talking to the l1-controller for
- * anything other than 'console activities' --- please use the l1
- * driver for that.
- *
- *
- * Copyright (c) 2004 Silicon Graphics, Inc. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- *
- * Further, this software is distributed without any warranty that it is
- * free of the rightful claim of any third person regarding infringement
- * or the like. Any license provided herein, whether implied or
- * otherwise, applies only to this software file. Patent licenses, if
- * any, provided herein do not apply to combinations of this program with
- * other software, or any other product whatsoever.
- *
- * You should have received a copy of the GNU General Public
- * License along with this program; if not, write the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- * Contact information: Silicon Graphics, Inc., 1500 Crittenden Lane,
- * Mountain View, CA 94043, or:
- *
- * http://www.sgi.com
- *
- * For further information regarding this notice, see:
- *
- * http://oss.sgi.com/projects/GenInfo/NoticeExplan
- */
-
-#include <linux/config.h>
-#include <linux/interrupt.h>
-#include <linux/tty.h>
-#include <linux/serial.h>
-#include <linux/console.h>
-#include <linux/module.h>
-#include <linux/sysrq.h>
-#include <linux/circ_buf.h>
-#include <linux/serial_reg.h>
-#include <linux/delay.h> /* for mdelay */
-#include <linux/miscdevice.h>
-#include <linux/serial_core.h>
-
-#include <asm/sn/simulator.h>
-#include <asm/sn/sn2/sn_private.h>
-#include <asm/sn/sn_sal.h>
-
-/* number of characters we can transmit to the SAL console at a time */
-#define SN_SAL_MAX_CHARS 120
-
-/* 64K, when we're asynch, it must be at least printk's LOG_BUF_LEN to
- * avoid losing chars, (always has to be a power of 2) */
-#define SN_SAL_BUFFER_SIZE (64 * (1 << 10))
-
-#define SN_SAL_UART_FIFO_DEPTH 16
-#define SN_SAL_UART_FIFO_SPEED_CPS 9600/10
-
-/* sn_transmit_chars() calling args */
-#define TRANSMIT_BUFFERED 0
-#define TRANSMIT_RAW 1
-
-/* To use dynamic numbers only and not use the assigned major and minor,
- * define the following.. */
-/* #define USE_DYNAMIC_MINOR 1 */ /* use dynamic minor number */
-#define USE_DYNAMIC_MINOR 0 /* Don't rely on misc_register dynamic minor */
-
-/* Device name we're using */
-#define DEVICE_NAME "ttySG"
-#define DEVICE_NAME_DYNAMIC "ttySG0" /* need full name for misc_register */
-/* The major/minor we are using, ignored for USE_DYNAMIC_MINOR */
-#define DEVICE_MAJOR 204
-#define DEVICE_MINOR 40
-
-/*
- * Port definition - this kinda drives it all
- */
-struct sn_cons_port {
- struct timer_list sc_timer;
- struct uart_port sc_port;
- struct sn_sal_ops {
- int (*sal_puts_raw) (const char *s, int len);
- int (*sal_puts) (const char *s, int len);
- int (*sal_getc) (void);
- int (*sal_input_pending) (void);
- void (*sal_wakeup_transmit) (struct sn_cons_port *, int);
- } *sc_ops;
- unsigned long sc_interrupt_timeout;
- int sc_is_asynch;
-};
-
-static struct sn_cons_port sal_console_port;
-
-/* Only used if USE_DYNAMIC_MINOR is set to 1 */
-static struct miscdevice misc; /* used with misc_register for dynamic */
-
-extern u64 master_node_bedrock_address;
-extern void early_sn_setup(void);
-
-#undef DEBUG
-#ifdef DEBUG
-static int sn_debug_printf(const char *fmt, ...);
-#define DPRINTF(x...) sn_debug_printf(x)
-#else
-#define DPRINTF(x...) do { } while (0)
-#endif
-
-/* Prototypes */
-static int snt_hw_puts_raw(const char *, int);
-static int snt_hw_puts_buffered(const char *, int);
-static int snt_poll_getc(void);
-static int snt_poll_input_pending(void);
-static int snt_sim_puts(const char *, int);
-static int snt_sim_getc(void);
-static int snt_sim_input_pending(void);
-static int snt_intr_getc(void);
-static int snt_intr_input_pending(void);
-static void sn_transmit_chars(struct sn_cons_port *, int);
-
-/* A table for polling:
- */
-static struct sn_sal_ops poll_ops = {
- .sal_puts_raw = snt_hw_puts_raw,
- .sal_puts = snt_hw_puts_raw,
- .sal_getc = snt_poll_getc,
- .sal_input_pending = snt_poll_input_pending
-};
-
-/* A table for the simulator */
-static struct sn_sal_ops sim_ops = {
- .sal_puts_raw = snt_sim_puts,
- .sal_puts = snt_sim_puts,
- .sal_getc = snt_sim_getc,
- .sal_input_pending = snt_sim_input_pending
-};
-
-/* A table for interrupts enabled */
-static struct sn_sal_ops intr_ops = {
- .sal_puts_raw = snt_hw_puts_raw,
- .sal_puts = snt_hw_puts_buffered,
- .sal_getc = snt_intr_getc,
- .sal_input_pending = snt_intr_input_pending,
- .sal_wakeup_transmit = sn_transmit_chars
-};
-
-/* the console does output in two distinctly different ways:
- * synchronous (raw) and asynchronous (buffered). initally, early_printk
- * does synchronous output. any data written goes directly to the SAL
- * to be output (incidentally, it is internally buffered by the SAL)
- * after interrupts and timers are initialized and available for use,
- * the console init code switches to asynchronous output. this is
- * also the earliest opportunity to begin polling for console input.
- * after console initialization, console output and tty (serial port)
- * output is buffered and sent to the SAL asynchronously (either by
- * timer callback or by UART interrupt) */
-
-
-/* routines for running the console in polling mode */
-
-/**
- * snt_poll_getc - Get a character from the console in polling mode
- *
- */
-static int
-snt_poll_getc(void)
-{
- int ch;
-
- ia64_sn_console_getc(&ch);
- return ch;
-}
-
-/**
- * snt_poll_input_pending - Check if any input is waiting - polling mode.
- *
- */
-static int
-snt_poll_input_pending(void)
-{
- int status, input;
-
- status = ia64_sn_console_check(&input);
- return !status && input;
-}
-
-/* routines for running the console on the simulator */
-
-/**
- * snt_sim_puts - send to the console, used in simulator mode
- * @str: String to send
- * @count: length of string
- *
- */
-static int
-snt_sim_puts(const char *str, int count)
-{
- int counter = count;
-
-#ifdef FLAG_DIRECT_CONSOLE_WRITES
- /* This is an easy way to pre-pend the output to know whether the output
- * was done via sal or directly */
- writeb('[', master_node_bedrock_address + (UART_TX << 3));
- writeb('+', master_node_bedrock_address + (UART_TX << 3));
- writeb(']', master_node_bedrock_address + (UART_TX << 3));
- writeb(' ', master_node_bedrock_address + (UART_TX << 3));
-#endif /* FLAG_DIRECT_CONSOLE_WRITES */
- while (counter > 0) {
- writeb(*str, master_node_bedrock_address + (UART_TX << 3));
- counter--;
- str++;
- }
- return count;
-}
-
-/**
- * snt_sim_getc - Get character from console in simulator mode
- *
- */
-static int
-snt_sim_getc(void)
-{
- return readb(master_node_bedrock_address + (UART_RX << 3));
-}
-
-/**
- * snt_sim_input_pending - Check if there is input pending in simulator mode
- *
- */
-static int
-snt_sim_input_pending(void)
-{
- return readb(master_node_bedrock_address +
- (UART_LSR << 3)) & UART_LSR_DR;
-}
-
-/* routines for an interrupt driven console (normal) */
-
-/**
- * snt_intr_getc - Get a character from the console, interrupt mode
- *
- */
-static int
-snt_intr_getc(void)
-{
- return ia64_sn_console_readc();
-}
-
-/**
- * snt_intr_input_pending - Check if input is pending, interrupt mode
- *
- */
-static int
-snt_intr_input_pending(void)
-{
- return ia64_sn_console_intr_status() & SAL_CONSOLE_INTR_RECV;
-}
-
-/* these functions are polled and interrupt */
-
-/**
- * snt_hw_puts_raw - Send raw string to the console, polled or interrupt mode
- * @s: String
- * @len: Length
- *
- */
-static int
-snt_hw_puts_raw(const char *s, int len)
-{
- /* this will call the PROM and not return until this is done */
- return ia64_sn_console_putb(s, len);
-}
-
-/**
- * snt_hw_puts_buffered - Send string to console, polled or interrupt mode
- * @s: String
- * @len: Length
- *
- */
-static int
-snt_hw_puts_buffered(const char *s, int len)
-{
- /* queue data to the PROM */
- return ia64_sn_console_xmit_chars((char *)s, len);
-}
-
-/* uart interface structs
- * These functions are associated with the uart_port that the serial core
- * infrastructure calls.
- *
- * Note: Due to how the console works, many routines are no-ops.
- */
-
-/**
- * snp_type - What type of console are we?
- * @port: Port to operate with (we ignore since we only have one port)
- *
- */
-static const char *
-snp_type(struct uart_port *port)
-{
- return ("SGI SN L1");
-}
-
-/**
- * snp_tx_empty - Is the transmitter empty? We pretend we're always empty
- * @port: Port to operate on (we ignore since we only have one port)
- *
- */
-static unsigned int
-snp_tx_empty(struct uart_port *port)
-{
- return 1;
-}
-
-/**
- * snp_stop_tx - stop the transmitter - no-op for us
- * @port: Port to operat eon - we ignore - no-op function
- * @tty_stop: Set to 1 if called via uart_stop
- *
- */
-static void
-snp_stop_tx(struct uart_port *port, unsigned int tty_stop)
-{
-}
-
-/**
- * snp_release_port - Free i/o and resources for port - no-op for us
- * @port: Port to operate on - we ignore - no-op function
- *
- */
-static void
-snp_release_port(struct uart_port *port)
-{
-}
-
-/**
- * snp_enable_ms - Force modem status interrupts on - no-op for us
- * @port: Port to operate on - we ignore - no-op function
- *
- */
-static void
-snp_enable_ms(struct uart_port *port)
-{
-}
-
-/**
- * snp_shutdown - shut down the port - free irq and disable - no-op for us
- * @port: Port to shut down - we ignore
- *
- */
-static void
-snp_shutdown(struct uart_port *port)
-{
-}
-
-/**
- * snp_set_mctrl - set control lines (dtr, rts, etc) - no-op for our console
- * @port: Port to operate on - we ignore
- * @mctrl: Lines to set/unset - we ignore
- *
- */
-static void
-snp_set_mctrl(struct uart_port *port, unsigned int mctrl)
-{
-}
-
-/**
- * snp_get_mctrl - get contorl line info, we just return a static value
- * @port: port to operate on - we only have one port so we ignore this
- *
- */
-static unsigned int
-snp_get_mctrl(struct uart_port *port)
-{
- return TIOCM_CAR | TIOCM_RNG | TIOCM_DSR | TIOCM_CTS;
-}
-
-/**
- * snp_stop_rx - Stop the receiver - we ignor ethis
- * @port: Port to operate on - we ignore
- *
- */
-static void
-snp_stop_rx(struct uart_port *port)
-{
-}
-
-/**
- * snp_start_tx - Start transmitter
- * @port: Port to operate on
- * @tty_stop: Set to 1 if called via uart_start
- *
- */
-static void
-snp_start_tx(struct uart_port *port, unsigned int tty_stop)
-{
- if (sal_console_port.sc_ops->sal_wakeup_transmit)
- sal_console_port.sc_ops->sal_wakeup_transmit(&sal_console_port, TRANSMIT_BUFFERED);
-
-}
-
-/**
- * snp_break_ctl - handle breaks - ignored by us
- * @port: Port to operate on
- * @break_state: Break state
- *
- */
-static void
-snp_break_ctl(struct uart_port *port, int break_state)
-{
-}
-
-/**
- * snp_startup - Start up the serial port - always return 0 (We're always on)
- * @port: Port to operate on
- *
- */
-static int
-snp_startup(struct uart_port *port)
-{
- return 0;
-}
-
-/**
- * snp_set_termios - set termios stuff - we ignore these
- * @port: port to operate on
- * @termios: New settings
- * @termios: Old
- *
- */
-static void
-snp_set_termios(struct uart_port *port, struct termios *termios,
- struct termios *old)
-{
-}
-
-/**
- * snp_request_port - allocate resources for port - ignored by us
- * @port: port to operate on
- *
- */
-static int
-snp_request_port(struct uart_port *port)
-{
- return 0;
-}
-
-/**
- * snp_config_port - allocate resources, set up - we ignore, we're always on
- * @port: Port to operate on
- * @flags: flags used for port setup
- *
- */
-static void
-snp_config_port(struct uart_port *port, int flags)
-{
-}
-
-/* Associate the uart functions above - given to serial core */
-
-static struct uart_ops sn_console_ops = {
- .tx_empty = snp_tx_empty,
- .set_mctrl = snp_set_mctrl,
- .get_mctrl = snp_get_mctrl,
- .stop_tx = snp_stop_tx,
- .start_tx = snp_start_tx,
- .stop_rx = snp_stop_rx,
- .enable_ms = snp_enable_ms,
- .break_ctl = snp_break_ctl,
- .startup = snp_startup,
- .shutdown = snp_shutdown,
- .set_termios = snp_set_termios,
- .pm = NULL,
- .type = snp_type,
- .release_port = snp_release_port,
- .request_port = snp_request_port,
- .config_port = snp_config_port,
- .verify_port = NULL,
-};
-
-/* End of uart struct functions and defines */
-
-#ifdef DEBUG
-
-/**
- * sn_debug_printf - close to hardware debugging printf
- * @fmt: printf format
- *
- * This is as "close to the metal" as we can get, used when the driver
- * itself may be broken.
- *
- */
-static int
-sn_debug_printf(const char *fmt, ...)
-{
- static char printk_buf[1024];
- int printed_len;
- va_list args;
-
- va_start(args, fmt);
- printed_len = vsnprintf(printk_buf, sizeof (printk_buf), fmt, args);
-
- if (!sal_console_port.sc_ops) {
- if (IS_RUNNING_ON_SIMULATOR())
- sal_console_port.sc_ops = &sim_ops;
- else
- sal_console_port.sc_ops = &poll_ops;
-
- early_sn_setup();
- }
- sal_console_port.sc_ops->sal_puts_raw(printk_buf, printed_len);
-
- va_end(args);
- return printed_len;
-}
-#endif /* DEBUG */
-
-/*
- * Interrupt handling routines.
- */
-
-
-/**
- * sn_receive_chars - Grab characters, pass them to tty layer
- * @port: Port to operate on
- * @regs: Saved registers (needed by uart_handle_sysrq_char)
- *
- * Note: If we're not registered with the serial core infrastructure yet,
- * we don't try to send characters to it...
- *
- */
-static void
-sn_receive_chars(struct sn_cons_port *port, struct pt_regs *regs)
-{
- int ch;
- struct tty_struct *tty;
-
- if (!port) {
- printk(KERN_ERR "sn_receive_chars - port NULL so can't receieve\n");
- return;
- }
-
- if (!port->sc_ops) {
- printk(KERN_ERR "sn_receive_chars - port->sc_ops NULL so can't receieve\n");
- return;
- }
-
- if (port->sc_port.info) {
- /* The serial_core stuffs are initilized, use them */
- tty = port->sc_port.info->tty;
- }
- else {
- /* Not registered yet - can't pass to tty layer. */
- tty = NULL;
- }
-
- while (port->sc_ops->sal_input_pending()) {
- ch = port->sc_ops->sal_getc();
- if (ch < 0) {
- printk(KERN_ERR "sn_console: An error occured while "
- "obtaining data from the console (0x%0x)\n", ch);
- break;
- }
-#if defined(CONFIG_SERIAL_SGI_L1_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
- if (uart_handle_sysrq_char(&port->sc_port, ch, regs))
- continue;
-#endif /* CONFIG_SERIAL_SGI_L1_CONSOLE && CONFIG_MAGIC_SYSRQ */
-
- /* record the character to pass up to the tty layer */
- if (tty) {
- *tty->flip.char_buf_ptr = ch;
- *tty->flip.flag_buf_ptr = TTY_NORMAL;
- tty->flip.char_buf_ptr++;
- tty->flip.count++;
- if (tty->flip.count == TTY_FLIPBUF_SIZE)
- break;
- }
- else {
- }
- port->sc_port.icount.rx++;
- }
-
- if (tty)
- tty_flip_buffer_push(tty);
-}
-
-/**
- * sn_transmit_chars - grab characters from serial core, send off
- * @port: Port to operate on
- * @raw: Transmit raw or buffered
- *
- * Note: If we're early, before we're registered with serial core, the
- * writes are going through sn_sal_console_write because that's how
- * register_console has been set up. We currently could have asynch
- * polls calling this function due to sn_sal_switch_to_asynch but we can
- * ignore them until we register with the serial core stuffs.
- *
- */
-static void
-sn_transmit_chars(struct sn_cons_port *port, int raw)
-{
- int xmit_count, tail, head, loops, ii;
- int result;
- char *start;
- struct circ_buf *xmit;
-
- if (!port)
- return;
-
- BUG_ON(!port->sc_is_asynch);
-
- if (port->sc_port.info) {
- /* We're initilized, using serial core infrastructure */
- xmit = &port->sc_port.info->xmit;
- }
- else {
- /* Probably sn_sal_switch_to_asynch has been run but serial core isn't
- * initilized yet. Just return. Writes are going through
- * sn_sal_console_write (due to register_console) at this time.
- */
- return;
- }
-
- if (uart_circ_empty(xmit) || uart_tx_stopped(&port->sc_port)) {
- /* Nothing to do. */
- return;
- }
-
- head = xmit->head;
- tail = xmit->tail;
- start = &xmit->buf[tail];
-
- /* twice around gets the tail to the end of the buffer and
- * then to the head, if needed */
- loops = (head < tail) ? 2 : 1;
-
- for (ii = 0; ii < loops; ii++) {
- xmit_count = (head < tail) ?
- (UART_XMIT_SIZE - tail) : (head - tail);
-
- if (xmit_count > 0) {
- if (raw == TRANSMIT_RAW)
- result =
- port->sc_ops->sal_puts_raw(start,
- xmit_count);
- else
- result =
- port->sc_ops->sal_puts(start, xmit_count);
-#ifdef DEBUG
- if (!result)
- DPRINTF("`");
-#endif
- if (result > 0) {
- xmit_count -= result;
- port->sc_port.icount.tx += result;
- tail += result;
- tail &= UART_XMIT_SIZE - 1;
- xmit->tail = tail;
- start = &xmit->buf[tail];
- }
- }
- }
-
- if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
- uart_write_wakeup(&port->sc_port);
-
- if (uart_circ_empty(xmit))
- snp_stop_tx(&port->sc_port, 0); /* no-op for us */
-}
-
-/**
- * sn_sal_interrupt - Handle console interrupts
- * @irq: irq #, useful for debug statements
- * @dev_id: our pointer to our port (sn_cons_port which contains the uart port)
- * @regs: Saved registers, used by sn_receive_chars for uart_handle_sysrq_char
- *
- */
-static irqreturn_t
-sn_sal_interrupt(int irq, void *dev_id, struct pt_regs *regs)
-{
- struct sn_cons_port *port = (struct sn_cons_port *) dev_id;
- unsigned long flags;
- int status = ia64_sn_console_intr_status();
-
- if (!port)
- return IRQ_NONE;
-
- spin_lock_irqsave(&port->sc_port.lock, flags);
- if (status & SAL_CONSOLE_INTR_RECV) {
- sn_receive_chars(port, regs);
- }
- if (status & SAL_CONSOLE_INTR_XMIT) {
- sn_transmit_chars(port, TRANSMIT_BUFFERED);
- }
- spin_unlock_irqrestore(&port->sc_port.lock, flags);
- return IRQ_HANDLED;
-}
-
-/**
- * sn_sal_connect_interrupt - Request interrupt, handled by sn_sal_interrupt
- * @port: Our sn_cons_port (which contains the uart port)
- *
- * returns the console irq if interrupt is successfully registered, else 0
- *
- */
-static int
-sn_sal_connect_interrupt(struct sn_cons_port *port)
-{
- if (request_irq(SGI_UART_VECTOR, sn_sal_interrupt, SA_INTERRUPT,
- "SAL console driver", port) >= 0) {
- return SGI_UART_VECTOR;
- }
-
- printk(KERN_INFO "sn_console: console proceeding in polled mode\n");
- return 0;
-}
-
-/**
- * sn_sal_timer_poll - this function handles polled console mode
- * @data: A pointer to our sn_cons_port (which contains the uart port)
- *
- * data is the pointer that init_timer will store for us. This function is
- * associated with init_timer to see if there is any console traffic.
- * Obviously not used in interrupt mode
- *
- */
-static void
-sn_sal_timer_poll(unsigned long data)
-{
- struct sn_cons_port *port = (struct sn_cons_port *) data;
- unsigned long flags;
-
- if (!port)
- return;
-
- if (!port->sc_port.irq) {
- spin_lock_irqsave(&port->sc_port.lock, flags);
- sn_receive_chars(port, NULL);
- sn_transmit_chars(port, TRANSMIT_RAW);
- spin_unlock_irqrestore(&port->sc_port.lock, flags);
- mod_timer(&port->sc_timer,
- jiffies + port->sc_interrupt_timeout);
- }
-}
-
-/*
- * Boot-time initialization code
- */
-
-/**
- * sn_sal_switch_to_asynch - Switch to async mode (as opposed to synch)
- * @port: Our sn_cons_port (which contains the uart port)
- *
- * So this is used by sn_sal_serial_console_init (early on, before we're
- * registered with serial core). It's also used by sn_sal_module_init
- * right after we've registered with serial core. The later only happens
- * if we didn't already come through here via sn_sal_serial_console_init.
- *
- */
-static void __init
-sn_sal_switch_to_asynch(struct sn_cons_port *port)
-{
- unsigned long flags;
-
- if (!port)
- return;
-
- DPRINTF("sn_console: about to switch to asynchronous console\n");
-
- /* without early_printk, we may be invoked late enough to race
- * with other cpus doing console IO at this point, however
- * console interrupts will never be enabled */
- spin_lock_irqsave(&port->sc_port.lock, flags);
-
- /* early_printk invocation may have done this for us */
- if (!port->sc_ops) {
- if (IS_RUNNING_ON_SIMULATOR())
- port->sc_ops = &sim_ops;
- else
- port->sc_ops = &poll_ops;
- }
-
- /* we can't turn on the console interrupt (as request_irq
- * calls kmalloc, which isn't set up yet), so we rely on a
- * timer to poll for input and push data from the console
- * buffer.
- */
- init_timer(&port->sc_timer);
- port->sc_timer.function = sn_sal_timer_poll;
- port->sc_timer.data = (unsigned long) port;
-
- if (IS_RUNNING_ON_SIMULATOR())
- port->sc_interrupt_timeout = 6;
- else {
- /* 960cps / 16 char FIFO = 60HZ
- * HZ / (SN_SAL_FIFO_SPEED_CPS / SN_SAL_FIFO_DEPTH) */
- port->sc_interrupt_timeout =
- HZ * SN_SAL_UART_FIFO_DEPTH / SN_SAL_UART_FIFO_SPEED_CPS;
- }
- mod_timer(&port->sc_timer, jiffies + port->sc_interrupt_timeout);
-
- port->sc_is_asynch = 1;
- spin_unlock_irqrestore(&port->sc_port.lock, flags);
-}
-
-/**
- * sn_sal_switch_to_interrupts - Switch to interrupt driven mode
- * @port: Our sn_cons_port (which contains the uart port)
- *
- * In sn_sal_module_init, after we're registered with serial core and
- * the port is added, this function is called to switch us to interrupt
- * mode. We were previously in asynch/polling mode (using init_timer).
- *
- * We attempt to switch to interrupt mode here by calling
- * sn_sal_connect_interrupt. If that works out, we enable receive interrupts.
- */
-static void __init
-sn_sal_switch_to_interrupts(struct sn_cons_port *port)
-{
- int irq;
- unsigned long flags;
-
- if (!port)
- return;
-
- DPRINTF("sn_console: switching to interrupt driven console\n");
-
- spin_lock_irqsave(&port->sc_port.lock, flags);
-
- irq = sn_sal_connect_interrupt(port);
-
- if (irq) {
- port->sc_port.irq = irq;
- port->sc_ops = &intr_ops;
-
- /* turn on receive interrupts */
- ia64_sn_console_intr_enable(SAL_CONSOLE_INTR_RECV);
- }
- spin_unlock_irqrestore(&port->sc_port.lock, flags);
-}
-
-/*
- * Kernel console definitions
- */
-
-#ifdef CONFIG_SERIAL_SGI_L1_CONSOLE
-static void sn_sal_console_write(struct console *, const char *, unsigned);
-static int __init sn_sal_console_setup(struct console *, char *);
-extern struct uart_driver sal_console_uart;
-extern struct tty_driver *uart_console_device(struct console *, int *);
-
-static struct console sal_console = {
- .name = DEVICE_NAME,
- .write = sn_sal_console_write,
- .device = uart_console_device,
- .setup = sn_sal_console_setup,
- .index = -1, /* unspecified */
- .data = &sal_console_uart,
-};
-
-#define SAL_CONSOLE &sal_console
-#else
-#define SAL_CONSOLE 0
-#endif /* CONFIG_SERIAL_SGI_L1_CONSOLE */
-
-static struct uart_driver sal_console_uart = {
- .owner = THIS_MODULE,
- .driver_name = "sn_console",
- .dev_name = DEVICE_NAME,
- .major = 0, /* major/minor set at registration time per USE_DYNAMIC_MINOR */
- .minor = 0,
- .nr = 1, /* one port */
- .cons = SAL_CONSOLE,
-};
-
-/**
- * sn_sal_module_init - When the kernel loads us, get us rolling w/ serial core
- *
- * Before this is called, we've been printing kernel messages in a special
- * early mode not making use of the serial core infrastructure. When our
- * driver is loaded for real, we register the driver and port with serial
- * core and try to enable interrupt driven mode.
- *
- */
-static int __init
-sn_sal_module_init(void)
-{
- int retval;
-
- printk(KERN_INFO "sn_console: Console driver init\n");
-
- if (!ia64_platform_is("sn2"))
- return -ENODEV;
-
- if (USE_DYNAMIC_MINOR == 1) {
- misc.minor = MISC_DYNAMIC_MINOR;
- misc.name = DEVICE_NAME_DYNAMIC;
- retval = misc_register(&misc);
- if (retval != 0) {
- printk("Failed to register console device using misc_register.\n");
- return -ENODEV;
- }
- sal_console_uart.major = MISC_MAJOR;
- sal_console_uart.minor = misc.minor;
- }
- else {
- sal_console_uart.major = DEVICE_MAJOR;
- sal_console_uart.minor = DEVICE_MINOR;
- }
-
- /* We register the driver and the port before switching to interrupts
- * or async above so the proper uart structures are populated */
-
- if (uart_register_driver(&sal_console_uart) < 0) {
- printk("ERROR sn_sal_module_init failed uart_register_driver, line %d\n",
- __LINE__);
- return -ENODEV;
- }
-
- sal_console_port.sc_port.lock = SPIN_LOCK_UNLOCKED;
-
- /* Setup the port struct with the minimum needed */
- sal_console_port.sc_port.membase = (char *)1; /* just needs to be non-zero */
- sal_console_port.sc_port.type = PORT_16550A;
- sal_console_port.sc_port.fifosize = SN_SAL_MAX_CHARS;
- sal_console_port.sc_port.ops = &sn_console_ops;
- sal_console_port.sc_port.line = 0;
-
- if (uart_add_one_port(&sal_console_uart, &sal_console_port.sc_port) < 0) {
- /* error - not sure what I'd do - so I'll do nothing */
- printk(KERN_ERR "%s: unable to add port\n", __FUNCTION__);
- }
-
- /* when this driver is compiled in, the console initialization
- * will have already switched us into asynchronous operation
- * before we get here through the module initcalls */
- if (!sal_console_port.sc_is_asynch) {
- sn_sal_switch_to_asynch(&sal_console_port);
- }
-
- /* at this point (module_init) we can try to turn on interrupts */
- if (!IS_RUNNING_ON_SIMULATOR()) {
- sn_sal_switch_to_interrupts(&sal_console_port);
- }
- return 0;
-}
-
-/**
- * sn_sal_module_exit - When we're unloaded, remove the driver/port
- *
- */
-static void __exit
-sn_sal_module_exit(void)
-{
- del_timer_sync(&sal_console_port.sc_timer);
- uart_remove_one_port(&sal_console_uart, &sal_console_port.sc_port);
- uart_unregister_driver(&sal_console_uart);
- misc_deregister(&misc);
-}
-
-module_init(sn_sal_module_init);
-module_exit(sn_sal_module_exit);
-
-#ifdef CONFIG_SERIAL_SGI_L1_CONSOLE
-
-/**
- * puts_raw_fixed - sn_sal_console_write helper for adding \r's as required
- * @puts_raw : puts function to do the writing
- * @s: input string
- * @count: length
- *
- * We need a \r ahead of every \n for direct writes through
- * ia64_sn_console_putb (what sal_puts_raw below actually does).
- *
- */
-
-static void puts_raw_fixed(int (*puts_raw) (const char *s, int len), const char *s, int count)
-{
- const char *s1;
-
- /* Output '\r' before each '\n' */
- while ((s1 = memchr(s, '\n', count)) != NULL) {
- puts_raw(s, s1 - s);
- puts_raw("\r\n", 2);
- count -= s1 + 1 - s;
- s = s1 + 1;
- }
- puts_raw(s, count);
-}
-
-/**
- * sn_sal_console_write - Print statements before serial core available
- * @console: Console to operate on - we ignore since we have just one
- * @s: String to send
- * @count: length
- *
- * This is referenced in the console struct. It is used for early
- * console printing before we register with serial core and for things
- * such as kdb. The console_lock must be held when we get here.
- *
- * This function has some code for trying to print output even if the lock
- * is held. We try to cover the case where a lock holder could have died.
- * We don't use this special case code if we're not registered with serial
- * core yet. After we're registered with serial core, the only time this
- * function would be used is for high level kernel output like magic sys req,
- * kdb, and printk's.
- */
-static void
-sn_sal_console_write(struct console *co, const char *s, unsigned count)
-{
- unsigned long flags = 0;
- struct sn_cons_port *port = &sal_console_port;
-#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
- static int stole_lock = 0;
-#endif
-
- BUG_ON(!port->sc_is_asynch);
-
- /* We can't look at the xmit buffer if we're not registered with serial core
- * yet. So only do the fancy recovery after registering
- */
- if (port->sc_port.info) {
-
- /* somebody really wants this output, might be an
- * oops, kdb, panic, etc. make sure they get it. */
-#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
- if (spin_is_locked(&port->sc_port.lock)) {
- int lhead = port->sc_port.info->xmit.head;
- int ltail = port->sc_port.info->xmit.tail;
- int counter, got_lock = 0;
-
- /*
- * We attempt to determine if someone has died with the
- * lock. We wait ~20 secs after the head and tail ptrs
- * stop moving and assume the lock holder is not functional
- * and plow ahead. If the lock is freed within the time out
- * period we re-get the lock and go ahead normally. We also
- * remember if we have plowed ahead so that we don't have
- * to wait out the time out period again - the asumption
- * is that we will time out again.
- */
-
- for (counter = 0; counter < 150; mdelay(125), counter++) {
- if (!spin_is_locked(&port->sc_port.lock) || stole_lock) {
- if (!stole_lock) {
- spin_lock_irqsave(&port->sc_port.lock, flags);
- got_lock = 1;
- }
- break;
- }
- else {
- /* still locked */
- if ((lhead != port->sc_port.info->xmit.head) || (ltail != port->sc_port.info->xmit.tail)) {
- lhead = port->sc_port.info->xmit.head;
- ltail = port->sc_port.info->xmit.tail;
- counter = 0;
- }
- }
- }
- /* flush anything in the serial core xmit buffer, raw */
- sn_transmit_chars(port, 1);
- if (got_lock) {
- spin_unlock_irqrestore(&port->sc_port.lock, flags);
- stole_lock = 0;
- }
- else {
- /* fell thru */
- stole_lock = 1;
- }
- puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count);
- }
- else {
- stole_lock = 0;
-#endif
- spin_lock_irqsave(&port->sc_port.lock, flags);
- sn_transmit_chars(port, 1);
- spin_unlock_irqrestore(&port->sc_port.lock, flags);
-
- puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count);
- }
- }
- else {
- /* Not yet registered with serial core - simple case */
- puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count);
- }
-}
-
-
-/**
- * sn_sal_console_setup - Set up console for early printing
- * @co: Console to work with
- * @options: Options to set
- *
- * Altix console doesn't do anything with baud rates, etc, anyway.
- *
- * This isn't required since not providing the setup function in the
- * console struct is ok. However, other patches like KDB plop something
- * here so providing it is easier.
- *
- */
-static int __init
-sn_sal_console_setup(struct console *co, char *options)
-{
- return 0;
-}
-
-/**
- * sn_sal_console_write_early - simple early output routine
- * @co - console struct
- * @s - string to print
- * @count - count
- *
- * Simple function to provide early output, before even
- * sn_sal_serial_console_init is called. Referenced in the
- * console struct registerd in sn_serial_console_early_setup.
- *
- */
-static void __init
-sn_sal_console_write_early(struct console *co, const char *s, unsigned count)
-{
- puts_raw_fixed(sal_console_port.sc_ops->sal_puts_raw, s, count);
-}
-
-/* Used for very early console printing - again, before
- * sn_sal_serial_console_init is run */
-static struct console sal_console_early __initdata = {
- .name = "sn_sal",
- .write = sn_sal_console_write_early,
- .flags = CON_PRINTBUFFER,
- .index = -1,
-};
-
-/**
- * sn_serial_console_early_setup - Sets up early console output support
- *
- * Register a console early on... This is for output before even
- * sn_sal_serial_cosnole_init is called. This function is called from
- * setup.c. This allows us to do really early polled writes. When
- * sn_sal_serial_console_init is called, this console is unregistered
- * and a new one registered.
- */
-int __init
-sn_serial_console_early_setup(void)
-{
- if (!ia64_platform_is("sn2"))
- return -1;
-
- if (IS_RUNNING_ON_SIMULATOR())
- sal_console_port.sc_ops = &sim_ops;
- else
- sal_console_port.sc_ops = &poll_ops;
-
- early_sn_setup(); /* Find SAL entry points */
- register_console(&sal_console_early);
-
- return 0;
-}
-
-
-/**
- * sn_sal_serial_console_init - Early console output - set up for register
- *
- * This function is called when regular console init happens. Because we
- * support even earlier console output with sn_serial_console_early_setup
- * (called from setup.c directly), this function unregisters the really
- * early console.
- *
- * Note: Even if setup.c doesn't register sal_console_early, unregistering
- * it here doesn't hurt anything.
- *
- */
-static int __init
-sn_sal_serial_console_init(void)
-{
- if (ia64_platform_is("sn2")) {
- sn_sal_switch_to_asynch(&sal_console_port);
- DPRINTF ("sn_sal_serial_console_init : register console\n");
- register_console(&sal_console);
- unregister_console(&sal_console_early);
- }
- return 0;
-}
-
-console_initcall(sn_sal_serial_console_init);
-
-#endif /* CONFIG_SERIAL_SGI_L1_CONSOLE */
cflag = CREAD | HUPCL | CLOCAL;
s = mode;
- baud = simple_strtoul(s, NULL, 0);
+ baud = simple_strtoul(s, 0, 0);
s = strchr(s, ',');
- bits = simple_strtoul(++s, NULL, 0);
+ bits = simple_strtoul(++s, 0, 0);
s = strchr(s, ',');
parity = *(++s);
s = strchr(s, ',');
- stop = simple_strtoul(++s, NULL, 0);
+ stop = simple_strtoul(++s, 0, 0);
s = strchr(s, ',');
/* XXX handshake is not handled here. */
{
unsigned char status1, status2, scratch, scratch2, scratch3;
unsigned char save_lcr, save_mcr;
- struct linux_ebus_device *dev = NULL;
+ struct linux_ebus_device *dev = 0;
struct linux_ebus *ebus;
#ifdef CONFIG_SPARC64
struct sparc_isa_bridge *isa_br;
s->count++;
up(&open_sem);
- return nonseekable_open(inode, file);
+ return 0;
}
static int usb_audio_release_mixdev(struct inode *inode, struct file *file)
unsigned int ptr;
int cnt, err;
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (as->usbin.dma.mapped)
return -ENXIO;
if (!as->usbin.dma.ready && (ret = prog_dmabuf_in(as)))
unsigned int start_thr;
int cnt, err;
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (as->usbout.dma.mapped)
return -ENXIO;
if (!as->usbout.dma.ready && (ret = prog_dmabuf_out(as)))
as->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE);
s->count++;
up(&open_sem);
- return nonseekable_open(inode, file);
+ return 0;
}
static int usb_audio_release(struct inode *inode, struct file *file)
ssize_t ret;
DECLARE_WAITQUEUE(wait, current);
+ if ( ppos != &file->f_pos ) {
+ return -ESPIPE;
+ }
if ( !access_ok(VERIFY_READ, buffer, count) ) {
return -EFAULT;
}
ssize_t ret;
unsigned long int flags;
+ if ( ppos != &file->f_pos ) {
+ return -ESPIPE;
+ }
if ( !access_ok(VERIFY_READ, buffer, count) ) {
return -EFAULT;
}
printk(KERN_INFO "usb-midi: Open Succeeded. minor= %d.\n", minor);
#endif
- return nonseekable_open(inode, file); /** Success. **/
+ return 0; /** Success. **/
}
return 0;
}
-#define hub_suspend NULL
-#define hub_resume NULL
+#define hub_suspend 0
+#define hub_resume 0
#define remote_wakeup(x) 0
#endif /* CONFIG_USB_SUSPEND */
int rndis_proc_write (struct file *file, const char __user *buffer,
unsigned long count, void *data)
{
- rndis_params *p = data;
u32 speed = 0;
int i, fl_speed = 0;
for (i = 0; i < count; i++) {
- char c;
- if (get_user(c, buffer))
- return -EFAULT;
- switch (c) {
+ switch (*buffer) {
case '0':
case '1':
case '2':
case '8':
case '9':
fl_speed = 1;
- speed = speed*10 + c - '0';
+ speed = speed*10 + *buffer - '0';
break;
case 'C':
case 'c':
- rndis_signal_connect (p->confignr);
+ rndis_signal_connect (((rndis_params *) data)
+ ->confignr);
break;
case 'D':
case 'd':
- rndis_signal_disconnect(p->confignr);
+ rndis_signal_disconnect (((rndis_params *) data)
+ ->confignr);
break;
default:
- if (fl_speed) p->speed = speed;
- else DEBUG ("%c is not valid\n", c);
+ if (fl_speed) ((rndis_params *) data)->speed = speed;
+ else DEBUG ("%c is not valid\n", *buffer);
break;
}
size_t nbytes, loff_t *ppos)
{
struct uhci_proc *up = file->private_data;
- return simple_read_from_buffer(buf, nbytes, ppos, up->data, up->size);
+ unsigned int pos;
+ unsigned int size;
+
+ pos = *ppos;
+ size = up->size;
+ if (pos >= size)
+ return 0;
+ if (nbytes > size - pos)
+ nbytes = size - pos;
+
+ if (copy_to_user(buf, up->data + pos, nbytes))
+ return -EFAULT;
+
+ *ppos += nbytes;
+
+ return nbytes;
}
static int uhci_proc_release(struct inode *inode, struct file *file)
file->f_pos = 0;
file->private_data = s;
- return nonseekable_open(inode, file);
+ return 0;
}
static int dabusb_release (struct inode *inode, struct file *file)
* Based on the Linux CPiA driver written by Peter Pregler,
* Scott J. Bertin and Johannes Erdfelt.
*
- * Please see the file: Documentation/usb/ov511.txt
+ * Please see the file: linux/Documentation/usb/ov511.txt
* and the website at: http://alpha.dyndns.org/ov511
* for more info.
*
DECLARE_WAITQUEUE(wait, current);
int bytes_to_read;
- Trace(TRACE_READ, "video_read(0x%p, %p, %zd) called.\n", vdev, buf, count);
+ Trace(TRACE_READ, "video_read(0x%p, %p, %d) called.\n", vdev, buf, count);
if (vdev == NULL)
return -EFAULT;
pdev = vdev->priv;
/* file IO stuff */
file->f_pos = 0;
file->private_data = ccp;
- return nonseekable_open(inode, file);
+ return 0;
/* Error exit */
ofail: up (&cp->mutex);
dbg(2, "%s: enter", __FUNCTION__);
- nonseekable_open(inode, file);
subminor = iminor(inode);
down (&disconnect_sem);
*
* Based on dabusb.c, printer.c & scanner.c
*
- * Please see the file: Documentation/usb/silverlink.txt
+ * Please see the file: linux/Documentation/usb/SilverLink.txt
* and the website at: http://lpg.ticalc.org/prj_usb/
* for more info.
*
filp->f_pos = 0;
filp->private_data = s;
- return nonseekable_open(inode, filp);
+ return 0;
}
static int
config FB_CIRRUS
tristate "Cirrus Logic support"
- depends on FB && (ZORRO || PCI)
+ depends on FB && (AMIGA || PCI)
---help---
This enables support for Cirrus Logic GD542x/543x based boards on
Amiga: SD64, Piccolo, Picasso II/II+, Picasso IV, or EGS Spectrum.
config FB_CYBER2000
tristate "CyberPro 2000/2010/5000 support"
- depends on FB && PCI && (BROKEN || !SPARC64)
+ depends on FB && PCI
help
This enables support for the Integraphics CyberPro 20x0 and 5000
VGA chips used in the Rebel.com Netwinder and other machines.
config FB_S3TRIO
bool "S3 Trio display support"
- depends on FB && PPC && BROKEN
+ depends on FB && PPC
help
If you have a S3 Trio say Y. Say N for S3 Virge.
independently validate video mode parameters, you should say Y
here.
-config FB_RIVA_DEBUG
- bool "Lots of debug output from Riva(nVidia) driver"
- depends on FB_RIVA
- default n
- help
- Say Y here if you want the Riva driver to output all sorts
- of debugging informations to provide to the maintainer when
- something goes wrong.
-
config FB_I810
tristate "Intel 810/815 support (EXPERIMENTAL)"
depends on FB && AGP && AGP_INTEL && EXPERIMENTAL && PCI
info->fix.ypanstep = 0;
} else {
info->fix.ywrapstep = 0;
- if (par->vmode & FB_VMODE_SMOOTH_XPAN)
+ if (par->vmode &= FB_VMODE_SMOOTH_XPAN)
info->fix.xpanstep = 1;
else
info->fix.xpanstep = 16<<maxfmode;
*/
{
- u_long tmp = DIVUL(200000000000ULL, amiga_eclock);
+ u_long tmp = DIVUL(200E9, amiga_eclock);
pixclock[TAG_SHRES] = (tmp + 4) / 8; /* SHRES: 35 ns / 28 MHz */
pixclock[TAG_HIRES] = (tmp + 2) / 4; /* HIRES: 70 ns / 14 MHz */
}
-#ifdef CONFIG_X86
+#ifdef __i386__
static void * __devinit aty128_find_mem_vbios(struct aty128fb_par *par)
{
/* I simplified this code as we used to miss the signatures in
}
return rom_base;
}
-#endif
+#endif /* __i386__ */
#endif /* ndef(__sparc__) */
/* fill in known card constants if pll_block is not available */
#ifndef __sparc__
bios = aty128_map_ROM(par, pdev);
-#ifdef CONFIG_X86
+#ifdef __i386__
if (bios == NULL)
bios = aty128_find_mem_vbios(par);
#endif
case FBIO_ATY128_SET_MIRROR:
if (par->chip_gen != rage_M3)
return -EINVAL;
- rc = get_user(value, (__u32 __user *)arg);
+ rc = get_user(value, (__u32*)arg);
if (rc)
return rc;
par->lcd_on = (value & 0x01) != 0;
if (par->chip_gen != rage_M3)
return -EINVAL;
value = (par->crt_on << 1) | par->lcd_on;
- return put_user(value, (__u32 __user *)arg);
+ return put_user(value, (__u32*)arg);
}
#endif
return -EINVAL;
wait_for_idle(par);
aty128fb_set_par(info);
fb_pan_display(info, &info->var);
- fb_set_cmap(&info->cmap, info);
+ fb_set_cmap(&info->cmap, 1, info);
/* Refresh */
fb_set_suspend(info, 0);
fbtyp.fb_cmsize = info->cmap.len;
fbtyp.fb_size = info->fix.smem_len;
if (copy_to_user
- ((struct fbtype __user *) arg, &fbtyp, sizeof(fbtyp)))
+ ((struct fbtype *) arg, &fbtyp, sizeof(fbtyp)))
return -EFAULT;
break;
#endif /* __sparc__ */
case PBOOK_SLEEP_REJECT:
if (par->save_framebuffer) {
vfree(par->save_framebuffer);
- par->save_framebuffer = NULL;
+ par->save_framebuffer = 0;
}
break;
case PBOOK_SLEEP_NOW:
memcpy_toio((void *) info->screen_base,
par->save_framebuffer, nb);
vfree(par->save_framebuffer);
- par->save_framebuffer = NULL;
+ par->save_framebuffer = 0;
}
/* Restore display */
atyfb_set_par(info);
for (m = MIN_M; m <= MAX_M; m++) {
for (n = MIN_N; n <= MAX_N; n++) {
- tempA = 938356; /* 14.31818 * 65536 */
+ tempA = (14.31818 * 65536);
tempA *= (n + 8); /* 43..256 */
tempB = twoToKth * 256;
tempB *= (m + 2); /* 4..32 */
return -ENXIO;
}
-#ifdef CONFIG_X86
+#ifdef __i386__
static int __devinit radeon_find_mem_vbios(struct radeonfb_info *rinfo)
{
/* I simplified this code as we used to miss the signatures in
return 0;
}
-#endif
+#endif /* __i386__ */
#ifdef CONFIG_PPC_OF
/*
printk(KERN_WARNING "radeonfb: Cannot match card to OF node !\n");
return -ENODEV;
}
- val = (u32 *) get_property(dp, "ATY,RefCLK", NULL);
+ val = (u32 *) get_property(dp, "ATY,RefCLK", 0);
if (!val || !*val) {
printk(KERN_WARNING "radeonfb: No ATY,RefCLK property !\n");
return -EINVAL;
rinfo->pll.ref_clk = (*val) / 10;
- val = (u32 *) get_property(dp, "ATY,SCLK", NULL);
+ val = (u32 *) get_property(dp, "ATY,SCLK", 0);
if (val && *val)
rinfo->pll.sclk = (*val) / 10;
- val = (u32 *) get_property(dp, "ATY,MCLK", NULL);
+ val = (u32 *) get_property(dp, "ATY,MCLK", 0);
if (val && *val)
rinfo->pll.mclk = (*val) / 10;
/*
* On x86, the primary display on laptop may have it's BIOS
* ROM elsewhere, try to locate it at the legacy memory hole.
- * We probably need to make sure this is the primary display,
+ * We probably need to make sure this is the primary display,
* but that is difficult without some arch support.
*/
-#ifdef CONFIG_X86
+#ifdef __i386__
if (rinfo->bios_seg == NULL)
radeon_find_mem_vbios(rinfo);
-#endif
+#endif /* __i386__ */
/* If both above failed, try the BIOS ROM again for mobility
* chips
/* Restore display & engine */
radeonfb_set_par(info);
fb_pan_display(info, &info->var);
- fb_set_cmap(&info->cmap, info);
+ fb_set_cmap(&info->cmap, 1, info);
/* Refresh */
fb_set_suspend(info, 0);
{
struct cg14_par *par = (struct cg14_par *) info->par;
struct cg14_regs *regs = par->regs;
- struct mdi_cfginfo kmdi, __user *mdii;
+ struct mdi_cfginfo kmdi, *mdii;
unsigned long flags;
int cur_mode, mode, ret = 0;
kmdi.mdi_size = par->ramsize;
spin_unlock_irqrestore(&par->lock, flags);
- mdii = (struct mdi_cfginfo __user *) arg;
+ mdii = (struct mdi_cfginfo *) arg;
if (copy_to_user(mdii, &kmdi, sizeof(kmdi)))
ret = -EFAULT;
break;
case MDI_SET_PIXELMODE:
- if (get_user(mode, (int __user *) arg)) {
+ if (get_user(mode, (int *) arg)) {
ret = -EFAULT;
break;
}
case PBOOK_SLEEP_REJECT:
if (save_framebuffer) {
vfree(save_framebuffer);
- save_framebuffer = NULL;
+ save_framebuffer = 0;
}
break;
case PBOOK_SLEEP_NOW:
if (save_framebuffer) {
memcpy(p->screen_base, save_framebuffer, nb);
vfree(save_framebuffer);
- save_framebuffer = NULL;
+ save_framebuffer = 0;
}
chipsfb_blank(0, p);
break;
*
* Contributors (thanks, all!)
*
- * David Eger:
- * Overhaul for Linux 2.6
+ * David Eger:
+ * Overhaul for Linux 2.6
*
* Jeff Rugen:
* Major contributions; Motorola PowerStack (PPC and PCI) support,
* a run-time table?
*/
static const struct cirrusfb_board_info_rec {
+ cirrusfb_board_t btype; /* chipset enum, not strictly necessary, as
+ * cirrusfb_board_info[] is directly indexed
+ * by this value */
char *name; /* ASCII name of chipset */
long maxclock[5]; /* maximum video clock */
/* for 1/4bpp, 8bpp 15/16bpp, 24bpp, 32bpp - numbers from xorg code */
unsigned char sr1f; /* SR1F VGA initial register value */
} cirrusfb_board_info[] = {
- [BT_SD64] = {
- .name = "CL SD64",
- .maxclock = {
- /* guess */
- /* the SD64/P4 have a higher max. videoclock */
- 140000, 140000, 140000, 140000, 140000,
- },
- .init_sr07 = TRUE,
- .init_sr1f = TRUE,
- .scrn_start_bit19 = TRUE,
- .sr07 = 0xF0,
- .sr07_1bpp = 0xF0,
- .sr07_8bpp = 0xF1,
- .sr1f = 0x20
- },
- [BT_PICCOLO] = {
- .name = "CL Piccolo",
- .maxclock = {
- /* guess */
- 90000, 90000, 90000, 90000, 90000
- },
- .init_sr07 = TRUE,
- .init_sr1f = TRUE,
- .scrn_start_bit19 = FALSE,
- .sr07 = 0x80,
- .sr07_1bpp = 0x80,
- .sr07_8bpp = 0x81,
- .sr1f = 0x22
- },
- [BT_PICASSO] = {
- .name = "CL Picasso",
- .maxclock = {
- /* guess */
- 90000, 90000, 90000, 90000, 90000
- },
- .init_sr07 = TRUE,
- .init_sr1f = TRUE,
- .scrn_start_bit19 = FALSE,
- .sr07 = 0x20,
- .sr07_1bpp = 0x20,
- .sr07_8bpp = 0x21,
- .sr1f = 0x22
- },
- [BT_SPECTRUM] = {
- .name = "CL Spectrum",
- .maxclock = {
- /* guess */
- 90000, 90000, 90000, 90000, 90000
- },
- .init_sr07 = TRUE,
- .init_sr1f = TRUE,
- .scrn_start_bit19 = FALSE,
- .sr07 = 0x80,
- .sr07_1bpp = 0x80,
- .sr07_8bpp = 0x81,
- .sr1f = 0x22
- },
- [BT_PICASSO4] = {
- .name = "CL Picasso4",
- .maxclock = {
- 135100, 135100, 85500, 85500, 0
- },
- .init_sr07 = TRUE,
- .init_sr1f = FALSE,
- .scrn_start_bit19 = TRUE,
- .sr07 = 0x20,
- .sr07_1bpp = 0x20,
- .sr07_8bpp = 0x21,
- .sr1f = 0
- },
- [BT_ALPINE] = {
- .name = "CL Alpine",
- .maxclock = {
- /* for the GD5430. GD5446 can do more... */
- 85500, 85500, 50000, 28500, 0
- },
- .init_sr07 = TRUE,
- .init_sr1f = TRUE,
- .scrn_start_bit19 = TRUE,
- .sr07 = 0xA0,
- .sr07_1bpp = 0xA1,
- .sr07_1bpp_mux = 0xA7,
- .sr07_8bpp = 0xA1,
- .sr07_8bpp_mux = 0xA7,
- .sr1f = 0x1C
- },
- [BT_GD5480] = {
- .name = "CL GD5480",
- .maxclock = {
- 135100, 200000, 200000, 135100, 135100
- },
- .init_sr07 = TRUE,
- .init_sr1f = TRUE,
- .scrn_start_bit19 = TRUE,
- .sr07 = 0x10,
- .sr07_1bpp = 0x11,
- .sr07_8bpp = 0x11,
- .sr1f = 0x1C
- },
- [BT_LAGUNA] = {
- .name = "CL Laguna",
- .maxclock = {
- /* guess */
- 135100, 135100, 135100, 135100, 135100,
- },
- .init_sr07 = FALSE,
- .init_sr1f = FALSE,
- .scrn_start_bit19 = TRUE,
- }
+ { BT_NONE, }, /* dummy record */
+ { BT_SD64,
+ "CL SD64",
+ { 140000, 140000, 140000, 140000, 140000, }, /* guess */
+ /* the SD64/P4 have a higher max. videoclock */
+ TRUE,
+ TRUE,
+ TRUE,
+ 0xF0,
+ 0xF0,
+ 0, /* unused, does not multiplex */
+ 0xF1,
+ 0, /* unused, does not multiplex */
+ 0x20 },
+ { BT_PICCOLO,
+ "CL Piccolo",
+ { 90000, 90000, 90000, 90000, 90000 }, /* guess */
+ TRUE,
+ TRUE,
+ FALSE,
+ 0x80,
+ 0x80,
+ 0, /* unused, does not multiplex */
+ 0x81,
+ 0, /* unused, does not multiplex */
+ 0x22 },
+ { BT_PICASSO,
+ "CL Picasso",
+ { 90000, 90000, 90000, 90000, 90000, }, /* guess */
+ TRUE,
+ TRUE,
+ FALSE,
+ 0x20,
+ 0x20,
+ 0, /* unused, does not multiplex */
+ 0x21,
+ 0, /* unused, does not multiplex */
+ 0x22 },
+ { BT_SPECTRUM,
+ "CL Spectrum",
+ { 90000, 90000, 90000, 90000, 90000, }, /* guess */
+ TRUE,
+ TRUE,
+ FALSE,
+ 0x80,
+ 0x80,
+ 0, /* unused, does not multiplex */
+ 0x81,
+ 0, /* unused, does not multiplex */
+ 0x22 },
+ { BT_PICASSO4,
+ "CL Picasso4",
+ { 135100, 135100, 85500, 85500, 0 },
+ TRUE,
+ FALSE,
+ TRUE,
+ 0x20,
+ 0x20,
+ 0, /* unused, does not multiplex */
+ 0x21,
+ 0, /* unused, does not multiplex */
+ 0 },
+ { BT_ALPINE,
+ "CL Alpine",
+ { 85500, 85500, 50000, 28500, 0}, /* for the GD5430. GD5446 can do more... */
+ TRUE,
+ TRUE,
+ TRUE,
+ 0xA0,
+ 0xA1,
+ 0xA7,
+ 0xA1,
+ 0xA7,
+ 0x1C },
+ { BT_GD5480,
+ "CL GD5480",
+ { 135100, 200000, 200000, 135100, 135100 },
+ TRUE,
+ TRUE,
+ TRUE,
+ 0x10,
+ 0x11,
+ 0, /* unused, does not multiplex */
+ 0x11,
+ 0, /* unused, does not multiplex */
+ 0x1C },
+ { BT_LAGUNA,
+ "CL Laguna",
+ { 135100, 135100, 135100, 135100, 135100, }, /* guess */
+ FALSE,
+ FALSE,
+ TRUE,
+ 0, /* unused */
+ 0, /* unused */
+ 0, /* unused */
+ 0, /* unused */
+ 0, /* unused */
+ 0 }, /* unused */
};
{ PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_##id, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (btype) }
static struct pci_device_id cirrusfb_pci_table[] = {
- CHIP( CIRRUS_5436, BT_ALPINE ),
- CHIP( CIRRUS_5434_8, BT_ALPINE ),
- CHIP( CIRRUS_5434_4, BT_ALPINE ),
- CHIP( CIRRUS_5430, BT_ALPINE ), /* GD-5440 has identical id */
- CHIP( CIRRUS_7543, BT_ALPINE ),
- CHIP( CIRRUS_7548, BT_ALPINE ),
+ CHIP( CIRRUS_5436, BT_ALPINE ),
+ CHIP( CIRRUS_5434_8, BT_ALPINE ),
+ CHIP( CIRRUS_5434_4, BT_ALPINE ),
+ CHIP( CIRRUS_5430, BT_ALPINE ), /* GD-5440 has identical id */
+ CHIP( CIRRUS_7543, BT_ALPINE ),
+ CHIP( CIRRUS_7548, BT_ALPINE ),
CHIP( CIRRUS_5480, BT_GD5480 ), /* MacPicasso probably */
CHIP( CIRRUS_5446, BT_PICASSO4 ), /* Picasso 4 is a GD5446 */
CHIP( CIRRUS_5462, BT_LAGUNA ), /* CL Laguna */
#ifdef CONFIG_ZORRO
-static const struct zorro_device_id cirrusfb_zorro_table[] = {
- {
- .id = ZORRO_PROD_HELFRICH_SD64_RAM,
- .driver_data = BT_SD64,
- }, {
- .id = ZORRO_PROD_HELFRICH_PICCOLO_RAM,
- .driver_data = BT_PICCOLO,
- }, {
- .id = ZORRO_PROD_VILLAGE_TRONIC_PICASSO_II_II_PLUS_RAM,
- .driver_data = BT_PICASSO,
- }, {
- .id = ZORRO_PROD_GVP_EGS_28_24_SPECTRUM_RAM,
- .driver_data = BT_SPECTRUM,
- }, {
- .id = ZORRO_PROD_VILLAGE_TRONIC_PICASSO_IV_Z3,
- .driver_data = BT_PICASSO4,
- },
- { 0 }
-};
-
static const struct {
- zorro_id id2;
+ cirrusfb_board_t btype;
+ zorro_id id, id2;
unsigned long size;
-} cirrusfb_zorro_table2[] = {
- [BT_SD64] = {
- .id2 = ZORRO_PROD_HELFRICH_SD64_REG,
- .size = 0x400000
- },
- [BT_PICCOLO] = {
- .id2 = ZORRO_PROD_HELFRICH_PICCOLO_REG,
- .size = 0x200000
- },
- [BT_PICASSO] = {
- .id2 = ZORRO_PROD_VILLAGE_TRONIC_PICASSO_II_II_PLUS_REG,
- .size = 0x200000
- },
- [BT_SPECTRUM] = {
- .id2 = ZORRO_PROD_GVP_EGS_28_24_SPECTRUM_REG,
- .size = 0x200000
- },
- [BT_PICASSO4] = {
- .id2 = 0,
- .size = 0x400000
- }
+} cirrusfb_zorro_probe_list[] __initdata = {
+ { BT_SD64,
+ ZORRO_PROD_HELFRICH_SD64_RAM,
+ ZORRO_PROD_HELFRICH_SD64_REG,
+ 0x400000 },
+ { BT_PICCOLO,
+ ZORRO_PROD_HELFRICH_PICCOLO_RAM,
+ ZORRO_PROD_HELFRICH_PICCOLO_REG,
+ 0x200000 },
+ { BT_PICASSO,
+ ZORRO_PROD_VILLAGE_TRONIC_PICASSO_II_II_PLUS_RAM,
+ ZORRO_PROD_VILLAGE_TRONIC_PICASSO_II_II_PLUS_REG,
+ 0x200000 },
+ { BT_SPECTRUM,
+ ZORRO_PROD_GVP_EGS_28_24_SPECTRUM_RAM,
+ ZORRO_PROD_GVP_EGS_28_24_SPECTRUM_REG,
+ 0x200000 },
+ { BT_PICASSO4,
+ ZORRO_PROD_VILLAGE_TRONIC_PICASSO_IV_Z3,
+ 0,
+ 0x400000 },
};
#endif /* CONFIG_ZORRO */
struct { u8 red, green, blue, pad; } palette[256];
#ifdef CONFIG_ZORRO
- struct zorro_dev *zdev;
+ unsigned long board_addr,
+ board_size;
#endif
+
#ifdef CONFIG_PCI
struct pci_dev *pdev;
#endif
- void (*unmap)(struct cirrusfb_info *cinfo);
};
static const struct {
const char *name;
struct fb_var_screeninfo var;
-} cirrusfb_predefined[] = {
- {
- /* autodetect mode */
- .name = "Autodetect",
- }, {
- /* 640x480, 31.25 kHz, 60 Hz, 25 MHz PixClock */
- .name = "640x480",
- .var = {
- .xres = 640,
- .yres = 480,
- .xres_virtual = 640,
- .yres_virtual = 480,
- .bits_per_pixel = 8,
- .red = { .length = 8 },
- .green = { .length = 8 },
- .blue = { .length = 8 },
- .width = -1,
- .height = -1,
- .pixclock = 40000,
- .left_margin = 48,
- .right_margin = 16,
- .upper_margin = 32,
- .lower_margin = 8,
- .hsync_len = 96,
- .vsync_len = 4,
- .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
- .vmode = FB_VMODE_NONINTERLACED
- }
- }, {
- /* 800x600, 48 kHz, 76 Hz, 50 MHz PixClock */
- .name = "800x600",
- .var = {
- .xres = 800,
- .yres = 600,
- .xres_virtual = 800,
- .yres_virtual = 600,
- .bits_per_pixel = 8,
- .red = { .length = 8 },
- .green = { .length = 8 },
- .blue = { .length = 8 },
- .width = -1,
- .height = -1,
- .pixclock = 20000,
- .left_margin = 128,
- .right_margin = 16,
- .upper_margin = 24,
- .lower_margin = 2,
- .hsync_len = 96,
- .vsync_len = 6,
- .vmode = FB_VMODE_NONINTERLACED
- }
- }, {
- /*
- * Modeline from XF86Config:
- * Mode "1024x768" 80 1024 1136 1340 1432 768 770 774 805
- */
- /* 1024x768, 55.8 kHz, 70 Hz, 80 MHz PixClock */
- .name = "1024x768",
- .var = {
- .xres = 1024,
- .yres = 768,
- .xres_virtual = 1024,
- .yres_virtual = 768,
- .bits_per_pixel = 8,
- .red = { .length = 8 },
- .green = { .length = 8 },
- .blue = { .length = 8 },
- .width = -1,
- .height = -1,
- .pixclock = 12500,
- .left_margin = 144,
- .right_margin = 32,
- .upper_margin = 30,
- .lower_margin = 2,
- .hsync_len = 192,
- .vsync_len = 6,
- .vmode = FB_VMODE_NONINTERLACED
+} cirrusfb_predefined[] =
+
+{
+ {"Autodetect", /* autodetect mode */
+ {0}
+ },
+
+ {"640x480", /* 640x480, 31.25 kHz, 60 Hz, 25 MHz PixClock */
+ {
+ 640, 480, 640, 480, 0, 0, 8, 0,
+ {0, 8, 0},
+ {0, 8, 0},
+ {0, 8, 0},
+ {0, 0, 0},
+ 0, 0, -1, -1, FB_ACCEL_NONE, 40000, 48, 16, 32, 8, 96, 4,
+ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
+ }
+ },
+
+ {"800x600", /* 800x600, 48 kHz, 76 Hz, 50 MHz PixClock */
+ {
+ 800, 600, 800, 600, 0, 0, 8, 0,
+ {0, 8, 0},
+ {0, 8, 0},
+ {0, 8, 0},
+ {0, 0, 0},
+ 0, 0, -1, -1, FB_ACCEL_NONE, 20000, 128, 16, 24, 2, 96, 6,
+ 0, FB_VMODE_NONINTERLACED
+ }
+ },
+
+ /*
+ Modeline from XF86Config:
+ Mode "1024x768" 80 1024 1136 1340 1432 768 770 774 805
+ */
+ {"1024x768", /* 1024x768, 55.8 kHz, 70 Hz, 80 MHz PixClock */
+ {
+ 1024, 768, 1024, 768, 0, 0, 8, 0,
+ {0, 8, 0},
+ {0, 8, 0},
+ {0, 8, 0},
+ {0, 0, 0},
+ 0, 0, -1, -1, FB_ACCEL_NONE, 12500, 144, 32, 30, 2, 192, 6,
+ 0, FB_VMODE_NONINTERLACED
}
}
};
static struct fb_ops cirrusfb_ops = {
.owner = THIS_MODULE,
.fb_open = cirrusfb_open,
- .fb_release = cirrusfb_release,
+ .fb_release = cirrusfb_release,
.fb_setcolreg = cirrusfb_setcolreg,
.fb_check_var = cirrusfb_check_var,
.fb_set_par = cirrusfb_set_par,
DPRINTK (" (for GD54xx)\n");
vga_wseq (regbase, CL_SEQR7,
regs.multiplexing ?
- bi->sr07_1bpp_mux : bi->sr07_1bpp);
+ bi->sr07_1bpp_mux : bi->sr07_1bpp);
break;
case BT_LAGUNA:
DPRINTK (" (for GD54xx)\n");
vga_wseq (regbase, CL_SEQR7,
regs.multiplexing ?
- bi->sr07_8bpp_mux : bi->sr07_8bpp);
+ bi->sr07_8bpp_mux : bi->sr07_8bpp);
break;
case BT_LAGUNA:
}
-static void cirrusfb_pci_unmap (struct cirrusfb_info *cinfo)
+static void __devexit cirrusfb_pci_unmap (struct cirrusfb_info *cinfo)
{
struct pci_dev *pdev = cinfo->pdev;
framebuffer_release(cinfo->info);
pci_disable_device(pdev);
}
-#endif /* CONFIG_PCI */
-
-
-#ifdef CONFIG_ZORRO
-static void __devexit cirrusfb_zorro_unmap (struct cirrusfb_info *cinfo)
-{
- zorro_release_device(cinfo->zdev);
-
- if (cinfo->btype == BT_PICASSO4) {
- cinfo->regbase -= 0x600000;
- iounmap ((void *)cinfo->regbase);
- iounmap ((void *)cinfo->fbmem);
- } else {
- if (zorro_resource_start(cinfo->zdev) > 0x01000000)
- iounmap ((void *)cinfo->fbmem);
- }
- framebuffer_release(cinfo->info);
-}
-#endif /* CONFIG_ZORRO */
-
-static int cirrusfb_set_fbinfo(struct cirrusfb_info *cinfo)
-{
- struct fb_info *info = cinfo->info;
- struct fb_var_screeninfo *var = &info->var;
-
- info->currcon = -1;
- info->par = cinfo;
- info->pseudo_palette = cinfo->pseudo_palette;
- info->flags = FBINFO_DEFAULT
- | FBINFO_HWACCEL_XPAN
- | FBINFO_HWACCEL_YPAN
- | FBINFO_HWACCEL_FILLRECT
- | FBINFO_HWACCEL_COPYAREA;
- if (noaccel)
- info->flags |= FBINFO_HWACCEL_DISABLED;
- info->fbops = &cirrusfb_ops;
- info->screen_base = cinfo->fbmem;
- if (cinfo->btype == BT_GD5480) {
- if (var->bits_per_pixel == 16)
- info->screen_base += 1 * MB_;
- if (var->bits_per_pixel == 24 || var->bits_per_pixel == 32)
- info->screen_base += 2 * MB_;
- }
-
- /* Fill fix common fields */
- strlcpy(info->fix.id, cirrusfb_board_info[cinfo->btype].name,
- sizeof(info->fix.id));
-
- /* monochrome: only 1 memory plane */
- /* 8 bit and above: Use whole memory area */
- info->fix.smem_start = cinfo->fbmem_phys;
- info->fix.smem_len = (var->bits_per_pixel == 1) ? cinfo->size / 4 : cinfo->size;
- info->fix.type = cinfo->currentmode.type;
- info->fix.type_aux = 0;
- info->fix.visual = cinfo->currentmode.visual;
- info->fix.xpanstep = 1;
- info->fix.ypanstep = 1;
- info->fix.ywrapstep = 0;
- info->fix.line_length = cinfo->currentmode.line_length;
-
- /* FIXME: map region at 0xB8000 if available, fill in here */
- info->fix.mmio_start = cinfo->fbregs_phys;
- info->fix.mmio_len = 0;
- info->fix.accel = FB_ACCEL_NONE;
-
- fb_alloc_cmap(&info->cmap, 256, 0);
-
- return 0;
-}
-static int cirrusfb_register(struct cirrusfb_info *cinfo)
-{
- struct fb_info *info;
- int err;
- cirrusfb_board_t btype;
-
- DPRINTK ("ENTER\n");
- printk (KERN_INFO "cirrusfb: Driver for Cirrus Logic based graphic boards, v" CIRRUSFB_VERSION "\n");
-
- info = cinfo->info;
- btype = cinfo->btype;
-
- /* sanity checks */
- assert (btype != BT_NONE);
-
- DPRINTK ("cirrusfb: (RAM start set to: 0x%p)\n", cinfo->fbmem);
-
- /* Make pretend we've set the var so our structures are in a "good" */
- /* state, even though we haven't written the mode to the hw yet... */
- info->var = cirrusfb_predefined[cirrusfb_def_mode].var;
- info->var.activate = FB_ACTIVATE_NOW;
-
- err = cirrusfb_decode_var(&info->var, &cinfo->currentmode, info);
- if (err < 0) {
- /* should never happen */
- DPRINTK("choking on default var... umm, no good.\n");
- goto err_unmap_cirrusfb;
- }
-
- /* set all the vital stuff */
- cirrusfb_set_fbinfo(cinfo);
-
- err = register_framebuffer(info);
- if (err < 0) {
- printk (KERN_ERR "cirrusfb: could not register fb device; err = %d!\n", err);
- goto err_dealloc_cmap;
- }
-
- DPRINTK ("EXIT, returning 0\n");
- return 0;
-
-err_dealloc_cmap:
- fb_dealloc_cmap(&info->cmap);
-err_unmap_cirrusfb:
- cinfo->unmap(cinfo);
- return err;
-}
-
-static void __devexit cirrusfb_cleanup (struct fb_info *info)
-{
- struct cirrusfb_info *cinfo = info->par;
- DPRINTK ("ENTER\n");
-
- switch_monitor (cinfo, 0);
-
- unregister_framebuffer (info);
- fb_dealloc_cmap (&info->cmap);
- printk ("Framebuffer unregistered\n");
- cinfo->unmap(cinfo);
-
- DPRINTK ("EXIT\n");
-}
-
-
-#ifdef CONFIG_PCI
-static int cirrusfb_pci_register (struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static struct cirrusfb_info *cirrusfb_pci_setup (struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
struct cirrusfb_info *cinfo;
struct fb_info *info;
cinfo->fbmem_phys = board_addr;
cinfo->size = board_size;
- cinfo->unmap = cirrusfb_pci_unmap;
printk (" RAM (%lu kB) at 0xx%lx, ", cinfo->size / KB_, board_addr);
printk ("Cirrus Logic chipset on PCI bus\n");
- pci_set_drvdata(pdev, info);
- return cirrusfb_register(cinfo);
+ return cinfo;
err_release_legacy:
if (release_io_ports)
err_disable:
pci_disable_device(pdev);
err_out:
- return ret;
+ return ERR_PTR(ret);
}
+#endif /* CONFIG_PCI */
-void __devexit cirrusfb_pci_unregister (struct pci_dev *pdev)
+
+
+
+#ifdef CONFIG_ZORRO
+static int cirrusfb_zorro_find (struct zorro_dev **z_o,
+ struct zorro_dev **z2_o,
+ cirrusfb_board_t *btype, unsigned long *size)
{
- struct fb_info *info = pci_get_drvdata(pdev);
- DPRINTK ("ENTER\n");
+ struct zorro_dev *z = NULL;
+ int i;
- cirrusfb_cleanup (info);
+ assert (z_o != NULL);
+ assert (btype != NULL);
- DPRINTK ("EXIT\n");
+ for (i = 0; i < ARRAY_SIZE(cirrusfb_zorro_probe_list); i++)
+ if ((z = zorro_find_device(cirrusfb_zorro_probe_list[i].id, NULL)))
+ break;
+
+ if (z) {
+ *z_o = z;
+ if (cirrusfb_zorro_probe_list[i].id2)
+ *z2_o = zorro_find_device(cirrusfb_zorro_probe_list[i].id2, NULL);
+ else
+ *z2_o = NULL;
+
+ *btype = cirrusfb_zorro_probe_list[i].btype;
+ *size = cirrusfb_zorro_probe_list[i].size;
+
+ printk (KERN_INFO "cirrusfb: %s board detected; ",
+ cirrusfb_board_info[*btype].name);
+
+ return 0;
+ }
+
+ printk (KERN_NOTICE "cirrusfb: no supported board found.\n");
+ return -ENODEV;
}
-static struct pci_driver cirrusfb_pci_driver = {
- .name = "cirrusfb",
- .id_table = cirrusfb_pci_table,
- .probe = cirrusfb_pci_register,
- .remove = __devexit_p(cirrusfb_pci_unregister),
-#ifdef CONFIG_PM
-#if 0
- .suspend = cirrusfb_pci_suspend,
- .resume = cirrusfb_pci_resume,
-#endif
-#endif
-};
-#endif /* CONFIG_PCI */
+static void __devexit cirrusfb_zorro_unmap (struct cirrusfb_info *cinfo)
+{
+ release_mem_region(cinfo->board_addr, cinfo->board_size);
-#ifdef CONFIG_ZORRO
-static int cirrusfb_zorro_register(struct zorro_dev *z,
- const struct zorro_device_id *ent)
+ if (cinfo->btype == BT_PICASSO4) {
+ cinfo->regbase -= 0x600000;
+ iounmap ((void *)cinfo->regbase);
+ iounmap ((void *)cinfo->fbmem);
+ } else {
+ if (cinfo->board_addr > 0x01000000)
+ iounmap ((void *)cinfo->fbmem);
+ }
+ framebuffer_release(cinfo->info);
+}
+
+
+static struct cirrusfb_info *cirrusfb_zorro_setup(void)
{
struct cirrusfb_info *cinfo;
struct fb_info *info;
cirrusfb_board_t btype;
- struct zorro_dev *z2 = NULL;
+ struct zorro_dev *z = NULL, *z2 = NULL;
unsigned long board_addr, board_size, size;
int ret;
- btype = ent->driver_data;
- if (cirrusfb_zorro_table2[btype].id2)
- z2 = zorro_find_device(cirrusfb_zorro_table2[btype].id2, NULL);
- size = cirrusfb_zorro_table2[btype].size;
- printk(KERN_INFO "cirrusfb: %s board detected; ",
- cirrusfb_board_info[btype].name);
+ ret = cirrusfb_zorro_find (&z, &z2, &btype, &size);
+ if (ret < 0)
+ goto err_out;
info = framebuffer_alloc(sizeof(struct cirrusfb_info), &z->dev);
if (!info) {
assert (z2 >= 0);
assert (btype != BT_NONE);
- cinfo->zdev = z;
- board_addr = zorro_resource_start(z);
- board_size = zorro_resource_len(z);
+ cinfo->board_addr = board_addr = z->resource.start;
+ cinfo->board_size = board_size = z->resource.end-z->resource.start+1;
cinfo->size = size;
- if (!zorro_request_device(z, "cirrusfb")) {
+ if (!request_mem_region(board_addr, board_size, "cirrusfb")) {
printk(KERN_ERR "cirrusfb: cannot reserve region 0x%lx, abort\n",
board_addr);
ret = -EBUSY;
cinfo->fbregs_phys = board_addr + 0x600000;
cinfo->fbmem_phys = board_addr + 16777216;
- cinfo->fbmem = ioremap (cinfo->fbmem_phys, 16777216);
+ cinfo->fbmem = ioremap (info->fbmem_phys, 16777216);
if (!cinfo->fbmem)
goto err_unmap_regbase;
} else {
DPRINTK ("cirrusfb: Virtual address for board set to: $%p\n", cinfo->regbase);
}
- cinfo->unmap = cirrusfb_zorro_unmap;
printk (KERN_INFO "Cirrus Logic chipset on Zorro bus\n");
- zorro_set_drvdata(z, info);
- return cirrusfb_register(cinfo);
+ return 0;
err_unmap_regbase:
/* Parental advisory: explicit hack */
err_release_fb:
framebuffer_release(info);
err_out:
- return ret;
+ return ERR_PTR(ret);
}
+#endif /* CONFIG_ZORRO */
-void __devexit cirrusfb_zorro_unregister(struct zorro_dev *z)
+static int cirrusfb_set_fbinfo(struct cirrusfb_info *cinfo)
{
- struct fb_info *info = zorro_get_drvdata(z);
+ struct fb_info *info = cinfo->info;
+ struct fb_var_screeninfo *var = &info->var;
+
+ info->currcon = -1;
+ info->par = cinfo;
+ info->pseudo_palette = cinfo->pseudo_palette;
+ info->flags = FBINFO_DEFAULT
+ | FBINFO_HWACCEL_XPAN
+ | FBINFO_HWACCEL_YPAN
+ | FBINFO_HWACCEL_FILLRECT
+ | FBINFO_HWACCEL_COPYAREA;
+ if (noaccel)
+ info->flags |= FBINFO_HWACCEL_DISABLED;
+ info->fbops = &cirrusfb_ops;
+ info->screen_base = cinfo->fbmem;
+ if (cinfo->btype == BT_GD5480) {
+ if (var->bits_per_pixel == 16)
+ info->screen_base += 1 * MB_;
+ if (var->bits_per_pixel == 24 || var->bits_per_pixel == 32)
+ info->screen_base += 2 * MB_;
+ }
+
+ /* Fill fix common fields */
+ strlcpy(info->fix.id, cirrusfb_board_info[cinfo->btype].name,
+ sizeof(info->fix.id));
+
+ /* monochrome: only 1 memory plane */
+ /* 8 bit and above: Use whole memory area */
+ info->fix.smem_start = cinfo->fbmem_phys;
+ info->fix.smem_len = (var->bits_per_pixel == 1) ? cinfo->size / 4 : cinfo->size;
+ info->fix.type = cinfo->currentmode.type;
+ info->fix.type_aux = 0;
+ info->fix.visual = cinfo->currentmode.visual;
+ info->fix.xpanstep = 1;
+ info->fix.ypanstep = 1;
+ info->fix.ywrapstep = 0;
+ info->fix.line_length = cinfo->currentmode.line_length;
+
+ /* FIXME: map region at 0xB8000 if available, fill in here */
+ info->fix.mmio_start = cinfo->fbregs_phys;
+ info->fix.mmio_len = 0;
+ info->fix.accel = FB_ACCEL_NONE;
+
+ fb_alloc_cmap(&info->cmap, 256, 0);
+
+ return 0;
+}
+
+#if defined(CONFIG_PCI)
+#define cirrusfb_unmap cirrusfb_pci_unmap
+#define cirrusfb_bus_setup cirrusfb_pci_setup
+#elif defined(CONFIG_ZORRO)
+#define cirrusfb_unmap cirrusfb_zorro_unmap
+#define cirrusfb_bus_setup cirrusfb_zorro_setup
+#endif
+
+
+static int cirrusfb_pci_register (struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct fb_info *info;
+ struct cirrusfb_info *cinfo = NULL;
+ int err;
+ cirrusfb_board_t btype;
+
+ DPRINTK ("ENTER\n");
+
+ printk (KERN_INFO "cirrusfb: Driver for Cirrus Logic based graphic boards, v" CIRRUSFB_VERSION "\n");
+
+ cinfo = cirrusfb_bus_setup(pdev, ent);
+
+ if (IS_ERR(cinfo)) {
+ err = PTR_ERR(cinfo);
+ goto err_out;
+ }
+
+ info = cinfo->info;
+ btype = cinfo->btype;
+
+ /* sanity checks */
+ assert (btype != BT_NONE);
+ assert (btype == cirrusfb_board_info[btype].btype);
+
+ DPRINTK ("cirrusfb: (RAM start set to: 0x%p)\n", cinfo->fbmem);
+
+ /* Make pretend we've set the var so our structures are in a "good" */
+ /* state, even though we haven't written the mode to the hw yet... */
+ info->var = cirrusfb_predefined[cirrusfb_def_mode].var;
+ info->var.activate = FB_ACTIVATE_NOW;
+
+ err = cirrusfb_decode_var(&info->var, &cinfo->currentmode, info);
+ if (err < 0) {
+ /* should never happen */
+ DPRINTK("choking on default var... umm, no good.\n");
+ goto err_unmap_cirrusfb;
+ }
+
+ /* set all the vital stuff */
+ cirrusfb_set_fbinfo(cinfo);
+
+ pci_set_drvdata(pdev, info);
+
+ err = register_framebuffer(info);
+ if (err < 0) {
+ printk (KERN_ERR "cirrusfb: could not register fb device; err = %d!\n", err);
+ goto err_dealloc_cmap;
+ }
+
+ DPRINTK ("EXIT, returning 0\n");
+ return 0;
+
+err_dealloc_cmap:
+ fb_dealloc_cmap(&info->cmap);
+err_unmap_cirrusfb:
+ cirrusfb_unmap(cinfo);
+err_out:
+ return err;
+}
+
+
+static void __devexit cirrusfb_cleanup (struct fb_info *info)
+{
+ struct cirrusfb_info *cinfo = info->par;
+ DPRINTK ("ENTER\n");
+
+#ifdef CONFIG_ZORRO
+ switch_monitor (cinfo, 0);
+#endif
+
+ unregister_framebuffer (info);
+ fb_dealloc_cmap (&info->cmap);
+ printk ("Framebuffer unregistered\n");
+ cirrusfb_unmap (cinfo);
+
+ DPRINTK ("EXIT\n");
+}
+
+
+void __devexit cirrusfb_pci_unregister (struct pci_dev *pdev)
+{
+ struct fb_info *info = pci_get_drvdata(pdev);
DPRINTK ("ENTER\n");
cirrusfb_cleanup (info);
DPRINTK ("EXIT\n");
}
-static struct zorro_driver cirrusfb_zorro_driver = {
- .name = "cirrusfb",
- .id_table = cirrusfb_zorro_table,
- .probe = cirrusfb_zorro_register,
- .remove = __devexit_p(cirrusfb_zorro_unregister),
+static struct pci_driver cirrusfb_driver = {
+ .name = "cirrusfb",
+ .id_table = cirrusfb_pci_table,
+ .probe = cirrusfb_pci_register,
+ .remove = __devexit_p(cirrusfb_pci_unregister),
+#ifdef CONFIG_PM
+#if 0
+ .suspend = cirrusfb_pci_suspend,
+ .resume = cirrusfb_pci_resume,
+#endif
+#endif
};
-#endif /* CONFIG_ZORRO */
int __init cirrusfb_init(void)
{
- int error = 0;
-
#ifdef CONFIG_ZORRO
- error |= zorro_module_init(&cirrusfb_zorro_driver);
-#endif
-#ifdef CONFIG_PCI
- error |= pci_module_init(&cirrusfb_pci_driver);
+ return cirrusfb_pci_register(NULL, NULL);
+#else
+ return pci_module_init(&cirrusfb_driver);
#endif
- return error;
}
void __exit cirrusfb_exit (void)
{
-#ifdef CONFIG_PCI
- pci_unregister_driver(&cirrusfb_pci_driver);
-#endif
-#ifdef CONFIG_ZORRO
- zorro_unregister_driver(&cirrusfb_zorro_driver);
-#endif
+ pci_unregister_driver (&cirrusfb_driver);
}
#ifdef MODULE
static void cirrusfb_BitBLT (caddr_t regbase, int bits_per_pixel,
u_short curx, u_short cury, u_short destx, u_short desty,
- u_short width, u_short height, u_short line_length)
+ u_short width, u_short height, u_short line_length)
{
u_short nwidth, nheight;
u_long nsrc, ndest;
.con_bmove = DUMMY,
.con_switch = DUMMY,
.con_blank = DUMMY,
- .con_font_set = DUMMY,
- .con_font_get = DUMMY,
- .con_font_default = DUMMY,
- .con_font_copy = DUMMY,
+ .con_font_op = DUMMY,
.con_set_palette = DUMMY,
.con_scrolldelta = DUMMY,
};
int height, int width);
static int fbcon_switch(struct vc_data *vc);
static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch);
+static int fbcon_font_op(struct vc_data *vc, struct console_font_op *op);
static int fbcon_set_palette(struct vc_data *vc, unsigned char *table);
static int fbcon_scrolldelta(struct vc_data *vc, int lines);
void accel_clear_margins(struct vc_data *vc, struct fb_info *info,
p->userfont = 0;
}
-static int fbcon_get_font(struct vc_data *vc, struct console_font *font)
+static inline int fbcon_get_font(struct vc_data *vc, struct console_font_op *op)
{
u8 *fontdata = vc->vc_font.data;
- u8 *data = font->data;
+ u8 *data = op->data;
int i, j;
- font->width = vc->vc_font.width;
- font->height = vc->vc_font.height;
- font->charcount = vc->vc_hi_font_mask ? 512 : 256;
- if (!font->data)
+ op->width = vc->vc_font.width;
+ op->height = vc->vc_font.height;
+ op->charcount = vc->vc_hi_font_mask ? 512 : 256;
+ if (!op->data)
return 0;
- if (font->width <= 8) {
+ if (op->width <= 8) {
j = vc->vc_font.height;
- for (i = 0; i < font->charcount; i++) {
+ for (i = 0; i < op->charcount; i++) {
memcpy(data, fontdata, j);
memset(data + j, 0, 32 - j);
data += 32;
fontdata += j;
}
- } else if (font->width <= 16) {
+ } else if (op->width <= 16) {
j = vc->vc_font.height * 2;
- for (i = 0; i < font->charcount; i++) {
+ for (i = 0; i < op->charcount; i++) {
memcpy(data, fontdata, j);
memset(data + j, 0, 64 - j);
data += 64;
fontdata += j;
}
- } else if (font->width <= 24) {
- for (i = 0; i < font->charcount; i++) {
+ } else if (op->width <= 24) {
+ for (i = 0; i < op->charcount; i++) {
for (j = 0; j < vc->vc_font.height; j++) {
*data++ = fontdata[0];
*data++ = fontdata[1];
}
} else {
j = vc->vc_font.height * 4;
- for (i = 0; i < font->charcount; i++) {
+ for (i = 0; i < op->charcount; i++) {
memcpy(data, fontdata, j);
memset(data + j, 0, 128 - j);
data += 128;
return 0;
}
-static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
+static int fbcon_do_set_font(struct vc_data *vc, struct console_font_op *op,
u8 * data, int userfont)
{
struct fb_info *info = registered_fb[(int) con2fb_map[vc->vc_num]];
struct display *p = &fb_display[vc->vc_num];
int resize;
+ int w = op->width;
+ int h = op->height;
int cnt;
char *old_data = NULL;
+ if (w > 32) {
+ if (userfont && op->op != KD_FONT_OP_COPY)
+ kfree(data - FONT_EXTRA_WORDS * sizeof(int));
+ return -ENXIO;
+ }
+
if (CON_IS_VISIBLE(vc) && softback_lines)
fbcon_set_origin(vc);
return 0;
}
-static int fbcon_copy_font(struct vc_data *vc, int con)
+static inline int fbcon_copy_font(struct vc_data *vc, struct console_font_op *op)
{
- struct display *od = &fb_display[con];
- struct console_font *f = &vc->vc_font;
-
- if (od->fontdata == f->data)
+ struct display *od;
+ int h = op->height;
+
+ if (h < 0 || !vc_cons_allocated(h))
+ return -ENOTTY;
+ if (h == vc->vc_num)
+ return 0; /* nothing to do */
+ od = &fb_display[h];
+ if (od->fontdata == vc->vc_font.data)
return 0; /* already the same font... */
- return fbcon_do_set_font(vc, f->width, f->height, od->fontdata, od->userfont);
+ op->width = vc->vc_font.width;
+ op->height = vc->vc_font.height;
+ return fbcon_do_set_font(vc, op, od->fontdata, od->userfont);
}
-/*
- * User asked to set font; we are guaranteed that
- * a) width and height are in range 1..32
- * b) charcount does not exceed 512
- */
-
-static int fbcon_set_font(struct vc_data *vc, struct console_font *font, unsigned flags)
+static inline int fbcon_set_font(struct vc_data *vc, struct console_font_op *op)
{
- unsigned charcount = font->charcount;
- int w = font->width;
- int h = font->height;
+ int w = op->width;
+ int h = op->height;
int size = h;
int i, k;
- u8 *new_data, *data = font->data, *p;
+ u8 *new_data, *data = op->data, *p;
- if (charcount != 256 && charcount != 512)
+ if ((w <= 0) || (w > 32)
+ || (op->charcount != 256 && op->charcount != 512))
return -EINVAL;
if (w > 8) {
else
size *= 4;
}
- size *= charcount;
-
- new_data = kmalloc(FONT_EXTRA_WORDS * sizeof(int) + size, GFP_USER);
+ size *= op->charcount;
- if (!new_data)
+ if (!
+ (new_data =
+ kmalloc(FONT_EXTRA_WORDS * sizeof(int) + size, GFP_USER)))
return -ENOMEM;
-
new_data += FONT_EXTRA_WORDS * sizeof(int);
FNTSIZE(new_data) = size;
- FNTCHARCNT(new_data) = charcount;
+ FNTCHARCNT(new_data) = op->charcount;
REFCOUNT(new_data) = 0; /* usage counter */
p = new_data;
if (w <= 8) {
- for (i = 0; i < charcount; i++) {
+ for (i = 0; i < op->charcount; i++) {
memcpy(p, data, h);
data += 32;
p += h;
}
} else if (w <= 16) {
h *= 2;
- for (i = 0; i < charcount; i++) {
+ for (i = 0; i < op->charcount; i++) {
memcpy(p, data, h);
data += 64;
p += h;
}
} else if (w <= 24) {
- for (i = 0; i < charcount; i++) {
+ for (i = 0; i < op->charcount; i++) {
int j;
for (j = 0; j < h; j++) {
memcpy(p, data, 3);
}
} else {
h *= 4;
- for (i = 0; i < charcount; i++) {
+ for (i = 0; i < op->charcount; i++) {
memcpy(p, data, h);
data += 128;
p += h;
break;
}
}
- return fbcon_do_set_font(vc, font->width, font->height, new_data, 1);
+ return fbcon_do_set_font(vc, op, new_data, 1);
}
-static int fbcon_set_def_font(struct vc_data *vc, struct console_font *font, char *name)
+static inline int fbcon_set_def_font(struct vc_data *vc, struct console_font_op *op)
{
struct fb_info *info = registered_fb[(int) con2fb_map[vc->vc_num]];
+ char name[MAX_FONT_NAME];
struct font_desc *f;
- if (!name)
+ if (!op->data)
f = get_default_font(info->var.xres, info->var.yres);
- else if (!(f = find_font(name)))
- return -ENOENT;
+ else if (strncpy_from_user(name, op->data, MAX_FONT_NAME - 1) < 0)
+ return -EFAULT;
+ else {
+ name[MAX_FONT_NAME - 1] = 0;
+ if (!(f = find_font(name)))
+ return -ENOENT;
+ }
+ op->width = f->width;
+ op->height = f->height;
+ return fbcon_do_set_font(vc, op, f->data, 0);
+}
- font->width = f->width;
- font->height = f->height;
- return fbcon_do_set_font(vc, f->width, f->height, f->data, 0);
+static int fbcon_font_op(struct vc_data *vc, struct console_font_op *op)
+{
+ switch (op->op) {
+ case KD_FONT_OP_SET:
+ return fbcon_set_font(vc, op);
+ case KD_FONT_OP_GET:
+ return fbcon_get_font(vc, op);
+ case KD_FONT_OP_SET_DEFAULT:
+ return fbcon_set_def_font(vc, op);
+ case KD_FONT_OP_COPY:
+ return fbcon_copy_font(vc, op);
+ default:
+ return -ENOSYS;
+ }
}
static u16 palette_red[16];
else
palette_cmap.len = 16;
palette_cmap.start = 0;
- return fb_set_cmap(&palette_cmap, info);
+ return fb_set_cmap(&palette_cmap, 1, info);
}
static u16 *fbcon_screen_pos(struct vc_data *vc, int offset)
.con_bmove = fbcon_bmove,
.con_switch = fbcon_switch,
.con_blank = fbcon_blank,
- .con_font_set = fbcon_set_font,
- .con_font_get = fbcon_get_font,
- .con_font_default = fbcon_set_def_font,
- .con_font_copy = fbcon_copy_font,
+ .con_font_op = fbcon_font_op,
.con_set_palette = fbcon_set_palette,
.con_scrolldelta = fbcon_scrolldelta,
.con_set_origin = fbcon_set_origin,
}
}
+static int mdacon_font_op(struct vc_data *c, struct console_font_op *op)
+{
+ return -ENOSYS;
+}
+
static int mdacon_scrolldelta(struct vc_data *c, int lines)
{
return 0;
.con_bmove = mdacon_bmove,
.con_switch = mdacon_switch,
.con_blank = mdacon_blank,
+ .con_font_op = mdacon_font_op,
.con_set_palette = mdacon_set_palette,
.con_scrolldelta = mdacon_scrolldelta,
.con_build_attr = mdacon_build_attr,
static int newport_xsize;
static int newport_ysize;
-static int newport_set_def_font(int unit, struct console_font *op);
+static int newport_set_def_font(int unit, struct console_font_op *op);
#define BMASK(c) (c << 24)
return 1;
}
-static int newport_set_font(int unit, struct console_font *op)
+static int newport_set_font(int unit, struct console_font_op *op)
{
int w = op->width;
int h = op->height;
return 0;
}
-static int newport_set_def_font(int unit, struct console_font *op)
+static int newport_set_def_font(int unit, struct console_font_op *op)
{
if (font_data[unit] != FONT_DATA) {
if (--REFCOUNT(font_data[unit]) == 0)
return 0;
}
-static int newport_font_default(struct vc_data *vc, struct console_font *op, char *name)
+static int newport_font_op(struct vc_data *vc, struct console_font_op *op)
{
- return newport_set_def_font(vc->vc_num, op);
-}
-
-static int newport_font_set(struct vc_data *vc, struct console_font *font, unsigned flags)
-{
- return newport_set_font(vc->vc_num, font);
+ int unit = vc->vc_num;
+
+ switch (op->op) {
+ case KD_FONT_OP_SET:
+ return newport_set_font(unit, op);
+ case KD_FONT_OP_SET_DEFAULT:
+ return newport_set_def_font(unit, op);
+ default:
+ return -ENOSYS;
+ }
}
static int newport_set_palette(struct vc_data *vc, unsigned char *table)
.con_bmove = newport_bmove,
.con_switch = newport_switch,
.con_blank = newport_blank,
- .con_font_set = newport_font_set,
- .con_font_default = newport_font_default,
+ .con_font_op = newport_font_op,
.con_set_palette = newport_set_palette,
.con_scrolldelta = newport_scrolldelta,
.con_set_origin = DUMMY,
}
}
+static int
+promcon_font_op(struct vc_data *conp, struct console_font_op *op)
+{
+ return -ENOSYS;
+}
+
static int
promcon_blank(struct vc_data *conp, int blank, int mode_switch)
{
.con_bmove = promcon_bmove,
.con_switch = promcon_switch,
.con_blank = promcon_blank,
+ .con_font_op = promcon_font_op,
.con_set_palette = DUMMY,
.con_scrolldelta = DUMMY,
#if !(PROMCON_COLOR)
return -EINVAL;
}
+static int sticon_font_op(struct vc_data *c, struct console_font_op *op)
+{
+ return -ENOSYS;
+}
+
static void sticon_putc(struct vc_data *conp, int c, int ypos, int xpos)
{
int unit = conp->vc_num;
.con_bmove = sticon_bmove,
.con_switch = sticon_switch,
.con_blank = sticon_blank,
+ .con_font_op = sticon_font_op,
.con_set_palette = sticon_set_palette,
.con_scrolldelta = sticon_scrolldelta,
.con_set_origin = sticon_set_origin,
static void vgacon_cursor(struct vc_data *c, int mode);
static int vgacon_switch(struct vc_data *c);
static int vgacon_blank(struct vc_data *c, int blank, int mode_switch);
+static int vgacon_font_op(struct vc_data *c, struct console_font_op *op);
static int vgacon_set_palette(struct vc_data *vc, unsigned char *table);
static int vgacon_scrolldelta(struct vc_data *c, int lines);
static int vgacon_set_origin(struct vc_data *c);
return 0;
}
-static int vgacon_font_set(struct vc_data *c, struct console_font *font, unsigned flags)
+static int vgacon_font_op(struct vc_data *c, struct console_font_op *op)
{
- unsigned charcount = font->charcount;
int rc;
if (vga_video_type < VIDEO_TYPE_EGAM)
return -EINVAL;
- if (font->width != 8 || (charcount != 256 && charcount != 512))
- return -EINVAL;
-
- rc = vgacon_do_font_op(&state, font->data, 1, charcount == 512);
- if (rc)
- return rc;
-
- if (!(flags & KD_FONT_FLAG_DONT_RECALC))
- rc = vgacon_adjust_height(c, font->height);
+ if (op->op == KD_FONT_OP_SET) {
+ if (op->width != 8
+ || (op->charcount != 256 && op->charcount != 512))
+ return -EINVAL;
+ rc = vgacon_do_font_op(&state, op->data, 1, op->charcount == 512);
+ if (!rc && !(op->flags & KD_FONT_FLAG_DONT_RECALC))
+ rc = vgacon_adjust_height(c, op->height);
+ } else if (op->op == KD_FONT_OP_GET) {
+ op->width = 8;
+ op->height = c->vc_font.height;
+ op->charcount = vga_512_chars ? 512 : 256;
+ if (!op->data)
+ return 0;
+ rc = vgacon_do_font_op(&state, op->data, 0, 0);
+ } else
+ rc = -ENOSYS;
return rc;
}
-static int vgacon_font_get(struct vc_data *c, struct console_font *font)
-{
- if (vga_video_type < VIDEO_TYPE_EGAM)
- return -EINVAL;
-
- font->width = 8;
- font->height = c->vc_font.height;
- font->charcount = vga_512_chars ? 512 : 256;
- if (!font->data)
- return 0;
- return vgacon_do_font_op(&state, font->data, 0, 0);
-}
-
#else
-#define vgacon_font_set NULL
-#define vgacon_font_get NULL
+static int vgacon_font_op(struct vc_data *c, struct console_font_op *op)
+{
+ return -ENOSYS;
+}
#endif
.con_bmove = DUMMY,
.con_switch = vgacon_switch,
.con_blank = vgacon_blank,
- .con_font_set = vgacon_font_set,
- .con_font_get = vgacon_font_get,
+ .con_font_op = vgacon_font_op,
.con_set_palette = vgacon_set_palette,
.con_scrolldelta = vgacon_scrolldelta,
.con_set_origin = vgacon_set_origin,
};
struct fb_var_screeninfo dnfb_var __devinitdata = {
- .xres = 1280,
- .yres = 1024,
- .xres_virtual = 2048,
- .yres_virtual = 1024,
- .bits_per_pixel = 1,
- .height = -1,
- .width = -1,
- .vmode = FB_VMODE_NONINTERLACED,
+ .xres = 1280,
+ .yres = 1024,
+ .xres_virtual = 2048,
+ .yres_virtual = 1024,
+ .bits_per_pixel = 1,
+ .height = -1,
+ .width = -1,
+ .vmode = FB_VMODE_NONINTERLACED,
};
static struct fb_fix_screeninfo dnfb_fix __devinitdata = {
- .id = "Apollo Mono",
- .smem_start = (FRAME_BUFFER_START + IO_BASE),
- .smem_len = FRAME_BUFFER_LEN,
- .type = FB_TYPE_PACKED_PIXELS,
- .visual = FB_VISUAL_MONO10,
- .line_length = 256,
+ .id = "Apollo Mono",
+ .smem_start = (FRAME_BUFFER_START + IO_BASE),
+ .smem_len = FRAME_BUFFER_LEN,
+ .type = FB_TYPE_PACKED_PIXELS,
+ .visual = FB_VISUAL_MONO10,
+ .line_length = 256,
};
static int dnfb_blank(int blank, struct fb_info *info)
}
cmap->start = 0;
cmap->len = len;
- fb_copy_cmap(fb_default_cmap(len), cmap);
+ fb_copy_cmap(fb_default_cmap(len), cmap, 0);
return 0;
fail:
* fb_copy_cmap - copy a colormap
* @from: frame buffer colormap structure
* @to: frame buffer colormap structure
+ * @fsfromto: determine copy method
*
* Copy contents of colormap from @from to @to.
+ *
+ * @fsfromto accepts the following integer parameters:
+ * 0: memcpy function
+ * 1: copy_from_user() function to copy from userspace
+ * 2: copy_to_user() function to copy to userspace
+ *
*/
-int fb_copy_cmap(struct fb_cmap *from, struct fb_cmap *to)
+int fb_copy_cmap(struct fb_cmap *from, struct fb_cmap *to, int fsfromto)
{
- int tooff = 0, fromoff = 0;
- int size;
-
- if (to->start > from->start)
- fromoff = to->start - from->start;
- else
- tooff = from->start - to->start;
- size = to->len - tooff;
- if (size > (int) (from->len - fromoff))
- size = from->len - fromoff;
- if (size <= 0)
- return -EINVAL;
- size *= sizeof(u16);
-
+ int tooff = 0, fromoff = 0;
+ int size;
+
+ if (to->start > from->start)
+ fromoff = to->start-from->start;
+ else
+ tooff = from->start-to->start;
+ size = to->len-tooff;
+ if (size > (int) (from->len - fromoff))
+ size = from->len-fromoff;
+ if (size <= 0)
+ return -EINVAL;
+ size *= sizeof(u16);
+
+ switch (fsfromto) {
+ case 0:
memcpy(to->red+tooff, from->red+fromoff, size);
memcpy(to->green+tooff, from->green+fromoff, size);
memcpy(to->blue+tooff, from->blue+fromoff, size);
if (from->transp && to->transp)
- memcpy(to->transp+tooff, from->transp+fromoff, size);
- return 0;
-}
-
-int fb_cmap_to_user(struct fb_cmap *from, struct fb_cmap_user *to)
-{
- int tooff = 0, fromoff = 0;
- int size;
-
- if (to->start > from->start)
- fromoff = to->start - from->start;
- else
- tooff = from->start - to->start;
- size = to->len - tooff;
- if (size > (int) (from->len - fromoff))
- size = from->len - fromoff;
- if (size <= 0)
- return -EINVAL;
- size *= sizeof(u16);
-
+ memcpy(to->transp+tooff, from->transp+fromoff, size);
+ break;
+ case 1:
+ if (copy_from_user(to->red+tooff, from->red+fromoff, size))
+ return -EFAULT;
+ if (copy_from_user(to->green+tooff, from->green+fromoff, size))
+ return -EFAULT;
+ if (copy_from_user(to->blue+tooff, from->blue+fromoff, size))
+ return -EFAULT;
+ if (from->transp && to->transp)
+ if (copy_from_user(to->transp+tooff, from->transp+fromoff, size))
+ return -EFAULT;
+ break;
+ case 2:
if (copy_to_user(to->red+tooff, from->red+fromoff, size))
return -EFAULT;
if (copy_to_user(to->green+tooff, from->green+fromoff, size))
if (from->transp && to->transp)
if (copy_to_user(to->transp+tooff, from->transp+fromoff, size))
return -EFAULT;
- return 0;
+ break;
+ }
+ return 0;
}
/**
* fb_set_cmap - set the colormap
* @cmap: frame buffer colormap structure
+ * @kspc: boolean, 1 copy local, 0 get_user() function
* @info: frame buffer info structure
*
* Sets the colormap @cmap for a screen of device @info.
*
*/
-int fb_set_cmap(struct fb_cmap *cmap, struct fb_info *info)
+int fb_set_cmap(struct fb_cmap *cmap, int kspc, struct fb_info *info)
{
- int i, start;
- u16 *red, *green, *blue, *transp;
- u_int hred, hgreen, hblue, htransp = 0xffff;
-
- red = cmap->red;
- green = cmap->green;
- blue = cmap->blue;
- transp = cmap->transp;
- start = cmap->start;
-
- if (start < 0 || !info->fbops->fb_setcolreg)
- return -EINVAL;
- for (i = 0; i < cmap->len; i++) {
- hred = *red++;
- hgreen = *green++;
- hblue = *blue++;
- if (transp)
- htransp = *transp++;
- if (info->fbops->fb_setcolreg(start++,
- hred, hgreen, hblue, htransp,
- info))
- break;
+ int i, start;
+ u16 *red, *green, *blue, *transp;
+ u_int hred, hgreen, hblue, htransp;
+
+ red = cmap->red;
+ green = cmap->green;
+ blue = cmap->blue;
+ transp = cmap->transp;
+ start = cmap->start;
+
+ if (start < 0 || !info->fbops->fb_setcolreg)
+ return -EINVAL;
+ for (i = 0; i < cmap->len; i++) {
+ if (kspc) {
+ hred = *red;
+ hgreen = *green;
+ hblue = *blue;
+ htransp = transp ? *transp : 0xffff;
+ } else {
+ get_user(hred, red);
+ get_user(hgreen, green);
+ get_user(hblue, blue);
+ if (transp)
+ get_user(htransp, transp);
+ else
+ htransp = 0xffff;
}
- return 0;
+ red++;
+ green++;
+ blue++;
+ if (transp)
+ transp++;
+ if (info->fbops->fb_setcolreg(start++, hred, hgreen, hblue, htransp, info))
+ return 0;
+ }
+ return 0;
}
-int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
-{
- int i, start;
- u16 __user *red, *green, *blue, *transp;
- u_int hred, hgreen, hblue, htransp = 0xffff;
-
- red = cmap->red;
- green = cmap->green;
- blue = cmap->blue;
- transp = cmap->transp;
- start = cmap->start;
-
- if (start < 0 || !info->fbops->fb_setcolreg)
- return -EINVAL;
- for (i = 0; i < cmap->len; i++, red++, blue++, green++) {
- if (get_user(hred, red) ||
- get_user(hgreen, green) ||
- get_user(hblue, blue) ||
- (transp && get_user(htransp, transp)))
- return -EFAULT;
- if (info->fbops->fb_setcolreg(start++,
- hred, hgreen, hblue, htransp,
- info))
- return 0;
- if (transp)
- transp++;
- }
- return 0;
-}
/**
* fb_default_cmap - get default colormap
return n < 0 ? d >> -n : d << n;
}
-static void fb_set_logocmap(struct fb_info *info,
+static void __init fb_set_logocmap(struct fb_info *info,
const struct linux_logo *logo)
{
struct fb_cmap palette_cmap;
palette_cmap.blue[j] = clut[2] << 8 | clut[2];
clut += 3;
}
- fb_set_cmap(&palette_cmap, info);
+ fb_set_cmap(&palette_cmap, 1, info);
}
}
-static void fb_set_logo_truepalette(struct fb_info *info,
+static void __init fb_set_logo_truepalette(struct fb_info *info,
const struct linux_logo *logo,
u32 *palette)
{
}
}
-static void fb_set_logo_directpalette(struct fb_info *info,
+static void __init fb_set_logo_directpalette(struct fb_info *info,
const struct linux_logo *logo,
u32 *palette)
{
palette[i] = i << redshift | i << greenshift | i << blueshift;
}
-static void fb_set_logo(struct fb_info *info,
+static void __init fb_set_logo(struct fb_info *info,
const struct linux_logo *logo, u8 *dst,
int depth)
{
}
int
-fb_cursor(struct fb_info *info, struct fb_cursor_user __user *sprite)
+fb_cursor(struct fb_info *info, struct fb_cursor *sprite)
{
- struct fb_cursor_user cursor_user;
struct fb_cursor cursor;
- char *data = NULL, *mask = NULL;
- u16 *red = NULL, *green = NULL, *blue = NULL, *transp = NULL;
- int err = -EINVAL;
+ int err;
- if (copy_from_user(&cursor_user, sprite, sizeof(struct fb_cursor_user)))
+ if (copy_from_user(&cursor, sprite, sizeof(struct fb_cursor)))
return -EFAULT;
- memcpy(&cursor, &cursor_user, sizeof(cursor));
- cursor.mask = NULL;
- cursor.image.data = NULL;
- cursor.image.cmap.red = NULL;
- cursor.image.cmap.green = NULL;
- cursor.image.cmap.blue = NULL;
- cursor.image.cmap.transp = NULL;
-
if (cursor.set & FB_CUR_SETCUR)
info->cursor.enable = 1;
if (cursor.set & FB_CUR_SETCMAP) {
- unsigned len = cursor.image.cmap.len;
- if ((int)len <= 0)
- goto out;
- len *= 2;
- err = -ENOMEM;
- red = kmalloc(len, GFP_USER);
- green = kmalloc(len, GFP_USER);
- blue = kmalloc(len, GFP_USER);
- if (!red || !green || !blue)
- goto out;
- if (cursor_user.image.cmap.transp) {
- transp = kmalloc(len, GFP_USER);
- if (!transp)
- goto out;
- }
- err = -EFAULT;
- if (copy_from_user(red, cursor_user.image.cmap.red, len))
- goto out;
- if (copy_from_user(green, cursor_user.image.cmap.green, len))
- goto out;
- if (copy_from_user(blue, cursor_user.image.cmap.blue, len))
- goto out;
- if (transp) {
- if (copy_from_user(transp,
- cursor_user.image.cmap.transp, len))
- goto out;
- }
- cursor.image.cmap.red = red;
- cursor.image.cmap.green = green;
- cursor.image.cmap.blue = blue;
- cursor.image.cmap.transp = transp;
+ err = fb_copy_cmap(&cursor.image.cmap, &sprite->image.cmap, 1);
+ if (err)
+ return err;
}
if (cursor.set & FB_CUR_SETSHAPE) {
int size = ((cursor.image.width + 7) >> 3) * cursor.image.height;
+ char *data, *mask;
if ((cursor.image.height != info->cursor.image.height) ||
(cursor.image.width != info->cursor.image.width))
cursor.set |= FB_CUR_SETSIZE;
- err = -ENOMEM;
- data = kmalloc(size, GFP_USER);
- mask = kmalloc(size, GFP_USER);
- if (!mask || !data)
- goto out;
+ data = kmalloc(size, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
- err = -EFAULT;
- if (copy_from_user(data, cursor_user.image.data, size) ||
- copy_from_user(mask, cursor_user.mask, size))
- goto out;
+ mask = kmalloc(size, GFP_KERNEL);
+ if (!mask) {
+ kfree(data);
+ return -ENOMEM;
+ }
+ if (copy_from_user(data, sprite->image.data, size) ||
+ copy_from_user(mask, sprite->mask, size)) {
+ kfree(data);
+ kfree(mask);
+ return -EFAULT;
+ }
cursor.image.data = data;
cursor.mask = mask;
}
info->cursor.set = cursor.set;
info->cursor.rop = cursor.rop;
err = info->fbops->fb_cursor(info, &cursor);
-out:
- kfree(data);
- kfree(mask);
- kfree(red);
- kfree(green);
- kfree(blue);
- kfree(transp);
return err;
}
fb_pan_display(info, &info->var);
- fb_set_cmap(&info->cmap, info);
+ fb_set_cmap(&info->cmap, 1, info);
if (info->flags & FBINFO_MISC_MODECHANGEUSER) {
info->flags &= ~FBINFO_MISC_MODECHANGEUSER;
cmap.len = info->cmap.len;
} else
cmap = info->cmap;
- return fb_set_cmap(&cmap, info);
+ return fb_set_cmap(&cmap, 1, info);
}
static int
#ifdef CONFIG_FRAMEBUFFER_CONSOLE
struct fb_con2fbmap con2fb;
#endif
- struct fb_cmap_user cmap;
- void __user *argp = (void __user *)arg;
+ struct fb_cmap cmap;
int i;
if (!fb)
return -ENODEV;
switch (cmd) {
case FBIOGET_VSCREENINFO:
- return copy_to_user(argp, &info->var,
+ return copy_to_user((void *) arg, &info->var,
sizeof(var)) ? -EFAULT : 0;
case FBIOPUT_VSCREENINFO:
- if (copy_from_user(&var, argp, sizeof(var)))
+ if (copy_from_user(&var, (void *) arg, sizeof(var)))
return -EFAULT;
acquire_console_sem();
info->flags |= FBINFO_MISC_MODECHANGEUSER;
info->flags &= ~FBINFO_MISC_MODECHANGEUSER;
release_console_sem();
if (i) return i;
- if (copy_to_user(argp, &var, sizeof(var)))
+ if (copy_to_user((void *) arg, &var, sizeof(var)))
return -EFAULT;
return 0;
case FBIOGET_FSCREENINFO:
- return copy_to_user(argp, &info->fix,
+ return copy_to_user((void *) arg, &info->fix,
sizeof(fix)) ? -EFAULT : 0;
case FBIOPUTCMAP:
- if (copy_from_user(&cmap, argp, sizeof(cmap)))
+ if (copy_from_user(&cmap, (void *) arg, sizeof(cmap)))
return -EFAULT;
- return (fb_set_user_cmap(&cmap, info));
+ return (fb_set_cmap(&cmap, 0, info));
case FBIOGETCMAP:
- if (copy_from_user(&cmap, argp, sizeof(cmap)))
+ if (copy_from_user(&cmap, (void *) arg, sizeof(cmap)))
return -EFAULT;
- return fb_cmap_to_user(&info->cmap, &cmap);
+ return (fb_copy_cmap(&info->cmap, &cmap, 2));
case FBIOPAN_DISPLAY:
- if (copy_from_user(&var, argp, sizeof(var)))
+ if (copy_from_user(&var, (void *) arg, sizeof(var)))
return -EFAULT;
acquire_console_sem();
i = fb_pan_display(info, &var);
release_console_sem();
if (i)
return i;
- if (copy_to_user(argp, &var, sizeof(var)))
+ if (copy_to_user((void *) arg, &var, sizeof(var)))
return -EFAULT;
return 0;
case FBIO_CURSOR:
acquire_console_sem();
- i = fb_cursor(info, argp);
+ i = fb_cursor(info, (struct fb_cursor *) arg);
release_console_sem();
return i;
#ifdef CONFIG_FRAMEBUFFER_CONSOLE
case FBIOGET_CON2FBMAP:
- if (copy_from_user(&con2fb, argp, sizeof(con2fb)))
+ if (copy_from_user(&con2fb, (void *)arg, sizeof(con2fb)))
return -EFAULT;
if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
return -EINVAL;
con2fb.framebuffer = con2fb_map[con2fb.console-1];
- return copy_to_user(argp, &con2fb,
+ return copy_to_user((void *)arg, &con2fb,
sizeof(con2fb)) ? -EFAULT : 0;
case FBIOPUT_CON2FBMAP:
- if (copy_from_user(&con2fb, argp, sizeof(con2fb)))
+ if (copy_from_user(&con2fb, (void *)arg, sizeof(con2fb)))
return - EFAULT;
if (con2fb.console < 0 || con2fb.console > MAX_NR_CONSOLES)
return -EINVAL;
#ifdef MEMCPYTOIO_WORKS
memcpy_toio(va.vaddr + offs, src, len);
#elif defined(MEMCPYTOIO_WRITEL)
+#define srcd ((const u_int32_t*)src)
if (offs & 3) {
while (len >= 4) {
- mga_writel(va, offs, get_unaligned((u32 *)src));
+ mga_writel(va, offs, get_unaligned(srcd++));
offs += 4;
len -= 4;
- src += 4;
}
} else {
while (len >= 4) {
- mga_writel(va, offs, *(u32 *)src);
+ mga_writel(va, offs, *srcd++);
offs += 4;
len -= 4;
- src += 4;
}
}
+#undef srcd
if (len) {
u_int32_t tmp;
break;
}
} else
- fb_set_cmap(&info->cmap, info);
+ fb_set_cmap(&info->cmap, 1, info);
return 0;
}
//TODO if (pxafb_blank_helper) pxafb_blank_helper(blank);
if (fbi->fb.fix.visual == FB_VISUAL_PSEUDOCOLOR ||
fbi->fb.fix.visual == FB_VISUAL_STATIC_PSEUDOCOLOR)
- fb_set_cmap(&fbi->fb.cmap, info);
+ fb_set_cmap(&fbi->fb.cmap, 1, info);
pxafb_schedule_work(fbi, C_ENABLE);
}
return 0;
dp = pci_device_to_OF_node(rinfo->pdev);
- xtal = (unsigned int *) get_property(dp, "ATY,RefCLK", NULL);
+ xtal = (unsigned int *) get_property(dp, "ATY,RefCLK", 0);
rinfo->pll.ref_clk = *xtal / 10;
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#endif
-#ifdef CONFIG_PMAC_BACKLIGHT
-#include <asm/backlight.h>
-#endif
#include "rivafb.h"
#include "nvreg.h"
* various helpful macros and constants
*
* ------------------------------------------------------------------------- */
-#ifdef CONFIG_FB_RIVA_DEBUG
-#define NVTRACE printk
+
+#undef RIVAFBDEBUG
+#ifdef RIVAFBDEBUG
+#define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __FUNCTION__ , ## args)
#else
-#define NVTRACE if(0) printk
+#define DPRINTK(fmt, args...)
#endif
-#define NVTRACE_ENTER(...) NVTRACE("%s START\n", __FUNCTION__)
-#define NVTRACE_LEAVE(...) NVTRACE("%s END\n", __FUNCTION__)
-
-#ifdef CONFIG_FB_RIVA_DEBUG
+#ifndef RIVA_NDEBUG
#define assert(expr) \
if(!(expr)) { \
printk( "Assertion failed! %s,%s,%s,line=%d\n",\
{ "GeForce2-GTS", NV_ARCH_10 },
{ "GeForce2-ULTRA", NV_ARCH_10 },
{ "Quadro2-PRO", NV_ARCH_10 },
- { "GeForce4-MX-460", NV_ARCH_10 },
- { "GeForce4-MX-440", NV_ARCH_10 },
- { "GeForce4-MX-420", NV_ARCH_10 },
- { "GeForce4-440-GO", NV_ARCH_10 },
- { "GeForce4-420-GO", NV_ARCH_10 },
- { "GeForce4-420-GO-M32", NV_ARCH_10 },
- { "Quadro4-500-XGL", NV_ARCH_10 },
- { "GeForce4-440-GO-M64", NV_ARCH_10 },
- { "Quadro4-200", NV_ARCH_10 },
- { "Quadro4-550-XGL", NV_ARCH_10 },
- { "Quadro4-500-GOGL", NV_ARCH_10 },
- { "GeForce2", NV_ARCH_10 },
+ { "GeForce4-MX-460", NV_ARCH_20 },
+ { "GeForce4-MX-440", NV_ARCH_20 },
+ { "GeForce4-MX-420", NV_ARCH_20 },
+ { "GeForce4-440-GO", NV_ARCH_20 },
+ { "GeForce4-420-GO", NV_ARCH_20 },
+ { "GeForce4-420-GO-M32", NV_ARCH_20 },
+ { "Quadro4-500-XGL", NV_ARCH_20 },
+ { "GeForce4-440-GO-M64", NV_ARCH_20 },
+ { "Quadro4-200", NV_ARCH_20 },
+ { "Quadro4-550-XGL", NV_ARCH_20 },
+ { "Quadro4-500-GOGL", NV_ARCH_20 },
+ { "GeForce2", NV_ARCH_20 },
{ "GeForce3", NV_ARCH_20 },
{ "GeForce3 Ti 200", NV_ARCH_20 },
{ "GeForce3 Ti 500", NV_ARCH_20 },
0xEB /* MISC */
};
-/*
- * Backlight control
- */
-#ifdef CONFIG_PMAC_BACKLIGHT
-
-static int riva_backlight_levels[] = {
- 0x158,
- 0x192,
- 0x1c6,
- 0x200,
- 0x234,
- 0x268,
- 0x2a2,
- 0x2d6,
- 0x310,
- 0x344,
- 0x378,
- 0x3b2,
- 0x3e6,
- 0x41a,
- 0x454,
- 0x534,
-};
-
-static int riva_set_backlight_enable(int on, int level, void *data);
-static int riva_set_backlight_level(int level, void *data);
-static struct backlight_controller riva_backlight_controller = {
- riva_set_backlight_enable,
- riva_set_backlight_level
-};
-#endif /* CONFIG_PMAC_BACKLIGHT */
-
/* ------------------------------------------------------------------------- *
*
* MMIO access macros
{
int i;
- NVTRACE_ENTER();
par->riva.LockUnlock(&par->riva, 0);
par->riva.UnloadStateExt(&par->riva, ®s->ext);
for (i = 0; i < NUM_SEQ_REGS; i++)
regs->seq[i] = SEQin(par, i);
- NVTRACE_LEAVE();
}
/**
RIVA_HW_STATE *state = ®s->ext;
int i;
- NVTRACE_ENTER();
CRTCout(par, 0x11, 0x00);
par->riva.LockUnlock(&par->riva, 0);
for (i = 0; i < NUM_SEQ_REGS; i++)
SEQout(par, i, regs->seq[i]);
- NVTRACE_LEAVE();
}
/**
struct riva_par *par = (struct riva_par *) info->par;
struct riva_regs newmode;
- NVTRACE_ENTER();
/* time to calculate */
rivafb_blank(1, info);
riva_load_state(par, &par->current_state);
par->riva.LockUnlock(&par->riva, 0); /* important for HW cursor */
rivafb_blank(0, info);
- NVTRACE_LEAVE();
}
static void riva_update_var(struct fb_var_screeninfo *var, struct fb_videomode *modedb)
{
- NVTRACE_ENTER();
var->xres = var->xres_virtual = modedb->xres;
var->yres = modedb->yres;
if (var->yres_virtual < var->yres)
var->vsync_len = modedb->vsync_len;
var->sync = modedb->sync;
var->vmode = modedb->vmode;
- NVTRACE_LEAVE();
}
/**
};
int i;
- NVTRACE_ENTER();
/* use highest possible virtual resolution */
if (var->xres_virtual == -1 && var->yres_virtual == -1) {
printk(KERN_WARNING PFX
if (modes[i].xres == -1) {
printk(KERN_ERR PFX
"could not find a virtual resolution that fits into video memory!!\n");
- NVTRACE("EXIT - EINVAL error\n");
+ DPRINTK("EXIT - EINVAL error\n");
return -EINVAL;
}
var->xres_virtual = modes[i].xres;
printk(KERN_ERR PFX
"mode %dx%dx%d rejected...resolution too high to fit into video memory!\n",
var->xres, var->yres, var->bits_per_pixel);
- NVTRACE("EXIT - EINVAL error\n");
+ DPRINTK("EXIT - EINVAL error\n");
return -EINVAL;
}
}
var->yres_virtual = 0x7fff/nom;
if (var->xres_virtual > 0x7fff/nom)
var->xres_virtual = 0x7fff/nom;
- NVTRACE_LEAVE();
+
return 0;
}
return rc;
}
-/* ------------------------------------------------------------------------- *
- *
- * Backlight operations
- *
- * ------------------------------------------------------------------------- */
-
-#ifdef CONFIG_PMAC_BACKLIGHT
-static int riva_set_backlight_enable(int on, int level, void *data)
-{
- struct riva_par *par = (struct riva_par *)data;
- U032 tmp_pcrt, tmp_pmc;
-
- tmp_pmc = par->riva.PMC[0x10F0/4] & 0x0000FFFF;
- tmp_pcrt = par->riva.PCRTC0[0x081C/4] & 0xFFFFFFFC;
- if(on && (level > BACKLIGHT_OFF)) {
- tmp_pcrt |= 0x1;
- tmp_pmc |= (1 << 31); // backlight bit
- tmp_pmc |= riva_backlight_levels[level-1] << 16; // level
- }
- par->riva.PCRTC0[0x081C/4] = tmp_pcrt;
- par->riva.PMC[0x10F0/4] = tmp_pmc;
- return 0;
-}
-
-static int riva_set_backlight_level(int level, void *data)
-{
- return riva_set_backlight_enable(1, level, data);
-}
-#endif /* CONFIG_PMAC_BACKLIGHT */
-
/* ------------------------------------------------------------------------- *
*
* framebuffer operations
struct riva_par *par = (struct riva_par *) info->par;
int cnt = atomic_read(&par->ref_count);
- NVTRACE_ENTER();
if (!cnt) {
memset(&par->state, 0, sizeof(struct vgastate));
par->state.flags = VGA_SAVE_MODE | VGA_SAVE_FONTS;
riva_save_state(par, &par->initial_state);
}
atomic_inc(&par->ref_count);
- NVTRACE_LEAVE();
return 0;
}
struct riva_par *par = (struct riva_par *) info->par;
int cnt = atomic_read(&par->ref_count);
- NVTRACE_ENTER();
if (!cnt)
return -EINVAL;
if (cnt == 1) {
par->riva.LockUnlock(&par->riva, 1);
}
atomic_dec(&par->ref_count);
- NVTRACE_LEAVE();
return 0;
}
int nom, den; /* translating from pixels->bytes */
int mode_valid = 0;
- NVTRACE_ENTER();
switch (var->bits_per_pixel) {
case 1 ... 8:
var->red.offset = var->green.offset = var->blue.offset = 0;
printk(KERN_ERR PFX
"mode %dx%dx%d rejected...color depth not supported.\n",
var->xres, var->yres, var->bits_per_pixel);
- NVTRACE("EXIT, returning -EINVAL\n");
+ DPRINTK("EXIT, returning -EINVAL\n");
return -EINVAL;
}
var->green.msb_right =
var->blue.msb_right =
var->transp.offset = var->transp.length = var->transp.msb_right = 0;
- NVTRACE_LEAVE();
return 0;
}
{
struct riva_par *par = (struct riva_par *) info->par;
- NVTRACE_ENTER();
riva_common_setup(par);
RivaGetConfig(&par->riva, par->Chipset);
/* vgaHWunlock() + riva unlock (0x7F) */
info->fix.line_length = (info->var.xres_virtual * (info->var.bits_per_pixel >> 3));
info->fix.visual = (info->var.bits_per_pixel == 8) ?
FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR;
- NVTRACE_LEAVE();
return 0;
}
struct riva_par *par = (struct riva_par *)info->par;
unsigned int base;
- NVTRACE_ENTER();
if (var->xoffset > (var->xres_virtual - var->xres))
return -EINVAL;
if (var->yoffset > (var->yres_virtual - var->yres))
info->var.vmode |= FB_VMODE_YWRAP;
else
info->var.vmode &= ~FB_VMODE_YWRAP;
- NVTRACE_LEAVE();
return 0;
}
tmp = SEQin(par, 0x01) & ~0x20; /* screen on/off */
vesa = CRTCin(par, 0x1a) & ~0xc0; /* sync on/off */
- NVTRACE_ENTER();
if (blank) {
tmp |= 0x20;
switch (blank - 1) {
}
SEQout(par, 0x01, tmp);
CRTCout(par, 0x1a, vesa);
-
-#ifdef CONFIG_PMAC_BACKLIGHT
- if ( par->FlatPanel && _machine == _MACH_Pmac) {
- set_backlight_enable(!blank);
- }
-#endif
-
- NVTRACE_LEAVE();
return 0;
}
{
unsigned int cmap_len;
- NVTRACE_ENTER();
info->flags = FBINFO_DEFAULT
| FBINFO_HWACCEL_XPAN
| FBINFO_HWACCEL_YPAN
info->pixmap.scan_align = 4;
info->pixmap.flags = FB_PIXMAP_SYSTEM;
info->var.yres_virtual = -1;
- NVTRACE_LEAVE();
return (rivafb_check_var(&info->var, info));
}
"DFP,EDID", "LCD,EDID", "EDID", "EDID1", "EDID,B", "EDID,A", NULL };
int i;
- NVTRACE_ENTER();
dp = pci_device_to_OF_node(pd);
for (; dp != NULL; dp = dp->child) {
disptype = (unsigned char *)get_property(dp, "display-type", NULL);
}
}
}
- NVTRACE_LEAVE();
return 0;
}
#endif /* CONFIG_PPC_OF */
struct fb_monspecs *specs = &info->monspecs;
struct fb_videomode modedb;
- NVTRACE_ENTER();
/* respect mode options */
if (mode_option) {
fb_find_mode(var, info, mode_option,
riva_update_var(var, &modedb);
}
var->accel_flags |= FB_ACCELF_TEXT;
- NVTRACE_LEAVE();
}
static void riva_get_EDID(struct fb_info *info, struct pci_dev *pdev)
{
- struct riva_par *par;
- int i;
-
- NVTRACE_ENTER();
#ifdef CONFIG_PPC_OF
if (!riva_get_EDID_OF(info, pdev))
printk("rivafb: could not retrieve EDID from OF\n");
#else
/* XXX use other methods later */
#ifdef CONFIG_FB_RIVA_I2C
+ struct riva_par *par = (struct riva_par *) info->par;
+ int i;
- par = (struct riva_par *) info->par;
riva_create_i2c_busses(par);
for (i = par->bus; i >= 1; i--) {
riva_probe_i2c_connector(par, i, &par->EDID);
riva_delete_i2c_busses(par);
#endif
#endif
- NVTRACE_LEAVE();
}
struct riva_par *default_par;
struct fb_info *info;
- NVTRACE_ENTER();
assert(pd != NULL);
assert(rci != NULL);
info->fix.id,
info->fix.smem_len / (1024 * 1024),
info->fix.smem_start);
-#ifdef CONFIG_PMAC_BACKLIGHT
- if (default_par->FlatPanel && _machine == _MACH_Pmac)
- register_backlight_controller(&riva_backlight_controller,
- default_par, "mnca");
-#endif
- NVTRACE_LEAVE();
return 0;
err_out_iounmap_fb:
struct fb_info *info = pci_get_drvdata(pd);
struct riva_par *par = (struct riva_par *) info->par;
- NVTRACE_ENTER();
if (!info)
return;
kfree(par);
kfree(info);
pci_set_drvdata(pd, NULL);
- NVTRACE_LEAVE();
}
/* ------------------------------------------------------------------------- *
{
char *this_opt;
- NVTRACE_ENTER();
if (!options || !*options)
return 0;
} else
mode_option = this_opt;
}
- NVTRACE_LEAVE();
return 0;
}
#endif /* !MODULE */
case VESA_NO_BLANKING:
if (fbi->fb.fix.visual == FB_VISUAL_PSEUDOCOLOR ||
fbi->fb.fix.visual == FB_VISUAL_STATIC_PSEUDOCOLOR)
- fb_set_cmap(&fbi->fb.cmap, info);
+ fb_set_cmap(&fbi->fb.cmap, 1, info);
sa1100fb_schedule_work(fbi, C_ENABLE);
}
return 0;
#error Where is GPIO24 set as an output? Can we fit this in somewhere else?
if (machine_is_graphicsclient()) {
// From ADS doc again...same as disable
- msleep(20);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(20 * HZ / 1000);
GPSR |= GPIO_GPIO24;
}
#endif
* We'll wait 20msec.
*/
GPCR |= GPIO_GPIO24;
- msleep(20);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(20 * HZ / 1000);
}
#endif
#ifdef CONFIG_SA1100_HUW_WEBPANEL
{
switch(cmd) {
case FBIOGTYPE: {
- struct fbtype __user *f = (struct fbtype __user *) arg;
+ struct fbtype *f = (struct fbtype *) arg;
if (put_user(type, &f->fb_type) ||
__put_user(info->var.yres, &f->fb_height) ||
return 0;
}
case FBIOPUTCMAP_SPARC: {
- struct fbcmap __user *c = (struct fbcmap __user *) arg;
+ struct fbcmap *c = (struct fbcmap *) arg;
struct fb_cmap cmap;
u16 red, green, blue;
- unsigned char __user *ured;
- unsigned char __user *ugreen;
- unsigned char __user *ublue;
+ unsigned char *ured, *ugreen, *ublue;
int index, count, i;
if (get_user(index, &c->index) ||
return -EFAULT;
cmap.start = index + i;
- err = fb_set_cmap(&cmap, info);
+ err = fb_set_cmap(&cmap, 0, info);
if (err)
return err;
}
return 0;
}
case FBIOGETCMAP_SPARC: {
- struct fbcmap __user *c = (struct fbcmap __user *) arg;
- unsigned char __user *ured;
- unsigned char __user *ugreen;
- unsigned char __user *ublue;
+ struct fbcmap *c = (struct fbcmap *) arg;
+ unsigned char *ured, *ugreen, *ublue;
struct fb_cmap *cmap = &info->cmap;
int index, count, i;
if(con != ivideo->currcon) return;
if(fb_display[con].cmap.len) {
- fb_set_cmap(&fb_display[con].cmap, sisfb_setcolreg, info);
+ fb_set_cmap(&fb_display[con].cmap, 1, sisfb_setcolreg, info);
} else {
int size = sisfb_get_cmap_len(&fb_display[con].var);
- fb_set_cmap(fb_default_cmap(size), sisfb_setcolreg, info);
+ fb_set_cmap(fb_default_cmap(size), 1, sisfb_setcolreg, info);
}
}
info->cursor.image.fg_color = cursor->image.fg_color;
} else {
if (cursor->image.cmap.len)
- fb_copy_cmap(&cursor->image.cmap, &info->cursor.image.cmap);
+ fb_copy_cmap(&cursor->image.cmap, &info->cursor.image.cmap, 0);
}
info->cursor.image.depth = cursor->image.depth;
}
bg_color = ((cmap.red[cmap.start+1] << 16) |
(cmap.green[cmap.start+1] << 8) |
(cmap.blue[cmap.start+1]));
- fb_copy_cmap(&cmap, &info->cursor.image.cmap);
+ fb_copy_cmap(&cmap, &info->cursor.image.cmap, 0);
spin_lock_irqsave(&par->DAClock, flags);
banshee_make_room(par, 2);
tdfx_outl(par, HWCURC0, bg_color);
static int valkyriefb_blank(int blank_mode, struct fb_info *info);
static int read_valkyrie_sense(struct fb_info_valkyrie *p);
+static inline int valkyrie_vram_reqd(int video_mode, int color_mode);
static void set_valkyrie_clock(unsigned char *params);
+static inline int valkyrie_par_to_var(struct fb_par_valkyrie *par, struct fb_var_screeninfo *var);
static int valkyrie_var_to_par(struct fb_var_screeninfo *var,
struct fb_par_valkyrie *par, const struct fb_info *fb_info);
return 0;
}
-static inline int valkyrie_par_to_var(struct fb_par_valkyrie *par,
- struct fb_var_screeninfo *var)
-{
- return mac_vmode_to_var(par->vmode, par->cmode, var);
-}
-
static int
valkyriefb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
return 0;
}
-static inline int valkyrie_vram_reqd(int video_mode, int color_mode)
+static int valkyrie_vram_reqd(int video_mode, int color_mode)
{
int pitch;
struct valkyrie_regvals *init = valkyrie_reg_init[video_mode-1];
return 0;
}
+static int valkyrie_par_to_var(struct fb_par_valkyrie *par, struct fb_var_screeninfo *var)
+{
+ return mac_vmode_to_var(par->vmode, par->cmode, var);
+}
+
static void valkyrie_init_fix(struct fb_fix_screeninfo *fix, struct fb_info_valkyrie *p)
{
memset(fix, 0, sizeof(*fix));
config W1_MATROX
tristate "Matrox G400 transport layer for 1-wire"
- depends on W1 && PCI
+ depends on W1
help
Say Y here if you want to communicate with your 1-wire devices
using Matrox's G400 GPIO pins.
#include <asm/atomic.h>
#include <asm/types.h>
#include <asm/io.h>
+#include <asm/delay.h>
-#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
*/
#include <asm/atomic.h>
+#include <asm/delay.h>
-#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
dev = kmalloc(sizeof(struct w1_master) + sizeof(struct w1_bus_master), GFP_KERNEL);
if (!dev) {
printk(KERN_ERR
- "Failed to allocate %zd bytes for new w1 device.\n",
+ "Failed to allocate %d bytes for new w1 device.\n",
sizeof(struct w1_master));
return NULL;
}
*/
#include <asm/io.h>
+#include <asm/delay.h>
-#include <linux/delay.h>
#include <linux/moduleparam.h>
#include "w1.h"
config JFFS_PROC_FS
bool "JFFS stats available in /proc filesystem"
- depends on JFFS_FS && PROC_FS
+ depends on JFFS_FS && PROC
help
Enabling this option will cause statistics from mounted JFFS file systems
to be made available to the user in the /proc/fs/jffs/ directory.
config JFFS2_FS
tristate "Journalling Flash File System v2 (JFFS2) support"
- select CRC32
depends on MTD
+ select CRC32
+ select ZLIB_INFLATE
+ select ZLIB_DEFLATE
help
JFFS2 is the second generation of the Journalling Flash File System
for use on diskless embedded devices. It provides improved wear
messages at debug level 1 while the misbehaviour was occurring.
config JFFS2_FS_NAND
- bool "JFFS2 support for NAND flash"
- depends on JFFS2_FS
- default n
- help
- This enables the support for NAND flash in JFFS2. NAND is a newer
- type of flash chip design than the traditional NOR flash, with
- higher density but a handful of characteristics which make it more
- interesting for the file system to use.
-
- Say 'N' unless you have NAND flash.
-
-config JFFS2_COMPRESSION_OPTIONS
- bool "Advanced compression options for JFFS2"
- depends on JFFS2_FS
+ bool "JFFS2 support for NAND flash (EXPERIMENTAL)"
+ depends on JFFS2_FS && EXPERIMENTAL
default n
help
- Enabling this option allows you to explicitly choose which
- compression modules, if any, are enabled in JFFS2. Removing
- compressors and mean you cannot read existing file systems,
- and enabling experimental compressors can mean that you
- write a file system which cannot be read by a standard kernel.
+ This enables the experimental support for NAND flash in JFFS2. NAND
+ is a newer type of flash chip design than the traditional NOR flash,
+ with higher density but a handful of characteristics which make it
+ more interesting for the file system to use. Support for NAND flash
+ is not yet complete and may corrupt data. For further information,
+ including a link to the mailing list where details of the remaining
+ work to be completed for NAND flash support can be found, see the
+ JFFS2 web site at <http://sources.redhat.com/jffs2>.
- If unsure, you should _definitely_ say 'N'.
-
-config JFFS2_ZLIB
- bool "JFFS2 ZLIB compression support" if JFFS2_COMPRESSION_OPTIONS
- select ZLIB_INFLATE
- select ZLIB_DEFLATE
- depends on JFFS2_FS
- default y
- help
- Zlib is designed to be a free, general-purpose, legally unencumbered,
- lossless data-compression library for use on virtually any computer
- hardware and operating system. See http://www.gzip.org/zlib/ for
- further information.
-
- Say 'Y' if unsure.
-
-config JFFS2_RTIME
- bool "JFFS2 RTIME compression support" if JFFS2_COMPRESSION_OPTIONS
- depends on JFFS2_FS
- default y
- help
- Rtime does manage to recompress already-compressed data. Say 'Y' if unsure.
-
-config JFFS2_RUBIN
- bool "JFFS2 RUBIN compression support" if JFFS2_COMPRESSION_OPTIONS
- depends on JFFS2_FS
- default n
- help
- RUBINMIPS and DYNRUBIN compressors. Say 'N' if unsure.
-
-choice
- prompt "JFFS2 default compression mode" if JFFS2_COMPRESSION_OPTIONS
- default JFFS2_CMODE_PRIORITY
- depends on JFFS2_FS
- help
- You can set here the default compression mode of JFFS2 from
- the avaiable compression modes. Don't touch if unsure.
-
-config JFFS2_CMODE_NONE
- bool "no compression"
- help
- Uses no compression.
-
-config JFFS2_CMODE_PRIORITY
- bool "priority"
- help
- Tries the compressors in a predefinied order and chooses the first
- successful one.
-
-config JFFS2_CMODE_SIZE
- bool "size (EXPERIMENTAL)"
- help
- Tries all compressors and chooses the one which has the smallest
- result.
-
-endchoice
+ Say 'N' unless you have NAND flash and you are willing to test and
+ develop JFFS2 support for it.
config CRAMFS
tristate "Compressed ROM file system support"
Enabling this option will cause statistics for each server share
mounted by the cifs client to be displayed in /proc/fs/cifs/Stats
-config CIFS_XATTR
- bool "CIFS extended attributes (EXPERIMENTAL)"
- depends on CIFS
- help
- Extended attributes are name:value pairs associated with inodes by
- the kernel or by users (see the attr(5) manual page, or visit
- <http://acl.bestbits.at/> for details). CIFS maps the name of
- extended attributes beginning with the user namespace prefix
- to SMB/CIFS EAs. EAs are stored on Windows servers without the
- user namespace prefix, but their names are seen by Linux cifs clients
- prefaced by the user namespace prefix. The system namespace
- (used by some filesystems to store ACLs) is not supported at
- this time.
-
- If unsure, say N.
-
config CIFS_POSIX
bool "CIFS POSIX Extensions (EXPERIMENTAL)"
depends on CIFS
obj-$(CONFIG_CRAMFS) += cramfs/
obj-$(CONFIG_RAMFS) += ramfs/
obj-$(CONFIG_HUGETLBFS) += hugetlbfs/
-obj-$(CONFIG_RELAYFS_FS) += relayfs/
obj-$(CONFIG_CODA_FS) += coda/
obj-$(CONFIG_MINIX_FS) += minix/
obj-$(CONFIG_FAT_FS) += fat/
obj-$(CONFIG_XFS_FS) += xfs/
obj-$(CONFIG_AFS_FS) += afs/
obj-$(CONFIG_BEFS_FS) += befs/
-obj-$(CONFIG_EXTERNFS) += hostfs/
+obj-$(CONFIG_HOSTFS) += hostfs/
+obj-$(CONFIG_HPPFS) += hppfs/
obj-$(CONFIG_RCFS_FS) += rcfs/
--- /dev/null
+#
+# Makefile for the Linux filesystems.
+#
+# 14 Sep 2000, Christoph Hellwig <hch@infradead.org>
+# Rewritten to use lists instead of if-statements.
+#
+
+obj-y := open.o read_write.o file_table.o buffer.o \
+ bio.o super.o block_dev.o char_dev.o stat.o exec.o pipe.o \
+ namei.o fcntl.o ioctl.o readdir.o select.o fifo.o locks.o \
+ dcache.o inode.o attr.o bad_inode.o file.o dnotify.o \
+ filesystems.o namespace.o seq_file.o xattr.o libfs.o \
+ fs-writeback.o mpage.o direct-io.o aio.o
+
+obj-$(CONFIG_EPOLL) += eventpoll.o
+obj-$(CONFIG_COMPAT) += compat.o
+
+nfsd-$(CONFIG_NFSD) := nfsctl.o
+obj-y += $(nfsd-y) $(nfsd-m)
+
+obj-$(CONFIG_BINFMT_AOUT) += binfmt_aout.o
+obj-$(CONFIG_BINFMT_EM86) += binfmt_em86.o
+obj-$(CONFIG_BINFMT_MISC) += binfmt_misc.o
+
+# binfmt_script is always there
+obj-y += binfmt_script.o
+
+obj-$(CONFIG_BINFMT_ELF) += binfmt_elf.o
+obj-$(CONFIG_BINFMT_SOM) += binfmt_som.o
+obj-$(CONFIG_BINFMT_FLAT) += binfmt_flat.o
+
+obj-$(CONFIG_FS_MBCACHE) += mbcache.o
+obj-$(CONFIG_FS_POSIX_ACL) += posix_acl.o xattr_acl.o
+
+obj-$(CONFIG_QUOTA) += dquot.o
+obj-$(CONFIG_QFMT_V1) += quota_v1.o
+obj-$(CONFIG_QFMT_V2) += quota_v2.o
+obj-$(CONFIG_QUOTACTL) += quota.o
+
+obj-$(CONFIG_PROC_FS) += proc/
+obj-y += partitions/
+obj-$(CONFIG_SYSFS) += sysfs/
+obj-y += devpts/
+
+obj-$(CONFIG_PROFILING) += dcookies.o
+
+# Do not add any filesystems before this line
+obj-$(CONFIG_REISERFS_FS) += reiserfs/
+obj-$(CONFIG_EXT3_FS) += ext3/ # Before ext2 so root fs can be ext3
+obj-$(CONFIG_JBD) += jbd/
+obj-$(CONFIG_EXT2_FS) += ext2/
+obj-$(CONFIG_CRAMFS) += cramfs/
+obj-$(CONFIG_RAMFS) += ramfs/
+obj-$(CONFIG_HUGETLBFS) += hugetlbfs/
+obj-$(CONFIG_CODA_FS) += coda/
+obj-$(CONFIG_INTERMEZZO_FS) += intermezzo/
+obj-$(CONFIG_MINIX_FS) += minix/
+obj-$(CONFIG_FAT_FS) += fat/
+obj-$(CONFIG_UMSDOS_FS) += umsdos/
+obj-$(CONFIG_MSDOS_FS) += msdos/
+obj-$(CONFIG_VFAT_FS) += vfat/
+obj-$(CONFIG_BFS_FS) += bfs/
+obj-$(CONFIG_ISO9660_FS) += isofs/
+obj-$(CONFIG_DEVFS_FS) += devfs/
+obj-$(CONFIG_HFSPLUS_FS) += hfsplus/ # Before hfs to find wrapped HFS+
+obj-$(CONFIG_HFS_FS) += hfs/
+obj-$(CONFIG_VXFS_FS) += freevxfs/
+obj-$(CONFIG_NFS_FS) += nfs/
+obj-$(CONFIG_EXPORTFS) += exportfs/
+obj-$(CONFIG_NFSD) += nfsd/
+obj-$(CONFIG_LOCKD) += lockd/
+obj-$(CONFIG_NLS) += nls/
+obj-$(CONFIG_SYSV_FS) += sysv/
+obj-$(CONFIG_SMB_FS) += smbfs/
+obj-$(CONFIG_CIFS) += cifs/
+obj-$(CONFIG_NCP_FS) += ncpfs/
+obj-$(CONFIG_HPFS_FS) += hpfs/
+obj-$(CONFIG_NTFS_FS) += ntfs/
+obj-$(CONFIG_UFS_FS) += ufs/
+obj-$(CONFIG_EFS_FS) += efs/
+obj-$(CONFIG_JFFS_FS) += jffs/
+obj-$(CONFIG_JFFS2_FS) += jffs2/
+obj-$(CONFIG_AFFS_FS) += affs/
+obj-$(CONFIG_ROMFS_FS) += romfs/
+obj-$(CONFIG_QNX4FS_FS) += qnx4/
+obj-$(CONFIG_AUTOFS_FS) += autofs/
+obj-$(CONFIG_AUTOFS4_FS) += autofs4/
+obj-$(CONFIG_ADFS_FS) += adfs/
+obj-$(CONFIG_UDF_FS) += udf/
+obj-$(CONFIG_SUN_OPENPROMFS) += openpromfs/
+obj-$(CONFIG_JFS_FS) += jfs/
+obj-$(CONFIG_XFS_FS) += xfs/
+obj-$(CONFIG_AFS_FS) += afs/
+obj-$(CONFIG_BEFS_FS) += befs/
#include <linux/fcntl.h>
#include <linux/quotaops.h>
#include <linux/security.h>
-#include <linux/vs_base.h>
-#include <linux/proc_fs.h>
-#include <linux/devpts_fs.h>
/* Taken over from the old code... */
if (current->fsuid != inode->i_uid && !capable(CAP_FOWNER))
goto error;
}
-
- /* Check for evil vserver activity */
- if (vx_check(0, VX_ADMIN))
- goto fine;
-
- if (IS_BARRIER(inode)) {
- printk(KERN_WARNING
- "VSW: xid=%d messing with the barrier.\n",
- vx_current_xid());
- goto error;
- }
- switch (inode->i_sb->s_magic) {
- case PROC_SUPER_MAGIC:
- printk(KERN_WARNING
- "VSW: xid=%d messing with the procfs.\n",
- vx_current_xid());
- goto error;
- case DEVPTS_SUPER_MAGIC:
- if (vx_check(inode->i_xid, VX_IDENT))
- goto fine;
- printk(KERN_WARNING
- "VSW: xid=%d messing with the devpts.\n",
- vx_current_xid());
- goto error;
- }
fine:
retval = 0;
error:
inode->i_uid = attr->ia_uid;
if (ia_valid & ATTR_GID)
inode->i_gid = attr->ia_gid;
- if (ia_valid & ATTR_XID)
- inode->i_xid = attr->ia_xid;
if (ia_valid & ATTR_ATIME)
inode->i_atime = attr->ia_atime;
if (ia_valid & ATTR_MTIME)
dn_mask |= DN_ATTRIB;
if (ia_valid & ATTR_GID)
dn_mask |= DN_ATTRIB;
- if (ia_valid & ATTR_XID)
- dn_mask |= DN_ATTRIB;
if (ia_valid & ATTR_SIZE)
dn_mask |= DN_MODIFY;
/* both times implies a utime(s) call */
error = security_inode_setattr(dentry, attr);
if (!error) {
if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
- (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid) ||
- (ia_valid & ATTR_XID && attr->ia_xid != inode->i_xid))
+ (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid))
error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0;
if (!error)
error = inode_setattr(inode, attr);
#include <linux/binfmts.h>
#include <linux/personality.h>
#include <linux/init.h>
-#include <linux/vs_memory.h>
#include <asm/system.h>
#include <asm/uaccess.h>
(current->mm->start_data = N_DATADDR(ex));
current->mm->brk = ex.a_bss +
(current->mm->start_brk = N_BSSADDR(ex));
- current->mm->free_area_cache = current->mm->mmap_base;
-
+ current->mm->free_area_cache = TASK_UNMAPPED_BASE;
+ /* unlimited stack is larger than TASK_SIZE */
+ current->mm->non_executable_cache = current->mm->mmap_top;
// current->mm->rss = 0;
vx_rsspages_sub(current->mm, current->mm->rss);
current->mm->mmap = NULL;
return error;
}
- error = bprm->file->f_op->read(bprm->file,
- (char __user *)text_addr,
+ error = bprm->file->f_op->read(bprm->file, (char *)text_addr,
ex.a_text+ex.a_data, &pos);
if ((signed long)error < 0) {
send_sig(SIGKILL, current, 0);
if (!bprm->file->f_op->mmap||((fd_offset & ~PAGE_MASK) != 0)) {
loff_t pos = fd_offset;
do_brk(N_TXTADDR(ex), ex.a_text+ex.a_data);
- bprm->file->f_op->read(bprm->file,
- (char __user *)N_TXTADDR(ex),
+ bprm->file->f_op->read(bprm->file,(char *)N_TXTADDR(ex),
ex.a_text+ex.a_data, &pos);
flush_icache_range((unsigned long) N_TXTADDR(ex),
(unsigned long) N_TXTADDR(ex) +
do_brk(start_addr, ex.a_text + ex.a_data + ex.a_bss);
- file->f_op->read(file, (char __user *)start_addr,
+ file->f_op->read(file, (char *)start_addr,
ex.a_text + ex.a_data, &pos);
flush_icache_range((unsigned long) start_addr,
(unsigned long) start_addr + ex.a_text + ex.a_data);
NEW_AUX_ENT(AT_EGID, (elf_addr_t) tsk->egid);
NEW_AUX_ENT(AT_SECURE, (elf_addr_t) security_bprm_secureexec(bprm));
if (k_platform) {
- NEW_AUX_ENT(AT_PLATFORM, (elf_addr_t)(unsigned long)u_platform);
+ NEW_AUX_ENT(AT_PLATFORM, (elf_addr_t)(long)u_platform);
}
if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
NEW_AUX_ENT(AT_EXECFD, (elf_addr_t) bprm->interp_data);
if (interp_aout) {
argv = sp + 2;
envp = argv + argc + 1;
- __put_user((elf_addr_t)(unsigned long)argv, sp++);
- __put_user((elf_addr_t)(unsigned long)envp, sp++);
+ __put_user((elf_addr_t)(long)argv, sp++);
+ __put_user((elf_addr_t)(long)envp, sp++);
} else {
argv = sp;
envp = argv + argc + 1;
struct exec interp_ex;
char passed_fileno[6];
struct files_struct *files;
- int have_pt_gnu_stack, executable_stack, relocexec, old_relocexec = current->flags & PF_RELOCEXEC;
+ int executable_stack, relocexec, old_relocexec = current->flags & PF_RELOCEXEC;
unsigned long def_flags = 0;
/* Get the exec-header */
executable_stack = EXSTACK_DISABLE_X;
break;
}
- have_pt_gnu_stack = (i < elf_ex.e_phnum);
+ if (i == elf_ex.e_phnum)
+ def_flags |= VM_EXEC | VM_MAYEXEC;
relocexec = 0;
current->mm->end_data = 0;
current->mm->end_code = 0;
current->mm->mmap = NULL;
+#ifdef __HAVE_ARCH_MMAP_TOP
+ current->mm->mmap_top = mmap_top();
+#endif
current->flags &= ~PF_FORKNOEXEC;
current->mm->def_flags = def_flags;
/* Do this immediately, since STACK_TOP as used in setup_arg_pages
may depend on the personality. */
SET_PERSONALITY(elf_ex, ibcs2_interpreter);
- if (elf_read_implies_exec(elf_ex, have_pt_gnu_stack))
- current->personality |= READ_IMPLIES_EXEC;
/* Do this so that we can load the interpreter, if need be. We will
change some of these later */
// current->mm->rss = 0;
vx_rsspages_sub(current->mm, current->mm->rss);
- current->mm->free_area_cache = current->mm->mmap_base;
+ current->mm->free_area_cache = TASK_UNMAPPED_BASE;
+ current->mm->non_executable_cache = current->mm->mmap_top;
retval = setup_arg_pages(bprm, executable_stack);
if (retval < 0) {
send_sig(SIGKILL, current, 0);
#include <linux/personality.h>
#include <linux/init.h>
#include <linux/flat.h>
-#include <linux/vs_memory.h>
#include <asm/byteorder.h>
#include <asm/system.h>
#include <linux/shm.h>
#include <linux/personality.h>
#include <linux/init.h>
-#include <linux/vs_memory.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
len, offset);
}
-/**
- * bio_uncopy_user - finish previously mapped bio
- * @bio: bio being terminated
- *
- * Free pages allocated from bio_copy_user() and write back data
- * to user space in case of a read.
- */
-int bio_uncopy_user(struct bio *bio)
-{
- struct bio_vec *bvec;
- int i, ret = 0;
-
- if (bio_data_dir(bio) == READ) {
- char *uaddr = bio->bi_private;
-
- __bio_for_each_segment(bvec, bio, i, 0) {
- char *addr = page_address(bvec->bv_page);
-
- if (!ret && copy_to_user(uaddr, addr, bvec->bv_len))
- ret = -EFAULT;
-
- __free_page(bvec->bv_page);
- uaddr += bvec->bv_len;
- }
- }
-
- bio_put(bio);
- return ret;
-}
-
-/**
- * bio_copy_user - copy user data to bio
- * @q: destination block queue
- * @uaddr: start of user address
- * @len: length in bytes
- * @write_to_vm: bool indicating writing to pages or not
- *
- * Prepares and returns a bio for indirect user io, bouncing data
- * to/from kernel pages as necessary. Must be paired with
- * call bio_uncopy_user() on io completion.
- */
-struct bio *bio_copy_user(request_queue_t *q, unsigned long uaddr,
- unsigned int len, int write_to_vm)
-{
- unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
- unsigned long start = uaddr >> PAGE_SHIFT;
- struct bio_vec *bvec;
- struct page *page;
- struct bio *bio;
- int i, ret;
-
- bio = bio_alloc(GFP_KERNEL, end - start);
- if (!bio)
- return ERR_PTR(-ENOMEM);
-
- ret = 0;
- while (len) {
- unsigned int bytes = PAGE_SIZE;
-
- if (bytes > len)
- bytes = len;
-
- page = alloc_page(q->bounce_gfp | GFP_KERNEL);
- if (!page) {
- ret = -ENOMEM;
- break;
- }
-
- if (__bio_add_page(q, bio, page, bytes, 0) < bytes) {
- ret = -EINVAL;
- break;
- }
-
- len -= bytes;
- }
-
- /*
- * success
- */
- if (!ret) {
- if (!write_to_vm) {
- bio->bi_rw |= (1 << BIO_RW);
- /*
- * for a write, copy in data to kernel pages
- */
- ret = -EFAULT;
- bio_for_each_segment(bvec, bio, i) {
- char *addr = page_address(bvec->bv_page);
-
- if (copy_from_user(addr, (char *) uaddr, bvec->bv_len))
- goto cleanup;
- }
- }
-
- bio->bi_private = (void *) uaddr;
- return bio;
- }
-
- /*
- * cleanup
- */
-cleanup:
- bio_for_each_segment(bvec, bio, i)
- __free_page(bvec->bv_page);
-
- bio_put(bio);
- return ERR_PTR(ret);
-}
-
static struct bio *__bio_map_user(request_queue_t *q, struct block_device *bdev,
unsigned long uaddr, unsigned int len,
int write_to_vm)
* size for now, in the future we can relax this restriction
*/
if ((uaddr & queue_dma_alignment(q)) || (len & queue_dma_alignment(q)))
- return ERR_PTR(-EINVAL);
+ return NULL;
bio = bio_alloc(GFP_KERNEL, nr_pages);
if (!bio)
- return ERR_PTR(-ENOMEM);
+ return NULL;
- ret = -ENOMEM;
pages = kmalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
if (!pages)
goto out;
if (!write_to_vm)
bio->bi_rw |= (1 << BIO_RW);
- bio->bi_flags |= (1 << BIO_USER_MAPPED);
+ blk_queue_bounce(q, &bio);
return bio;
out:
kfree(pages);
bio_put(bio);
- return ERR_PTR(ret);
+ return NULL;
}
/**
* @write_to_vm: bool indicating writing to pages or not
*
* Map the user space address into a bio suitable for io to a block
- * device. Returns an error pointer in case of error.
+ * device.
*/
struct bio *bio_map_user(request_queue_t *q, struct block_device *bdev,
unsigned long uaddr, unsigned int len, int write_to_vm)
bio = __bio_map_user(q, bdev, uaddr, len, write_to_vm);
- if (IS_ERR(bio))
- return bio;
-
- /*
- * subtle -- if __bio_map_user() ended up bouncing a bio,
- * it would normally disappear when its bi_end_io is run.
- * however, we need it for the unmap, so grab an extra
- * reference to it
- */
- bio_get(bio);
+ if (bio) {
+ /*
+ * subtle -- if __bio_map_user() ended up bouncing a bio,
+ * it would normally disappear when its bi_end_io is run.
+ * however, we need it for the unmap, so grab an extra
+ * reference to it
+ */
+ bio_get(bio);
- if (bio->bi_size == len)
- return bio;
+ if (bio->bi_size < len) {
+ bio_endio(bio, bio->bi_size, 0);
+ bio_unmap_user(bio, 0);
+ return NULL;
+ }
+ }
- /*
- * don't support partial mappings
- */
- bio_endio(bio, bio->bi_size, 0);
- bio_unmap_user(bio);
- return ERR_PTR(-EINVAL);
+ return bio;
}
-static void __bio_unmap_user(struct bio *bio)
+static void __bio_unmap_user(struct bio *bio, int write_to_vm)
{
struct bio_vec *bvec;
int i;
* make sure we dirty pages we wrote to
*/
__bio_for_each_segment(bvec, bio, i, 0) {
- if (bio_data_dir(bio) == READ)
+ if (write_to_vm)
set_page_dirty_lock(bvec->bv_page);
page_cache_release(bvec->bv_page);
/**
* bio_unmap_user - unmap a bio
* @bio: the bio being unmapped
+ * @write_to_vm: bool indicating whether pages were written to
*
- * Unmap a bio previously mapped by bio_map_user(). Must be called with
+ * Unmap a bio previously mapped by bio_map_user(). The @write_to_vm
+ * must be the same as passed into bio_map_user(). Must be called with
* a process context.
*
* bio_unmap_user() may sleep.
*/
-void bio_unmap_user(struct bio *bio)
+void bio_unmap_user(struct bio *bio, int write_to_vm)
{
- __bio_unmap_user(bio);
+ __bio_unmap_user(bio, write_to_vm);
bio_put(bio);
}
EXPORT_SYMBOL(bio_pair_release);
EXPORT_SYMBOL(bio_split);
EXPORT_SYMBOL(bio_split_pool);
-EXPORT_SYMBOL(bio_copy_user);
-EXPORT_SYMBOL(bio_uncopy_user);
-Version 1.22
-------------
-Add config option to enable XATTR (extended attribute) support, mapping
-xattr names in the "user." namespace space to SMB/CIFS EAs.
-
-Version 1.21
-------------
-Add new mount parm to control whether mode check (vfs_permission) is done on
-the client. If Unix extensions are enabled and the uids on the client
-and server do not match, client permission checks are meaningless on
-server uids that do not exist on the client (this does not affect the
-normal ACL check which occurs on the server). Fix default uid
-on mknod to match create and mkdir. Add optional mount parm to allow
-override of the default uid behavior (in which the server sets the uid
-and gid of newly created files). Normally for network filesystem mounts
-user want the server to set the uid/gid on newly created files (rather than
-using uid of the client processes you would in a local filesystem).
-
Version 1.20
------------
Make transaction counts more consistent. Merge /proc/fs/cifs/SimultaneousOps
5) make dep
6) make modules (or "make" if CIFS VFS not to be built as a module)
-For Linux 2.6:
+For Linux 2.5:
1) Download the kernel (e.g. from http://www.kernel.org or from bitkeeper
at bk://linux.bkbits.net/linux-2.5) and change directory into the top
of the kernel directory tree (e.g. /usr/src/linux-2.5.73)
similar files reside (usually /sbin). Although the helper software is not
required, mount.cifs is recommended. Eventually the Samba 3.0 utility program
"net" may also be helpful since it may someday provide easier mount syntax for
-users who are used to Windows e.g. net use <mount point> <UNC name or cifs URL>
+users who are used to Windows e.g. net use <mount point> <UNC name or cifs URL>
Note that running the Winbind pam/nss module (logon service) on all of your
Linux clients is useful in mapping Uids and Gids consistently across the
domain to the proper network user. The mount.cifs mount helper can be
gcc samba/source/client/mount.cifs.c -o mount.cifs
-Allowing User Mounts
-====================
-To permit users to mount and unmount over directories they own is possible
-with the cifs vfs. A way to enable such mounting is to mark the mount.cifs
-utility as suid (e.g. "chmod +s /sbin/mount/cifs). To enable users to
-umount shares they mount requires
-1) mount.cifs version 1.4 or later
-2) an entry for the share in /etc/fstab indicating that a user may
-unmount it e.g.
-//server/usersharename /mnt/username cifs user 0 0
-
Note that when the mount.cifs utility is run suid (allowing user mounts),
in order to reduce risks, the "nosuid" mount flag is passed in on mount to
disallow execution of an suid program mounted on the remote target.
delete readonly = yes
ea support = yes
-Note that server ea support is required for supporting xattrs from the Linux
-cifs client, and that EA support is present in later versions of Samba (e.g.
-3.0.6 and later (also EA support works in all versions of Windows, at least to
-shares on NTFS filesystems). Extended Attribute (xattr) support is an optional
-feature of most Linux filesystems which may require enabling via
-make menuconfig
-
-Some administrators may want to change Samba's smb.conf "map archive" and
-"create mask" parameters from the default. Creating special devices (mknod)
+Note that ea support is required for supporting Linux xattrs.
+Some administrators also change the "map archive" and the "create mask"
+parameters from their default values. Creating special devices (mknod)
remotely may require specifying a mkdev function to Samba if you are not using
-Samba 3.0.6 or later. For more information on these see the manual pages
+Samba 3.0.5 or later. For more information on these see the manual pages
("man smb.conf") on the Samba server system. Note that the cifs vfs,
unlike the smbfs vfs, does not read the smb.conf on the client system
(the few optional settings are passed in on mount via -o parameters instead).
Note that Samba 2.2.7 or later includes a fix that allows the CIFS VFS to delete
open files (required for strict POSIX compliance). Windows Servers already
supported this feature. Samba server does not allow symlinks that refer to files
-outside of the share, so in Samba versions prior to 3.0.6, most symlinks to
+outside of the share, so in Samba versions prior to 3.0.5, most symlinks to
files with absolute paths (ie beginning with slash) such as:
ln -s /mnt/foo bar
-would be forbidden. Samba 3.0.6 server or later includes the ability to create
+would be forbidden. Samba 3.0.5 server or later includes the ability to create
such symlinks safely by converting unsafe symlinks (ie symlinks to server
files that are outside of the share) to a samba specific format on the server
that is ignored by local server applications and non-cifs clients and that will
running an altered binary on your local system (downloaded from a hostile server
or altered by a hostile router).
-Although mounting using format corresponding to the CIFS URL specification is
-not possible in mount.cifs yet, it is possible to use an alternate format
-for the server and sharename (which is somewhat similar to NFS style mount
-syntax) instead of the more widely used UNC format (i.e. \\server\share):
- mount -t cifs tcp_name_of_server:share_name /mnt -o user=myname,pass=mypasswd
-
When using the mount helper mount.cifs, passwords may be specified via alternate
mechanisms, instead of specifying it after -o using the normal "pass=" syntax
on the command line:
mount helper will not prompt the user for a password
if guest is specified on the mount options. If no
password is specified a null password will be used.
- perm Client does permission checks (vfs_permission check of uid
- and gid of the file against the mode and desired operation),
- Note that this is in addition to the normal ACL check on the
- target machine done by the server software.
- Client permission checking is enabled by default.
- noperm Client does not do permission checks. This can expose
- files on this mount to access by other users on the local
- client system. It is typically only needed when the server
- supports the CIFS Unix Extensions but the UIDs/GIDs on the
- client and server system do not match closely enough to allow
- access by the user doing the mount.
- Note that this does not affect the normal ACL check on the
- target machine done by the server software (of the server
- ACL against the user name provided at mount time).
- setuids If the CIFS Unix extensions are negotiated with the server
- the client will attempt to set the effective uid and gid of
- the local process on newly created files, directories, and
- devices (create, mkdir, mknod).
- nosetuids The client will not attempt to set the uid and gid on
- on newly created files, directories, and devices (create,
- mkdir, mknod) which will result in the server setting the
- uid and gid to the default (usually the server uid of the
- usern who mounted the share). Letting the server (rather than
- the client) set the uid and gid is the default. This
- parameter has no effect if the CIFS Unix Extensions are not
- negotiated.
-
+
The mount.cifs mount helper also accepts a few mount options before -o
including:
echo 1 > /proc/fs/cifs/traceSMB
-Two other experimental features are under development and to test
+Three other experimental features are under development and to test
require enabling an ifdef (e.g. by adding "#define CIFS_FCNTL" in cifsglob.h)
CONFIG_CIFS_QUOTA
+ CONFIG_CIFS_XATTR
+
CONFIG_CIFS_FCNTL (fcntl needed for support of directory change
notification and perhaps later for file leases)
-version 1.22 July 30, 2004
+version 1.16 May 27, 2004
A Partial List of Missing Features
==================================
a) Support for SecurityDescriptors for chmod/chgrp/chown so
these can be supported for Windows servers
-b) Better pam/winbind integration (e.g. to handle uid mapping
-better)
+b) Better pam/winbind integration
c) multi-user mounts - multiplexed sessionsetups over single vc
(ie tcp session) - prettying up needed
h) quota support
-j) finish writepages support (multi-page write behind for improved
+i) support for the new Linux 2.5 kernel features get_xattr and set_xattr,
+which will allow us to expose DOS attributes as well as real
+ACLs. This support has been started in the current code, but is
+ifdeffed out.
+
+k) finish writepages support (multi-page write behind for improved
performance) and syncpage
-k) hook lower into the sockets api (as NFS/SunRPC does) to avoid the
+l) hook lower into the sockets api (as NFS/SunRPC does) to avoid the
extra copy in/out of the socket buffers in some cases.
-l) finish support for IPv6. This is mostly complete but
+m) finish support for IPv6. This is mostly complete but
needs a simple inet_pton like function to convert ipv6
addresses in string representation.
-m) Better optimize open (and pathbased setfilesize) to reduce the
+o) Better optimize open (and pathbased setfilesize) to reduce the
oplock breaks coming from windows srv. Piggyback identical file
opens on top of each other by incrementing reference count rather
than resending (helps reduce server resource utilization and avoid
spurious oplock breaks).
-o) Improve performance of readpages by sending more than one read
+p) Improve performance of readpages by sending more than one read
at a time when 8 pages or more are requested. Evaluate whether
reads larger than 16K would be helpful.
-p) For support of Windows9x/98 we need to retry failed mounts
+q) For support of Windows9x/98 we need to retry failed mounts
to *SMBSERVER (default server name) with the uppercase hostname
in the RFC1001 session_init request.
-q) Add support for storing symlink and fifo info to Windows servers
-in the Extended Attribute format their SFU clients would recognize.
+r) Add Extended Attribute support (for storing UID/GID info
+to Windows servers)
-r) Finish fcntl D_NOTIFY support so kde and gnome file list windows
+s) Finish fcntl D_NOTIFY support so kde and gnome file list windows
will autorefresh
-s) Add GUI tool to configure /proc/fs/cifs settings and for display of
+t) Add GUI tool to configure /proc/fs/cifs settings and for display of
the CIFS statistics
KNOWN BUGS (updated May 27, 2004)
differences but worth investigating). Also debug Samba to
see why lock test case 7 takes longer to complete to Samba
than to Windows.
-5) implement search rewind (seeking backward in a readdir), which is
-necessary for one of the "special" subsection of posix file API
-tests in the Connectathon nfs test suite.
Misc testing to do
==================
1) check out max path names and max path name components against various server
-types. Try nested symlinks (8 deep). Return max path name in stat -f information
+types. Try nested symlinks. Return max path name in stat -f information
2) Modify file portion of ltp so it can run against a mounted network
share and run it against cifs vfs.
negotiated size) and send larger write sizes to modern servers.
4) More exhaustively test the recently added NT4 support against various
-NT4 service pack levels, and fix cifs_setattr for setting file times and
-size to fall back to level 1 when error invalid level returned.
+NT4 service pack levels.
#ifndef _CIFS_FS_SB_H
#define _CIFS_FS_SB_H
-#define CIFS_MOUNT_NO_PERM 1 /* do not do client vfs_perm check */
-#define CIFS_MOUNT_SET_UID 2 /* set current->euid in create etc. */
-
struct cifs_sb_info {
struct cifsTconInfo *tcon; /* primary mount */
struct list_head nested_tcon_q;
gid_t mnt_gid;
mode_t mnt_file_mode;
mode_t mnt_dir_mode;
- int mnt_cifs_flags;
};
#endif /* _CIFS_FS_SB_H */
#include "cifs_fs_sb.h"
#include <linux/mm.h>
#define CIFS_MAGIC_NUMBER 0xFF534D42 /* the first four bytes of SMB PDUs */
+/* BB when mempool_resize is added back in, we will resize pool on new mount */
+#define CIFS_MIN_RCV_POOL 11 /* enough for progress to five servers */
#ifdef CONFIG_CIFS_QUOTA
static struct quotactl_ops cifs_quotactl_ops;
static int cifs_permission(struct inode * inode, int mask, struct nameidata *nd)
{
- struct cifs_sb_info *cifs_sb;
+ struct cifs_sb_info *cifs_sb;
- cifs_sb = CIFS_SB(inode->i_sb);
+ cifs_sb = CIFS_SB(inode->i_sb);
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
+ if (cifs_sb->tcon->ses->capabilities & CAP_UNIX) {
+ /* the server supports the Unix-like mode bits and does its
+ own permission checks, and therefore we do not allow the file
mode to be overridden on these mounts - so do not do perm
+ check on client side */
return 0;
} else /* file mode might have been restricted at mount time
on the client (above and beyond ACL on servers) for
.flush = cifs_flush,
.mmap = cifs_file_mmap,
.sendfile = generic_file_sendfile,
- .dir_notify = cifs_dir_notify,
+#ifdef CONFIG_CIFS_FCNTL
+ .fcntl = cifs_fcntl,
+#endif
};
struct file_operations cifs_dir_ops = {
.readdir = cifs_readdir,
.release = cifs_closedir,
.read = generic_read_dir,
- .dir_notify = cifs_dir_notify,
+#ifdef CONFIG_CIFS_FCNTL
+ .fcntl = cifs_fcntl,
+#endif
};
static void
*/
atomic_set(&sesInfoAllocCount, 0);
atomic_set(&tconInfoAllocCount, 0);
- atomic_set(&tcpSesAllocCount,0);
atomic_set(&tcpSesReconnectCount, 0);
atomic_set(&tconInfoReconnectCount, 0);
#define TRUE 1
#endif
+extern int map_cifs_error(int error_class, int error_code,
+ int status_codes_negotiated);
+
extern struct address_space_operations cifs_addr_ops;
/* Functions related to super block operations */
extern struct super_operations cifs_super_ops;
+extern void cifs_put_inode(struct inode *);
extern void cifs_read_inode(struct inode *);
extern void cifs_delete_inode(struct inode *);
/* extern void cifs_write_inode(struct inode *); *//* BB not needed yet */
extern struct file_operations cifs_dir_ops;
extern int cifs_dir_open(struct inode *inode, struct file *file);
extern int cifs_readdir(struct file *file, void *direntry, filldir_t filldir);
-extern int cifs_dir_notify(struct file *, unsigned long arg);
+extern long cifs_fcntl(int, unsigned int, unsigned long, struct file *);
/* Functions related to dir entries */
extern struct dentry_operations cifs_dentry_ops;
termination then *2 for unicode versions */
#define MAX_PASSWORD_SIZE 16
-#define CIFS_MIN_RCV_POOL 4
-
/*
* MAX_REQ is the maximum number of requests that WE will send
* on one socket concurently. It also matches the most common
*/
GLOBAL_EXTERN atomic_t sesInfoAllocCount;
GLOBAL_EXTERN atomic_t tconInfoAllocCount;
-GLOBAL_EXTERN atomic_t tcpSesAllocCount;
+
GLOBAL_EXTERN atomic_t tcpSesReconnectCount;
GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
/* PathInfo/FileInfo infolevels */
#define SMB_INFO_STANDARD 1
-#define SMB_SET_FILE_EA 2
-#define SMB_QUERY_FILE_EA_SIZE 2
#define SMB_INFO_QUERY_EAS_FROM_LIST 3
#define SMB_INFO_QUERY_ALL_EAS 4
#define SMB_INFO_IS_NAME_VALID 6
char LinkDest[1];
} FILE_UNIX_LINK_INFO; /* level 513 QPathInfo */
-typedef struct {
- __u16 CreationDate;
- __u16 CreationTime;
- __u16 LastAccessDate;
- __u16 LastAccessTime;
- __u16 LastWriteDate;
- __u16 LastWriteTime;
- __u32 DataSize; /* File Size (EOF) */
- __u32 AllocationSize;
- __u16 Attributes; /* verify not u32 */
- __u32 EASize;
-} FILE_INFO_STANDARD; /* level 1 SetPath/FileInfo */
-
/* defines for enumerating possible values of the Unix type field below */
#define UNIX_FILE 0
#define UNIX_DIR 1
} FILE_DIRECTORY_INFO; /* level 257 FF response data area */
struct gea {
- unsigned char name_len;
- char name[1];
+ unsigned char cbName;
+ char szName[1];
};
struct gealist {
- unsigned long list_len;
+ unsigned long cbList;
struct gea list[1];
};
unsigned char EA_flags;
__u8 name_len;
__u16 value_len;
- char name[1];
+ char szName[1];
/* optionally followed by value */
};
/* flags for _FEA.fEA */
const struct nls_table *nls_codepage);
extern int CIFSSMBSetTimes(const int xid, struct cifsTconInfo *tcon,
- const char *fileName, const FILE_BASIC_INFO * data,
+ char *fileName, FILE_BASIC_INFO * data,
const struct nls_table *nls_codepage);
extern int CIFSSMBSetEOF(const int xid, struct cifsTconInfo *tcon,
- const char *fileName, __u64 size,int setAllocationSizeFlag,
+ char *fileName, __u64 size,int setAllocationSizeFlag,
const struct nls_table *nls_codepage);
extern int CIFSSMBSetFileSize(const int xid, struct cifsTconInfo *tcon,
__u64 size, __u16 fileHandle,__u32 opener_pid, int AllocSizeFlag);
extern int cifs_calculate_mac_key(char * key,const char * rn,const char * pass);
extern void CalcNTLMv2_partial_mac_key(struct cifsSesInfo *, struct nls_table *);
extern void CalcNTLMv2_response(const struct cifsSesInfo *,char * );
+
+extern int CIFSBuildServerList(int xid, char *serverBufferList,
+ int recordlength, int *entries,
+ int *totalEntries, int *topoChangedFlag);
+extern int CIFSSMBQueryShares(int xid, struct cifsTconInfo *tcon,
+ struct shareInfo *shareList, int bufferLen,
+ int *entries, int *totalEntries);
+extern int CIFSSMBQueryAlias(int xid, struct cifsTconInfo *tcon,
+ struct aliasInfo *aliasList, int bufferLen,
+ int *entries, int *totalEntries);
+extern int CIFSSMBAliasInfo(int xid, struct cifsTconInfo *tcon,
+ char *aliasName, char *serverName,
+ char *shareName, char *comment);
+extern int CIFSSMBGetShareInfo(int xid, struct cifsTconInfo *tcon,
+ char *share, char *comment);
+extern int CIFSSMBGetUserPerms(int xid, struct cifsTconInfo *tcon,
+ char *userName, char *searchName, int *perms);
+extern int CIFSSMBSync(int xid, struct cifsTconInfo *tcon, int netfid, int pid);
+
+extern int CIFSSMBSeek(int xid,
+ struct cifsTconInfo *tcon,
+ int netfid,
+ int pid,
+ int whence, unsigned long offset, long long *newoffset);
+
extern int CIFSSMBCopy(int xid,
struct cifsTconInfo *source_tcon,
const char *fromName,
extern int CIFSSMBNotify(const int xid, struct cifsTconInfo *tcon,
const int notify_subdirs,const __u16 netfid,__u32 filter,
const struct nls_table *nls_codepage);
-extern ssize_t CIFSSMBQAllEAs(const int xid, struct cifsTconInfo *tcon,
- const unsigned char *searchName, char * EAData,
- size_t bufsize, const struct nls_table *nls_codepage);
-extern ssize_t CIFSSMBQueryEA(const int xid,struct cifsTconInfo * tcon,
- const unsigned char * searchName,const unsigned char * ea_name,
- unsigned char * ea_value, size_t buf_size,
- const struct nls_table *nls_codepage);
-extern int CIFSSMBSetEA(const int xid, struct cifsTconInfo *tcon,
- const char *fileName, const char * ea_name,
- const void * ea_value, const __u16 ea_value_len,
- const struct nls_table *nls_codepage);
+extern int CIFSSMBQAllEAs(const int xid, struct cifsTconInfo *tcon,
+ const unsigned char *searchName,
+ char * EAData, size_t size,
+ const struct nls_table *nls_codepage);
#endif /* _CIFSPROTO_H */
}
int
-CIFSSMBQFSAttributeInfo(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBQFSAttributeInfo(int xid, struct cifsTconInfo *tcon,
const struct nls_table *nls_codepage)
{
/* level 0x105 SMB_QUERY_FILE_SYSTEM_INFO */
}
int
-CIFSSMBQFSDeviceInfo(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBQFSDeviceInfo(int xid, struct cifsTconInfo *tcon,
const struct nls_table *nls_codepage)
{
/* level 0x104 SMB_QUERY_FILE_SYSTEM_INFO */
}
int
-CIFSSMBQFSUnixInfo(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBQFSUnixInfo(int xid, struct cifsTconInfo *tcon,
const struct nls_table *nls_codepage)
{
/* level 0x200 SMB_QUERY_CIFS_UNIX_INFO */
in Samba which this routine can run into */
int
-CIFSSMBSetEOF(const int xid, struct cifsTconInfo *tcon, const char *fileName,
+CIFSSMBSetEOF(int xid, struct cifsTconInfo *tcon, char *fileName,
__u64 size, int SetAllocation, const struct nls_table *nls_codepage)
{
struct smb_com_transaction2_spi_req *pSMB = NULL;
}
int
-CIFSSMBSetTimes(const int xid, struct cifsTconInfo *tcon, const char *fileName,
- const FILE_BASIC_INFO * data,
- const struct nls_table *nls_codepage)
+CIFSSMBSetTimes(int xid, struct cifsTconInfo *tcon, char *fileName,
+ FILE_BASIC_INFO * data, const struct nls_table *nls_codepage)
{
TRANSACTION2_SPI_REQ *pSMB = NULL;
TRANSACTION2_SPI_RSP *pSMBr = NULL;
return rc;
}
-
-int
-CIFSSMBSetTimesLegacy(int xid, struct cifsTconInfo *tcon, char *fileName,
- FILE_INFO_STANDARD * data, const struct nls_table *nls_codepage)
-{
- TRANSACTION2_SPI_REQ *pSMB = NULL;
- TRANSACTION2_SPI_RSP *pSMBr = NULL;
- int name_len;
- int rc = 0;
- int bytes_returned = 0;
- char *data_offset;
-
- cFYI(1, ("In SetTimesLegacy"));
-
-SetTimesRetryLegacy:
- rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
- (void **) &pSMBr);
- if (rc)
- return rc;
-
- if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
- name_len =
- cifs_strtoUCS((wchar_t *) pSMB->FileName, fileName, 530
- /* find define for this maxpathcomponent */
- , nls_codepage);
- name_len++; /* trailing null */
- name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(fileName, 530);
- name_len++; /* trailing null */
- strncpy(pSMB->FileName, fileName, name_len);
- }
-/* BB fixme - we have to map to FILE_STANDARD_INFO (level 1 info
- in parent function, from the better and ususal FILE_BASIC_INFO */
- pSMB->ParameterCount = 6 + name_len;
- pSMB->DataCount = sizeof (FILE_INFO_STANDARD);
- pSMB->MaxParameterCount = cpu_to_le16(2);
- pSMB->MaxDataCount = cpu_to_le16(1000); /* BB find exact max SMB PDU from sess structure BB */
- pSMB->MaxSetupCount = 0;
- pSMB->Reserved = 0;
- pSMB->Flags = 0;
- pSMB->Timeout = 0;
- pSMB->Reserved2 = 0;
- pSMB->ParameterOffset = offsetof(struct smb_com_transaction2_spi_req,
- InformationLevel) - 4;
- pSMB->DataOffset = pSMB->ParameterOffset + pSMB->ParameterCount;
- data_offset = (char *) (&pSMB->hdr.Protocol) + pSMB->DataOffset;
- pSMB->ParameterOffset = cpu_to_le16(pSMB->ParameterOffset);
- pSMB->DataOffset = cpu_to_le16(pSMB->DataOffset);
- pSMB->SetupCount = 1;
- pSMB->Reserved3 = 0;
- pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION);
- pSMB->ByteCount = 3 /* pad */ + pSMB->ParameterCount + pSMB->DataCount;
-
- pSMB->DataCount = cpu_to_le16(pSMB->DataCount);
- pSMB->ParameterCount = cpu_to_le16(pSMB->ParameterCount);
- pSMB->TotalDataCount = pSMB->DataCount;
- pSMB->TotalParameterCount = pSMB->ParameterCount;
- /* I doubt that passthrough levels apply to this old
- preNT info level */
-/* if (tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU)
- pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_BASIC_INFO2);
- else*/
- pSMB->InformationLevel = cpu_to_le16(SMB_INFO_STANDARD);
- pSMB->Reserved4 = 0;
- pSMB->hdr.smb_buf_length += pSMB->ByteCount;
- memcpy(data_offset, data, sizeof (FILE_INFO_STANDARD));
- pSMB->ByteCount = cpu_to_le16(pSMB->ByteCount);
- rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
- (struct smb_hdr *) pSMBr, &bytes_returned, 0);
- if (rc) {
- cFYI(1, ("SetPathInfo (times legacy) returned %d", rc));
- }
-
- if (pSMB)
- cifs_buf_release(pSMB);
-
- if (rc == -EAGAIN)
- goto SetTimesRetryLegacy;
-
- return rc;
-}
-
int
CIFSSMBUnixSetPerms(const int xid, struct cifsTconInfo *tcon,
char *fileName, __u64 mode, __u64 uid, __u64 gid,
return rc;
}
#ifdef CONFIG_CIFS_XATTR
-ssize_t
+int
CIFSSMBQAllEAs(const int xid, struct cifsTconInfo *tcon,
const unsigned char *searchName,
- char * EAData, size_t buf_size,
+ char * EAData, size_t size,
const struct nls_table *nls_codepage)
{
/* BB assumes one setup word */
int rc = 0;
int bytes_returned;
int name_len;
- struct fea * temp_fea;
- char * temp_ptr;
cFYI(1, ("In Query All EAs path %s", searchName));
QAllEAsRetry:
, nls_codepage);
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
+ } else { /* BB improve the check for buffer overruns BB */
name_len = strnlen(searchName, 530);
name_len++; /* trailing null */
strncpy(pSMB->FileName, searchName, name_len);
ea_response_data = (struct fealist *)
(((char *) &pSMBr->hdr.Protocol) +
pSMBr->DataOffset);
- ea_response_data->list_len =
- cpu_to_le32(ea_response_data->list_len);
cFYI(1,("ea length %d",ea_response_data->list_len));
- name_len = ea_response_data->list_len;
- if(name_len <= 8) {
- /* returned EA size zeroed at top of function */
- cFYI(1,("empty EA list returned from server"));
- } else {
- /* account for ea list len */
- name_len -= 4;
- temp_fea = ea_response_data->list;
- temp_ptr = (char *)temp_fea;
- while(name_len > 0) {
- name_len -= 4;
- temp_ptr += 4;
- rc += temp_fea->name_len;
- /* account for prefix user. and trailing null */
- rc = rc + 5 + 1;
- if(rc<buf_size) {
- memcpy(EAData,"user.",5);
- EAData+=5;
- memcpy(EAData,temp_ptr,temp_fea->name_len);
- EAData+=temp_fea->name_len;
- /* null terminate name */
- *EAData = 0;
- EAData = EAData + 1;
- } else if(buf_size == 0) {
- /* skip copy - calc size only */
- } else {
- /* stop before overrun buffer */
- rc = -ERANGE;
- break;
- }
- name_len -= temp_fea->name_len;
- temp_ptr += temp_fea->name_len;
- /* account for trailing null */
- name_len--;
- temp_ptr++;
- temp_fea->value_len = cpu_to_le16(temp_fea->value_len);
- name_len -= temp_fea->value_len;
- temp_ptr += temp_fea->value_len;
- /* BB check that temp_ptr is still within smb BB*/
- /* no trailing null to account for in value len */
- /* go on to next EA */
- temp_fea = (struct fea *)temp_ptr;
- }
- }
}
}
if (pSMB)
return rc;
}
-
-ssize_t CIFSSMBQueryEA(const int xid,struct cifsTconInfo * tcon,
- const unsigned char * searchName,const unsigned char * ea_name,
- unsigned char * ea_value, size_t buf_size,
- const struct nls_table *nls_codepage)
-{
- TRANSACTION2_QPI_REQ *pSMB = NULL;
- TRANSACTION2_QPI_RSP *pSMBr = NULL;
- int rc = 0;
- int bytes_returned;
- int name_len;
- struct fea * temp_fea;
- char * temp_ptr;
-
- cFYI(1, ("In Query EA path %s", searchName));
-QEARetry:
- rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
- (void **) &pSMBr);
- if (rc)
- return rc;
-
- if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
- name_len =
- cifs_strtoUCS((wchar_t *) pSMB->FileName, searchName, 530
- /* find define for this maxpathcomponent */
- , nls_codepage);
- name_len++; /* trailing null */
- name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(searchName, 530);
- name_len++; /* trailing null */
- strncpy(pSMB->FileName, searchName, name_len);
- }
-
- pSMB->TotalParameterCount = 2 /* level */ + 4 /* reserved */ +
- name_len /* includes null */ ;
- pSMB->TotalDataCount = 0;
- pSMB->MaxParameterCount = cpu_to_le16(2);
- pSMB->MaxDataCount = cpu_to_le16(4000); /* BB find exact max SMB PDU from sess structure BB */
- pSMB->MaxSetupCount = 0;
- pSMB->Reserved = 0;
- pSMB->Flags = 0;
- pSMB->Timeout = 0;
- pSMB->Reserved2 = 0;
- pSMB->ParameterOffset = cpu_to_le16(offsetof(
- struct smb_com_transaction2_qpi_req ,InformationLevel) - 4);
- pSMB->DataCount = 0;
- pSMB->DataOffset = 0;
- pSMB->SetupCount = 1;
- pSMB->Reserved3 = 0;
- pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_PATH_INFORMATION);
- pSMB->ByteCount = pSMB->TotalParameterCount + 1 /* pad */ ;
- pSMB->TotalParameterCount = cpu_to_le16(pSMB->TotalParameterCount);
- pSMB->ParameterCount = pSMB->TotalParameterCount;
- pSMB->InformationLevel = cpu_to_le16(SMB_INFO_QUERY_ALL_EAS);
- pSMB->Reserved4 = 0;
- pSMB->hdr.smb_buf_length += pSMB->ByteCount;
- pSMB->ByteCount = cpu_to_le16(pSMB->ByteCount);
-
- rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
- (struct smb_hdr *) pSMBr, &bytes_returned, 0);
- if (rc) {
- cFYI(1, ("Send error in Query EA = %d", rc));
- } else { /* decode response */
- pSMBr->DataOffset = le16_to_cpu(pSMBr->DataOffset);
- /* BB also check enough total bytes returned */
- /* BB we need to improve the validity checking
- of these trans2 responses */
- if ((pSMBr->ByteCount < 4) || (pSMBr->DataOffset > 512))
- rc = -EIO; /* bad smb */
- /* else if (pFindData){
- memcpy((char *) pFindData,
- (char *) &pSMBr->hdr.Protocol +
- pSMBr->DataOffset, kl);
- }*/ else {
- /* check that length of list is not more than bcc */
- /* check that each entry does not go beyond length
- of list */
- /* check that each element of each entry does not
- go beyond end of list */
- struct fealist * ea_response_data;
- rc = -ENOENT;
- /* validate_trans2_offsets() */
- /* BB to check if(start of smb + pSMBr->DataOffset > &bcc+ bcc)*/
- ea_response_data = (struct fealist *)
- (((char *) &pSMBr->hdr.Protocol) +
- pSMBr->DataOffset);
- ea_response_data->list_len =
- cpu_to_le32(ea_response_data->list_len);
- cFYI(1,("ea length %d",ea_response_data->list_len));
- name_len = ea_response_data->list_len;
- if(name_len <= 8) {
- /* returned EA size zeroed at top of function */
- cFYI(1,("empty EA list returned from server"));
- } else {
- /* account for ea list len */
- name_len -= 4;
- temp_fea = ea_response_data->list;
- temp_ptr = (char *)temp_fea;
- /* loop through checking if we have a matching
- name and then return the associated value */
- while(name_len > 0) {
- name_len -= 4;
- temp_ptr += 4;
- temp_fea->value_len = cpu_to_le16(temp_fea->value_len);
- /* BB validate that value_len falls within SMB,
- even though maximum for name_len is 255 */
- if(memcmp(temp_fea->name,ea_name,
- temp_fea->name_len) == 0) {
- /* found a match */
- rc = temp_fea->value_len;
- /* account for prefix user. and trailing null */
- if(rc<=buf_size) {
- memcpy(ea_value,
- temp_fea->name+temp_fea->name_len+1,
- rc);
- /* ea values, unlike ea names,
- are not null terminated */
- } else if(buf_size == 0) {
- /* skip copy - calc size only */
- } else {
- /* stop before overrun buffer */
- rc = -ERANGE;
- }
- break;
- }
- name_len -= temp_fea->name_len;
- temp_ptr += temp_fea->name_len;
- /* account for trailing null */
- name_len--;
- temp_ptr++;
- name_len -= temp_fea->value_len;
- temp_ptr += temp_fea->value_len;
- /* no trailing null to account for in value len */
- /* go on to next EA */
- temp_fea = (struct fea *)temp_ptr;
- }
- }
- }
- }
- if (pSMB)
- cifs_buf_release(pSMB);
- if (rc == -EAGAIN)
- goto QEARetry;
-
- return rc;
-}
-
-int
-CIFSSMBSetEA(const int xid, struct cifsTconInfo *tcon, const char *fileName,
- const char * ea_name, const void * ea_value,
- const __u16 ea_value_len, const struct nls_table *nls_codepage)
-{
- struct smb_com_transaction2_spi_req *pSMB = NULL;
- struct smb_com_transaction2_spi_rsp *pSMBr = NULL;
- struct fealist *parm_data;
- int name_len;
- int rc = 0;
- int bytes_returned = 0;
-
- cFYI(1, ("In SetEA"));
-SetEARetry:
- rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
- (void **) &pSMBr);
- if (rc)
- return rc;
-
- if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
- name_len =
- cifs_strtoUCS((wchar_t *) pSMB->FileName, fileName, 530
- /* find define for this maxpathcomponent */
- , nls_codepage);
- name_len++; /* trailing null */
- name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(fileName, 530);
- name_len++; /* trailing null */
- strncpy(pSMB->FileName, fileName, name_len);
- }
-
- pSMB->ParameterCount = 6 + name_len;
-
- /* done calculating parms using name_len of file name,
- now use name_len to calculate length of ea name
- we are going to create in the inode xattrs */
- if(ea_name == NULL)
- name_len = 0;
- else
- name_len = strnlen(ea_name,255);
-
- pSMB->DataCount = sizeof(*parm_data) + ea_value_len + name_len + 1;
- pSMB->MaxParameterCount = cpu_to_le16(2);
- pSMB->MaxDataCount = cpu_to_le16(1000); /* BB find max SMB size from sess */
- pSMB->MaxSetupCount = 0;
- pSMB->Reserved = 0;
- pSMB->Flags = 0;
- pSMB->Timeout = 0;
- pSMB->Reserved2 = 0;
- pSMB->ParameterOffset = offsetof(struct smb_com_transaction2_spi_req,
- InformationLevel) - 4;
- pSMB->DataOffset = pSMB->ParameterOffset + pSMB->ParameterCount;
- pSMB->InformationLevel =
- cpu_to_le16(SMB_SET_FILE_EA);
-
- parm_data =
- (struct fealist *) (((char *) &pSMB->hdr.Protocol) +
- pSMB->DataOffset);
- pSMB->ParameterOffset = cpu_to_le16(pSMB->ParameterOffset);
- pSMB->DataOffset = cpu_to_le16(pSMB->DataOffset);
- pSMB->SetupCount = 1;
- pSMB->Reserved3 = 0;
- pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION);
- pSMB->ByteCount = 3 /* pad */ + pSMB->ParameterCount + pSMB->DataCount;
- pSMB->DataCount = cpu_to_le16(pSMB->DataCount);
- parm_data->list_len = (__u32)(pSMB->DataCount);
- parm_data->list[0].EA_flags = 0;
- /* we checked above that name len is less than 255 */
- parm_data->list[0].name_len = (__u8)name_len;;
- /* EA names are always ASCII */
- strncpy(parm_data->list[0].name,ea_name,name_len);
- parm_data->list[0].name[name_len] = 0;
- parm_data->list[0].value_len = cpu_to_le16(ea_value_len);
- /* caller ensures that ea_value_len is less than 64K but
- we need to ensure that it fits within the smb */
-
- /*BB add length check that it would fit in negotiated SMB buffer size BB */
- /* if(ea_value_len > buffer_size - 512 (enough for header)) */
- if(ea_value_len)
- memcpy(parm_data->list[0].name+name_len+1,ea_value,ea_value_len);
-
- pSMB->TotalDataCount = pSMB->DataCount;
- pSMB->ParameterCount = cpu_to_le16(pSMB->ParameterCount);
- pSMB->TotalParameterCount = pSMB->ParameterCount;
- pSMB->Reserved4 = 0;
- pSMB->hdr.smb_buf_length += pSMB->ByteCount;
- pSMB->ByteCount = cpu_to_le16(pSMB->ByteCount);
- rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
- (struct smb_hdr *) pSMBr, &bytes_returned, 0);
- if (rc) {
- cFYI(1, ("SetPathInfo (EA) returned %d", rc));
- }
-
- if (pSMB)
- cifs_buf_release(pSMB);
-
- if (rc == -EAGAIN)
- goto SetEARetry;
-
- return rc;
-}
-
#endif
#include <linux/pagemap.h>
#include <linux/ctype.h>
#include <linux/utsname.h>
-#include <linux/mempool.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include "cifspdu.h"
unsigned char *p24);
extern int cifs_inet_pton(int, const char *, void *dst);
-extern mempool_t *cifs_req_poolp;
-
struct smb_vol {
char *username;
char *password;
int rw:1;
int retry:1;
int intr:1;
- int setuids:1;
- int noperm:1;
unsigned int rsize;
unsigned int wsize;
unsigned int sockopt;
unsigned int pdu_length, total_read;
struct smb_hdr *smb_buffer = NULL;
struct msghdr smb_msg;
- struct kvec iov;
+ mm_segment_t temp_fs;
+ struct iovec iov;
struct socket *csocket = server->ssocket;
struct list_head *tmp;
struct cifsSesInfo *ses;
current->flags |= PF_MEMALLOC;
server->tsk = current; /* save process info to wake at shutdown */
cFYI(1, ("Demultiplex PID: %d", current->pid));
- write_lock(&GlobalSMBSeslock);
- atomic_inc(&tcpSesAllocCount);
- length = tcpSesAllocCount.counter;
- write_unlock(&GlobalSMBSeslock);
- if(length > 1) {
- mempool_resize(cifs_req_poolp,
- length + CIFS_MIN_RCV_POOL,
- GFP_KERNEL);
- }
+
+ temp_fs = get_fs(); /* we must turn off socket api parm checking */
+ set_fs(get_ds());
while (server->tcpStatus != CifsExiting) {
if (smb_buffer == NULL)
iov.iov_base = smb_buffer;
iov.iov_len = sizeof (struct smb_hdr) - 1;
/* 1 byte less above since wct is not always returned in error cases */
+ smb_msg.msg_iov = &iov;
+ smb_msg.msg_iovlen = 1;
smb_msg.msg_control = NULL;
smb_msg.msg_controllen = 0;
length =
- kernel_recvmsg(csocket, &smb_msg,
- &iov, 1,
- sizeof (struct smb_hdr) -
- 1 /* RFC1001 header and SMB header */ ,
- MSG_PEEK /* flags see socket.h */ );
+ sock_recvmsg(csocket, &smb_msg,
+ sizeof (struct smb_hdr) -
+ 1 /* RFC1001 header and SMB header */ ,
+ MSG_PEEK /* flags see socket.h */ );
if(server->tcpStatus == CifsExiting) {
break;
if (temp[0] == (char) RFC1002_SESSION_KEEP_ALIVE) {
iov.iov_base = smb_buffer;
iov.iov_len = 4;
- length = kernel_recvmsg(csocket, &smb_msg,
- &iov, 1, 4, 0);
+ length = sock_recvmsg(csocket, &smb_msg, 4, 0);
cFYI(0,("Received 4 byte keep alive packet"));
} else if (temp[0] == (char) RFC1002_POSITIVE_SESSION_RESPONSE) {
- iov.iov_base = smb_buffer;
- iov.iov_len = 4;
- length = kernel_recvmsg(csocket, &smb_msg,
- &iov, 1, 4, 0);
+ iov.iov_base = smb_buffer;
+ iov.iov_len = 4;
+ length = sock_recvmsg(csocket, &smb_msg, 4, 0);
cFYI(1,("Good RFC 1002 session rsp"));
} else if ((temp[0] == (char)RFC1002_NEGATIVE_SESSION_RESPONSE)
&& (length == 5)) {
for (total_read = 0;
total_read < pdu_length;
total_read += length) {
- length = kernel_recvmsg(csocket, &smb_msg,
- &iov, 1,
+ length = sock_recvmsg(csocket, &smb_msg,
pdu_length - total_read, 0);
if (length == 0) {
cERROR(1,
("Frame less than four bytes received %d bytes long.",
length));
if (length > 0) {
- length = kernel_recvmsg(csocket, &smb_msg,
- &iov, 1,
- length, 0); /* throw away junk frame */
+ length = sock_recvmsg(csocket, &smb_msg, length, 0); /* throw away junk frame */
cFYI(1,
(" with junk 0x%x in it ",
*(__u32 *) smb_buffer));
sock_release(csocket);
server->ssocket = NULL;
}
+ set_fs(temp_fs);
if (smb_buffer) /* buffer usually freed in free_mid - need to free it on error or exit */
cifs_buf_release(smb_buffer);
}
kfree(server);
- write_lock(&GlobalSMBSeslock);
- atomic_dec(&tcpSesAllocCount);
- length = tcpSesAllocCount.counter;
- write_unlock(&GlobalSMBSeslock);
- if(length > 0) {
- mempool_resize(cifs_req_poolp,
- length + CIFS_MIN_RCV_POOL,
- GFP_KERNEL);
- }
-
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(HZ/4);
return 0;
vol->retry = 1;
} else if (strnicmp(data, "soft", 4) == 0) {
vol->retry = 0;
- } else if (strnicmp(data, "perm", 4) == 0) {
- vol->noperm = 0;
- } else if (strnicmp(data, "noperm", 6) == 0) {
- vol->noperm = 1;
- } else if (strnicmp(data, "setuids", 7) == 0) {
- vol->setuids = 1;
- } else if (strnicmp(data, "nosetuids", 9) == 0) {
- vol->setuids = 0;
} else if (strnicmp(data, "nohard", 6) == 0) {
vol->retry = 0;
} else if (strnicmp(data, "nosoft", 6) == 0) {
cifs_sb->mnt_file_mode = volume_info.file_mode;
cifs_sb->mnt_dir_mode = volume_info.dir_mode;
cFYI(1,("file mode: 0x%x dir mode: 0x%x",cifs_sb->mnt_file_mode,cifs_sb->mnt_dir_mode));
-
- if(volume_info.noperm)
- cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_PERM;
- if(volume_info.setuids)
- cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_SET_UID;
-
tcon =
find_unc(sin_server.sin_addr.s_addr, volume_info.UNC,
volume_info.username);
then we now have to set the mode if possible */
if ((cifs_sb->tcon->ses->capabilities & CAP_UNIX) &&
(oplock & CIFS_CREATE_ACTION))
- if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
- CIFSSMBUnixSetPerms(xid, pTcon, full_path, mode,
- (__u64)current->euid,
- (__u64)current->egid,
- 0 /* dev */,
- cifs_sb->local_nls);
- } else {
- CIFSSMBUnixSetPerms(xid, pTcon, full_path, mode,
+ CIFSSMBUnixSetPerms(xid, pTcon, full_path, mode,
(__u64)-1,
(__u64)-1,
0 /* dev */,
cifs_sb->local_nls);
- }
else {
/* BB implement via Windows security descriptors */
/* eg CIFSSMBWinSetPerms(xid,pTcon,full_path,mode,-1,-1,local_nls);*/
rc = -ENOMEM;
if (full_path && (pTcon->ses->capabilities & CAP_UNIX)) {
- if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
- rc = CIFSSMBUnixSetPerms(xid, pTcon, full_path,
- mode,(__u64)current->euid,(__u64)current->egid,
- device_number, cifs_sb->local_nls);
- } else {
- rc = CIFSSMBUnixSetPerms(xid, pTcon,
- full_path, mode, (__u64)-1, (__u64)-1,
- device_number, cifs_sb->local_nls);
- }
-
+ rc = CIFSSMBUnixSetPerms(xid, pTcon,
+ full_path, mode, current->euid, current->egid,
+ device_number, cifs_sb->local_nls);
if(!rc) {
rc = cifs_get_inode_info_unix(&newinode, full_path,
inode->i_sb,xid);
#include "cifs_unicode.h"
#include "cifs_debug.h"
-int cifs_dir_notify(struct file * file, unsigned long arg)
+int cifs_directory_notify(unsigned long arg, struct file * file)
{
int xid;
int rc = -EINVAL;
FreeXid(xid);
return rc;
}
+
+
+long cifs_fcntl(int file_desc, unsigned int command, unsigned long arg,
+ struct file * file)
+{
+ /* Few few file control functions need to be specially mapped. So far
+ only:
+ F_NOTIFY (for directory change notification)
+ And eventually:
+ F_GETLEASE
+ F_SETLEASE
+ need to be mapped here. The others either already are mapped downstream
+ or do not need to go to the server (client only sideeffects):
+ F_DUPFD:
+ F_GETFD:
+ F_SETFD:
+ F_GETFL:
+ F_SETFL:
+ F_GETLK:
+ F_SETLK:
+ F_SETLKW:
+ F_GETOWN:
+ F_SETOWN:
+ F_GETSIG:
+ F_SETSIG:
+ */
+ long rc = 0;
+
+ cFYI(1,("cifs_fcntl: command %d with arg %lx",command,arg)); /* BB removeme BB */
+
+ switch (command) {
+ case F_NOTIFY:
+ /* let the local call have a chance to fail first */
+ rc = generic_file_fcntl(file_desc,command,arg,file);
+ if(rc)
+ return rc;
+ else {
+ /* local call succeeded try to do remote notify to
+ pick up changes from other clients to server file */
+ cifs_directory_notify(arg, file);
+ /* BB add case to long and return rc from above */
+ return rc;
+ }
+ break;
+ default:
+ break;
+ }
+ return generic_file_fcntl(file_desc,command,arg,file);
+}
+
d_instantiate(direntry, newinode);
if(direntry->d_inode)
direntry->d_inode->i_nlink = 2;
- if (cifs_sb->tcon->ses->capabilities & CAP_UNIX)
- if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
- CIFSSMBUnixSetPerms(xid, pTcon, full_path, mode,
- (__u64)current->euid,
- (__u64)current->egid,
- 0 /* dev_t */,
- cifs_sb->local_nls);
- } else {
- CIFSSMBUnixSetPerms(xid, pTcon, full_path, mode,
- (__u64)-1,
- (__u64)-1,
- 0 /* dev_t */,
- cifs_sb->local_nls);
- }
+ if (cifs_sb->tcon->ses->capabilities & CAP_UNIX)
+ CIFSSMBUnixSetPerms(xid, pTcon, full_path, mode,
+ (__u64)-1,
+ (__u64)-1,
+ 0 /* dev_t */,
+ cifs_sb->local_nls);
else { /* BB to be implemented via Windows secrty descriptors*/
/* eg CIFSSMBWinSetPerms(xid,pTcon,full_path,mode,-1,-1,local_nls);*/
}
void NTLMSSPOWFencrypt(unsigned char passwd[8],
unsigned char *ntlmchalresp, unsigned char p24[24]);
void SMBNTencrypt(unsigned char *passwd, unsigned char *c8, unsigned char *p24);
+int decode_pw_buffer(char in_buffer[516], char *new_pwrd,
+ int new_pwrd_size, __u32 * new_pw_len);
/*
This implements the X/Open SMB password encryption
/*
* fs/cifs/smberr.h
*
- * Copyright (c) International Business Machines Corp., 2002,2004
+ * Copyright (c) International Business Machines Corp., 2002
* Author(s): Steve French (sfrench@us.ibm.com)
*
* See Error Codes section of the SNIA CIFS Specification
#define ERRinvparm 87
#define ERRdiskfull 112
#define ERRinvname 123
-#define ERRinvlevel 124
#define ERRdirnotempty 145
#define ERRnotlocked 158
#define ERRalreadyexists 183
int rc = 0;
int i = 0;
struct msghdr smb_msg;
- struct kvec iov;
+ struct iovec iov;
+ mm_segment_t temp_fs;
if(ssocket == NULL)
return -ENOTSOCK; /* BB eventually add reconnect code here */
smb_msg.msg_name = sin;
smb_msg.msg_namelen = sizeof (struct sockaddr);
+ smb_msg.msg_iov = &iov;
+ smb_msg.msg_iovlen = 1;
smb_msg.msg_control = NULL;
smb_msg.msg_controllen = 0;
smb_msg.msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL; /* BB add more flags?*/
cFYI(1, ("Sending smb of length %d ", smb_buf_length));
dump_smb(smb_buffer, smb_buf_length + 4);
+ temp_fs = get_fs(); /* we must turn off socket api parm checking */
+ set_fs(get_ds());
while(iov.iov_len > 0) {
- rc = kernel_sendmsg(ssocket, &smb_msg, &iov, 1, smb_buf_length + 4);
+ rc = sock_sendmsg(ssocket, &smb_msg, smb_buf_length + 4);
if ((rc == -ENOSPC) || (rc == -EAGAIN)) {
i++;
if(i > 60) {
iov.iov_base += rc;
iov.iov_len -= rc;
}
+ set_fs(temp_fs);
if (rc < 0) {
cERROR(1,("Error %d sending data on socket to server.", rc));
#include "cifsproto.h"
#include "cifs_debug.h"
-#define MAX_EA_VALUE_SIZE 65535
-#define CIFS_XATTR_DOS_ATTRIB "user.DOSATTRIB"
-#define CIFS_XATTR_USER_PREFIX "user."
-#define CIFS_XATTR_SYSTEM_PREFIX "system."
-#define CIFS_XATTR_OS2_PREFIX "OS2." /* BB should check for this someday */
-/* also note could add check for security prefix XATTR_SECURITY_PREFIX */
-
-
-int cifs_removexattr(struct dentry * direntry, const char * ea_name)
+int cifs_removexattr(struct dentry * direntry, const char * name)
{
int rc = -EOPNOTSUPP;
-#ifdef CONFIG_CIFS_XATTR
- int xid;
- struct cifs_sb_info *cifs_sb;
- struct cifsTconInfo *pTcon;
- struct super_block * sb;
- char * full_path;
-
- if(direntry == NULL)
- return -EIO;
- if(direntry->d_inode == NULL)
- return -EIO;
- sb = direntry->d_inode->i_sb;
- if(sb == NULL)
- return -EIO;
- xid = GetXid();
-
- cifs_sb = CIFS_SB(sb);
- pTcon = cifs_sb->tcon;
-
- down(&sb->s_vfs_rename_sem);
- full_path = build_path_from_dentry(direntry);
- up(&sb->s_vfs_rename_sem);
- if(full_path == NULL) {
- FreeXid(xid);
- return -ENOMEM;
- }
- if(ea_name == NULL) {
- cFYI(1,("Null xattr names not supported"));
- } else if(strncmp(ea_name,CIFS_XATTR_USER_PREFIX,5)) {
- cFYI(1,("illegal xattr namespace %s (only user namespace supported)",ea_name));
- /* BB what if no namespace prefix? */
- /* Should we just pass them to server, except for
- system and perhaps security prefixes? */
- } else {
- ea_name+=5; /* skip past user. prefix */
- rc = CIFSSMBSetEA(xid,pTcon,full_path,ea_name,NULL,
- (__u16)0, cifs_sb->local_nls);
- }
- if (full_path)
- kfree(full_path);
- FreeXid(xid);
-#endif
return rc;
}
-int cifs_setxattr(struct dentry * direntry, const char * ea_name,
- const void * ea_value, size_t value_size, int flags)
+int cifs_setxattr(struct dentry * direntry, const char * name,
+ const void * value, size_t size, int flags)
{
int rc = -EOPNOTSUPP;
-#ifdef CONFIG_CIFS_XATTR
- int xid;
- struct cifs_sb_info *cifs_sb;
- struct cifsTconInfo *pTcon;
- struct super_block * sb;
- char * full_path;
-
- if(direntry == NULL)
- return -EIO;
- if(direntry->d_inode == NULL)
- return -EIO;
- sb = direntry->d_inode->i_sb;
- if(sb == NULL)
- return -EIO;
- xid = GetXid();
-
- cifs_sb = CIFS_SB(sb);
- pTcon = cifs_sb->tcon;
-
- down(&sb->s_vfs_rename_sem);
- full_path = build_path_from_dentry(direntry);
- up(&sb->s_vfs_rename_sem);
- if(full_path == NULL) {
- FreeXid(xid);
- return -ENOMEM;
- }
- /* return dos attributes as pseudo xattr */
- /* return alt name if available as pseudo attr */
-
- /* if proc/fs/cifs/streamstoxattr is set then
- search server for EAs or streams to
- returns as xattrs */
- if(value_size > MAX_EA_VALUE_SIZE) {
- cFYI(1,("size of EA value too large"));
- if(full_path)
- kfree(full_path);
- FreeXid(xid);
- return -EOPNOTSUPP;
- }
-
- if(ea_name == NULL) {
- cFYI(1,("Null xattr names not supported"));
- } else if(strncmp(ea_name,CIFS_XATTR_USER_PREFIX,5)) {
- cFYI(1,("illegal xattr namespace %s (only user namespace supported)",ea_name));
- /* BB what if no namespace prefix? */
- /* Should we just pass them to server, except for
- system and perhaps security prefixes? */
- } else {
- ea_name+=5; /* skip past user. prefix */
- rc = CIFSSMBSetEA(xid,pTcon,full_path,ea_name,ea_value,
- (__u16)value_size, cifs_sb->local_nls);
- }
- if (full_path)
- kfree(full_path);
- FreeXid(xid);
-#endif
return rc;
}
-ssize_t cifs_getxattr(struct dentry * direntry, const char * ea_name,
- void * ea_value, size_t buf_size)
+ssize_t cifs_getxattr(struct dentry * direntry, const char * name,
+ void * value, size_t size)
{
ssize_t rc = -EOPNOTSUPP;
-#ifdef CONFIG_CIFS_XATTR
- int xid;
- struct cifs_sb_info *cifs_sb;
- struct cifsTconInfo *pTcon;
- struct super_block * sb;
- char * full_path;
-
- if(direntry == NULL)
- return -EIO;
- if(direntry->d_inode == NULL)
- return -EIO;
- sb = direntry->d_inode->i_sb;
- if(sb == NULL)
- return -EIO;
- xid = GetXid();
-
- cifs_sb = CIFS_SB(sb);
- pTcon = cifs_sb->tcon;
-
- down(&sb->s_vfs_rename_sem);
- full_path = build_path_from_dentry(direntry);
- up(&sb->s_vfs_rename_sem);
- if(full_path == NULL) {
- FreeXid(xid);
- return -ENOMEM;
- }
- /* return dos attributes as pseudo xattr */
- /* return alt name if available as pseudo attr */
- if(strncmp(ea_name,CIFS_XATTR_USER_PREFIX,5)) {
- cFYI(1,("illegal xattr namespace %s (only user namespace supported)",ea_name));
- /* BB what if no namespace prefix? */
- /* Should we just pass them to server, except for system? */
- } else {
- /* We could add a check here
- if proc/fs/cifs/streamstoxattr is set then
- search server for EAs or streams to
- returns as xattrs */
- ea_name+=5; /* skip past user. */
- rc = CIFSSMBQueryEA(xid,pTcon,full_path,ea_name,ea_value,
- buf_size, cifs_sb->local_nls);
- }
- if (full_path)
- kfree(full_path);
- FreeXid(xid);
-#endif
return rc;
}
-ssize_t cifs_listxattr(struct dentry * direntry, char * data, size_t buf_size)
+ssize_t cifs_listxattr(struct dentry * direntry, char * ea_data, size_t ea_size)
{
ssize_t rc = -EOPNOTSUPP;
#ifdef CONFIG_CIFS_XATTR
struct cifsTconInfo *pTcon;
struct super_block * sb;
char * full_path;
-
if(direntry == NULL)
return -EIO;
if(direntry->d_inode == NULL)
FreeXid(xid);
return -ENOMEM;
}
- /* return dos attributes as pseudo xattr */
+ /* return dosattributes as pseudo xattr */
/* return alt name if available as pseudo attr */
/* if proc/fs/cifs/streamstoxattr is set then
search server for EAs or streams to
returns as xattrs */
- rc = CIFSSMBQAllEAs(xid,pTcon,full_path,data,buf_size,
- cifs_sb->local_nls);
-
- if (full_path)
- kfree(full_path);
+ rc = CIFSSMBQAllEAs(xid,pTcon,full_path,ea_data,ea_size,cifs_sb->local_nls);
FreeXid(xid);
#endif
return rc;
}
int do_reset_coda_vfs_stats( ctl_table * table, int write, struct file * filp,
- void __user * buffer, size_t * lenp, loff_t * ppos )
+ void __user * buffer, size_t * lenp )
{
if ( write ) {
reset_coda_vfs_stats();
- *ppos += *lenp;
+ filp->f_pos += *lenp;
} else {
*lenp = 0;
}
int do_reset_coda_cache_inv_stats( ctl_table * table, int write,
struct file * filp, void __user * buffer,
- size_t * lenp, loff_t * ppos )
+ size_t * lenp )
{
if ( write ) {
reset_coda_cache_inv_stats();
- *ppos += *lenp;
+ filp->f_pos += *lenp;
} else {
*lenp = 0;
}
static int fb_getput_cmap(unsigned int fd, unsigned int cmd, unsigned long arg)
{
- struct fb_cmap_user __user *cmap;
+ struct fb_cmap __user *cmap;
struct fb_cmap32 __user *cmap32;
__u32 data;
int err;
{
if (dentry->d_op && dentry->d_op->d_release)
dentry->d_op->d_release(dentry);
- if (dentry->d_extra_attributes) {
- kfree(dentry->d_extra_attributes);
- dentry->d_extra_attributes = NULL;
- }
call_rcu(&dentry->d_rcu, d_callback);
}
struct dentry *this = hlist_entry(lp, struct dentry, d_hash);
if (!list_empty(&this->d_lru)) {
dentry_stat.nr_unused--;
- list_del_init(&this->d_lru);
+ list_del(&this->d_lru);
}
/*
dentry->d_sb = NULL;
dentry->d_op = NULL;
dentry->d_fsdata = NULL;
- dentry->d_extra_attributes = NULL;
dentry->d_mounted = 0;
dentry->d_cookie = NULL;
dentry->d_bucket = NULL;
/* Unhash the target: dput() will then get rid of it */
__d_drop(target);
- /* flush any possible attributes */
- if (dentry->d_extra_attributes) {
- kfree(dentry->d_extra_attributes);
- dentry->d_extra_attributes = NULL;
- }
- if (target->d_extra_attributes) {
- kfree(target->d_extra_attributes);
- target->d_extra_attributes = NULL;
- }
-
list_del(&dentry->d_child);
list_del(&target->d_child);
*
* "buflen" should be positive. Caller holds the dcache_lock.
*/
-char * __d_path( struct dentry *dentry, struct vfsmount *vfsmnt,
+static char * __d_path( struct dentry *dentry, struct vfsmount *vfsmnt,
struct dentry *root, struct vfsmount *rootmnt,
char *buffer, int buflen)
{
return ERR_PTR(-ENAMETOOLONG);
}
-EXPORT_SYMBOL_GPL(__d_path);
-
/* write full pathname into buffer and return start of pathname */
char * d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
char *buf, int buflen)
INIT_HLIST_HEAD(&dentry_hashtable[loop]);
}
-void flush_dentry_attributes (void)
-{
- struct hlist_node *tmp;
- struct dentry *dentry;
- int i;
-
- spin_lock(&dcache_lock);
- for (i = 0; i <= d_hash_mask; i++)
- hlist_for_each_entry(dentry, tmp, dentry_hashtable+i, d_hash) {
- kfree(dentry->d_extra_attributes);
- dentry->d_extra_attributes = NULL;
- }
- spin_unlock(&dcache_lock);
-}
-
-EXPORT_SYMBOL_GPL(flush_dentry_attributes);
-
static void __init dcache_init(unsigned long mempages)
{
/*
static ssize_t stat_read(struct file *file, char __user *buf, size_t len,
loff_t * ppos);
static struct file_operations stat_fops = {
- .open = nonseekable_open,
.read = stat_read,
};
#endif
/* Devfs daemon file operations */
static struct file_operations devfsd_fops = {
- .open = nonseekable_open,
.read = devfsd_read,
.ioctl = devfsd_ioctl,
.release = devfsd_close,
struct devfsd_notify_struct *info = fs_info->devfsd_info;
DECLARE_WAITQUEUE(wait, current);
+ /* Can't seek (pread) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
/* Verify the task has grabbed the queue */
if (fs_info->devfsd_task != current)
return -EPERM;
num = sprintf(txt, "Number of entries: %u number of bytes: %u\n",
stat_num_entries, stat_num_bytes) + 1;
+ /* Can't seek (pread) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (*ppos >= num)
return 0;
if (*ppos + len > num)
#include <linux/vs_base.h>
#include "xattr.h"
+#define DEVPTS_SUPER_MAGIC 0x1cd1
+
static struct vfsmount *devpts_mnt;
static struct dentry *devpts_root;
inode->i_op = &simple_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
inode->i_nlink = 2;
- inode->i_xid = vx_current_xid();
devpts_root = s->s_root = d_alloc_root(inode);
if (s->s_root)
dn->dn_next = inode->i_dnotify;
inode->i_dnotify = dn;
spin_unlock(&inode->i_lock);
-
- if (filp->f_op && filp->f_op->dir_notify)
- return filp->f_op->dir_notify(filp, arg);
return 0;
out_free:
#include <linux/rmap.h>
#include <linux/ckrm.h>
#include <linux/vs_memory.h>
-#include <linux/ckrm_mem.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
tsk->active_mm = mm;
activate_mm(active_mm, mm);
task_unlock(tsk);
- arch_pick_mmap_layout(mm);
-#ifdef CONFIG_CKRM_RES_MEM
- if (old_mm) {
- spin_lock(&old_mm->peertask_lock);
- list_del(&tsk->mm_peers);
- ckrm_mem_evaluate_mm(old_mm);
- spin_unlock(&old_mm->peertask_lock);
- }
- spin_lock(&mm->peertask_lock);
- list_add_tail(&tsk->mm_peers, &mm->tasklist);
- ckrm_mem_evaluate_mm(mm);
- spin_unlock(&mm->peertask_lock);
-#endif
if (old_mm) {
if (active_mm != old_mm) BUG();
mmput(old_mm);
if(!(bprm->file->f_vfsmnt->mnt_flags & MNT_NOSUID)) {
/* Set-uid? */
if (mode & S_ISUID) {
- current->personality &= ~PER_CLEAR_ON_SETID;
bprm->e_uid = inode->i_uid;
+#ifdef __i386__
+ /* reset personality */
+ current->personality = PER_LINUX;
+#endif
}
/* Set-gid? */
* executable.
*/
if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
- current->personality &= ~PER_CLEAR_ON_SETID;
bprm->e_gid = inode->i_gid;
+#ifdef __i386__
+ /* reset personality */
+ current->personality = PER_LINUX;
+#endif
}
}
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/fs.h>
-#include <linux/namei.h>
-#include <linux/vs_base.h>
#include "ext2.h"
#include "xattr.h"
#include "acl.h"
{
int mode = inode->i_mode;
- /* Prevent vservers from escaping chroot() barriers */
- if (IS_BARRIER(inode) && !vx_check(0, VX_ADMIN))
- return -EACCES;
/* Nobody gets write access to a read-only fs */
- if ((mask & MAY_WRITE) && (IS_RDONLY(inode) ||
- (nd && MNT_IS_RDONLY(nd->mnt))) &&
+ if ((mask & MAY_WRITE) && IS_RDONLY(inode) &&
(S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)))
return -EROFS;
/* Nobody gets write access to an immutable file */
#include <linux/quotaops.h>
#include <linux/sched.h>
#include <linux/buffer_head.h>
-#include <linux/vs_base.h>
-#include <linux/vs_dlimit.h>
/*
* balloc.c contains the blocks allocation and deallocation routines
free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
root_blocks = le32_to_cpu(es->s_r_blocks_count);
- DLIMIT_ADJUST_BLOCK(sb, vx_current_xid(), &free_blocks, &root_blocks);
-
if (free_blocks < count)
count = free_blocks;
}
error_return:
brelse(bitmap_bh);
- DLIMIT_FREE_BLOCK(sb, inode->i_xid, freed);
release_blocks(sb, freed);
DQUOT_FREE_BLOCK(inode, freed);
}
*err = -ENOSPC;
goto out_dquot;
}
- if (DLIMIT_ALLOC_BLOCK(sb, inode->i_xid, es_alloc)) {
- *err = -ENOSPC;
- goto out_dlimit;
- }
ext2_debug ("goal=%lu.\n", goal);
*err = 0;
out_release:
group_release_blocks(sb, group_no, desc, gdp_bh, group_alloc);
- DLIMIT_FREE_BLOCK(sb, inode->i_xid, es_alloc);
-out_dlimit:
release_blocks(sb, es_alloc);
out_dquot:
DQUOT_FREE_BLOCK(inode, dq_alloc);
unsigned chunk_mask = ~(ext2_chunk_size(inode)-1);
unsigned char *types = NULL;
int need_revalidate = (filp->f_version != inode->i_version);
- int ret;
+ int ret = 0;
if (pos > inode->i_size - EXT2_DIR_REC_LEN(1))
- goto success;
+ goto done;
if (EXT2_HAS_INCOMPAT_FEATURE(sb, EXT2_FEATURE_INCOMPAT_FILETYPE))
types = ext2_filetype_table;
le32_to_cpu(de->inode), d_type);
if (over) {
ext2_put_page(page);
- goto success;
+ goto done;
}
}
}
ext2_put_page(page);
}
-success:
- ret = 0;
done:
filp->f_pos = (n << PAGE_CACHE_SHIFT) | offset;
filp->f_version = inode->i_version;
- return ret;
+ return 0;
}
/*
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include <linux/random.h>
-#include <linux/vs_base.h>
-#include <linux/vs_dlimit.h>
-
#include "ext2.h"
#include "xattr.h"
#include "acl.h"
if (!is_bad_inode(inode)) {
/* Quota is already initialized in iput() */
ext2_xattr_delete_inode(inode);
- DLIMIT_FREE_INODE(sb, inode->i_xid);
DQUOT_FREE_INODE(inode);
DQUOT_DROP(inode);
}
if (!inode)
return ERR_PTR(-ENOMEM);
- if (sb->s_flags & MS_TAGXID)
- inode->i_xid = current->xid;
- else
- inode->i_xid = 0;
-
- if (DLIMIT_ALLOC_INODE(sb, inode->i_xid)) {
- err = -ENOSPC;
- goto fail_dlim;
- }
ei = EXT2_I(inode);
sbi = EXT2_SB(sb);
es = sbi->s_es;
return inode;
fail2:
- DLIMIT_FREE_INODE(sb, inode->i_xid);
inode->i_flags |= S_NOQUOTA;
inode->i_nlink = 0;
iput(inode);
return ERR_PTR(err);
fail:
- DLIMIT_FREE_INODE(sb, inode->i_xid);
-fail_dlim:
make_bad_inode(inode);
iput(inode);
return ERR_PTR(err);
{
unsigned int flags = EXT2_I(inode)->i_flags;
- inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_IUNLINK|S_BARRIER|S_NOATIME|S_DIRSYNC);
+ inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
if (flags & EXT2_SYNC_FL)
inode->i_flags |= S_SYNC;
if (flags & EXT2_APPEND_FL)
uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
}
- inode->i_uid = INOXID_UID(XID_TAG(inode), uid, gid);
- inode->i_gid = INOXID_GID(XID_TAG(inode), uid, gid);
- inode->i_xid = INOXID_XID(XID_TAG(inode), uid, gid,
- le16_to_cpu(raw_inode->i_raw_xid));
+ inode->i_uid = INOXID_UID(uid, gid);
+ inode->i_gid = INOXID_GID(uid, gid);
+ if (inode->i_sb->s_flags & MS_TAGXID)
+ inode->i_xid = INOXID_XID(uid, gid, le16_to_cpu(raw_inode->i_raw_xid));
inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
inode->i_size = le32_to_cpu(raw_inode->i_size);
struct ext2_inode_info *ei = EXT2_I(inode);
struct super_block *sb = inode->i_sb;
ino_t ino = inode->i_ino;
- uid_t uid = XIDINO_UID(XID_TAG(inode), inode->i_uid, inode->i_xid);
- gid_t gid = XIDINO_GID(XID_TAG(inode), inode->i_gid, inode->i_xid);
+ uid_t uid = XIDINO_UID(inode->i_uid, inode->i_xid);
+ gid_t gid = XIDINO_GID(inode->i_gid, inode->i_xid);
struct buffer_head * bh;
struct ext2_inode * raw_inode = ext2_get_inode(sb, ino, &bh);
int n;
if (error)
return error;
if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) ||
- (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid) ||
- (iattr->ia_valid & ATTR_XID && iattr->ia_xid != inode->i_xid)) {
+ (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) {
error = DQUOT_TRANSFER(inode, iattr) ? -EDQUOT : 0;
if (error)
return error;
}
- if (iattr->ia_valid & ATTR_ATTR_FLAG)
- ext2_setattr_flags(inode, iattr->ia_attr_flags);
-
error = inode_setattr(inode, iattr);
if (!error && (iattr->ia_valid & ATTR_MODE))
error = ext2_acl_chmod(inode);
case EXT2_IOC_SETFLAGS: {
unsigned int oldflags;
- if (IS_RDONLY(inode) ||
- (filp && MNT_IS_RDONLY(filp->f_vfsmnt)))
+ if (IS_RDONLY(inode))
return -EROFS;
if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
*
* This test looks nicer. Thanks to Pauline Middelink
*/
- if (((oldflags & EXT2_IMMUTABLE_FL) ||
+ if ((oldflags & EXT2_IMMUTABLE_FL) ||
((flags ^ oldflags) &
- (EXT2_APPEND_FL | EXT2_IMMUTABLE_FL | EXT2_IUNLINK_FL)))
- && !capable(CAP_LINUX_IMMUTABLE)) {
- return -EPERM;
+ (EXT2_APPEND_FL | EXT2_IMMUTABLE_FL))) {
+ if (!capable(CAP_LINUX_IMMUTABLE))
+ return -EPERM;
}
flags = flags & EXT2_FL_USER_MODIFIABLE;
case EXT2_IOC_SETVERSION:
if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
return -EPERM;
- if (IS_RDONLY(inode) ||
- (filp && MNT_IS_RDONLY(filp->f_vfsmnt)))
+ if (IS_RDONLY(inode))
return -EROFS;
if (get_user(inode->i_generation, (int __user *) arg))
return -EFAULT;
#include <linux/mbcache.h>
#include <linux/quotaops.h>
#include <linux/rwsem.h>
-#include <linux/vs_dlimit.h>
#include "ext2.h"
#include "xattr.h"
#include "acl.h"
the inode. */
ea_bdebug(new_bh, "reusing block");
- error = -ENOSPC;
- if (DLIMIT_ALLOC_BLOCK(sb, inode->i_xid, 1))
- goto cleanup;
error = -EDQUOT;
if (DQUOT_ALLOC_BLOCK(inode, 1)) {
- DLIMIT_FREE_BLOCK(sb, inode->i_xid, 1);
unlock_buffer(new_bh);
goto cleanup;
}
/* Decrement the refcount only. */
HDR(old_bh)->h_refcount = cpu_to_le32(
le32_to_cpu(HDR(old_bh)->h_refcount) - 1);
- DLIMIT_FREE_BLOCK(sb, inode->i_xid, 1);
DQUOT_FREE_BLOCK(inode, 1);
mark_buffer_dirty(old_bh);
ea_bdebug(old_bh, "refcount now=%d",
mark_buffer_dirty(bh);
if (IS_SYNC(inode))
sync_dirty_buffer(bh);
- DLIMIT_FREE_BLOCK(inode->i_sb, inode->i_xid, 1);
DQUOT_FREE_BLOCK(inode, 1);
}
ea_bdebug(bh, "refcount now=%d", le32_to_cpu(HDR(bh)->h_refcount) - 1);
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/fs.h>
-#include <linux/namei.h>
#include <linux/ext3_jbd.h>
#include <linux/ext3_fs.h>
-#include <linux/vs_base.h>
#include "xattr.h"
#include "acl.h"
{
int mode = inode->i_mode;
- /* Prevent vservers from escaping chroot() barriers */
- if (IS_BARRIER(inode) && !vx_check(0, VX_ADMIN))
- return -EACCES;
/* Nobody gets write access to a read-only fs */
- if ((mask & MAY_WRITE) && (IS_RDONLY(inode) ||
- (nd && nd->mnt && MNT_IS_RDONLY(nd->mnt))) &&
+ if ((mask & MAY_WRITE) && IS_RDONLY(inode) &&
(S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)))
return -EROFS;
/* Nobody gets write access to an immutable file */
#include <linux/ext3_jbd.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
-#include <linux/vs_base.h>
-#include <linux/vs_dlimit.h>
/*
* balloc.c contains the blocks allocation and deallocation routines
error_return:
brelse(bitmap_bh);
ext3_std_error(sb, err);
- if (dquot_freed_blocks) {
- DLIMIT_FREE_BLOCK(sb, inode->i_xid, dquot_freed_blocks);
+ if (dquot_freed_blocks)
DQUOT_FREE_BLOCK(inode, dquot_freed_blocks);
- }
return;
}
return -1;
}
-static int ext3_has_free_blocks(struct super_block *sb)
+static int ext3_has_free_blocks(struct ext3_sb_info *sbi)
{
- struct ext3_sb_info *sbi = EXT3_SB(sb);
- int free_blocks, root_blocks, cond;
+ int free_blocks, root_blocks;
free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
-
- vxdprintk(VXD_CBIT(dlim, 3),
- "ext3_has_free_blocks(%p): free=%u, root=%u",
- sb, free_blocks, root_blocks);
-
- DLIMIT_ADJUST_BLOCK(sb, vx_current_xid(), &free_blocks, &root_blocks);
-
- cond = (free_blocks < root_blocks + 1 &&
- !capable(CAP_SYS_RESOURCE) &&
+ if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
sbi->s_resuid != current->fsuid &&
- (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid)));
-
- vxdprintk(VXD_CBIT(dlim, 3),
- "ext3_has_free_blocks(%p): %u<%u+1, %c, %u!=%u r=%d",
- sb, free_blocks, root_blocks,
- !capable(CAP_SYS_RESOURCE)?'1':'0',
- sbi->s_resuid, current->fsuid, cond?0:1);
-
- return (cond ? 0 : 1);
+ (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
+ return 0;
+ }
+ return 1;
}
/*
*/
int ext3_should_retry_alloc(struct super_block *sb, int *retries)
{
- if (!ext3_has_free_blocks(sb) || (*retries)++ > 3)
+ if (!ext3_has_free_blocks(EXT3_SB(sb)) || (*retries)++ > 3)
return 0;
jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);
*errp = -EDQUOT;
return 0;
}
- if (DLIMIT_ALLOC_BLOCK(sb, inode->i_xid, 1))
- goto out_dlimit;
sbi = EXT3_SB(sb);
es = EXT3_SB(sb)->s_es;
ext3_debug("goal=%lu.\n", goal);
- if (!ext3_has_free_blocks(sb)) {
+ if (!ext3_has_free_blocks(sbi)) {
*errp = -ENOSPC;
goto out;
}
io_error:
*errp = -EIO;
out:
- if (!performed_allocation)
- DLIMIT_FREE_BLOCK(sb, inode->i_xid, 1);
-out_dlimit:
if (fatal) {
*errp = fatal;
ext3_std_error(sb, fatal);
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/random.h>
-#include <linux/vs_dlimit.h>
#include <asm/bitops.h>
#include <asm/byteorder.h>
*/
DQUOT_INIT(inode);
ext3_xattr_delete_inode(handle, inode);
- DLIMIT_FREE_INODE(sb, inode->i_xid);
DQUOT_FREE_INODE(inode);
DQUOT_DROP(inode);
inode = new_inode(sb);
if (!inode)
return ERR_PTR(-ENOMEM);
-
- if (sb->s_flags & MS_TAGXID)
- inode->i_xid = current->xid;
- else
- inode->i_xid = 0;
-
- if (DLIMIT_ALLOC_INODE(sb, inode->i_xid)) {
- err = -ENOSPC;
- goto out;
- }
ei = EXT3_I(inode);
sbi = EXT3_SB(sb);
ext3_debug("allocating inode %lu\n", inode->i_ino);
goto really_out;
fail:
- DLIMIT_FREE_INODE(sb, inode->i_xid);
ext3_std_error(sb, err);
out:
iput(inode);
return ret;
fail2:
- DLIMIT_FREE_INODE(sb, inode->i_xid);
inode->i_flags |= S_NOQUOTA;
inode->i_nlink = 0;
iput(inode);
{
unsigned int flags = EXT3_I(inode)->i_flags;
- inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_IUNLINK|S_BARRIER|S_NOATIME|S_DIRSYNC);
+ inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
if (flags & EXT3_SYNC_FL)
inode->i_flags |= S_SYNC;
if (flags & EXT3_APPEND_FL)
uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
}
- inode->i_uid = INOXID_UID(XID_TAG(inode), uid, gid);
- inode->i_gid = INOXID_GID(XID_TAG(inode), uid, gid);
- inode->i_xid = INOXID_XID(XID_TAG(inode), uid, gid,
- le16_to_cpu(raw_inode->i_raw_xid));
+ inode->i_uid = INOXID_UID(uid, gid);
+ inode->i_gid = INOXID_GID(uid, gid);
+ if (inode->i_sb->s_flags & MS_TAGXID)
+ inode->i_xid = INOXID_XID(uid, gid, le16_to_cpu(raw_inode->i_raw_xid));
inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
inode->i_size = le32_to_cpu(raw_inode->i_size);
struct ext3_inode *raw_inode = ext3_raw_inode(iloc);
struct ext3_inode_info *ei = EXT3_I(inode);
struct buffer_head *bh = iloc->bh;
- uid_t uid = XIDINO_UID(XID_TAG(inode), inode->i_uid, inode->i_xid);
- gid_t gid = XIDINO_GID(XID_TAG(inode), inode->i_gid, inode->i_xid);
+ uid_t uid = XIDINO_UID(inode->i_uid, inode->i_xid);
+ gid_t gid = XIDINO_GID(inode->i_gid, inode->i_xid);
int err = 0, rc, block;
/* For fields not not tracking in the in-memory inode,
return error;
if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
- (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid) ||
- (ia_valid & ATTR_XID && attr->ia_xid != inode->i_xid)) {
+ (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
handle_t *handle;
/* (user+group)*(old+new) structure, inode write (sb,
inode->i_uid = attr->ia_uid;
if (attr->ia_valid & ATTR_GID)
inode->i_gid = attr->ia_gid;
- if ((attr->ia_valid & ATTR_XID)
- && inode->i_sb
- && (inode->i_sb->s_flags & MS_TAGXID))
- inode->i_xid = attr->ia_xid;
error = ext3_mark_inode_dirty(handle, inode);
ext3_journal_stop(handle);
}
unsigned int oldflags;
unsigned int jflag;
- if (IS_RDONLY(inode) ||
- (filp && MNT_IS_RDONLY(filp->f_vfsmnt)))
+ if (IS_RDONLY(inode))
return -EROFS;
if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
*
* This test looks nicer. Thanks to Pauline Middelink
*/
- if (((oldflags & EXT3_IMMUTABLE_FL) ||
+ if ((oldflags & EXT3_IMMUTABLE_FL) ||
((flags ^ oldflags) &
- (EXT3_APPEND_FL | EXT3_IMMUTABLE_FL | EXT3_IUNLINK_FL)))
- && !capable(CAP_LINUX_IMMUTABLE)) {
- return -EPERM;
+ (EXT3_APPEND_FL | EXT3_IMMUTABLE_FL))) {
+ if (!capable(CAP_LINUX_IMMUTABLE))
+ return -EPERM;
}
/*
if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
return -EPERM;
- if (IS_RDONLY(inode) ||
- (filp && MNT_IS_RDONLY(filp->f_vfsmnt)))
+ if (IS_RDONLY(inode))
return -EROFS;
if (get_user(generation, (int __user *) arg))
return -EFAULT;
return ret;
}
#endif
+
#if defined(CONFIG_VSERVER_LEGACY) && !defined(CONFIG_INOXID_NONE)
case EXT3_IOC_SETXID: {
handle_t *handle;
Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0,
- Opt_tagxid, Opt_ignore, Opt_err
+ Opt_tagxid, Opt_ignore, Opt_err, Opt_resize,
};
static match_table_t tokens = {
break;
#ifndef CONFIG_INOXID_NONE
case Opt_tagxid:
- if (is_remount) {
- printk(KERN_ERR "EXT3-fs: cannot specify "
- "tagxid on remount\n");
- return 0;
- }
set_opt (sbi->s_mount_opt, TAG_XID);
break;
#endif
#include <linux/mbcache.h>
#include <linux/quotaops.h>
#include <linux/rwsem.h>
-#include <linux/vs_dlimit.h>
#include "xattr.h"
#include "acl.h"
the inode. */
ea_bdebug(new_bh, "reusing block");
- error = -ENOSPC;
- if (DLIMIT_ALLOC_BLOCK(sb, inode->i_xid, 1))
- goto cleanup;
error = -EDQUOT;
if (DQUOT_ALLOC_BLOCK(inode, 1)) {
- DLIMIT_FREE_BLOCK(sb, inode->i_xid, 1);
unlock_buffer(new_bh);
journal_release_buffer(handle, new_bh,
credits);
/* Decrement the refcount only. */
HDR(old_bh)->h_refcount = cpu_to_le32(
le32_to_cpu(HDR(old_bh)->h_refcount) - 1);
- DLIMIT_FREE_BLOCK(sb, inode->i_xid, 1);
DQUOT_FREE_BLOCK(inode, 1);
ext3_journal_dirty_metadata(handle, old_bh);
ea_bdebug(old_bh, "refcount now=%d",
ext3_journal_dirty_metadata(handle, bh);
if (IS_SYNC(inode))
handle->h_sync = 1;
- DLIMIT_FREE_BLOCK(inode->i_sb, inode->i_xid, 1);
DQUOT_FREE_BLOCK(inode, 1);
}
ea_bdebug(bh, "refcount now=%d", le32_to_cpu(HDR(bh)->h_refcount) - 1);
return error;
}
-int dupfd(struct file *file, unsigned int start)
+static int dupfd(struct file *file, unsigned int start)
{
struct files_struct * files = current->files;
int fd;
FD_SET(fd, files->open_fds);
FD_CLR(fd, files->close_on_exec);
spin_unlock(&files->file_lock);
- // vx_openfd_inc(fd);
+ vx_openfd_inc(fd);
fd_install(fd, file);
} else {
spin_unlock(&files->file_lock);
return fd;
}
-EXPORT_SYMBOL_GPL(dupfd);
-
asmlinkage long sys_dup2(unsigned int oldfd, unsigned int newfd)
{
int err = -EBADF;
FD_SET(newfd, files->open_fds);
FD_CLR(newfd, files->close_on_exec);
spin_unlock(&files->file_lock);
- // vx_openfd_inc(newfd);
+ vx_openfd_inc(newfd);
if (tofree)
filp_close(tofree, files);
return -EINVAL;
}
- if (filp->f_op && filp->f_op->check_flags)
- error = filp->f_op->check_flags(arg);
- if (error)
- return error;
-
lock_kernel();
if ((arg ^ filp->f_flags) & FASYNC) {
if (filp->f_op && filp->f_op->fasync) {
EXPORT_SYMBOL(f_delown);
-static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
- struct file *filp)
+long generic_file_fcntl(int fd, unsigned int cmd,
+ unsigned long arg, struct file *filp)
{
long err = -EINVAL;
}
return err;
}
+EXPORT_SYMBOL(generic_file_fcntl);
+
+static long do_fcntl(int fd, unsigned int cmd,
+ unsigned long arg, struct file *filp)
+{
+ if (filp->f_op && filp->f_op->fcntl)
+ return filp->f_op->fcntl(fd, cmd, arg, filp);
+ return generic_file_fcntl(fd, cmd, arg, filp);
+}
asmlinkage long sys_fcntl(int fd, unsigned int cmd, unsigned long arg)
{
}
filp->f_version = 0;
- /* We can only do regular read/write on fifos */
- filp->f_mode &= (FMODE_READ | FMODE_WRITE);
-
switch (filp->f_mode) {
case 1:
/*
memset(filp, 0, sizeof(*filp));
eventpoll_init_file(filp);
filp->f_flags = flags;
- filp->f_mode = ((flags+1) & O_ACCMODE) | FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE;
+ filp->f_mode = (flags+1) & O_ACCMODE;
atomic_set(&filp->f_count, 1);
filp->f_dentry = dentry;
filp->f_mapping = dentry->d_inode->i_mapping;
} else if (inode->i_state & I_DIRTY) {
/*
* Someone redirtied the inode while were writing back
- * the pages.
+ * the pages: nothing to do.
*/
- list_move(&inode->i_list, &sb->s_dirty);
} else if (atomic_read(&inode->i_count)) {
/*
* The inode is clean, inuse
struct hfs_btree_header_rec *head;
struct address_space *mapping;
struct page *page;
- unsigned int size;
+ unsigned int shift, size;
tree = kmalloc(sizeof(*tree), GFP_KERNEL);
if (!tree)
goto fail_page;
if (!tree->node_count)
goto fail_page;
- tree->node_size_shift = ffs(size) - 1;
+ for (shift = 0; size >>= 1; shift += 1)
+ ;
+ tree->node_size_shift = shift;
+
tree->pages_per_bnode = (tree->node_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
kunmap(page);
struct hfs_btree_header_rec *head;
struct address_space *mapping;
struct page *page;
- unsigned int size;
+ unsigned int shift, size;
tree = kmalloc(sizeof(*tree), GFP_KERNEL);
if (!tree)
goto fail_page;
if (!tree->node_count)
goto fail_page;
- tree->node_size_shift = ffs(size) - 1;
+ for (shift = 0; size >>= 1; shift += 1)
+ ;
+ tree->node_size_shift = shift;
tree->pages_per_bnode = (tree->node_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
flags |= EXT2_FLAG_NODUMP; /* EXT2_NODUMP_FL */
return put_user(flags, (int __user *)arg);
case HFSPLUS_IOC_EXT2_SETFLAGS: {
- if (IS_RDONLY(inode) ||
- (filp && MNT_IS_RDONLY(filp->f_vfsmnt)))
+ if (IS_RDONLY(inode))
return -EROFS;
if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
# Licensed under the GPL
#
+# struct stat64 changed the inode field name between 2.2 and 2.4 from st_ino
+# to __st_ino. It stayed in the same place, so as long as the correct name
+# is used, hostfs compiled on 2.2 should work on 2.4 and vice versa.
+
+STAT64_INO_FIELD := $(shell grep -q __st_ino /usr/include/bits/stat.h && \
+ echo __)st_ino
+
+hostfs-objs := hostfs_kern.o hostfs_user.o
+
obj-y =
-obj-$(CONFIG_EXTERNFS) += externfs.o
-obj-$(CONFIG_HOSTFS) += host_fs.o host_file.o
-obj-$(CONFIG_HUMFS) += humfs.o meta_fs.o
+obj-$(CONFIG_HOSTFS) += hostfs.o
SINGLE_OBJS = $(foreach f,$(patsubst %.o,%,$(obj-y) $(obj-m)),$($(f)-objs))
USER_OBJS := $(filter %_user.o,$(obj-y) $(obj-m) $(SINGLE_OBJS))
USER_OBJS := $(foreach file,$(USER_OBJS),$(obj)/$(file))
+USER_CFLAGS += -DSTAT64_INO_FIELD=$(STAT64_INO_FIELD)
+
$(USER_OBJS) : %.o: %.c
$(CC) $(CFLAGS_$(notdir $@)) $(USER_CFLAGS) -c -o $@ $<
+++ /dev/null
-/*
- * Copyright (C) 2000 - 2004 Jeff Dike (jdike@addtoit.com)
- * Licensed under the GPL
- */
-
-#include <linux/stddef.h>
-#include <linux/fs.h>
-#include <linux/version.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/pagemap.h>
-#include <linux/blkdev.h>
-#include <linux/statfs.h>
-#include <asm/uaccess.h>
-#include "hostfs.h"
-#include "kern_util.h"
-#include "kern.h"
-#include "user_util.h"
-#include "2_5compat.h"
-#include "mem.h"
-#include "filehandle.h"
-
-struct externfs {
- struct list_head list;
- struct externfs_mount_ops *mount_ops;
- struct file_system_type type;
-};
-
-static inline struct externfs_inode *EXTERNFS_I(struct inode *inode)
-{
- return(container_of(inode, struct externfs_inode, vfs_inode));
-}
-
-#define file_externfs_i(file) EXTERNFS_I((file)->f_dentry->d_inode)
-
-int externfs_d_delete(struct dentry *dentry)
-{
- return(1);
-}
-
-struct dentry_operations externfs_dentry_ops = {
-};
-
-#define EXTERNFS_SUPER_MAGIC 0x00c0ffee
-
-static struct inode_operations externfs_iops;
-static struct inode_operations externfs_dir_iops;
-static struct address_space_operations externfs_link_aops;
-
-static char *dentry_name(struct dentry *dentry, int extra)
-{
- struct dentry *parent;
- char *name;
- int len;
-
- len = 0;
- parent = dentry;
- while(parent->d_parent != parent){
- len += parent->d_name.len + 1;
- parent = parent->d_parent;
- }
-
- name = kmalloc(len + extra + 1, GFP_KERNEL);
- if(name == NULL) return(NULL);
-
- name[len] = '\0';
- parent = dentry;
- while(parent->d_parent != parent){
- len -= parent->d_name.len + 1;
- name[len] = '/';
- strncpy(&name[len + 1], parent->d_name.name,
- parent->d_name.len);
- parent = parent->d_parent;
- }
-
- return(name);
-}
-
-char *inode_name(struct inode *ino, int extra)
-{
- struct dentry *dentry;
-
- dentry = list_entry(ino->i_dentry.next, struct dentry, d_alias);
- return(dentry_name(dentry, extra));
-}
-
-char *inode_name_prefix(struct inode *inode, char *prefix)
-{
- int len;
- char *name;
-
- len = strlen(prefix);
- name = inode_name(inode, len);
- if(name == NULL)
- return(name);
-
- memmove(&name[len], name, strlen(name) + 1);
- memcpy(name, prefix, strlen(prefix));
- return(name);
-}
-
-static int read_name(struct inode *ino, char *name)
-{
- struct externfs_file_ops *ops = EXTERNFS_I(ino)->ops;
- /* The non-int inode fields are copied into ints by stat_file and
- * then copied into the inode because passing the actual pointers
- * in and having them treated as int * breaks on big-endian machines
- */
- dev_t i_rdev;
- int err;
- int i_mode, i_nlink, i_blksize;
- unsigned long atime, mtime, ctime;
- unsigned long long i_size;
- unsigned long long i_ino;
- unsigned long long i_blocks;
-
- err = (*ops->stat_file)(name, ino->i_sb->s_fs_info, &i_rdev, &i_ino,
- &i_mode, &i_nlink, &ino->i_uid, &ino->i_gid,
- &i_size, &atime, &mtime, &ctime, &i_blksize,
- &i_blocks);
- if(err) return(err);
-
- ino->i_atime.tv_sec = atime;
- ino->i_atime.tv_nsec = 0;
-
- ino->i_ctime.tv_sec = ctime;
- ino->i_ctime.tv_nsec = 0;
-
- ino->i_mtime.tv_sec = mtime;
- ino->i_mtime.tv_nsec = 0;
-
- ino->i_ino = i_ino;
- ino->i_rdev = i_rdev;
- ino->i_mode = i_mode;
- ino->i_nlink = i_nlink;
- ino->i_size = i_size;
- ino->i_blksize = i_blksize;
- ino->i_blocks = i_blocks;
- return(0);
-}
-
-static char *follow_link(char *link,
- int (*do_read_link)(char *path, int uid, int gid,
- char *buf, int size,
- struct externfs_data *ed),
- int uid, int gid, struct externfs_data *ed)
-{
- int len, n;
- char *name, *resolved, *end;
-
- len = 64;
- while(1){
- n = -ENOMEM;
- name = kmalloc(len, GFP_KERNEL);
- if(name == NULL)
- goto out;
-
- n = (*do_read_link)(link, uid, gid, name, len, ed);
- if(n < len)
- break;
- len *= 2;
- kfree(name);
- }
- if(n < 0)
- goto out_free;
-
- if(*name == '/')
- return(name);
-
- end = strrchr(link, '/');
- if(end == NULL)
- return(name);
-
- *(end + 1) = '\0';
- len = strlen(link) + strlen(name) + 1;
-
- resolved = kmalloc(len, GFP_KERNEL);
- if(resolved == NULL){
- n = -ENOMEM;
- goto out_free;
- }
-
- sprintf(resolved, "%s%s", link, name);
- kfree(name);
- return(resolved);
-
- out_free:
- kfree(name);
- out:
- return(ERR_PTR(n));
-}
-
-static int read_inode(struct inode *ino)
-{
- struct externfs_file_ops *ops = EXTERNFS_I(ino)->ops;
- struct externfs_data *ed = ino->i_sb->s_fs_info;
- char *name, *new;
- int err = 0, type;
-
- /* Unfortunately, we are called from iget() when we don't have a dentry
- * allocated yet.
- */
- if(list_empty(&ino->i_dentry))
- goto out;
-
- err = -ENOMEM;
- name = inode_name(ino, 0);
- if(name == NULL)
- goto out;
-
- type = (*ops->file_type)(name, NULL, ed);
- if(type < 0){
- err = type;
- goto out_free;
- }
-
- if(type == OS_TYPE_SYMLINK){
- new = follow_link(name, ops->read_link, current->fsuid,
- current->fsgid, ed);
- if(IS_ERR(new)){
- err = PTR_ERR(new);
- goto out_free;
- }
- kfree(name);
- name = new;
- }
-
- err = read_name(ino, name);
- out_free:
- kfree(name);
- out:
- return(err);
-}
-
-int externfs_statfs(struct super_block *sb, struct kstatfs *sf)
-{
- /* do_statfs uses struct statfs64 internally, but the linux kernel
- * struct statfs still has 32-bit versions for most of these fields,
- * so we convert them here
- */
- int err;
- long long f_blocks;
- long long f_bfree;
- long long f_bavail;
- long long f_files;
- long long f_ffree;
- struct externfs_data *ed = sb->s_fs_info;
-
- err = (*ed->file_ops->statfs)(&sf->f_bsize, &f_blocks, &f_bfree,
- &f_bavail, &f_files, &f_ffree,
- &sf->f_fsid, sizeof(sf->f_fsid),
- &sf->f_namelen, sf->f_spare, ed);
- if(err)
- return(err);
-
- sf->f_blocks = f_blocks;
- sf->f_bfree = f_bfree;
- sf->f_bavail = f_bavail;
- sf->f_files = f_files;
- sf->f_ffree = f_ffree;
- sf->f_type = EXTERNFS_SUPER_MAGIC;
- return(0);
-}
-
-static struct inode *externfs_alloc_inode(struct super_block *sb)
-{
- struct externfs_data *ed = sb->s_fs_info;
- struct externfs_inode *ext;
-
- ext = (*ed->mount_ops->init_file)(ed);
- if(ext == NULL)
- return(NULL);
-
- *ext = ((struct externfs_inode) { .ops = ed->file_ops });
-
- inode_init_once(&ext->vfs_inode);
- return(&ext->vfs_inode);
-}
-
-static void externfs_destroy_inode(struct inode *inode)
-{
- struct externfs_inode *ext = EXTERNFS_I(inode);
-
- (*ext->ops->close_file)(ext, inode->i_size);
-}
-
-static void externfs_read_inode(struct inode *inode)
-{
- read_inode(inode);
-}
-
-static struct super_operations externfs_sbops = {
- .alloc_inode = externfs_alloc_inode,
- .destroy_inode = externfs_destroy_inode,
- .read_inode = externfs_read_inode,
- .statfs = externfs_statfs,
-};
-
-int externfs_readdir(struct file *file, void *ent, filldir_t filldir)
-{
- void *dir;
- char *name;
- unsigned long long next, ino;
- int error, len;
- struct externfs_file_ops *ops = file_externfs_i(file)->ops;
- struct externfs_data *ed = file->f_dentry->d_inode->i_sb->s_fs_info;
-
- name = dentry_name(file->f_dentry, 0);
- if(name == NULL)
- return(-ENOMEM);
-
- dir = (*ops->open_dir)(name, current->fsuid, current->fsgid, ed);
- kfree(name);
- if(IS_ERR(dir))
- return(PTR_ERR(dir));
-
- next = file->f_pos;
- while((name = (*ops->read_dir)(dir, &next, &ino, &len, ed)) != NULL){
- error = (*filldir)(ent, name, len, file->f_pos, ino,
- DT_UNKNOWN);
- if(error)
- break;
- file->f_pos = next;
- }
- (*ops->close_dir)(dir, ed);
- return(0);
-}
-
-int externfs_file_open(struct inode *ino, struct file *file)
-{
- ino->i_nlink++;
- return(0);
-}
-
-int externfs_fsync(struct file *file, struct dentry *dentry, int datasync)
-{
- struct externfs_file_ops *ops = file_externfs_i(file)->ops;
- struct inode *inode = dentry->d_inode;
- struct externfs_data *ed = inode->i_sb->s_fs_info;
-
- return((*ops->truncate_file)(EXTERNFS_I(inode), inode->i_size, ed));
-}
-
-static struct file_operations externfs_file_fops = {
- .llseek = generic_file_llseek,
- .read = generic_file_read,
- .write = generic_file_write,
- .mmap = generic_file_mmap,
- .open = externfs_file_open,
- .release = NULL,
- .fsync = externfs_fsync,
-};
-
-static struct file_operations externfs_dir_fops = {
- .readdir = externfs_readdir,
- .read = generic_read_dir,
-};
-
-struct wp_info {
- struct page *page;
- int count;
- unsigned long long start;
- unsigned long long size;
- int (*truncate)(struct externfs_inode *ext, __u64 size,
- struct externfs_data *ed);
- struct externfs_inode *ei;
- struct externfs_data *ed;
-};
-
-static void externfs_finish_writepage(char *buffer, int res, void *arg)
-{
- struct wp_info *wp = arg;
-
- if(res == wp->count){
- ClearPageError(wp->page);
- if(wp->start + res > wp->size)
- (*wp->truncate)(wp->ei, wp->size, wp->ed);
- }
- else {
- SetPageError(wp->page);
- ClearPageUptodate(wp->page);
- }
-
- kunmap(wp->page);
- unlock_page(wp->page);
- kfree(wp);
-}
-
-int externfs_writepage(struct page *page, struct writeback_control *wbc)
-{
- struct address_space *mapping = page->mapping;
- struct inode *inode = mapping->host;
- struct externfs_file_ops *ops = EXTERNFS_I(inode)->ops;
- struct wp_info *wp;
- struct externfs_data *ed = inode->i_sb->s_fs_info;
- char *buffer;
- unsigned long long base;
- int count = PAGE_CACHE_SIZE;
- int end_index = inode->i_size >> PAGE_CACHE_SHIFT;
- int err, offset;
-
- base = ((unsigned long long) page->index) << PAGE_CACHE_SHIFT;
-
- /* If we are entirely outside the file, then return an error */
- err = -EIO;
- offset = inode->i_size & (PAGE_CACHE_SIZE-1);
- if (page->index > end_index ||
- ((page->index == end_index) && !offset))
- goto out_unlock;
-
- err = -ENOMEM;
- wp = kmalloc(sizeof(*wp), GFP_KERNEL);
- if(wp == NULL)
- goto out_unlock;
-
- *wp = ((struct wp_info) { .page = page,
- .count = count,
- .start = base,
- .size = inode->i_size,
- .truncate = ops->truncate_file,
- .ei = EXTERNFS_I(inode),
- .ed = ed });
-
- buffer = kmap(page);
- err = (*ops->write_file)(EXTERNFS_I(inode), base, buffer, 0,
- count, externfs_finish_writepage, wp, ed);
-
- return err;
-
- out_unlock:
- unlock_page(page);
- return(err);
-}
-
-static void externfs_finish_readpage(char *buffer, int res, void *arg)
-{
- struct page *page = arg;
- struct inode *inode;
-
- if(res < 0){
- SetPageError(page);
- goto out;
- }
-
- inode = page->mapping->host;
- if(inode->i_size >> PAGE_CACHE_SHIFT == page->index)
- res = inode->i_size % PAGE_CACHE_SIZE;
-
- memset(&buffer[res], 0, PAGE_CACHE_SIZE - res);
-
- flush_dcache_page(page);
- SetPageUptodate(page);
- if (PageError(page))
- ClearPageError(page);
- out:
- kunmap(page);
- unlock_page(page);
-}
-
-static int externfs_readpage(struct file *file, struct page *page)
-{
- struct inode *ino = page->mapping->host;
- struct externfs_file_ops *ops = EXTERNFS_I(ino)->ops;
- struct externfs_data *ed = ino->i_sb->s_fs_info;
- char *buffer;
- long long start;
- int err = 0;
-
- start = (long long) page->index << PAGE_CACHE_SHIFT;
- buffer = kmap(page);
-
- if(ops->map_file_page != NULL){
- /* XXX What happens when PAGE_SIZE != PAGE_CACHE_SIZE? */
- err = (*ops->map_file_page)(file_externfs_i(file), start,
- buffer, file->f_mode & FMODE_WRITE,
- ed);
- if(!err)
- err = PAGE_CACHE_SIZE;
- }
- else err = (*ops->read_file)(file_externfs_i(file), start, buffer,
- PAGE_CACHE_SIZE, 0, 0,
- externfs_finish_readpage, page, ed);
-
- if(err > 0)
- err = 0;
- return(err);
-}
-
-struct writepage_info {
- struct semaphore sem;
- int res;
-};
-
-static void externfs_finish_prepare(char *buffer, int res, void *arg)
-{
- struct writepage_info *wp = arg;
-
- wp->res = res;
- up(&wp->sem);
-}
-
-int externfs_prepare_write(struct file *file, struct page *page,
- unsigned int from, unsigned int to)
-{
- struct address_space *mapping = page->mapping;
- struct inode *inode = mapping->host;
- struct externfs_file_ops *ops = EXTERNFS_I(inode)->ops;
- struct externfs_data *ed = inode->i_sb->s_fs_info;
- char *buffer;
- long long start;
- int err;
- struct writepage_info wp;
-
- if(PageUptodate(page))
- return(0);
-
- start = (long long) page->index << PAGE_CACHE_SHIFT;
- buffer = kmap(page);
-
- if(ops->map_file_page != NULL){
- err = (*ops->map_file_page)(file_externfs_i(file), start,
- buffer, file->f_mode & FMODE_WRITE,
- ed);
- goto out;
-
- }
-
- init_MUTEX_LOCKED(&wp.sem);
- err = (*ops->read_file)(file_externfs_i(file), start, buffer,
- PAGE_CACHE_SIZE, from, to,
- externfs_finish_prepare, &wp, ed);
- down(&wp.sem);
- if(err < 0)
- goto out;
-
- err = wp.res;
- if(err < 0)
- goto out;
-
- if(from > 0)
- memset(buffer, 0, from);
- if(to < PAGE_CACHE_SIZE)
- memset(buffer + to, 0, PAGE_CACHE_SIZE - to);
-
- SetPageUptodate(page);
- err = 0;
- out:
- kunmap(page);
- return(err);
-}
-
-static int externfs_commit_write(struct file *file, struct page *page,
- unsigned from, unsigned to)
-{
- struct address_space *mapping = page->mapping;
- struct inode *inode = mapping->host;
- struct externfs_file_ops *ops = EXTERNFS_I(inode)->ops;
- unsigned long long size;
- long long start;
- int err;
-
- start = (long long) (page->index << PAGE_CACHE_SHIFT);
-
- if(ops->map_file_page != NULL)
- err = to - from;
- else {
- size = start + to;
- if(size > inode->i_size){
- inode->i_size = size;
- mark_inode_dirty(inode);
- }
- }
-
- set_page_dirty(page);
- return(to - from);
-}
-
-static int externfs_removepage(struct page *page, int gfpmask)
-{
- physmem_remove_mapping(page_address(page));
- return(0);
-}
-
-static struct address_space_operations externfs_aops = {
- .writepage = externfs_writepage,
- .readpage = externfs_readpage,
- .releasepage = externfs_removepage,
-/* .set_page_dirty = __set_page_dirty_nobuffers, */
- .prepare_write = externfs_prepare_write,
- .commit_write = externfs_commit_write
-};
-
-static int init_inode(struct inode *inode, struct dentry *dentry)
-{
- char *name = NULL;
- int type, err = -ENOMEM, rdev;
- struct externfs_inode *ext = EXTERNFS_I(inode);
- struct externfs_file_ops *ops = ext->ops;
- struct externfs_data *ed = inode->i_sb->s_fs_info;
-
- if(dentry){
- name = dentry_name(dentry, 0);
- if(name == NULL)
- goto out;
- type = (*ops->file_type)(name, &rdev, ed);
- }
- else type = OS_TYPE_DIR;
-
- err = 0;
- if(type == OS_TYPE_SYMLINK)
- inode->i_op = &page_symlink_inode_operations;
- else if(type == OS_TYPE_DIR)
- inode->i_op = &externfs_dir_iops;
- else inode->i_op = &externfs_iops;
-
- if(type == OS_TYPE_DIR) inode->i_fop = &externfs_dir_fops;
- else inode->i_fop = &externfs_file_fops;
-
- if(type == OS_TYPE_SYMLINK)
- inode->i_mapping->a_ops = &externfs_link_aops;
- else inode->i_mapping->a_ops = &externfs_aops;
-
- switch (type) {
- case OS_TYPE_CHARDEV:
- init_special_inode(inode, S_IFCHR, rdev);
- break;
- case OS_TYPE_BLOCKDEV:
- init_special_inode(inode, S_IFBLK, rdev);
- break;
- case OS_TYPE_FIFO:
- init_special_inode(inode, S_IFIFO, 0);
- break;
- case OS_TYPE_SOCK:
- init_special_inode(inode, S_IFSOCK, 0);
- break;
- case OS_TYPE_SYMLINK:
- inode->i_mode = S_IFLNK | S_IRWXUGO;
- }
-
- err = (*ops->open_file)(ext, name, current->fsuid, current->fsgid,
- inode, ed);
- if((err != -EISDIR) && (err != -ENOENT) && (err != -ENXIO))
- goto out_put;
-
- err = 0;
-
- out_free:
- kfree(name);
- out:
- return(err);
-
- out_put:
- iput(inode);
- goto out_free;
-}
-
-int externfs_create(struct inode *dir, struct dentry *dentry, int mode,
- struct nameidata *nd)
-{
- struct externfs_inode *ext = EXTERNFS_I(dir);
- struct externfs_file_ops *ops = ext->ops;
- struct inode *inode;
- struct externfs_data *ed = dir->i_sb->s_fs_info;
- char *name;
- int err = -ENOMEM;
-
- inode = iget(dir->i_sb, 0);
- if(inode == NULL)
- goto out;
-
- err = init_inode(inode, dentry);
- if(err)
- goto out_put;
-
- err = -ENOMEM;
- name = dentry_name(dentry, 0);
- if(name == NULL)
- goto out_put;
-
- err = (*ops->create_file)(ext, name, mode, current->fsuid,
- current->fsuid, inode, ed);
- if(err)
- goto out_free;
-
- err = read_name(inode, name);
- if(err)
- goto out_rm;
-
- inode->i_nlink++;
- d_instantiate(dentry, inode);
- kfree(name);
- out:
- return(err);
-
- out_rm:
- (*ops->unlink_file)(name, ed);
- out_free:
- kfree(name);
- out_put:
- inode->i_nlink = 0;
- iput(inode);
- goto out;
-}
-
-struct dentry *externfs_lookup(struct inode *ino, struct dentry *dentry,
- struct nameidata *nd)
-{
- struct inode *inode;
- char *name;
- int err = -ENOMEM;
-
- inode = iget(ino->i_sb, 0);
- if(inode == NULL)
- goto out;
-
- err = init_inode(inode, dentry);
- if(err)
- goto out_put;
-
- err = -ENOMEM;
- name = dentry_name(dentry, 0);
- if(name == NULL)
- goto out_put;
-
- err = read_name(inode, name);
- kfree(name);
- if(err){
- if(err != -ENOENT)
- goto out_put;
-
- inode->i_nlink = 0;
- iput(inode);
- inode = NULL;
- }
- d_add(dentry, inode);
- dentry->d_op = &externfs_dentry_ops;
- return(NULL);
-
- out_put:
- inode->i_nlink = 0;
- iput(inode);
- out:
- return(ERR_PTR(err));
-}
-
-static char *inode_dentry_name(struct inode *ino, struct dentry *dentry)
-{
- char *file;
- int len;
-
- file = inode_name(ino, dentry->d_name.len + 1);
- if(file == NULL) return(NULL);
- strcat(file, "/");
- len = strlen(file);
- strncat(file, dentry->d_name.name, dentry->d_name.len);
- file[len + dentry->d_name.len] = '\0';
- return(file);
-}
-
-int externfs_link(struct dentry *to, struct inode *ino, struct dentry *from)
-{
- struct externfs_file_ops *ops = EXTERNFS_I(ino)->ops;
- struct externfs_data *ed = ino->i_sb->s_fs_info;
- char *from_name, *to_name;
- int err = -ENOMEM;
-
- from_name = inode_dentry_name(ino, from);
- if(from_name == NULL)
- goto out;
-
- to_name = dentry_name(to, 0);
- if(to_name == NULL)
- goto out_free_from;
-
- err = (*ops->link_file)(to_name, from_name, current->fsuid,
- current->fsgid, ed);
- if(err)
- goto out_free_to;
-
- d_instantiate(from, to->d_inode);
- to->d_inode->i_nlink++;
- atomic_inc(&to->d_inode->i_count);
-
- out_free_to:
- kfree(to_name);
- out_free_from:
- kfree(from_name);
- out:
- return(err);
-}
-
-int externfs_unlink(struct inode *ino, struct dentry *dentry)
-{
- struct inode *inode;
- struct externfs_file_ops *ops = EXTERNFS_I(ino)->ops;
- struct externfs_data *ed = ino->i_sb->s_fs_info;
- char *file;
- int err;
-
- file = inode_dentry_name(ino, dentry);
- if(file == NULL)
- return(-ENOMEM);
-
- inode = dentry->d_inode;
- if((inode->i_nlink == 1) && (ops->invisible != NULL))
- (*ops->invisible)(EXTERNFS_I(inode));
-
- err = (*ops->unlink_file)(file, ed);
- kfree(file);
-
- inode->i_nlink--;
-
- return(err);
-}
-
-int externfs_symlink(struct inode *ino, struct dentry *dentry, const char *to)
-{
- struct externfs_file_ops *ops = EXTERNFS_I(ino)->ops;
- struct inode *inode;
- struct externfs_data *ed = ino->i_sb->s_fs_info;
- char *file;
- int err;
-
- file = inode_dentry_name(ino, dentry);
- if(file == NULL)
- return(-ENOMEM);
- err = (*ops->make_symlink)(file, to, current->fsuid, current->fsgid,
- ed);
- kfree(file);
- if(err)
- goto out;
-
- err = -ENOMEM;
- inode = iget(ino->i_sb, 0);
- if(inode == NULL)
- goto out;
-
- err = init_inode(inode, dentry);
- if(err)
- goto out_put;
-
- d_instantiate(dentry, inode);
- inode->i_nlink++;
- out:
- return(err);
-
- out_put:
- iput(inode);
- goto out;
-}
-
-int externfs_make_dir(struct inode *ino, struct dentry *dentry, int mode)
-{
- struct externfs_file_ops *ops = EXTERNFS_I(ino)->ops;
- struct inode *inode;
- struct externfs_data *ed = ino->i_sb->s_fs_info;
- char *file;
- int err = -ENOMEM;
-
- file = inode_dentry_name(ino, dentry);
- if(file == NULL)
- goto out;
- err = (*ops->make_dir)(file, mode, current->fsuid, current->fsgid, ed);
-
- err = -ENOMEM;
- inode = iget(ino->i_sb, 0);
- if(inode == NULL)
- goto out_free;
-
- err = init_inode(inode, dentry);
- if(err)
- goto out_put;
-
- err = read_name(inode, file);
- if(err)
- goto out_put;
-
- kfree(file);
- d_instantiate(dentry, inode);
- inode->i_nlink = 2;
- inode->i_mode = S_IFDIR | mode;
-
- ino->i_nlink++;
- out:
- return(err);
- out_put:
- inode->i_nlink = 0;
- iput(inode);
- out_free:
- kfree(file);
- goto out;
-}
-
-int externfs_remove_dir(struct inode *ino, struct dentry *dentry)
-{
- struct externfs_file_ops *ops = EXTERNFS_I(ino)->ops;
- struct externfs_data *ed = ino->i_sb->s_fs_info;
- char *file;
- int err;
-
- file = inode_dentry_name(ino, dentry);
- if(file == NULL)
- return(-ENOMEM);
- err = (*ops->remove_dir)(file, current->fsuid, current->fsgid, ed);
- kfree(file);
-
- dentry->d_inode->i_nlink = 0;
- ino->i_nlink--;
- return(err);
-}
-
-int externfs_make_node(struct inode *dir, struct dentry *dentry, int mode,
- dev_t dev)
-{
- struct externfs_file_ops *ops = EXTERNFS_I(dir)->ops;
- struct externfs_data *ed = dir->i_sb->s_fs_info;
- struct inode *inode;
- char *name;
- int err = -ENOMEM;
-
- inode = iget(dir->i_sb, 0);
- if(inode == NULL)
- goto out;
-
- err = init_inode(inode, dentry);
- if(err)
- goto out_put;
-
- err = -ENOMEM;
- name = dentry_name(dentry, 0);
- if(name == NULL)
- goto out_put;
-
- init_special_inode(inode, mode, dev);
- err = (*ops->make_node)(name, mode & S_IRWXUGO, current->fsuid,
- current->fsgid, mode & S_IFMT, MAJOR(dev),
- MINOR(dev), ed);
- if(err)
- goto out_free;
-
- err = read_name(inode, name);
- if(err)
- goto out_rm;
-
- inode->i_nlink++;
- d_instantiate(dentry, inode);
- kfree(name);
- out:
- return(err);
-
- out_rm:
- (*ops->unlink_file)(name, ed);
- out_free:
- kfree(name);
- out_put:
- inode->i_nlink = 0;
- iput(inode);
- goto out;
-}
-
-int externfs_rename(struct inode *from_ino, struct dentry *from,
- struct inode *to_ino, struct dentry *to)
-{
- struct externfs_file_ops *ops = EXTERNFS_I(from_ino)->ops;
- struct externfs_data *ed = from_ino->i_sb->s_fs_info;
- char *from_name, *to_name;
- int err;
-
- from_name = inode_dentry_name(from_ino, from);
- if(from_name == NULL)
- return(-ENOMEM);
- to_name = inode_dentry_name(to_ino, to);
- if(to_name == NULL){
- kfree(from_name);
- return(-ENOMEM);
- }
- err = (*ops->rename_file)(from_name, to_name, ed);
- kfree(from_name);
- kfree(to_name);
-
- from_ino->i_nlink--;
- to_ino->i_nlink++;
- return(err);
-}
-
-void externfs_truncate(struct inode *ino)
-{
- struct externfs_file_ops *ops = EXTERNFS_I(ino)->ops;
- struct externfs_data *ed = ino->i_sb->s_fs_info;
-
- (*ops->truncate_file)(EXTERNFS_I(ino), ino->i_size, ed);
-}
-
-int externfs_permission(struct inode *ino, int desired, struct nameidata *nd)
-{
- struct externfs_file_ops *ops = EXTERNFS_I(ino)->ops;
- struct externfs_data *ed = ino->i_sb->s_fs_info;
- char *name;
- int r = 0, w = 0, x = 0, err;
-
- if(ops->access_file == NULL)
- return(vfs_permission(ino, desired));
-
- if(desired & MAY_READ) r = 1;
- if(desired & MAY_WRITE) w = 1;
- if(desired & MAY_EXEC) x = 1;
- name = inode_name(ino, 0);
- if(name == NULL)
- return(-ENOMEM);
-
- err = (*ops->access_file)(name, r, w, x, current->fsuid,
- current->fsgid, ed);
- kfree(name);
-
- if(!err)
- err = vfs_permission(ino, desired);
- return(err);
-}
-
-int externfs_setattr(struct dentry *dentry, struct iattr *attr)
-{
- struct externfs_file_ops *ops = EXTERNFS_I(dentry->d_inode)->ops;
- struct externfs_data *ed = dentry->d_inode->i_sb->s_fs_info;
- struct externfs_iattr attrs;
- char *name;
- int err;
-
- attrs.ia_valid = 0;
- if(attr->ia_valid & ATTR_MODE){
- attrs.ia_valid |= EXTERNFS_ATTR_MODE;
- attrs.ia_mode = attr->ia_mode;
- }
- if(attr->ia_valid & ATTR_UID){
- attrs.ia_valid |= EXTERNFS_ATTR_UID;
- attrs.ia_uid = attr->ia_uid;
- }
- if(attr->ia_valid & ATTR_GID){
- attrs.ia_valid |= EXTERNFS_ATTR_GID;
- attrs.ia_gid = attr->ia_gid;
- }
- if(attr->ia_valid & ATTR_SIZE){
- attrs.ia_valid |= EXTERNFS_ATTR_SIZE;
- attrs.ia_size = attr->ia_size;
- }
- if(attr->ia_valid & ATTR_ATIME){
- attrs.ia_valid |= EXTERNFS_ATTR_ATIME;
- attrs.ia_atime = attr->ia_atime.tv_sec;
- }
- if(attr->ia_valid & ATTR_MTIME){
- attrs.ia_valid |= EXTERNFS_ATTR_MTIME;
- attrs.ia_mtime = attr->ia_mtime.tv_sec;
- }
- if(attr->ia_valid & ATTR_CTIME){
- attrs.ia_valid |= EXTERNFS_ATTR_CTIME;
- attrs.ia_ctime = attr->ia_ctime.tv_sec;
- }
- if(attr->ia_valid & ATTR_ATIME_SET){
- attrs.ia_valid |= EXTERNFS_ATTR_ATIME_SET;
- attrs.ia_atime = attr->ia_atime.tv_sec;
- }
- if(attr->ia_valid & ATTR_MTIME_SET){
- attrs.ia_valid |= EXTERNFS_ATTR_MTIME_SET;
- }
- name = dentry_name(dentry, 0);
- if(name == NULL)
- return(-ENOMEM);
- err = (*ops->set_attr)(name, &attrs, ed);
- kfree(name);
- if(err)
- return(err);
-
- return(inode_setattr(dentry->d_inode, attr));
-}
-
-int externfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
- struct kstat *stat)
-{
- generic_fillattr(dentry->d_inode, stat);
- return(0);
-}
-
-static struct inode_operations externfs_iops = {
- .create = externfs_create,
- .link = externfs_link,
- .unlink = externfs_unlink,
- .symlink = externfs_symlink,
- .mkdir = externfs_make_dir,
- .rmdir = externfs_remove_dir,
- .mknod = externfs_make_node,
- .rename = externfs_rename,
- .truncate = externfs_truncate,
- .permission = externfs_permission,
- .setattr = externfs_setattr,
- .getattr = externfs_getattr,
-};
-
-static struct inode_operations externfs_dir_iops = {
- .create = externfs_create,
- .lookup = externfs_lookup,
- .link = externfs_link,
- .unlink = externfs_unlink,
- .symlink = externfs_symlink,
- .mkdir = externfs_make_dir,
- .rmdir = externfs_remove_dir,
- .mknod = externfs_make_node,
- .rename = externfs_rename,
- .truncate = externfs_truncate,
- .permission = externfs_permission,
- .setattr = externfs_setattr,
- .getattr = externfs_getattr,
-};
-
-int externfs_link_readpage(struct file *file, struct page *page)
-{
- struct inode *ino = page->mapping->host;
- struct externfs_file_ops *ops = EXTERNFS_I(ino)->ops;
- struct externfs_data *ed = ino->i_sb->s_fs_info;
- char *buffer, *name;
- long long start;
- int err;
-
- start = page->index << PAGE_CACHE_SHIFT;
- buffer = kmap(page);
- name = inode_name(ino, 0);
- if(name == NULL)
- return(-ENOMEM);
-
- err = (*ops->read_link)(name, current->fsuid, current->fsgid, buffer,
- PAGE_CACHE_SIZE, ed);
-
- kfree(name);
- if(err == PAGE_CACHE_SIZE)
- err = -E2BIG;
- else if(err > 0){
- flush_dcache_page(page);
- SetPageUptodate(page);
- if (PageError(page)) ClearPageError(page);
- err = 0;
- }
- kunmap(page);
- unlock_page(page);
- return(err);
-}
-
-static int externfs_flushpage(struct page *page, unsigned long offset)
-{
- return(externfs_writepage(page, NULL));
-}
-
-struct externfs_data *inode_externfs_info(struct inode *inode)
-{
- return(inode->i_sb->s_fs_info);
-}
-
-static struct address_space_operations externfs_link_aops = {
- .readpage = externfs_link_readpage,
- .releasepage = externfs_removepage,
- .invalidatepage = externfs_flushpage,
-};
-
-DECLARE_MUTEX(externfs_sem);
-struct list_head externfses = LIST_HEAD_INIT(externfses);
-
-static struct externfs *find_externfs(struct file_system_type *type)
-{
- struct list_head *ele;
- struct externfs *fs;
-
- down(&externfs_sem);
- list_for_each(ele, &externfses){
- fs = list_entry(ele, struct externfs, list);
- if(&fs->type == type)
- goto out;
- }
- fs = NULL;
- out:
- up(&externfs_sem);
- return(fs);
-}
-
-#define DEFAULT_ROOT "/"
-
-char *host_root_filename(char *mount_arg)
-{
- char *root = DEFAULT_ROOT;
-
- if((mount_arg != NULL) && (*mount_arg != '\0'))
- root = mount_arg;
-
- return(uml_strdup(root));
-}
-
-static int externfs_fill_sb(struct super_block *sb, void *data, int silent)
-{
- struct externfs *fs;
- struct inode *root_inode;
- struct externfs_data *sb_data;
- int err = -EINVAL;
-
- sb->s_blocksize = 1024;
- sb->s_blocksize_bits = 10;
- sb->s_magic = EXTERNFS_SUPER_MAGIC;
- sb->s_op = &externfs_sbops;
-
- fs = find_externfs(sb->s_type);
- if(fs == NULL){
- printk("Couldn't find externfs for filesystem '%s'\n",
- sb->s_type->name);
- goto out;
- }
-
- sb_data = (*fs->mount_ops->mount)(data);
- if(IS_ERR(sb_data)){
- err = PTR_ERR(sb_data);
- goto out;
- }
-
- sb->s_fs_info = sb_data;
- sb_data->mount_ops = fs->mount_ops;
-
- root_inode = iget(sb, 0);
- if(root_inode == NULL)
- goto out;
-
- err = init_inode(root_inode, NULL);
- if(err)
- goto out_put;
-
- err = -ENOMEM;
- sb->s_root = d_alloc_root(root_inode);
- if(sb->s_root == NULL)
- goto out_put;
-
- err = read_inode(root_inode);
- if(err)
- goto out_put;
-
- out:
- return(err);
-
- out_put:
- iput(root_inode);
- goto out;
-}
-
-struct super_block *externfs_read_super(struct file_system_type *type,
- int flags, const char *dev_name,
- void *data)
-{
- return(get_sb_nodev(type, flags, data, externfs_fill_sb));
-}
-
-void init_externfs(struct externfs_data *ed, struct externfs_file_ops *ops)
-{
- ed->file_ops = ops;
-}
-
-int register_externfs(char *name, struct externfs_mount_ops *mount_ops)
-{
- struct externfs *new;
- int err = -ENOMEM;
-
- new = kmalloc(sizeof(*new), GFP_KERNEL);
- if(new == NULL)
- goto out;
-
- memset(new, 0, sizeof(*new));
- *new = ((struct externfs) { .list = LIST_HEAD_INIT(new->list),
- .mount_ops = mount_ops,
- .type = { .name = name,
- .get_sb = externfs_read_super,
- .kill_sb = kill_anon_super,
- .fs_flags = 0,
- .owner = THIS_MODULE } });
- list_add(&new->list, &externfses);
-
- err = register_filesystem(&new->type);
- if(err)
- goto out_del;
- return(0);
-
- out_del:
- list_del(&new->list);
- kfree(new);
- out:
- return(err);
-}
-
-void unregister_externfs(char *name)
-{
- struct list_head *ele;
- struct externfs *fs;
-
- down(&externfs_sem);
- list_for_each(ele, &externfses){
- fs = list_entry(ele, struct externfs, list);
- if(!strcmp(fs->type.name, name)){
- list_del(ele);
- up(&externfs_sem);
- return;
- }
- }
- up(&externfs_sem);
- printk("Unregister_externfs - filesystem '%s' not found\n", name);
-}
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
+++ /dev/null
-/*
- * Copyright (C) 2004 Jeff Dike (jdike@addtoit.com)
- * Licensed under the GPL
- */
-
-#include "linux/stddef.h"
-#include "linux/string.h"
-#include "linux/errno.h"
-#include "linux/types.h"
-#include "linux/slab.h"
-#include "linux/fs.h"
-#include "asm/fcntl.h"
-#include "hostfs.h"
-#include "filehandle.h"
-
-extern int append;
-
-char *get_path(const char *path[], char *buf, int size)
-{
- const char **s;
- char *p;
- int new = 1;
-
- for(s = path; *s != NULL; s++){
- new += strlen(*s);
- if((*(s + 1) != NULL) && (strlen(*s) > 0) &&
- ((*s)[strlen(*s) - 1] != '/'))
- new++;
- }
-
- if(new > size){
- buf = kmalloc(new, GFP_KERNEL);
- if(buf == NULL)
- return(NULL);
- }
-
- p = buf;
- for(s = path; *s != NULL; s++){
- strcpy(p, *s);
- p += strlen(*s);
- if((*(s + 1) != NULL) && (strlen(*s) > 0) &&
- ((*s)[strlen(*s) - 1] != '/'))
- strcpy(p++, "/");
- }
-
- return(buf);
-}
-
-void free_path(const char *buf, char *tmp)
-{
- if((buf != tmp) && (buf != NULL))
- kfree((char *) buf);
-}
-
-int host_open_file(const char *path[], int r, int w, struct file_handle *fh)
-{
- char tmp[HOSTFS_BUFSIZE], *file;
- int mode = 0, err;
- struct openflags flags = OPENFLAGS();
-
- if (r)
- flags = of_read(flags);
- if (w)
- flags = of_write(flags);
- if(append)
- flags = of_append(flags);
-
- err = -ENOMEM;
- file = get_path(path, tmp, sizeof(tmp));
- if(file == NULL)
- goto out;
-
- err = open_filehandle(file, flags, mode, fh);
- out:
- free_path(file, tmp);
- return(err);
-}
-
-void *host_open_dir(const char *path[])
-{
- char tmp[HOSTFS_BUFSIZE], *file;
- void *dir = ERR_PTR(-ENOMEM);
-
- file = get_path(path, tmp, sizeof(tmp));
- if(file == NULL)
- goto out;
-
- dir = open_dir(file);
- out:
- free_path(file, tmp);
- return(dir);
-}
-
-char *host_read_dir(void *stream, unsigned long long *pos,
- unsigned long long *ino_out, int *len_out)
-{
- int err;
- char *name;
-
- err = os_seek_dir(stream, *pos);
- if(err)
- return(ERR_PTR(err));
-
- err = os_read_dir(stream, ino_out, &name);
- if(err)
- return(ERR_PTR(err));
-
- if(name == NULL)
- return(NULL);
-
- *len_out = strlen(name);
- *pos = os_tell_dir(stream);
- return(name);
-}
-
-int host_file_type(const char *path[], int *rdev)
-{
- char tmp[HOSTFS_BUFSIZE], *file;
- struct uml_stat buf;
- int ret;
-
- ret = -ENOMEM;
- file = get_path(path, tmp, sizeof(tmp));
- if(file == NULL)
- goto out;
-
- if(rdev != NULL){
- ret = os_lstat_file(file, &buf);
- if(ret)
- goto out;
- *rdev = MKDEV(buf.ust_rmajor, buf.ust_rminor);
- }
-
- ret = os_file_type(file);
- out:
- free_path(file, tmp);
- return(ret);
-}
-
-int host_create_file(const char *path[], int mode, struct file_handle *fh)
-{
- char tmp[HOSTFS_BUFSIZE], *file;
- int err = -ENOMEM;
-
- file = get_path(path, tmp, sizeof(tmp));
- if(file == NULL)
- goto out;
-
- err = open_filehandle(file, of_create(of_rdwr(OPENFLAGS())), mode, fh);
- out:
- free_path(file, tmp);
- return(err);
-}
-
-static int do_stat_file(const char *path, int *dev_out,
- unsigned long long *inode_out, int *mode_out,
- int *nlink_out, int *uid_out, int *gid_out,
- unsigned long long *size_out, unsigned long *atime_out,
- unsigned long *mtime_out, unsigned long *ctime_out,
- int *blksize_out, unsigned long long *blocks_out)
-{
- struct uml_stat buf;
- int err;
-
- err = os_lstat_file(path, &buf);
- if(err < 0)
- return(err);
-
- if(dev_out != NULL) *dev_out = MKDEV(buf.ust_major, buf.ust_minor);
- if(inode_out != NULL) *inode_out = buf.ust_ino;
- if(mode_out != NULL) *mode_out = buf.ust_mode;
- if(nlink_out != NULL) *nlink_out = buf.ust_nlink;
- if(uid_out != NULL) *uid_out = buf.ust_uid;
- if(gid_out != NULL) *gid_out = buf.ust_gid;
- if(size_out != NULL) *size_out = buf.ust_size;
- if(atime_out != NULL) *atime_out = buf.ust_atime;
- if(mtime_out != NULL) *mtime_out = buf.ust_mtime;
- if(ctime_out != NULL) *ctime_out = buf.ust_ctime;
- if(blksize_out != NULL) *blksize_out = buf.ust_blksize;
- if(blocks_out != NULL) *blocks_out = buf.ust_blocks;
-
- return(0);
-}
-
-int host_stat_file(const char *path[], int *dev_out,
- unsigned long long *inode_out, int *mode_out,
- int *nlink_out, int *uid_out, int *gid_out,
- unsigned long long *size_out, unsigned long *atime_out,
- unsigned long *mtime_out, unsigned long *ctime_out,
- int *blksize_out, unsigned long long *blocks_out)
-{
- char tmp[HOSTFS_BUFSIZE], *file;
- int err;
-
- err = -ENOMEM;
- file = get_path(path, tmp, sizeof(tmp));
- if(file == NULL)
- goto out;
-
- err = do_stat_file(file, dev_out, inode_out, mode_out, nlink_out,
- uid_out, gid_out, size_out, atime_out, mtime_out,
- ctime_out, blksize_out, blocks_out);
- out:
- free_path(file, tmp);
- return(err);
-}
-
-int host_set_attr(const char *path[], struct externfs_iattr *attrs)
-{
- char tmp[HOSTFS_BUFSIZE], *file;
- unsigned long time;
- int err = 0, ma;
-
- if(append && (attrs->ia_valid & EXTERNFS_ATTR_SIZE))
- return(-EPERM);
-
- err = -ENOMEM;
- file = get_path(path, tmp, sizeof(tmp));
- if(file == NULL)
- goto out;
-
- if(attrs->ia_valid & EXTERNFS_ATTR_MODE){
- err = os_set_file_perms(file, attrs->ia_mode);
- if(err < 0)
- goto out;
- }
- if(attrs->ia_valid & EXTERNFS_ATTR_UID){
- err = os_set_file_owner(file, attrs->ia_uid, -1);
- if(err < 0)
- goto out;
- }
- if(attrs->ia_valid & EXTERNFS_ATTR_GID){
- err = os_set_file_owner(file, -1, attrs->ia_gid);
- if(err < 0)
- goto out;
- }
- if(attrs->ia_valid & EXTERNFS_ATTR_SIZE){
- err = os_truncate_file(file, attrs->ia_size);
- if(err < 0)
- goto out;
- }
- ma = EXTERNFS_ATTR_ATIME_SET | EXTERNFS_ATTR_MTIME_SET;
- if((attrs->ia_valid & ma) == ma){
- err = os_set_file_time(file, attrs->ia_atime, attrs->ia_mtime);
- if(err)
- goto out;
- }
- else {
- if(attrs->ia_valid & EXTERNFS_ATTR_ATIME_SET){
- err = do_stat_file(file, NULL, NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, &time,
- NULL, NULL, NULL);
- if(err != 0)
- goto out;
-
- err = os_set_file_time(file, attrs->ia_atime, time);
- if(err)
- goto out;
- }
- if(attrs->ia_valid & EXTERNFS_ATTR_MTIME_SET){
- err = do_stat_file(file, NULL, NULL, NULL, NULL, NULL,
- NULL, NULL, &time, NULL,
- NULL, NULL, NULL);
- if(err != 0)
- goto out;
-
- err = os_set_file_time(file, time, attrs->ia_mtime);
- if(err)
- goto out;
- }
- }
- if(attrs->ia_valid & EXTERNFS_ATTR_CTIME) ;
- if(attrs->ia_valid & (EXTERNFS_ATTR_ATIME | EXTERNFS_ATTR_MTIME)){
- err = do_stat_file(file, NULL, NULL, NULL, NULL, NULL,
- NULL, NULL, &attrs->ia_atime,
- &attrs->ia_mtime, NULL, NULL, NULL);
- if(err != 0)
- goto out;
- }
-
- err = 0;
- out:
- free_path(file, tmp);
- return(err);
-}
-
-int host_make_symlink(const char *from[], const char *to)
-{
- char tmp[HOSTFS_BUFSIZE], *file;
- int err = -ENOMEM;
-
- file = get_path(from, tmp, sizeof(tmp));
- if(file == NULL)
- goto out;
-
- err = os_make_symlink(to, file);
- out:
- free_path(file, tmp);
- return(err);
-}
-
-int host_unlink_file(const char *path[])
-{
- char tmp[HOSTFS_BUFSIZE], *file;
- int err = -ENOMEM;
-
- if(append)
- return(-EPERM);
-
- file = get_path(path, tmp, sizeof(tmp));
- if(file == NULL)
- goto out;
-
- err = os_remove_file(file);
- out:
- free_path(file, tmp);
- return(err);
-}
-
-int host_make_dir(const char *path[], int mode)
-{
- char tmp[HOSTFS_BUFSIZE], *file;
- int err = -ENOMEM;
-
- file = get_path(path, tmp, sizeof(tmp));
- if(file == NULL)
- goto out;
-
- err = os_make_dir(file, mode);
- out:
- free_path(file, tmp);
- return(err);
-}
-
-int host_remove_dir(const char *path[])
-{
- char tmp[HOSTFS_BUFSIZE], *file;
- int err = -ENOMEM;
-
- file = get_path(path, tmp, sizeof(tmp));
- if(file == NULL)
- goto out;
-
- err = os_remove_dir(file);
- out:
- free_path(file, tmp);
- return(err);
-}
-
-int host_link_file(const char *to[], const char *from[])
-{
- char from_tmp[HOSTFS_BUFSIZE], *f, to_tmp[HOSTFS_BUFSIZE], *t;
- int err = -ENOMEM;
-
- f = get_path(from, from_tmp, sizeof(from_tmp));
- t = get_path(to, to_tmp, sizeof(to_tmp));
- if((f == NULL) || (t == NULL))
- goto out;
-
- err = os_link_file(t, f);
- out:
- free_path(f, from_tmp);
- free_path(t, to_tmp);
- return(err);
-}
-
-int host_read_link(const char *path[], char *buf, int size)
-{
- char tmp[HOSTFS_BUFSIZE], *file;
- int n = -ENOMEM;
-
- file = get_path(path, tmp, sizeof(tmp));
- if(file == NULL)
- goto out;
-
- n = os_read_symlink(file, buf, size);
- if(n < size)
- buf[n] = '\0';
- out:
- free_path(file, tmp);
- return(n);
-}
-
-int host_rename_file(const char *from[], const char *to[])
-{
- char from_tmp[HOSTFS_BUFSIZE], *f, to_tmp[HOSTFS_BUFSIZE], *t;
- int err = -ENOMEM;
-
- f = get_path(from, from_tmp, sizeof(from_tmp));
- t = get_path(to, to_tmp, sizeof(to_tmp));
- if((f == NULL) || (t == NULL))
- goto out;
-
- err = os_move_file(f, t);
- out:
- free_path(f, from_tmp);
- free_path(t, to_tmp);
- return(err);
-}
-
-int host_stat_fs(const char *path[], long *bsize_out, long long *blocks_out,
- long long *bfree_out, long long *bavail_out,
- long long *files_out, long long *ffree_out, void *fsid_out,
- int fsid_size, long *namelen_out, long *spare_out)
-{
- char tmp[HOSTFS_BUFSIZE], *file;
- int err = -ENOMEM;
-
- file = get_path(path, tmp, sizeof(tmp));
- if(file == NULL)
- goto out;
-
- err = os_stat_filesystem(file, bsize_out, blocks_out, bfree_out,
- bavail_out, files_out, ffree_out, fsid_out,
- fsid_size, namelen_out, spare_out);
- out:
- free_path(file, tmp);
- return(err);
-}
-
-char *generic_host_read_dir(void *stream, unsigned long long *pos,
- unsigned long long *ino_out, int *len_out,
- void *mount)
-{
- return(host_read_dir(stream, pos, ino_out, len_out));
-}
-
-int generic_host_truncate_file(struct file_handle *fh, __u64 size, void *m)
-{
- return(truncate_file(fh, size));
-}
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
+++ /dev/null
-/*
- * Copyright (C) 2000 - 2004 Jeff Dike (jdike@addtoit.com)
- * Licensed under the GPL
- */
-
-#include "linux/stddef.h"
-#include "linux/string.h"
-#include "linux/types.h"
-#include "linux/errno.h"
-#include "linux/slab.h"
-#include "linux/init.h"
-#include "linux/fs.h"
-#include "linux/stat.h"
-#include "hostfs.h"
-#include "kern.h"
-#include "init.h"
-#include "kern_util.h"
-#include "filehandle.h"
-#include "os.h"
-
-/* Changed in hostfs_args before the kernel starts running */
-static char *jail_dir = "/";
-int append = 0;
-
-static int __init hostfs_args(char *options, int *add)
-{
- char *ptr;
-
- ptr = strchr(options, ',');
- if(ptr != NULL)
- *ptr++ = '\0';
- if(*options != '\0')
- jail_dir = options;
-
- options = ptr;
- while(options){
- ptr = strchr(options, ',');
- if(ptr != NULL)
- *ptr++ = '\0';
- if(*options != '\0'){
- if(!strcmp(options, "append"))
- append = 1;
- else printf("hostfs_args - unsupported option - %s\n",
- options);
- }
- options = ptr;
- }
- return(0);
-}
-
-__uml_setup("hostfs=", hostfs_args,
-"hostfs=<root dir>,<flags>,...\n"
-" This is used to set hostfs parameters. The root directory argument\n"
-" is used to confine all hostfs mounts to within the specified directory\n"
-" tree on the host. If this isn't specified, then a user inside UML can\n"
-" mount anything on the host that's accessible to the user that's running\n"
-" it.\n"
-" The only flag currently supported is 'append', which specifies that all\n"
-" files opened by hostfs will be opened in append mode.\n\n"
-);
-
-struct hostfs_data {
- struct externfs_data ext;
- char *mount;
-};
-
-struct hostfs_file {
- struct externfs_inode ext;
- struct file_handle fh;
-};
-
-static int hostfs_access_file(char *file, int uid, int w, int x, int gid,
- int r, struct externfs_data *ed)
-{
- char *mount = container_of(ed, struct hostfs_data, ext)->mount;
- const char *path[] = { jail_dir, mount, file, NULL };
- char tmp[HOSTFS_BUFSIZE];
- int err, mode = 0;
-
- if(r) mode = OS_ACC_R_OK;
- if(w) mode |= OS_ACC_W_OK;
- if(x) mode |= OS_ACC_X_OK;
-
- err = -ENOMEM;
- file = get_path(path, tmp, sizeof(tmp));
- if(file == NULL)
- goto out;
-
- err = os_access(file, mode);
- free_path(file, tmp);
- out:
- return(err);
-}
-
-static int hostfs_make_node(const char *file, int mode, int uid, int gid,
- int type, int major, int minor,
- struct externfs_data *ed)
-{
- char *mount = container_of(ed, struct hostfs_data, ext)->mount;
- const char *path[] = { jail_dir, mount, file, NULL };
- char tmp[HOSTFS_BUFSIZE];
- int err = -ENOMEM;
-
- file = get_path(path, tmp, sizeof(tmp));
- if(file == NULL)
- goto out;
-
- /* XXX Pass type in an OS-independent way */
- mode |= type;
-
- err = os_make_dev(file, mode, major, minor);
- free_path(file, tmp);
- out:
- return(err);
-}
-
-static int hostfs_stat_file(const char *file, struct externfs_data *ed,
- dev_t *dev_out, unsigned long long *inode_out,
- int *mode_out, int *nlink_out, int *uid_out,
- int *gid_out, unsigned long long *size_out,
- unsigned long *atime_out, unsigned long *mtime_out,
- unsigned long *ctime_out, int *blksize_out,
- unsigned long long *blocks_out)
-{
- char *mount = container_of(ed, struct hostfs_data, ext)->mount;
- const char *path[] = { jail_dir, mount, file, NULL };
-
- /* XXX Why pretend everything is owned by root? */
- *uid_out = 0;
- *gid_out = 0;
- return(host_stat_file(path, dev_out, inode_out, mode_out, nlink_out,
- NULL, NULL, size_out, atime_out, mtime_out,
- ctime_out, blksize_out, blocks_out));
-}
-
-static int hostfs_file_type(const char *file, int *rdev,
- struct externfs_data *ed)
-{
- char *mount = container_of(ed, struct hostfs_data, ext)->mount;
- const char *path[] = { jail_dir, mount, file, NULL };
-
- return(host_file_type(path, rdev));
-}
-
-static char *hostfs_name(struct inode *inode)
-{
- struct externfs_data *ed = inode_externfs_info(inode);
- char *mount = container_of(ed, struct hostfs_data, ext)->mount;
-
- return(inode_name_prefix(inode, mount));
-}
-
-static struct externfs_inode *hostfs_init_file(struct externfs_data *ed)
-{
- struct hostfs_file *hf;
-
- hf = kmalloc(sizeof(*hf), GFP_KERNEL);
- if(hf == NULL)
- return(NULL);
-
- hf->fh.fd = -1;
- return(&hf->ext);
-}
-
-static int hostfs_open_file(struct externfs_inode *ext, char *file,
- int uid, int gid, struct inode *inode,
- struct externfs_data *ed)
-{
- struct hostfs_file *hf = container_of(ext, struct hostfs_file, ext);
- char *mount = container_of(ed, struct hostfs_data, ext)->mount;
- const char *path[] = { jail_dir, mount, file, NULL };
- int err;
-
- err = host_open_file(path, 1, 1, &hf->fh);
- if(err == -EISDIR)
- goto out;
-
- if(err == -EACCES)
- err = host_open_file(path, 1, 0, &hf->fh);
-
- if(err)
- goto out;
-
- is_reclaimable(&hf->fh, hostfs_name, inode);
- out:
- return(err);
-}
-
-static void *hostfs_open_dir(char *file, int uid, int gid,
- struct externfs_data *ed)
-{
- char *mount = container_of(ed, struct hostfs_data, ext)->mount;
- const char *path[] = { jail_dir, mount, file, NULL };
-
- return(host_open_dir(path));
-}
-
-static void hostfs_close_dir(void *stream, struct externfs_data *ed)
-{
- os_close_dir(stream);
-}
-
-static char *hostfs_read_dir(void *stream, unsigned long long *pos,
- unsigned long long *ino_out, int *len_out,
- struct externfs_data *ed)
-{
- char *mount = container_of(ed, struct hostfs_data, ext)->mount;
-
- return(generic_host_read_dir(stream, pos, ino_out, len_out, mount));
-}
-
-static int hostfs_read_file(struct externfs_inode *ext,
- unsigned long long offset, char *buf, int len,
- int ignore_start, int ignore_end,
- void (*completion)(char *, int, void *), void *arg,
- struct externfs_data *ed)
-{
- struct hostfs_file *hf = container_of(ext, struct hostfs_file, ext);
- int err = 0;
-
- if(ignore_start != 0){
- err = read_file(&hf->fh, offset, buf, ignore_start);
- if(err < 0)
- goto out;
- }
-
- if(ignore_end != len)
- err = read_file(&hf->fh, offset + ignore_end, buf + ignore_end,
- len - ignore_end);
-
- out:
-
- (*completion)(buf, err, arg);
- if (err > 0)
- err = 0;
- return(err);
-}
-
-static int hostfs_write_file(struct externfs_inode *ext,
- unsigned long long offset, const char *buf,
- int start, int len,
- void (*completion)(char *, int, void *),
- void *arg, struct externfs_data *ed)
-{
- struct file_handle *fh;
- int err;
-
- fh = &container_of(ext, struct hostfs_file, ext)->fh;
- err = write_file(fh, offset + start, buf + start, len);
-
- (*completion)((char *) buf, err, arg);
- if (err > 0)
- err = 0;
-
- return(err);
-}
-
-static int hostfs_create_file(struct externfs_inode *ext, char *file, int mode,
- int uid, int gid, struct inode *inode,
- struct externfs_data *ed)
-{
- struct hostfs_file *hf = container_of(ext, struct hostfs_file,
- ext);
- char *mount = container_of(ed, struct hostfs_data, ext)->mount;
- const char *path[] = { jail_dir, mount, file, NULL };
- int err = -ENOMEM;
-
- err = host_create_file(path, mode, &hf->fh);
- if(err)
- goto out;
-
- is_reclaimable(&hf->fh, hostfs_name, inode);
- out:
- return(err);
-}
-
-static int hostfs_set_attr(const char *file, struct externfs_iattr *attrs,
- struct externfs_data *ed)
-{
- char *mount = container_of(ed, struct hostfs_data, ext)->mount;
- const char *path[] = { jail_dir, mount, file, NULL };
-
- return(host_set_attr(path, attrs));
-}
-
-static int hostfs_make_symlink(const char *from, const char *to, int uid,
- int gid, struct externfs_data *ed)
-{
- char *mount = container_of(ed, struct hostfs_data, ext)->mount;
- const char *path[] = { jail_dir, mount, from, NULL };
-
- return(host_make_symlink(path, to));
-}
-
-static int hostfs_link_file(const char *to, const char *from, int uid, int gid,
- struct externfs_data *ed)
-{
- char *mount = container_of(ed, struct hostfs_data, ext)->mount;
- const char *to_path[] = { jail_dir, mount, to, NULL };
- const char *from_path[] = { jail_dir, mount, from, NULL };
-
- return(host_link_file(to_path, from_path));
-}
-
-static int hostfs_unlink_file(const char *file, struct externfs_data *ed)
-{
- char *mount = container_of(ed, struct hostfs_data, ext)->mount;
- const char *path[] = { jail_dir, mount, file, NULL };
-
- return(host_unlink_file(path));
-}
-
-static int hostfs_make_dir(const char *file, int mode, int uid, int gid,
- struct externfs_data *ed)
-{
- char *mount = container_of(ed, struct hostfs_data, ext)->mount;
- const char *path[] = { jail_dir, mount, file, NULL };
-
- return(host_make_dir(path, mode));
-}
-
-static int hostfs_remove_dir(const char *file, int uid, int gid,
- struct externfs_data *ed)
-{
- char *mount = container_of(ed, struct hostfs_data, ext)->mount;
- const char *path[] = { jail_dir, mount, file, NULL };
-
- return(host_remove_dir(path));
-}
-
-static int hostfs_read_link(char *file, int uid, int gid, char *buf, int size,
- struct externfs_data *ed)
-{
- char *mount = container_of(ed, struct hostfs_data, ext)->mount;
- const char *path[] = { jail_dir, mount, file, NULL };
-
- return(host_read_link(path, buf, size));
-}
-
-static int hostfs_rename_file(char *from, char *to, struct externfs_data *ed)
-{
- char *mount = container_of(ed, struct hostfs_data, ext)->mount;
- const char *to_path[] = { jail_dir, mount, to, NULL };
- const char *from_path[] = { jail_dir, mount, from, NULL };
-
- return(host_rename_file(from_path, to_path));
-}
-
-static int hostfs_stat_fs(long *bsize_out, long long *blocks_out,
- long long *bfree_out, long long *bavail_out,
- long long *files_out, long long *ffree_out,
- void *fsid_out, int fsid_size, long *namelen_out,
- long *spare_out, struct externfs_data *ed)
-{
- char *mount = container_of(ed, struct hostfs_data, ext)->mount;
- const char *path[] = { jail_dir, mount, NULL };
-
- return(host_stat_fs(path, bsize_out, blocks_out, bfree_out, bavail_out,
- files_out, ffree_out, fsid_out, fsid_size,
- namelen_out, spare_out));
-}
-
-void hostfs_close_file(struct externfs_inode *ext,
- unsigned long long size)
-{
- struct hostfs_file *hf = container_of(ext, struct hostfs_file, ext);
-
- if(hf->fh.fd != -1){
- truncate_file(&hf->fh, size);
- close_file(&hf->fh);
- }
-
- kfree(hf);
-}
-
-int hostfs_truncate_file(struct externfs_inode *ext, __u64 size,
- struct externfs_data *ed)
-{
- struct hostfs_file *hf = container_of(ext, struct hostfs_file, ext);
-
- return(truncate_file(&hf->fh, size));
-}
-
-static struct externfs_file_ops hostfs_file_ops = {
- .stat_file = hostfs_stat_file,
- .file_type = hostfs_file_type,
- .access_file = hostfs_access_file,
- .open_file = hostfs_open_file,
- .open_dir = hostfs_open_dir,
- .read_dir = hostfs_read_dir,
- .read_file = hostfs_read_file,
- .write_file = hostfs_write_file,
- .map_file_page = NULL,
- .close_file = hostfs_close_file,
- .close_dir = hostfs_close_dir,
- .invisible = NULL,
- .create_file = hostfs_create_file,
- .set_attr = hostfs_set_attr,
- .make_symlink = hostfs_make_symlink,
- .unlink_file = hostfs_unlink_file,
- .make_dir = hostfs_make_dir,
- .remove_dir = hostfs_remove_dir,
- .make_node = hostfs_make_node,
- .link_file = hostfs_link_file,
- .read_link = hostfs_read_link,
- .rename_file = hostfs_rename_file,
- .statfs = hostfs_stat_fs,
- .truncate_file = hostfs_truncate_file
-};
-
-static struct externfs_data *mount_fs(char *mount_arg)
-{
- struct hostfs_data *hd;
- int err = -ENOMEM;
-
- hd = kmalloc(sizeof(*hd), GFP_KERNEL);
- if(hd == NULL)
- goto out;
-
- hd->mount = host_root_filename(mount_arg);
- if(hd->mount == NULL)
- goto out_free;
-
- init_externfs(&hd->ext, &hostfs_file_ops);
-
- return(&hd->ext);
-
- out_free:
- kfree(hd);
- out:
- return(ERR_PTR(err));
-}
-
-static struct externfs_mount_ops hostfs_mount_ops = {
- .init_file = hostfs_init_file,
- .mount = mount_fs,
-};
-
-static int __init init_hostfs(void)
-{
- return(register_externfs("hostfs", &hostfs_mount_ops));
-}
-
-static void __exit exit_hostfs(void)
-{
- unregister_externfs("hostfs");
-}
-
-__initcall(init_hostfs);
-__exitcall(exit_hostfs);
-
-#if 0
-module_init(init_hostfs)
-module_exit(exit_hostfs)
-MODULE_LICENSE("GPL");
-#endif
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
-/*
- * Copyright (C) 2004 Jeff Dike (jdike@addtoit.com)
- * Licensed under the GPL
- */
-
#ifndef __UM_FS_HOSTFS
#define __UM_FS_HOSTFS
-#include "linux/fs.h"
-#include "filehandle.h"
#include "os.h"
/* These are exactly the same definitions as in fs.h, but the names are
* changed so that this file can be included in both kernel and user files.
*/
-#define EXTERNFS_ATTR_MODE 1
-#define EXTERNFS_ATTR_UID 2
-#define EXTERNFS_ATTR_GID 4
-#define EXTERNFS_ATTR_SIZE 8
-#define EXTERNFS_ATTR_ATIME 16
-#define EXTERNFS_ATTR_MTIME 32
-#define EXTERNFS_ATTR_CTIME 64
-#define EXTERNFS_ATTR_ATIME_SET 128
-#define EXTERNFS_ATTR_MTIME_SET 256
-#define EXTERNFS_ATTR_FORCE 512 /* Not a change, but a change it */
-#define EXTERNFS_ATTR_ATTR_FLAG 1024
-
-struct externfs_iattr {
+#define HOSTFS_ATTR_MODE 1
+#define HOSTFS_ATTR_UID 2
+#define HOSTFS_ATTR_GID 4
+#define HOSTFS_ATTR_SIZE 8
+#define HOSTFS_ATTR_ATIME 16
+#define HOSTFS_ATTR_MTIME 32
+#define HOSTFS_ATTR_CTIME 64
+#define HOSTFS_ATTR_ATIME_SET 128
+#define HOSTFS_ATTR_MTIME_SET 256
+#define HOSTFS_ATTR_FORCE 512 /* Not a change, but a change it */
+#define HOSTFS_ATTR_ATTR_FLAG 1024
+
+struct hostfs_iattr {
unsigned int ia_valid;
mode_t ia_mode;
uid_t ia_uid;
gid_t ia_gid;
loff_t ia_size;
- time_t ia_atime;
- time_t ia_mtime;
- time_t ia_ctime;
+ struct timespec ia_atime;
+ struct timespec ia_mtime;
+ struct timespec ia_ctime;
unsigned int ia_attr_flags;
};
-struct externfs_data {
- struct externfs_file_ops *file_ops;
- struct externfs_mount_ops *mount_ops;
-};
-
-struct externfs_inode {
- struct inode vfs_inode;
- struct externfs_file_ops *ops;
-};
-
-struct externfs_mount_ops {
- struct externfs_data *(*mount)(char *mount_arg);
- struct externfs_inode *(*init_file)(struct externfs_data *ed);
-};
-
-struct externfs_file_ops {
- int (*stat_file)(const char *path, struct externfs_data *ed,
- dev_t *dev_out, unsigned long long *inode_out,
- int *mode_out, int *nlink_out, int *uid_out,
- int *gid_out, unsigned long long *size_out,
- unsigned long *atime_out, unsigned long *mtime_out,
- unsigned long *ctime_out, int *blksize_out,
- unsigned long long *blocks_out);
- int (*file_type)(const char *path, int *rdev,
- struct externfs_data *ed);
- int (*access_file)(char *path, int r, int w, int x, int uid, int gid,
- struct externfs_data *ed);
- int (*open_file)(struct externfs_inode *ext, char *file,
- int uid, int gid, struct inode *inode,
- struct externfs_data *ed);
- void (*close_file)(struct externfs_inode *ext,
- unsigned long long size);
- void *(*open_dir)(char *path, int uid, int gid,
- struct externfs_data *ed);
- char *(*read_dir)(void *stream, unsigned long long *pos,
- unsigned long long *ino_out, int *len_out,
- struct externfs_data *ed);
- int (*read_file)(struct externfs_inode *ext,
- unsigned long long offset, char *buf, int len,
- int ignore_start, int ignore_end,
- void (*completion)(char *, int, void *), void *arg,
- struct externfs_data *ed);
- int (*write_file)(struct externfs_inode *ext,
- unsigned long long offset, const char *buf,
- int start, int len,
- void (*completion)(char *, int, void *), void *arg,
- struct externfs_data *ed);
- int (*map_file_page)(struct externfs_inode *ext,
- unsigned long long offset, char *buf, int w,
- struct externfs_data *ed);
- void (*close_dir)(void *stream, struct externfs_data *ed);
- void (*invisible)(struct externfs_inode *ext);
- int (*create_file)(struct externfs_inode *ext, char *path,
- int mode, int uid, int gid, struct inode *inode,
- struct externfs_data *ed);
- int (*set_attr)(const char *path, struct externfs_iattr *attrs,
- struct externfs_data *ed);
- int (*make_symlink)(const char *from, const char *to, int uid, int gid,
- struct externfs_data *ed);
- int (*unlink_file)(const char *path, struct externfs_data *ed);
- int (*make_dir)(const char *path, int mode, int uid, int gid,
- struct externfs_data *ed);
- int (*remove_dir)(const char *path, int uid, int gid,
- struct externfs_data *ed);
- int (*make_node)(const char *path, int mode, int uid, int gid,
- int type, int maj, int min, struct externfs_data *ed);
- int (*link_file)(const char *to, const char *from, int uid, int gid,
- struct externfs_data *ed);
- int (*read_link)(char *path, int uid, int gid, char *buf, int size,
- struct externfs_data *ed);
- int (*rename_file)(char *from, char *to, struct externfs_data *ed);
- int (*statfs)(long *bsize_out, long long *blocks_out,
- long long *bfree_out, long long *bavail_out,
- long long *files_out, long long *ffree_out,
- void *fsid_out, int fsid_size, long *namelen_out,
- long *spare_out, struct externfs_data *ed);
- int (*truncate_file)(struct externfs_inode *ext, __u64 size,
- struct externfs_data *ed);
-};
-
-#define HOSTFS_BUFSIZE 64
-
-extern int register_externfs(char *name, struct externfs_mount_ops *mount_ops);
-extern void unregister_externfs(char *name);
-extern void init_externfs(struct externfs_data *ed,
- struct externfs_file_ops *ops);
-struct externfs_data *inode_externfs_info(struct inode *inode);
-
-extern char *generic_root_filename(char *mount_arg);
-extern void host_close_file(void *stream);
-extern int host_read_file(int fd, unsigned long long offset, char *buf,
- int len);
-extern int host_open_file(const char *path[], int r, int w,
- struct file_handle *fh);
-extern void *host_open_dir(const char *path[]);
-extern char *host_read_dir(void *stream, unsigned long long *pos,
- unsigned long long *ino_out, int *len_out);
-extern int host_file_type(const char *path[], int *rdev);
-extern char *host_root_filename(char *mount_arg);
-extern char *get_path(const char *path[], char *buf, int size);
-extern void free_path(const char *buf, char *tmp);
-extern int host_create_file(const char *path[], int mode,
- struct file_handle *fh);
-extern int host_set_attr(const char *path[], struct externfs_iattr *attrs);
-extern int host_make_symlink(const char *from[], const char *to);
-extern int host_unlink_file(const char *path[]);
-extern int host_make_dir(const char *path[], int mode);
-extern int host_remove_dir(const char *path[]);
-extern int host_link_file(const char *to[], const char *from[]);
-extern int host_read_link(const char *path[], char *buf, int size);
-extern int host_rename_file(const char *from[], const char *to[]);
-extern int host_stat_fs(const char *path[], long *bsize_out,
- long long *blocks_out, long long *bfree_out,
- long long *bavail_out, long long *files_out,
- long long *ffree_out, void *fsid_out, int fsid_size,
- long *namelen_out, long *spare_out);
-extern int host_stat_file(const char *path[], int *dev_out,
- unsigned long long *inode_out, int *mode_out,
- int *nlink_out, int *uid_out, int *gid_out,
- unsigned long long *size_out,
- unsigned long *atime_out, unsigned long *mtime_out,
- unsigned long *ctime_out, int *blksize_out,
- unsigned long long *blocks_out);
-
-extern char *generic_host_read_dir(void *stream, unsigned long long *pos,
- unsigned long long *ino_out, int *len_out,
- void *mount);
-extern int generic_host_read_file(int fd, unsigned long long offset, char *buf,
- int len, void *mount);
-extern void generic_host_close_file(void *stream, unsigned long long size,
- void *mount);
-extern int generic_host_truncate_file(struct file_handle *fh, __u64 size,
- void *m);
-
-extern char *inode_name_prefix(struct inode *inode, char *prefix);
+extern int stat_file(const char *path, unsigned long long *inode_out,
+ int *mode_out, int *nlink_out, int *uid_out, int *gid_out,
+ unsigned long long *size_out, struct timespec *atime_out,
+ struct timespec *mtime_out, struct timespec *ctime_out,
+ int *blksize_out, unsigned long long *blocks_out);
+extern int access_file(char *path, int r, int w, int x);
+extern int open_file(char *path, int r, int w, int append);
+extern int file_type(const char *path, int *rdev);
+extern void *open_dir(char *path, int *err_out);
+extern char *read_dir(void *stream, unsigned long long *pos,
+ unsigned long long *ino_out, int *len_out);
+extern void close_file(void *stream);
+extern void close_dir(void *stream);
+extern int read_file(int fd, unsigned long long *offset, char *buf, int len);
+extern int write_file(int fd, unsigned long long *offset, const char *buf,
+ int len);
+extern int lseek_file(int fd, long long offset, int whence);
+extern int file_create(char *name, int ur, int uw, int ux, int gr,
+ int gw, int gx, int or, int ow, int ox);
+extern int set_attr(const char *file, struct hostfs_iattr *attrs);
+extern int make_symlink(const char *from, const char *to);
+extern int unlink_file(const char *file);
+extern int do_mkdir(const char *file, int mode);
+extern int do_rmdir(const char *file);
+extern int do_mknod(const char *file, int mode, int dev);
+extern int link_file(const char *from, const char *to);
+extern int do_readlink(char *file, char *buf, int size);
+extern int rename_file(char *from, char *to);
+extern int do_statfs(char *root, long *bsize_out, long long *blocks_out,
+ long long *bfree_out, long long *bavail_out,
+ long long *files_out, long long *ffree_out,
+ void *fsid_out, int fsid_size, long *namelen_out,
+ long *spare_out);
#endif
--- /dev/null
+/*
+ * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
+ * Licensed under the GPL
+ *
+ * Ported the filesystem routines to 2.5.
+ * 2003-02-10 Petr Baudis <pasky@ucw.cz>
+ */
+
+#include <linux/stddef.h>
+#include <linux/fs.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/blkdev.h>
+#include <linux/list.h>
+#include <linux/buffer_head.h>
+#include <linux/root_dev.h>
+#include <linux/statfs.h>
+#include <asm/uaccess.h>
+#include "hostfs.h"
+#include "kern_util.h"
+#include "kern.h"
+#include "user_util.h"
+#include "2_5compat.h"
+#include "init.h"
+
+struct hostfs_inode_info {
+ char *host_filename;
+ int fd;
+ int mode;
+ struct inode vfs_inode;
+};
+
+static inline struct hostfs_inode_info *HOSTFS_I(struct inode *inode)
+{
+ return(list_entry(inode, struct hostfs_inode_info, vfs_inode));
+}
+
+#define FILE_HOSTFS_I(file) HOSTFS_I((file)->f_dentry->d_inode)
+
+int hostfs_d_delete(struct dentry *dentry)
+{
+ return(1);
+}
+
+struct dentry_operations hostfs_dentry_ops = {
+ .d_delete = hostfs_d_delete,
+};
+
+/* Changed in hostfs_args before the kernel starts running */
+static char *root_ino = "/";
+static int append = 0;
+
+#define HOSTFS_SUPER_MAGIC 0x00c0ffee
+
+static struct inode_operations hostfs_iops;
+static struct inode_operations hostfs_dir_iops;
+static struct address_space_operations hostfs_link_aops;
+
+static int __init hostfs_args(char *options, int *add)
+{
+ char *ptr;
+
+ ptr = strchr(options, ',');
+ if(ptr != NULL)
+ *ptr++ = '\0';
+ if(*options != '\0')
+ root_ino = options;
+
+ options = ptr;
+ while(options){
+ ptr = strchr(options, ',');
+ if(ptr != NULL)
+ *ptr++ = '\0';
+ if(*options != '\0'){
+ if(!strcmp(options, "append"))
+ append = 1;
+ else printf("hostfs_args - unsupported option - %s\n",
+ options);
+ }
+ options = ptr;
+ }
+ return(0);
+}
+
+__uml_setup("hostfs=", hostfs_args,
+"hostfs=<root dir>,<flags>,...\n"
+" This is used to set hostfs parameters. The root directory argument\n"
+" is used to confine all hostfs mounts to within the specified directory\n"
+" tree on the host. If this isn't specified, then a user inside UML can\n"
+" mount anything on the host that's accessible to the user that's running\n"
+" it.\n"
+" The only flag currently supported is 'append', which specifies that all\n"
+" files opened by hostfs will be opened in append mode.\n\n"
+);
+
+static char *dentry_name(struct dentry *dentry, int extra)
+{
+ struct dentry *parent;
+ char *root, *name;
+ int len;
+
+ len = 0;
+ parent = dentry;
+ while(parent->d_parent != parent){
+ len += parent->d_name.len + 1;
+ parent = parent->d_parent;
+ }
+
+ root = HOSTFS_I(parent->d_inode)->host_filename;
+ len += strlen(root);
+ name = kmalloc(len + extra + 1, GFP_KERNEL);
+ if(name == NULL) return(NULL);
+
+ name[len] = '\0';
+ parent = dentry;
+ while(parent->d_parent != parent){
+ len -= parent->d_name.len + 1;
+ name[len] = '/';
+ strncpy(&name[len + 1], parent->d_name.name,
+ parent->d_name.len);
+ parent = parent->d_parent;
+ }
+ strncpy(name, root, strlen(root));
+ return(name);
+}
+
+static char *inode_name(struct inode *ino, int extra)
+{
+ struct dentry *dentry;
+
+ dentry = list_entry(ino->i_dentry.next, struct dentry, d_alias);
+ return(dentry_name(dentry, extra));
+}
+
+static int read_name(struct inode *ino, char *name)
+{
+ /* The non-int inode fields are copied into ints by stat_file and
+ * then copied into the inode because passing the actual pointers
+ * in and having them treated as int * breaks on big-endian machines
+ */
+ int err;
+ int i_mode, i_nlink, i_blksize;
+ unsigned long long i_size;
+ unsigned long long i_ino;
+ unsigned long long i_blocks;
+
+ err = stat_file(name, &i_ino, &i_mode, &i_nlink, &ino->i_uid,
+ &ino->i_gid, &i_size, &ino->i_atime, &ino->i_mtime,
+ &ino->i_ctime, &i_blksize, &i_blocks);
+ if(err)
+ return(err);
+
+ ino->i_ino = i_ino;
+ ino->i_mode = i_mode;
+ ino->i_nlink = i_nlink;
+ ino->i_size = i_size;
+ ino->i_blksize = i_blksize;
+ ino->i_blocks = i_blocks;
+ if((ino->i_sb->s_dev == ROOT_DEV) && (ino->i_uid == getuid()))
+ ino->i_uid = 0;
+ return(0);
+}
+
+static char *follow_link(char *link)
+{
+ int len, n;
+ char *name, *resolved, *end;
+
+ len = 64;
+ while(1){
+ n = -ENOMEM;
+ name = kmalloc(len, GFP_KERNEL);
+ if(name == NULL)
+ goto out;
+
+ n = do_readlink(link, name, len);
+ if(n < len)
+ break;
+ len *= 2;
+ kfree(name);
+ }
+ if(n < 0)
+ goto out_free;
+
+ if(*name == '/')
+ return(name);
+
+ end = strrchr(link, '/');
+ if(end == NULL)
+ return(name);
+
+ *(end + 1) = '\0';
+ len = strlen(link) + strlen(name) + 1;
+
+ resolved = kmalloc(len, GFP_KERNEL);
+ if(resolved == NULL){
+ n = -ENOMEM;
+ goto out_free;
+ }
+
+ sprintf(resolved, "%s%s", link, name);
+ kfree(name);
+ kfree(link);
+ return(resolved);
+
+ out_free:
+ kfree(name);
+ out:
+ return(ERR_PTR(n));
+}
+
+static int read_inode(struct inode *ino)
+{
+ char *name;
+ int err = 0;
+
+ /* Unfortunately, we are called from iget() when we don't have a dentry
+ * allocated yet.
+ */
+ if(list_empty(&ino->i_dentry))
+ goto out;
+
+ err = -ENOMEM;
+ name = inode_name(ino, 0);
+ if(name == NULL)
+ goto out;
+
+ if(file_type(name, NULL) == OS_TYPE_SYMLINK){
+ name = follow_link(name);
+ if(IS_ERR(name)){
+ err = PTR_ERR(name);
+ goto out;
+ }
+ }
+
+ err = read_name(ino, name);
+ kfree(name);
+ out:
+ return(err);
+}
+
+int hostfs_statfs(struct super_block *sb, struct kstatfs *sf)
+{
+ /* do_statfs uses struct statfs64 internally, but the linux kernel
+ * struct statfs still has 32-bit versions for most of these fields,
+ * so we convert them here
+ */
+ int err;
+ long long f_blocks;
+ long long f_bfree;
+ long long f_bavail;
+ long long f_files;
+ long long f_ffree;
+
+ err = do_statfs(HOSTFS_I(sb->s_root->d_inode)->host_filename,
+ &sf->f_bsize, &f_blocks, &f_bfree, &f_bavail, &f_files,
+ &f_ffree, &sf->f_fsid, sizeof(sf->f_fsid),
+ &sf->f_namelen, sf->f_spare);
+ if(err) return(err);
+ sf->f_blocks = f_blocks;
+ sf->f_bfree = f_bfree;
+ sf->f_bavail = f_bavail;
+ sf->f_files = f_files;
+ sf->f_ffree = f_ffree;
+ sf->f_type = HOSTFS_SUPER_MAGIC;
+ return(0);
+}
+
+static struct inode *hostfs_alloc_inode(struct super_block *sb)
+{
+ struct hostfs_inode_info *hi;
+
+ hi = kmalloc(sizeof(*hi), GFP_KERNEL);
+ if(hi == NULL)
+ return(NULL);
+
+ *hi = ((struct hostfs_inode_info) { .host_filename = NULL,
+ .fd = -1,
+ .mode = 0 });
+ inode_init_once(&hi->vfs_inode);
+ return(&hi->vfs_inode);
+}
+
+static void hostfs_destroy_inode(struct inode *inode)
+{
+ if(HOSTFS_I(inode)->host_filename)
+ kfree(HOSTFS_I(inode)->host_filename);
+
+ if(HOSTFS_I(inode)->fd != -1)
+ close_file(&HOSTFS_I(inode)->fd);
+
+ kfree(HOSTFS_I(inode));
+}
+
+static void hostfs_read_inode(struct inode *inode)
+{
+ read_inode(inode);
+}
+
+static struct super_operations hostfs_sbops = {
+ .alloc_inode = hostfs_alloc_inode,
+ .destroy_inode = hostfs_destroy_inode,
+ .read_inode = hostfs_read_inode,
+ .statfs = hostfs_statfs,
+};
+
+int hostfs_readdir(struct file *file, void *ent, filldir_t filldir)
+{
+ void *dir;
+ char *name;
+ unsigned long long next, ino;
+ int error, len;
+
+ name = dentry_name(file->f_dentry, 0);
+ if(name == NULL) return(-ENOMEM);
+ dir = open_dir(name, &error);
+ kfree(name);
+ if(dir == NULL) return(-error);
+ next = file->f_pos;
+ while((name = read_dir(dir, &next, &ino, &len)) != NULL){
+ error = (*filldir)(ent, name, len, file->f_pos,
+ ino, DT_UNKNOWN);
+ if(error) break;
+ file->f_pos = next;
+ }
+ close_dir(dir);
+ return(0);
+}
+
+int hostfs_file_open(struct inode *ino, struct file *file)
+{
+ char *name;
+ int mode = 0, r = 0, w = 0, fd;
+
+ mode = file->f_mode & (FMODE_READ | FMODE_WRITE);
+ if((mode & HOSTFS_I(ino)->mode) == mode)
+ return(0);
+
+ /* The file may already have been opened, but with the wrong access,
+ * so this resets things and reopens the file with the new access.
+ */
+ if(HOSTFS_I(ino)->fd != -1){
+ close_file(&HOSTFS_I(ino)->fd);
+ HOSTFS_I(ino)->fd = -1;
+ }
+
+ HOSTFS_I(ino)->mode |= mode;
+ if(HOSTFS_I(ino)->mode & FMODE_READ)
+ r = 1;
+ if(HOSTFS_I(ino)->mode & FMODE_WRITE)
+ w = 1;
+ if(w)
+ r = 1;
+
+ name = dentry_name(file->f_dentry, 0);
+ if(name == NULL)
+ return(-ENOMEM);
+
+ fd = open_file(name, r, w, append);
+ kfree(name);
+ if(fd < 0) return(fd);
+ FILE_HOSTFS_I(file)->fd = fd;
+
+ return(0);
+}
+
+int hostfs_fsync(struct file *file, struct dentry *dentry, int datasync)
+{
+ return(0);
+}
+
+static struct file_operations hostfs_file_fops = {
+ .llseek = generic_file_llseek,
+ .read = generic_file_read,
+ .write = generic_file_write,
+ .mmap = generic_file_mmap,
+ .open = hostfs_file_open,
+ .release = NULL,
+ .fsync = hostfs_fsync,
+};
+
+static struct file_operations hostfs_dir_fops = {
+ .readdir = hostfs_readdir,
+ .read = generic_read_dir,
+};
+
+int hostfs_writepage(struct page *page, struct writeback_control *wbc)
+{
+ struct address_space *mapping = page->mapping;
+ struct inode *inode = mapping->host;
+ char *buffer;
+ unsigned long long base;
+ int count = PAGE_CACHE_SIZE;
+ int end_index = inode->i_size >> PAGE_CACHE_SHIFT;
+ int err;
+
+ if (page->index >= end_index)
+ count = inode->i_size & (PAGE_CACHE_SIZE-1);
+
+ buffer = kmap(page);
+ base = ((unsigned long long) page->index) << PAGE_CACHE_SHIFT;
+
+ err = write_file(HOSTFS_I(inode)->fd, &base, buffer, count);
+ if(err != count){
+ ClearPageUptodate(page);
+ goto out;
+ }
+
+ if (base > inode->i_size)
+ inode->i_size = base;
+
+ if (PageError(page))
+ ClearPageError(page);
+ err = 0;
+
+ out:
+ kunmap(page);
+
+ unlock_page(page);
+ return err;
+}
+
+int hostfs_readpage(struct file *file, struct page *page)
+{
+ char *buffer;
+ long long start;
+ int err = 0;
+
+ start = (long long) page->index << PAGE_CACHE_SHIFT;
+ buffer = kmap(page);
+ err = read_file(FILE_HOSTFS_I(file)->fd, &start, buffer,
+ PAGE_CACHE_SIZE);
+ if(err < 0) goto out;
+
+ memset(&buffer[err], 0, PAGE_CACHE_SIZE - err);
+
+ flush_dcache_page(page);
+ SetPageUptodate(page);
+ if (PageError(page)) ClearPageError(page);
+ err = 0;
+ out:
+ kunmap(page);
+ unlock_page(page);
+ return(err);
+}
+
+int hostfs_prepare_write(struct file *file, struct page *page,
+ unsigned int from, unsigned int to)
+{
+ char *buffer;
+ long long start, tmp;
+ int err;
+
+ start = (long long) page->index << PAGE_CACHE_SHIFT;
+ buffer = kmap(page);
+ if(from != 0){
+ tmp = start;
+ err = read_file(FILE_HOSTFS_I(file)->fd, &tmp, buffer,
+ from);
+ if(err < 0) goto out;
+ }
+ if(to != PAGE_CACHE_SIZE){
+ start += to;
+ err = read_file(FILE_HOSTFS_I(file)->fd, &start, buffer + to,
+ PAGE_CACHE_SIZE - to);
+ if(err < 0) goto out;
+ }
+ err = 0;
+ out:
+ kunmap(page);
+ return(err);
+}
+
+int hostfs_commit_write(struct file *file, struct page *page, unsigned from,
+ unsigned to)
+{
+ struct address_space *mapping = page->mapping;
+ struct inode *inode = mapping->host;
+ char *buffer;
+ long long start;
+ int err = 0;
+
+ start = (long long) (page->index << PAGE_CACHE_SHIFT) + from;
+ buffer = kmap(page);
+ err = write_file(FILE_HOSTFS_I(file)->fd, &start, buffer + from,
+ to - from);
+ if(err > 0) err = 0;
+ if(!err && (start > inode->i_size))
+ inode->i_size = start;
+
+ kunmap(page);
+ return(err);
+}
+
+static struct address_space_operations hostfs_aops = {
+ .writepage = hostfs_writepage,
+ .readpage = hostfs_readpage,
+/* .set_page_dirty = __set_page_dirty_nobuffers, */
+ .prepare_write = hostfs_prepare_write,
+ .commit_write = hostfs_commit_write
+};
+
+static int init_inode(struct inode *inode, struct dentry *dentry)
+{
+ char *name;
+ int type, err = -ENOMEM, rdev;
+
+ if(dentry){
+ name = dentry_name(dentry, 0);
+ if(name == NULL)
+ goto out;
+ type = file_type(name, &rdev);
+ kfree(name);
+ }
+ else type = OS_TYPE_DIR;
+
+ err = 0;
+ if(type == OS_TYPE_SYMLINK)
+ inode->i_op = &page_symlink_inode_operations;
+ else if(type == OS_TYPE_DIR)
+ inode->i_op = &hostfs_dir_iops;
+ else inode->i_op = &hostfs_iops;
+
+ if(type == OS_TYPE_DIR) inode->i_fop = &hostfs_dir_fops;
+ else inode->i_fop = &hostfs_file_fops;
+
+ if(type == OS_TYPE_SYMLINK)
+ inode->i_mapping->a_ops = &hostfs_link_aops;
+ else inode->i_mapping->a_ops = &hostfs_aops;
+
+ switch (type) {
+ case OS_TYPE_CHARDEV:
+ init_special_inode(inode, S_IFCHR, rdev);
+ break;
+ case OS_TYPE_BLOCKDEV:
+ init_special_inode(inode, S_IFBLK, rdev);
+ break;
+ case OS_TYPE_FIFO:
+ init_special_inode(inode, S_IFIFO, 0);
+ break;
+ case OS_TYPE_SOCK:
+ init_special_inode(inode, S_IFSOCK, 0);
+ break;
+ }
+ out:
+ return(err);
+}
+
+int hostfs_create(struct inode *dir, struct dentry *dentry, int mode,
+ struct nameidata *nd)
+{
+ struct inode *inode;
+ char *name;
+ int error, fd;
+
+ error = -ENOMEM;
+ inode = iget(dir->i_sb, 0);
+ if(inode == NULL) goto out;
+
+ error = init_inode(inode, dentry);
+ if(error)
+ goto out_put;
+
+ error = -ENOMEM;
+ name = dentry_name(dentry, 0);
+ if(name == NULL)
+ goto out_put;
+
+ fd = file_create(name,
+ mode & S_IRUSR, mode & S_IWUSR, mode & S_IXUSR,
+ mode & S_IRGRP, mode & S_IWGRP, mode & S_IXGRP,
+ mode & S_IROTH, mode & S_IWOTH, mode & S_IXOTH);
+ if(fd < 0)
+ error = fd;
+ else error = read_name(inode, name);
+
+ kfree(name);
+ if(error)
+ goto out_put;
+
+ HOSTFS_I(inode)->fd = fd;
+ HOSTFS_I(inode)->mode = FMODE_READ | FMODE_WRITE;
+ d_instantiate(dentry, inode);
+ return(0);
+
+ out_put:
+ iput(inode);
+ out:
+ return(error);
+}
+
+struct dentry *hostfs_lookup(struct inode *ino, struct dentry *dentry,
+ struct nameidata *nd)
+{
+ struct inode *inode;
+ char *name;
+ int err;
+
+ err = -ENOMEM;
+ inode = iget(ino->i_sb, 0);
+ if(inode == NULL)
+ goto out;
+
+ err = init_inode(inode, dentry);
+ if(err)
+ goto out_put;
+
+ err = -ENOMEM;
+ name = dentry_name(dentry, 0);
+ if(name == NULL)
+ goto out_put;
+
+ err = read_name(inode, name);
+ kfree(name);
+ if(err == -ENOENT){
+ iput(inode);
+ inode = NULL;
+ }
+ else if(err)
+ goto out_put;
+
+ d_add(dentry, inode);
+ dentry->d_op = &hostfs_dentry_ops;
+ return(NULL);
+
+ out_put:
+ iput(inode);
+ out:
+ return(ERR_PTR(err));
+}
+
+static char *inode_dentry_name(struct inode *ino, struct dentry *dentry)
+{
+ char *file;
+ int len;
+
+ file = inode_name(ino, dentry->d_name.len + 1);
+ if(file == NULL) return(NULL);
+ strcat(file, "/");
+ len = strlen(file);
+ strncat(file, dentry->d_name.name, dentry->d_name.len);
+ file[len + dentry->d_name.len] = '\0';
+ return(file);
+}
+
+int hostfs_link(struct dentry *to, struct inode *ino, struct dentry *from)
+{
+ char *from_name, *to_name;
+ int err;
+
+ if((from_name = inode_dentry_name(ino, from)) == NULL)
+ return(-ENOMEM);
+ to_name = dentry_name(to, 0);
+ if(to_name == NULL){
+ kfree(from_name);
+ return(-ENOMEM);
+ }
+ err = link_file(to_name, from_name);
+ kfree(from_name);
+ kfree(to_name);
+ return(err);
+}
+
+int hostfs_unlink(struct inode *ino, struct dentry *dentry)
+{
+ char *file;
+ int err;
+
+ if((file = inode_dentry_name(ino, dentry)) == NULL) return(-ENOMEM);
+ if(append)
+ return(-EPERM);
+
+ err = unlink_file(file);
+ kfree(file);
+ return(err);
+}
+
+int hostfs_symlink(struct inode *ino, struct dentry *dentry, const char *to)
+{
+ char *file;
+ int err;
+
+ if((file = inode_dentry_name(ino, dentry)) == NULL) return(-ENOMEM);
+ err = make_symlink(file, to);
+ kfree(file);
+ return(err);
+}
+
+int hostfs_mkdir(struct inode *ino, struct dentry *dentry, int mode)
+{
+ char *file;
+ int err;
+
+ if((file = inode_dentry_name(ino, dentry)) == NULL) return(-ENOMEM);
+ err = do_mkdir(file, mode);
+ kfree(file);
+ return(err);
+}
+
+int hostfs_rmdir(struct inode *ino, struct dentry *dentry)
+{
+ char *file;
+ int err;
+
+ if((file = inode_dentry_name(ino, dentry)) == NULL) return(-ENOMEM);
+ err = do_rmdir(file);
+ kfree(file);
+ return(err);
+}
+
+int hostfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
+{
+ struct inode *inode;
+ char *name;
+ int err = -ENOMEM;
+
+ inode = iget(dir->i_sb, 0);
+ if(inode == NULL)
+ goto out;
+
+ err = init_inode(inode, dentry);
+ if(err)
+ goto out_put;
+
+ err = -ENOMEM;
+ name = dentry_name(dentry, 0);
+ if(name == NULL)
+ goto out_put;
+
+ init_special_inode(inode, mode, dev);
+ err = do_mknod(name, mode, dev);
+ if(err)
+ goto out_free;
+
+ err = read_name(inode, name);
+ kfree(name);
+ if(err)
+ goto out_put;
+
+ d_instantiate(dentry, inode);
+ return(0);
+
+ out_free:
+ kfree(name);
+ out_put:
+ iput(inode);
+ out:
+ return(err);
+}
+
+int hostfs_rename(struct inode *from_ino, struct dentry *from,
+ struct inode *to_ino, struct dentry *to)
+{
+ char *from_name, *to_name;
+ int err;
+
+ if((from_name = inode_dentry_name(from_ino, from)) == NULL)
+ return(-ENOMEM);
+ if((to_name = inode_dentry_name(to_ino, to)) == NULL){
+ kfree(from_name);
+ return(-ENOMEM);
+ }
+ err = rename_file(from_name, to_name);
+ kfree(from_name);
+ kfree(to_name);
+ return(err);
+}
+
+void hostfs_truncate(struct inode *ino)
+{
+ not_implemented();
+}
+
+int hostfs_permission(struct inode *ino, int desired, struct nameidata *nd)
+{
+ char *name;
+ int r = 0, w = 0, x = 0, err;
+
+ if(desired & MAY_READ) r = 1;
+ if(desired & MAY_WRITE) w = 1;
+ if(desired & MAY_EXEC) x = 1;
+ name = inode_name(ino, 0);
+ if(name == NULL) return(-ENOMEM);
+ err = access_file(name, r, w, x);
+ kfree(name);
+ if(!err) err = vfs_permission(ino, desired);
+ return(err);
+}
+
+int hostfs_setattr(struct dentry *dentry, struct iattr *attr)
+{
+ struct hostfs_iattr attrs;
+ char *name;
+ int err;
+
+ if(append)
+ attr->ia_valid &= ~ATTR_SIZE;
+
+ attrs.ia_valid = 0;
+ if(attr->ia_valid & ATTR_MODE){
+ attrs.ia_valid |= HOSTFS_ATTR_MODE;
+ attrs.ia_mode = attr->ia_mode;
+ }
+ if(attr->ia_valid & ATTR_UID){
+ if((dentry->d_inode->i_sb->s_dev == ROOT_DEV) &&
+ (attr->ia_uid == 0))
+ attr->ia_uid = getuid();
+ attrs.ia_valid |= HOSTFS_ATTR_UID;
+ attrs.ia_uid = attr->ia_uid;
+ }
+ if(attr->ia_valid & ATTR_GID){
+ if((dentry->d_inode->i_sb->s_dev == ROOT_DEV) &&
+ (attr->ia_gid == 0))
+ attr->ia_gid = getuid();
+ attrs.ia_valid |= HOSTFS_ATTR_GID;
+ attrs.ia_gid = attr->ia_gid;
+ }
+ if(attr->ia_valid & ATTR_SIZE){
+ attrs.ia_valid |= HOSTFS_ATTR_SIZE;
+ attrs.ia_size = attr->ia_size;
+ }
+ if(attr->ia_valid & ATTR_ATIME){
+ attrs.ia_valid |= HOSTFS_ATTR_ATIME;
+ attrs.ia_atime = attr->ia_atime;
+ }
+ if(attr->ia_valid & ATTR_MTIME){
+ attrs.ia_valid |= HOSTFS_ATTR_MTIME;
+ attrs.ia_mtime = attr->ia_mtime;
+ }
+ if(attr->ia_valid & ATTR_CTIME){
+ attrs.ia_valid |= HOSTFS_ATTR_CTIME;
+ attrs.ia_ctime = attr->ia_ctime;
+ }
+ if(attr->ia_valid & ATTR_ATIME_SET){
+ attrs.ia_valid |= HOSTFS_ATTR_ATIME_SET;
+ }
+ if(attr->ia_valid & ATTR_MTIME_SET){
+ attrs.ia_valid |= HOSTFS_ATTR_MTIME_SET;
+ }
+ name = dentry_name(dentry, 0);
+ if(name == NULL) return(-ENOMEM);
+ err = set_attr(name, &attrs);
+ kfree(name);
+ if(err)
+ return(err);
+
+ return(inode_setattr(dentry->d_inode, attr));
+}
+
+int hostfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
+ struct kstat *stat)
+{
+ generic_fillattr(dentry->d_inode, stat);
+ return(0);
+}
+
+static struct inode_operations hostfs_iops = {
+ .create = hostfs_create,
+ .link = hostfs_link,
+ .unlink = hostfs_unlink,
+ .symlink = hostfs_symlink,
+ .mkdir = hostfs_mkdir,
+ .rmdir = hostfs_rmdir,
+ .mknod = hostfs_mknod,
+ .rename = hostfs_rename,
+ .truncate = hostfs_truncate,
+ .permission = hostfs_permission,
+ .setattr = hostfs_setattr,
+ .getattr = hostfs_getattr,
+};
+
+static struct inode_operations hostfs_dir_iops = {
+ .create = hostfs_create,
+ .lookup = hostfs_lookup,
+ .link = hostfs_link,
+ .unlink = hostfs_unlink,
+ .symlink = hostfs_symlink,
+ .mkdir = hostfs_mkdir,
+ .rmdir = hostfs_rmdir,
+ .mknod = hostfs_mknod,
+ .rename = hostfs_rename,
+ .truncate = hostfs_truncate,
+ .permission = hostfs_permission,
+ .setattr = hostfs_setattr,
+ .getattr = hostfs_getattr,
+};
+
+int hostfs_link_readpage(struct file *file, struct page *page)
+{
+ char *buffer, *name;
+ long long start;
+ int err;
+
+ start = page->index << PAGE_CACHE_SHIFT;
+ buffer = kmap(page);
+ name = inode_name(page->mapping->host, 0);
+ if(name == NULL) return(-ENOMEM);
+ err = do_readlink(name, buffer, PAGE_CACHE_SIZE);
+ kfree(name);
+ if(err == PAGE_CACHE_SIZE)
+ err = -E2BIG;
+ else if(err > 0){
+ flush_dcache_page(page);
+ SetPageUptodate(page);
+ if (PageError(page)) ClearPageError(page);
+ err = 0;
+ }
+ kunmap(page);
+ unlock_page(page);
+ return(err);
+}
+
+static struct address_space_operations hostfs_link_aops = {
+ .readpage = hostfs_link_readpage,
+};
+
+static int hostfs_fill_sb_common(struct super_block *sb, void *d, int silent)
+{
+ struct inode *root_inode;
+ char *name, *data = d;
+ int err;
+
+ sb->s_blocksize = 1024;
+ sb->s_blocksize_bits = 10;
+ sb->s_magic = HOSTFS_SUPER_MAGIC;
+ sb->s_op = &hostfs_sbops;
+
+ if((data == NULL) || (*data == '\0'))
+ data = root_ino;
+
+ err = -ENOMEM;
+ name = kmalloc(strlen(data) + 1, GFP_KERNEL);
+ if(name == NULL)
+ goto out;
+
+ strcpy(name, data);
+
+ root_inode = iget(sb, 0);
+ if(root_inode == NULL)
+ goto out_free;
+
+ err = init_inode(root_inode, NULL);
+ if(err)
+ goto out_put;
+
+ HOSTFS_I(root_inode)->host_filename = name;
+
+ err = -ENOMEM;
+ sb->s_root = d_alloc_root(root_inode);
+ if(sb->s_root == NULL)
+ goto out_put;
+
+ err = read_inode(root_inode);
+ if(err)
+ goto out_put;
+
+ return(0);
+
+ out_put:
+ iput(root_inode);
+ out_free:
+ kfree(name);
+ out:
+ return(err);
+}
+
+static struct super_block *hostfs_read_sb(struct file_system_type *type,
+ int flags, const char *dev_name,
+ void *data)
+{
+ return(get_sb_nodev(type, flags, data, hostfs_fill_sb_common));
+}
+
+static struct file_system_type hostfs_type = {
+ .owner = THIS_MODULE,
+ .name = "hostfs",
+ .get_sb = hostfs_read_sb,
+ .kill_sb = kill_anon_super,
+ .fs_flags = 0,
+};
+
+static int __init init_hostfs(void)
+{
+ return(register_filesystem(&hostfs_type));
+}
+
+static void __exit exit_hostfs(void)
+{
+ unregister_filesystem(&hostfs_type);
+}
+
+module_init(init_hostfs)
+module_exit(exit_hostfs)
+MODULE_LICENSE("GPL");
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-file-style: "linux"
+ * End:
+ */
--- /dev/null
+/*
+ * Copyright (C) 2000 Jeff Dike (jdike@karaya.com)
+ * Licensed under the GPL
+ */
+
+#include <unistd.h>
+#include <stdio.h>
+#include <fcntl.h>
+#include <dirent.h>
+#include <errno.h>
+#include <utime.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <sys/vfs.h>
+#include "hostfs.h"
+#include "kern_util.h"
+#include "user.h"
+
+int stat_file(const char *path, unsigned long long *inode_out, int *mode_out,
+ int *nlink_out, int *uid_out, int *gid_out,
+ unsigned long long *size_out, struct timespec *atime_out,
+ struct timespec *mtime_out, struct timespec *ctime_out,
+ int *blksize_out, unsigned long long *blocks_out)
+{
+ struct stat64 buf;
+
+ if(lstat64(path, &buf) < 0)
+ return(-errno);
+
+ /* See the Makefile for why STAT64_INO_FIELD is passed in
+ * by the build
+ */
+ if(inode_out != NULL) *inode_out = buf.STAT64_INO_FIELD;
+ if(mode_out != NULL) *mode_out = buf.st_mode;
+ if(nlink_out != NULL) *nlink_out = buf.st_nlink;
+ if(uid_out != NULL) *uid_out = buf.st_uid;
+ if(gid_out != NULL) *gid_out = buf.st_gid;
+ if(size_out != NULL) *size_out = buf.st_size;
+ if(atime_out != NULL) {
+ atime_out->tv_sec = buf.st_atime;
+ atime_out->tv_nsec = 0;
+ }
+ if(mtime_out != NULL) {
+ mtime_out->tv_sec = buf.st_mtime;
+ mtime_out->tv_nsec = 0;
+ }
+ if(ctime_out != NULL) {
+ ctime_out->tv_sec = buf.st_ctime;
+ ctime_out->tv_nsec = 0;
+ }
+ if(blksize_out != NULL) *blksize_out = buf.st_blksize;
+ if(blocks_out != NULL) *blocks_out = buf.st_blocks;
+ return(0);
+}
+
+/*
+ * file_type - classify the host path (without following symlinks) into one
+ * of the OS_TYPE_* constants.  If rdev is non-NULL it receives st_rdev,
+ * which is only meaningful for the char/block device cases.
+ * Returns an OS_TYPE_* value or -errno if the lstat64() fails.
+ */
+int file_type(const char *path, int *rdev)
+{
+ struct stat64 buf;
+
+ if(lstat64(path, &buf) < 0)
+ return(-errno);
+ if(rdev != NULL)
+ *rdev = buf.st_rdev;
+
+ if(S_ISDIR(buf.st_mode)) return(OS_TYPE_DIR);
+ else if(S_ISLNK(buf.st_mode)) return(OS_TYPE_SYMLINK);
+ else if(S_ISCHR(buf.st_mode)) return(OS_TYPE_CHARDEV);
+ else if(S_ISBLK(buf.st_mode)) return(OS_TYPE_BLOCKDEV);
+ else if(S_ISFIFO(buf.st_mode))return(OS_TYPE_FIFO);
+ else if(S_ISSOCK(buf.st_mode))return(OS_TYPE_SOCK);
+ else return(OS_TYPE_FILE);
+}
+
+/*
+ * access_file - check read/write/execute permission on the host path via
+ * access(2).  The r/w/x flags are ORed into the access() mode; with all
+ * three zero this degenerates into an existence test (mode == F_OK == 0).
+ * Returns 0 if permitted, -errno otherwise.
+ */
+int access_file(char *path, int r, int w, int x)
+{
+ int mode = 0;
+
+ if(r) mode = R_OK;
+ if(w) mode |= W_OK;
+ if(x) mode |= X_OK;
+ if(access(path, mode) != 0) return(-errno);
+ else return(0);
+}
+
+/*
+ * open_file - open64() the host path with flags built from the r/w/append
+ * booleans.  At least one of r and w must be set; r == w == 0 is a caller
+ * bug and panics.  Returns the new fd, or -errno on failure.
+ */
+int open_file(char *path, int r, int w, int append)
+{
+ int mode = 0, fd;
+
+ if(r && !w)
+ mode = O_RDONLY;
+ else if(!r && w)
+ mode = O_WRONLY;
+ else if(r && w)
+ mode = O_RDWR;
+ else panic("Impossible mode in open_file");
+
+ if(append)
+ mode |= O_APPEND;
+ fd = open64(path, mode);
+ if(fd < 0) return(-errno);
+ else return(fd);
+}
+
+/*
+ * open_dir - opendir() the host path, returning the DIR * as an opaque
+ * stream for read_dir()/close_dir().  On failure returns NULL with the
+ * errno stored in *err_out.
+ * NOTE(review): *err_out is written unconditionally, so on success it
+ * holds whatever stale value errno had - callers must only consult it
+ * when NULL is returned.
+ */
+void *open_dir(char *path, int *err_out)
+{
+ DIR *dir;
+
+ dir = opendir(path);
+ *err_out = errno;
+ if(dir == NULL) return(NULL);
+ return(dir);
+}
+
+/*
+ * read_dir - return the next entry of a directory stream opened with
+ * open_dir().  *pos is an opaque cursor (seekdir/telldir cookie) that is
+ * updated for the caller; the entry's inode number and name length are
+ * stored through ino_out/len_out.  Returns the entry name (storage owned
+ * by the DIR stream, valid until the next readdir) or NULL at end of
+ * directory.
+ */
+char *read_dir(void *stream, unsigned long long *pos,
+ unsigned long long *ino_out, int *len_out)
+{
+ DIR *dir = stream;
+ struct dirent *ent;
+
+ seekdir(dir, *pos);
+ ent = readdir(dir);
+ if(ent == NULL) return(NULL);
+ *len_out = strlen(ent->d_name);
+ *ino_out = ent->d_ino;
+ *pos = telldir(dir);
+ return(ent->d_name);
+}
+
+/*
+ * read_file - pread64() up to len bytes at *offset, advancing *offset by
+ * the amount actually read.  Returns the byte count (possibly short, 0 at
+ * EOF) or -errno on failure, in which case *offset is untouched.
+ */
+int read_file(int fd, unsigned long long *offset, char *buf, int len)
+{
+ int n;
+
+ n = pread64(fd, buf, len, *offset);
+ if(n < 0) return(-errno);
+ *offset += n;
+ return(n);
+}
+
+/*
+ * write_file - pwrite64() len bytes at *offset, advancing *offset by the
+ * amount actually written.  Returns the (possibly short) byte count or
+ * -errno on failure, in which case *offset is untouched.
+ */
+int write_file(int fd, unsigned long long *offset, const char *buf, int len)
+{
+ int n;
+
+ n = pwrite64(fd, buf, len, *offset);
+ if(n < 0) return(-errno);
+ *offset += n;
+ return(n);
+}
+
+/*
+ * lseek_file - reposition fd with lseek64(); returns 0 on success or
+ * -errno on failure.  The lseek64() result must be held in a 64-bit
+ * local: storing it in an int truncated large offsets, so a successful
+ * seek past 2GB looked negative and returned a stale -errno.
+ */
+int lseek_file(int fd, long long offset, int whence)
+{
+ long long ret;
+
+ ret = lseek64(fd, offset, whence);
+ if(ret < 0) return(-errno);
+ return(0);
+}
+
+/* close_file - close the fd pointed to by the opaque stream handle.
+ * The close() result is deliberately ignored.
+ */
+void close_file(void *stream)
+{
+ close(*((int *) stream));
+}
+
+/* close_dir - closedir() a stream returned by open_dir(); errors are
+ * deliberately ignored.
+ */
+void close_dir(void *stream)
+{
+ closedir(stream);
+}
+
+/*
+ * file_create - create (or open an existing - no O_EXCL) file read-write,
+ * assembling the permission bits from the nine user/group/other booleans.
+ * Returns the new fd or -errno on failure.
+ */
+int file_create(char *name, int ur, int uw, int ux, int gr,
+ int gw, int gx, int or, int ow, int ox)
+{
+ int mode, fd;
+
+ mode = 0;
+ mode |= ur ? S_IRUSR : 0;
+ mode |= uw ? S_IWUSR : 0;
+ mode |= ux ? S_IXUSR : 0;
+ mode |= gr ? S_IRGRP : 0;
+ mode |= gw ? S_IWGRP : 0;
+ mode |= gx ? S_IXGRP : 0;
+ mode |= or ? S_IROTH : 0;
+ mode |= ow ? S_IWOTH : 0;
+ mode |= ox ? S_IXOTH : 0;
+ fd = open64(name, O_CREAT | O_RDWR, mode);
+ if(fd < 0)
+ return(-errno);
+ return(fd);
+}
+
+/*
+ * set_attr - apply the valid fields of a hostfs_iattr to the host file:
+ * mode via chmod(), uid/gid via chown(), size via truncate(), and
+ * atime/mtime via utime().  Since utime() always sets both times, when
+ * only one of them is requested the other is first read back with
+ * stat_file() so it is preserved.  HOSTFS_ATTR_CTIME is intentionally a
+ * no-op - ctime cannot be set on the host.  Finally, if atime/mtime were
+ * requested, the attrs struct is refreshed with the times actually
+ * stored, so the caller sees what the host really recorded.
+ * Returns 0 on success, -errno (or a stat_file() error) on failure.
+ */
+int set_attr(const char *file, struct hostfs_iattr *attrs)
+{
+ struct utimbuf buf;
+ int err, ma;
+
+ if(attrs->ia_valid & HOSTFS_ATTR_MODE){
+ if(chmod(file, attrs->ia_mode) != 0) return(-errno);
+ }
+ if(attrs->ia_valid & HOSTFS_ATTR_UID){
+ if(chown(file, attrs->ia_uid, -1)) return(-errno);
+ }
+ if(attrs->ia_valid & HOSTFS_ATTR_GID){
+ if(chown(file, -1, attrs->ia_gid)) return(-errno);
+ }
+ if(attrs->ia_valid & HOSTFS_ATTR_SIZE){
+ if(truncate(file, attrs->ia_size)) return(-errno);
+ }
+ /* Both times requested - set them in one utime() call */
+ ma = HOSTFS_ATTR_ATIME_SET | HOSTFS_ATTR_MTIME_SET;
+ if((attrs->ia_valid & ma) == ma){
+ buf.actime = attrs->ia_atime.tv_sec;
+ buf.modtime = attrs->ia_mtime.tv_sec;
+ if(utime(file, &buf) != 0) return(-errno);
+ }
+ else {
+ struct timespec ts;
+
+ /* Only one time requested - fetch the current value of
+ * the other so utime() doesn't clobber it
+ */
+ if(attrs->ia_valid & HOSTFS_ATTR_ATIME_SET){
+ err = stat_file(file, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, &ts, NULL, NULL, NULL);
+ if(err != 0)
+ return(err);
+ buf.actime = attrs->ia_atime.tv_sec;
+ buf.modtime = ts.tv_sec;
+ if(utime(file, &buf) != 0)
+ return(-errno);
+ }
+ if(attrs->ia_valid & HOSTFS_ATTR_MTIME_SET){
+ err = stat_file(file, NULL, NULL, NULL, NULL, NULL,
+ NULL, &ts, NULL, NULL, NULL, NULL);
+ if(err != 0)
+ return(err);
+ buf.actime = ts.tv_sec;
+ buf.modtime = attrs->ia_mtime.tv_sec;
+ if(utime(file, &buf) != 0)
+ return(-errno);
+ }
+ }
+ /* Intentional no-op: the host's ctime can't be set */
+ if(attrs->ia_valid & HOSTFS_ATTR_CTIME) ;
+ if(attrs->ia_valid & (HOSTFS_ATTR_ATIME | HOSTFS_ATTR_MTIME)){
+ err = stat_file(file, NULL, NULL, NULL, NULL, NULL, NULL,
+ &attrs->ia_atime, &attrs->ia_mtime, NULL,
+ NULL, NULL);
+ if(err != 0) return(err);
+ }
+ return(0);
+}
+
+/* make_symlink - create symlink `from` pointing at `to` (note the
+ * argument order is reversed relative to symlink(2)).
+ * Returns 0 or -errno.
+ */
+int make_symlink(const char *from, const char *to)
+{
+ int err;
+
+ err = symlink(to, from);
+ if(err) return(-errno);
+ return(0);
+}
+
+/* unlink_file - unlink(2) wrapper; returns 0 or -errno. */
+int unlink_file(const char *file)
+{
+ int err;
+
+ err = unlink(file);
+ if(err) return(-errno);
+ return(0);
+}
+
+/* do_mkdir - mkdir(2) wrapper; returns 0 or -errno. */
+int do_mkdir(const char *file, int mode)
+{
+ int err;
+
+ err = mkdir(file, mode);
+ if(err) return(-errno);
+ return(0);
+}
+
+/* do_rmdir - rmdir(2) wrapper; returns 0 or -errno. */
+int do_rmdir(const char *file)
+{
+ int err;
+
+ err = rmdir(file);
+ if(err) return(-errno);
+ return(0);
+}
+
+/* do_mknod - mknod(2) wrapper; returns 0 or -errno. */
+int do_mknod(const char *file, int mode, int dev)
+{
+ int err;
+
+ err = mknod(file, mode, dev);
+ if(err) return(-errno);
+ return(0);
+}
+
+/* link_file - create hard link `from` to existing path `to`, matching
+ * link(2)'s (oldpath, newpath) order.  Returns 0 or -errno.
+ */
+int link_file(const char *to, const char *from)
+{
+ int err;
+
+ err = link(to, from);
+ if(err) return(-errno);
+ return(0);
+}
+
+/*
+ * do_readlink - readlink(2) wrapper.  Returns the number of bytes placed
+ * in buf, or -errno.  Unlike readlink(2), buf is NUL-terminated when the
+ * target fits (n < size); when the buffer is exactly filled the result is
+ * unterminated and the caller must cope.
+ */
+int do_readlink(char *file, char *buf, int size)
+{
+ int n;
+
+ n = readlink(file, buf, size);
+ if(n < 0)
+ return(-errno);
+ if(n < size)
+ buf[n] = '\0';
+ return(n);
+}
+
+/* rename_file - rename(2) wrapper; returns 0 or -errno. */
+int rename_file(char *from, char *to)
+{
+ int err;
+
+ err = rename(from, to);
+ if(err < 0) return(-errno);
+ return(0);
+}
+
+/*
+ * do_statfs - statfs64() the host filesystem under root and scatter the
+ * result into the caller's output variables.  At most fsid_size bytes of
+ * the fsid are copied (presumably fsid_size is non-negative - the
+ * comparison against sizeof() promotes it to unsigned; TODO confirm
+ * callers never pass a negative size).  spare_out must have room for the
+ * six f_spare words.  Returns 0 or -errno.
+ */
+int do_statfs(char *root, long *bsize_out, long long *blocks_out,
+ long long *bfree_out, long long *bavail_out,
+ long long *files_out, long long *ffree_out,
+ void *fsid_out, int fsid_size, long *namelen_out,
+ long *spare_out)
+{
+ struct statfs64 buf;
+ int err;
+
+ err = statfs64(root, &buf);
+ if(err < 0) return(-errno);
+ *bsize_out = buf.f_bsize;
+ *blocks_out = buf.f_blocks;
+ *bfree_out = buf.f_bfree;
+ *bavail_out = buf.f_bavail;
+ *files_out = buf.f_files;
+ *ffree_out = buf.f_ffree;
+ memcpy(fsid_out, &buf.f_fsid,
+ sizeof(buf.f_fsid) > fsid_size ? fsid_size :
+ sizeof(buf.f_fsid));
+ *namelen_out = buf.f_namelen;
+ spare_out[0] = buf.f_spare[0];
+ spare_out[1] = buf.f_spare[1];
+ spare_out[2] = buf.f_spare[2];
+ spare_out[3] = buf.f_spare[3];
+ spare_out[4] = buf.f_spare[4];
+ spare_out[5] = buf.f_spare[5];
+ return(0);
+}
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-file-style: "linux"
+ * End:
+ */
+++ /dev/null
-/*
- * Copyright (C) 2004 Jeff Dike (jdike@addtoit.com)
- * Licensed under the GPL
- */
-
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/stat.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/kdev_t.h>
-#include "linux/init.h"
-#include "linux/workqueue.h"
-#include <asm/irq.h>
-#include "hostfs.h"
-#include "mem.h"
-#include "os.h"
-#include "mode.h"
-#include "aio.h"
-#include "irq_user.h"
-#include "irq_kern.h"
-#include "filehandle.h"
-#include "metadata.h"
-
-#define HUMFS_VERSION 2
-
-static int humfs_stat_file(const char *path, struct externfs_data *ed,
- dev_t *dev_out, unsigned long long *inode_out,
- int *mode_out, int *nlink_out, int *uid_out,
- int *gid_out, unsigned long long *size_out,
- unsigned long *atime_out, unsigned long *mtime_out,
- unsigned long *ctime_out, int *blksize_out,
- unsigned long long *blocks_out)
-{
- struct humfs *mount = container_of(ed, struct humfs, ext);
- const char *data_path[3] = { mount->data, path, NULL };
- int err, mode, perms, major, minor;
- char type;
-
- err = host_stat_file(data_path, NULL, inode_out, mode_out,
- nlink_out, NULL, NULL, size_out, atime_out,
- mtime_out, ctime_out, blksize_out, blocks_out);
- if(err)
- return(err);
-
- err = (*mount->meta->ownerships)(path, &perms, uid_out, gid_out,
- &type, &major, &minor, mount);
- if(err)
- return(err);
-
- *mode_out = (*mode_out & ~S_IRWXUGO) | perms;
-
- mode = 0;
- switch(type){
- case 'c':
- mode = S_IFCHR;
- *dev_out = MKDEV(major, minor);
- break;
- case 'b':
- mode = S_IFBLK;
- *dev_out = MKDEV(major, minor);
- break;
- case 's':
- mode = S_IFSOCK;
- break;
- default:
- break;
- }
-
- if(mode != 0)
- *mode_out = (*mode_out & ~S_IFMT) | mode;
-
- return(0);
-}
-
-static int meta_type(const char *path, int *dev_out, void *m)
-{
- struct humfs *mount = m;
- int err, type, maj, min;
- char c;
-
- err = (*mount->meta->ownerships)(path, NULL, NULL, NULL, &c, &maj,
- &min, mount);
- if(err)
- return(err);
-
- if(c == 0)
- return(0);
-
- if(dev_out)
- *dev_out = MKDEV(maj, min);
-
- switch(c){
- case 'c':
- type = OS_TYPE_CHARDEV;
- break;
- case 'b':
- type = OS_TYPE_BLOCKDEV;
- break;
- case 'p':
- type = OS_TYPE_FIFO;
- break;
- case 's':
- type = OS_TYPE_SOCK;
- break;
- default:
- type = -EINVAL;
- break;
- }
-
- return(type);
-}
-
-static int humfs_file_type(const char *path, int *dev_out,
- struct externfs_data *ed)
-{
- struct humfs *mount = container_of(ed, struct humfs, ext);
- const char *data_path[3] = { mount->data, path, NULL };
- int type;
-
- type = meta_type(path, dev_out, mount);
- if(type != 0)
- return(type);
-
- return(host_file_type(data_path, dev_out));
-}
-
-static char *humfs_data_name(struct inode *inode)
-{
- struct externfs_data *ed = inode_externfs_info(inode);
- struct humfs *mount = container_of(ed, struct humfs, ext);
-
- return(inode_name_prefix(inode, mount->data));
-}
-
-static struct externfs_inode *humfs_init_file(struct externfs_data *ed)
-{
- struct humfs *mount = container_of(ed, struct humfs, ext);
- struct humfs_file *hf;
-
- hf = (*mount->meta->init_file)();
- if(hf == NULL)
- return(NULL);
-
- hf->data.fd = -1;
- return(&hf->ext);
-}
-
-static int humfs_open_file(struct externfs_inode *ext, char *path, int uid,
- int gid, struct inode *inode,
- struct externfs_data *ed)
-{
- struct humfs *mount = container_of(ed, struct humfs, ext);
- struct humfs_file *hf = container_of(ext, struct humfs_file, ext);
- const char *data_path[3] = { mount->data, path, NULL };
- struct openflags flags;
- char tmp[HOSTFS_BUFSIZE], *file;
- int err = -ENOMEM;
-
- file = get_path(data_path, tmp, sizeof(tmp));
- if(file == NULL)
- goto out;
-
- flags = of_rdwr(OPENFLAGS());
- if(mount->direct)
- flags = of_direct(flags);
-
- if(path == NULL)
- path = "";
- err = (*mount->meta->open_file)(hf, path, inode, mount);
- if(err)
- goto out_free;
-
- err = open_filehandle(file, flags, 0, &hf->data);
- if(err == -EISDIR)
- goto out;
- else if(err == -EPERM){
- flags = of_set_rw(flags, 1, 0);
- err = open_filehandle(file, flags, 0, &hf->data);
- }
-
- if(err)
- goto out_close;
-
- hf->mount = mount;
- is_reclaimable(&hf->data, humfs_data_name, inode);
-
- out_free:
- free_path(file, tmp);
- out:
- return(err);
-
- out_close:
- (*mount->meta->close_file)(hf);
- goto out_free;
-}
-
-static void *humfs_open_dir(char *path, int uid, int gid,
- struct externfs_data *ed)
-{
- struct humfs *mount = container_of(ed, struct humfs, ext);
- const char *data_path[3] = { mount->data, path, NULL };
-
- return(host_open_dir(data_path));
-}
-
-static void humfs_close_dir(void *stream, struct externfs_data *ed)
-{
- os_close_dir(stream);
-}
-
-static char *humfs_read_dir(void *stream, unsigned long long *pos,
- unsigned long long *ino_out, int *len_out,
- struct externfs_data *ed)
-{
- struct humfs *mount = container_of(ed, struct humfs, ext);
-
- return(generic_host_read_dir(stream, pos, ino_out, len_out, mount));
-}
-
-LIST_HEAD(humfs_replies);
-
-struct humfs_aio {
- struct aio_context aio;
- struct list_head list;
- void (*completion)(char *, int, void *);
- char *buf;
- int real_len;
- int err;
- void *data;
-};
-
-static int humfs_reply_fd = -1;
-
-struct humfs_aio last_task_aio, last_intr_aio;
-struct humfs_aio *last_task_aio_ptr, *last_intr_aio_ptr;
-
-void humfs_work_proc(void *unused)
-{
- struct humfs_aio *aio;
- unsigned long flags;
-
- while(!list_empty(&humfs_replies)){
- local_irq_save(flags);
- aio = list_entry(humfs_replies.next, struct humfs_aio, list);
-
- last_task_aio = *aio;
- last_task_aio_ptr = aio;
-
- list_del(&aio->list);
- local_irq_restore(flags);
-
- if(aio->err >= 0)
- aio->err = aio->real_len;
- (*aio->completion)(aio->buf, aio->err, aio->data);
- kfree(aio);
- }
-}
-
-DECLARE_WORK(humfs_work, humfs_work_proc, NULL);
-
-static irqreturn_t humfs_interrupt(int irq, void *dev_id,
- struct pt_regs *unused)
-{
- struct aio_thread_reply reply;
- struct humfs_aio *aio;
- int err, fd = (int) dev_id;
-
- while(1){
- err = os_read_file(fd, &reply, sizeof(reply));
- if(err < 0){
- if(err == -EAGAIN)
- break;
- printk("humfs_interrupt - read returned err %d\n",
- -err);
- return(IRQ_HANDLED);
- }
- aio = reply.data;
- aio->err = reply.err;
- list_add(&aio->list, &humfs_replies);
- last_intr_aio = *aio;
- last_intr_aio_ptr = aio;
- }
-
- if(!list_empty(&humfs_replies))
- schedule_work(&humfs_work);
- reactivate_fd(fd, HUMFS_IRQ);
- return(IRQ_HANDLED);
-}
-
-static int init_humfs_aio(void)
-{
- int fds[2], err;
-
- err = os_pipe(fds, 1, 1);
- if(err){
- printk("init_humfs_aio - pipe failed, err = %d\n", -err);
- goto out;
- }
-
- err = um_request_irq(HUMFS_IRQ, fds[0], IRQ_READ, humfs_interrupt,
- SA_INTERRUPT | SA_SAMPLE_RANDOM, "humfs",
- (void *) fds[0]);
- if(err){
- printk("init_humfs_aio - : um_request_irq failed, err = %d\n",
- err);
- goto out_close;
- }
-
- humfs_reply_fd = fds[1];
- goto out;
-
- out_close:
- os_close_file(fds[0]);
- os_close_file(fds[1]);
- out:
- return(0);
-}
-
-__initcall(init_humfs_aio);
-
-static int humfs_aio(enum aio_type type, int fd, unsigned long long offset,
- char *buf, int len, int real_len,
- void (*completion)(char *, int, void *), void *arg)
-{
- struct humfs_aio *aio;
- int err = -ENOMEM;
-
- aio = kmalloc(sizeof(*aio), GFP_KERNEL);
- if(aio == NULL)
- goto out;
- *aio = ((struct humfs_aio) { .aio = INIT_AIO_CONTEXT,
- .list = LIST_HEAD_INIT(aio->list),
- .completion= completion,
- .buf = buf,
- .err = 0,
- .real_len = real_len,
- .data = arg });
-
- err = submit_aio(type, fd, buf, len, offset, humfs_reply_fd, aio);
- if(err)
- (*completion)(buf, err, arg);
-
- out:
- return(err);
-}
-
-static int humfs_read_file(struct externfs_inode *ext,
- unsigned long long offset, char *buf, int len,
- int ignore_start, int ignore_end,
- void (*completion)(char *, int, void *), void *arg,
- struct externfs_data *ed)
-{
- struct humfs_file *hf = container_of(ext, struct humfs_file, ext);
- int fd = filehandle_fd(&hf->data);
-
- if(fd < 0){
- (*completion)(buf, fd, arg);
- return(fd);
- }
-
- return(humfs_aio(AIO_READ, fd, offset, buf, len, len, completion,
- arg));
-}
-
-static int humfs_write_file(struct externfs_inode *ext,
- unsigned long long offset,
- const char *buf, int start, int len,
- void (*completion)(char *, int, void *), void *arg,
- struct externfs_data *ed)
-{
- struct humfs *mount = container_of(ed, struct humfs, ext);
- struct humfs_file *hf = container_of(ext, struct humfs_file, ext);
- int err, orig_len = len, fd = filehandle_fd(&hf->data);
-
- if(fd < 0){
- (*completion)((char *) buf, fd, arg);
- return(fd);
- }
-
- if(mount->direct)
- len = PAGE_SIZE;
- else {
- offset += start;
- buf += start;
- }
-
- err = humfs_aio(AIO_WRITE, fd, offset, (char *) buf, len, orig_len,
- completion, arg);
-
- if(err < 0)
- return(err);
-
- if(mount->direct)
- err = orig_len;
-
- return(err);
-}
-
-static int humfs_map_file_page(struct externfs_inode *ext,
- unsigned long long offset, char *buf, int w,
- struct externfs_data *ed)
-{
- struct humfs_file *hf = container_of(ext, struct humfs_file, ext);
- unsigned long long size, need;
- int err, fd = filehandle_fd(&hf->data);
-
- if(fd < 0)
- return(fd);
-
- err = os_fd_size(fd, &size);
- if(err)
- return(err);
-
- need = offset + PAGE_SIZE;
- if(size < need){
- err = os_truncate_fd(fd, need);
- if(err)
- return(err);
- }
-
- return(physmem_subst_mapping(buf, fd, offset, w));
-}
-
-static void humfs_close_file(struct externfs_inode *ext,
- unsigned long long size)
-{
- struct humfs_file *hf = container_of(ext, struct humfs_file, ext);
- int fd;
-
- if(hf->data.fd == -1)
- return;
-
- fd = filehandle_fd(&hf->data);
- physmem_forget_descriptor(fd);
- truncate_file(&hf->data, size);
- close_file(&hf->data);
-
- (*hf->mount->meta->close_file)(hf);
-}
-
-/* XXX Assumes that you can't make a normal file */
-
-static int humfs_make_node(const char *path, int mode, int uid, int gid,
- int type, int major, int minor,
- struct externfs_data *ed)
-{
- struct humfs *mount = container_of(ed, struct humfs, ext);
- struct file_handle fh;
- const char *data_path[3] = { mount->data, path, NULL };
- int err;
- char t;
-
- err = host_create_file(data_path, S_IRWXUGO, &fh);
- if(err)
- goto out;
-
- close_file(&fh);
-
- switch(type){
- case S_IFCHR:
- t = 'c';
- break;
- case S_IFBLK:
- t = 'b';
- break;
- case S_IFIFO:
- t = 'p';
- break;
- case S_IFSOCK:
- t = 's';
- break;
- default:
- err = -EINVAL;
- printk("make_node - bad node type : %d\n", type);
- goto out_rm;
- }
-
- err = (*mount->meta->make_node)(path, mode, uid, gid, t, major, minor,
- mount);
- if(err)
- goto out_rm;
-
- out:
- return(err);
-
- out_rm:
- host_unlink_file(data_path);
- goto out;
-}
-
-static int humfs_create_file(struct externfs_inode *ext, char *path, int mode,
- int uid, int gid, struct inode *inode,
- struct externfs_data *ed)
-{
- struct humfs *mount = container_of(ed, struct humfs, ext);
- struct humfs_file *hf = container_of(ext, struct humfs_file, ext);
- const char *data_path[3] = { mount->data, path, NULL };
- int err;
-
- err = (*mount->meta->create_file)(hf, path, mode, uid, gid, inode,
- mount);
- if(err)
- goto out;
-
- err = host_create_file(data_path, S_IRWXUGO, &hf->data);
- if(err)
- goto out_rm;
-
-
- is_reclaimable(&hf->data, humfs_data_name, inode);
-
- return(0);
-
- out_rm:
- (*mount->meta->remove_file)(path, mount);
- (*mount->meta->close_file)(hf);
- out:
- return(err);
-}
-
-static int humfs_set_attr(const char *path, struct externfs_iattr *attrs,
- struct externfs_data *ed)
-{
- struct humfs *mount = container_of(ed, struct humfs, ext);
- const char *data_path[3] = { mount->data, path, NULL };
- int (*chown)(const char *, int, int, int, struct humfs *);
- int err;
-
- chown = mount->meta->change_ownerships;
- if(attrs->ia_valid & EXTERNFS_ATTR_MODE){
- err = (*chown)(path, attrs->ia_mode, -1, -1, mount);
- if(err)
- return(err);
- }
- if(attrs->ia_valid & EXTERNFS_ATTR_UID){
- err = (*chown)(path, -1, attrs->ia_uid, -1, mount);
- if(err)
- return(err);
- }
- if(attrs->ia_valid & EXTERNFS_ATTR_GID){
- err = (*chown)(path, -1, -1, attrs->ia_gid, mount);
- if(err)
- return(err);
- }
-
- attrs->ia_valid &= ~(EXTERNFS_ATTR_MODE | EXTERNFS_ATTR_UID |
- EXTERNFS_ATTR_GID);
-
- return(host_set_attr(data_path, attrs));
-}
-
-static int humfs_make_symlink(const char *from, const char *to, int uid,
- int gid, struct externfs_data *ed)
-{
- struct humfs *mount = container_of(ed, struct humfs, ext);
- struct humfs_file *hf;
- const char *data_path[3] = { mount->data, from, NULL };
- int err = -ENOMEM;
-
- hf = (*mount->meta->init_file)();
- if(hf == NULL)
- goto out;
-
- err = (*mount->meta->create_file)(hf, from, S_IRWXUGO, uid, gid, NULL,
- mount);
- if(err)
- goto out_close;
-
- err = host_make_symlink(data_path, to);
- if(err)
- (*mount->meta->remove_file)(from, mount);
-
- out_close:
- (*mount->meta->close_file)(hf);
- out:
- return(err);
-}
-
-static int humfs_link_file(const char *to, const char *from, int uid, int gid,
- struct externfs_data *ed)
-{
- struct humfs *mount = container_of(ed, struct humfs, ext);
- const char *data_path_from[3] = { mount->data, from, NULL };
- const char *data_path_to[3] = { mount->data, to, NULL };
- int err;
-
- err = (*mount->meta->create_link)(to, from, mount);
- if(err)
- return(err);
-
- err = host_link_file(data_path_to, data_path_from);
- if(err)
- (*mount->meta->remove_file)(from, mount);
-
- return(err);
-}
-
-static int humfs_unlink_file(const char *path, struct externfs_data *ed)
-{
- struct humfs *mount = container_of(ed, struct humfs, ext);
- const char *data_path[3] = { mount->data, path, NULL };
- int err;
-
- err = (*mount->meta->remove_file)(path, mount);
- if (err)
- return err;
-
- (*mount->meta->remove_file)(path, mount);
- return(host_unlink_file(data_path));
-}
-
-static void humfs_invisible(struct externfs_inode *ext)
-{
- struct humfs_file *hf = container_of(ext, struct humfs_file, ext);
- struct humfs *mount = hf->mount;
-
- (*mount->meta->invisible)(hf);
- not_reclaimable(&hf->data);
-}
-
-static int humfs_make_dir(const char *path, int mode, int uid, int gid,
- struct externfs_data *ed)
-{
- struct humfs *mount = container_of(ed, struct humfs, ext);
- const char *data_path[3] = { mount->data, path, NULL };
- int err;
-
- err = (*mount->meta->create_dir)(path, mode, uid, gid, mount);
- if(err)
- return(err);
-
- err = host_make_dir(data_path, S_IRWXUGO);
- if(err)
- (*mount->meta->remove_dir)(path, mount);
-
- return(err);
-}
-
-static int humfs_remove_dir(const char *path, int uid, int gid,
- struct externfs_data *ed)
-{
- struct humfs *mount = container_of(ed, struct humfs, ext);
- const char *data_path[3] = { mount->data, path, NULL };
- int err;
-
- err = host_remove_dir(data_path);
- if (err)
- return err;
-
- (*mount->meta->remove_dir)(path, mount);
-
- return(err);
-}
-
-static int humfs_read_link(char *file, int uid, int gid, char *buf, int size,
- struct externfs_data *ed)
-{
- struct humfs *mount = container_of(ed, struct humfs, ext);
- const char *data_path[3] = { mount->data, file, NULL };
-
- return(host_read_link(data_path, buf, size));
-}
-
-struct humfs *inode_humfs_info(struct inode *inode)
-{
- return(container_of(inode_externfs_info(inode), struct humfs, ext));
-}
-
-static int humfs_rename_file(char *from, char *to, struct externfs_data *ed)
-{
- struct humfs *mount = container_of(ed, struct humfs, ext);
- const char *data_path_from[3] = { mount->data, from, NULL };
- const char *data_path_to[3] = { mount->data, to, NULL };
- int err;
-
- err = (*mount->meta->rename_file)(from, to, mount);
- if(err)
- return(err);
-
- err = host_rename_file(data_path_from, data_path_to);
- if(err)
- (*mount->meta->rename_file)(to, from, mount);
-
- return(err);
-}
-
-static int humfs_stat_fs(long *bsize_out, long long *blocks_out,
- long long *bfree_out, long long *bavail_out,
- long long *files_out, long long *ffree_out,
- void *fsid_out, int fsid_size, long *namelen_out,
- long *spare_out, struct externfs_data *ed)
-{
- struct humfs *mount = container_of(ed, struct humfs, ext);
- const char *data_path[3] = { mount->data, NULL };
- int err;
-
- /* XXX Needs to maintain this info as metadata */
- err = host_stat_fs(data_path, bsize_out, blocks_out, bfree_out,
- bavail_out, files_out, ffree_out, fsid_out,
- fsid_size, namelen_out, spare_out);
- if(err)
- return(err);
-
- *blocks_out = mount->total / *bsize_out;
- *bfree_out = (mount->total - mount->used) / *bsize_out;
- *bavail_out = (mount->total - mount->used) / *bsize_out;
- return(0);
-}
-
-int humfs_truncate_file(struct externfs_inode *ext, __u64 size,
- struct externfs_data *ed)
-{
- struct humfs_file *hf = container_of(ext, struct humfs_file, ext);
-
- return(truncate_file(&hf->data, size));
-}
-
-char *humfs_path(char *dir, char *file)
-{
- int need_slash, len = strlen(dir) + strlen(file);
- char *new;
-
- need_slash = (dir[strlen(dir) - 1] != '/');
- if(need_slash)
- len++;
-
- new = kmalloc(len + 1, GFP_KERNEL);
- if(new == NULL)
- return(NULL);
-
- strcpy(new, dir);
- if(need_slash)
- strcat(new, "/");
- strcat(new, file);
-
- return(new);
-}
-
-DECLARE_MUTEX(meta_sem);
-struct list_head metas = LIST_HEAD_INIT(metas);
-
-static struct humfs_meta_ops *find_meta(const char *name)
-{
- struct list_head *ele;
- struct humfs_meta_ops *m;
-
- down(&meta_sem);
- list_for_each(ele, &metas){
- m = list_entry(ele, struct humfs_meta_ops, list);
- if(!strcmp(m->name, name))
- goto out;
- }
- m = NULL;
- out:
- up(&meta_sem);
- return(m);
-}
-
-void register_meta(struct humfs_meta_ops *ops)
-{
- down(&meta_sem);
- list_add(&ops->list, &metas);
- up(&meta_sem);
-}
-
-void unregister_meta(struct humfs_meta_ops *ops)
-{
- down(&meta_sem);
- list_del(&ops->list);
- up(&meta_sem);
-}
-
-static struct humfs *read_superblock(char *root)
-{
- struct humfs *mount;
- struct humfs_meta_ops *meta = NULL;
- struct file_handle *fh;
- const char *path[] = { root, "superblock", NULL };
- u64 used, total;
- char meta_buf[33], line[HOSTFS_BUFSIZE], *newline;
- unsigned long long pos;
- int version, i, n, err;
-
- fh = kmalloc(sizeof(*fh), GFP_KERNEL);
- if(fh == NULL)
- return(ERR_PTR(-ENOMEM));
-
- err = host_open_file(path, 1, 0, fh);
- if(err){
- printk("Failed to open %s/%s, errno = %d\n", path[0],
- path[1], err);
- return(ERR_PTR(err));
- }
-
- used = 0;
- total = 0;
- pos = 0;
- i = 0;
- while(1){
- n = read_file(fh, pos, &line[i], sizeof(line) - i - 1);
- if((n == 0) && (i == 0))
- break;
- if(n < 0)
- return(ERR_PTR(n));
-
- pos += n;
- if(n > 0)
- line[n + i] = '\0';
-
- newline = strchr(line, '\n');
- if(newline == NULL){
- printk("read_superblock - line too long : '%s'\n",
- line);
- return(ERR_PTR(-EINVAL));
- }
- newline++;
-
- if(sscanf(line, "version %d\n", &version) == 1){
- if(version != HUMFS_VERSION){
- printk("humfs version mismatch - want version "
- "%d, got version %d.\n", HUMFS_VERSION,
- version);
- return(ERR_PTR(-EINVAL));
- }
- }
- else if(sscanf(line, "used %Lu\n", &used) == 1) ;
- else if(sscanf(line, "total %Lu\n", &total) == 1) ;
- else if(sscanf(line, "metadata %32s\n", meta_buf) == 1){
- meta = find_meta(meta_buf);
- if(meta == NULL){
- printk("read_superblock - meta api \"%s\" not "
- "registered\n", meta_buf);
- return(ERR_PTR(-EINVAL));
- }
- }
-
- else {
- printk("read_superblock - bogus line : '%s'\n", line);
- return(ERR_PTR(-EINVAL));
- }
-
- i = newline - line;
- memmove(line, newline, sizeof(line) - i);
- i = strlen(line);
- }
-
- if(used == 0){
- printk("read_superblock - used not specified or set to "
- "zero\n");
- return(ERR_PTR(-EINVAL));
- }
- if(total == 0){
- printk("read_superblock - total not specified or set to "
- "zero\n");
- return(ERR_PTR(-EINVAL));
- }
- if(used > total){
- printk("read_superblock - used is greater than total\n");
- return(ERR_PTR(-EINVAL));
- }
-
- if(meta == NULL){
- meta = find_meta("shadow_fs");
- }
-
- if(meta == NULL){
- printk("read_superblock - valid meta api was not specified\n");
- return(ERR_PTR(-EINVAL));
- }
-
- mount = (*meta->init_mount)(root);
- if(IS_ERR(mount))
- return(mount);
-
- *mount = ((struct humfs) { .total = total,
- .used = used,
- .meta = meta });
- return(mount);
-}
-
-struct externfs_file_ops humfs_no_mmap_file_ops = {
- .stat_file = humfs_stat_file,
- .file_type = humfs_file_type,
- .access_file = NULL,
- .open_file = humfs_open_file,
- .open_dir = humfs_open_dir,
- .read_dir = humfs_read_dir,
- .read_file = humfs_read_file,
- .write_file = humfs_write_file,
- .map_file_page = NULL,
- .close_file = humfs_close_file,
- .close_dir = humfs_close_dir,
- .invisible = humfs_invisible,
- .create_file = humfs_create_file,
- .set_attr = humfs_set_attr,
- .make_symlink = humfs_make_symlink,
- .unlink_file = humfs_unlink_file,
- .make_dir = humfs_make_dir,
- .remove_dir = humfs_remove_dir,
- .make_node = humfs_make_node,
- .link_file = humfs_link_file,
- .read_link = humfs_read_link,
- .rename_file = humfs_rename_file,
- .statfs = humfs_stat_fs,
- .truncate_file = humfs_truncate_file
-};
-
-struct externfs_file_ops humfs_mmap_file_ops = {
- .stat_file = humfs_stat_file,
- .file_type = humfs_file_type,
- .access_file = NULL,
- .open_file = humfs_open_file,
- .open_dir = humfs_open_dir,
- .invisible = humfs_invisible,
- .read_dir = humfs_read_dir,
- .read_file = humfs_read_file,
- .write_file = humfs_write_file,
- .map_file_page = humfs_map_file_page,
- .close_file = humfs_close_file,
- .close_dir = humfs_close_dir,
- .create_file = humfs_create_file,
- .set_attr = humfs_set_attr,
- .make_symlink = humfs_make_symlink,
- .unlink_file = humfs_unlink_file,
- .make_dir = humfs_make_dir,
- .remove_dir = humfs_remove_dir,
- .make_node = humfs_make_node,
- .link_file = humfs_link_file,
- .read_link = humfs_read_link,
- .rename_file = humfs_rename_file,
- .statfs = humfs_stat_fs,
- .truncate_file = humfs_truncate_file
-};
-
-static struct externfs_data *mount_fs(char *mount_arg)
-{
- char *root, *data, *flags;
- struct humfs *mount;
- struct externfs_file_ops *file_ops;
- int err, do_mmap = 0;
-
- if(mount_arg == NULL){
- printk("humfs - no host directory specified\n");
- return(NULL);
- }
-
- flags = strchr((char *) mount_arg, ',');
- if(flags != NULL){
- do {
- *flags++ = '\0';
-
- if(!strcmp(flags, "mmap"))
- do_mmap = 1;
-
- flags = strchr(flags, ',');
- } while(flags != NULL);
- }
-
- err = -ENOMEM;
- root = host_root_filename(mount_arg);
- if(root == NULL)
- goto err;
-
- mount = read_superblock(root);
- if(IS_ERR(mount)){
- err = PTR_ERR(mount);
- goto err_free_root;
- }
-
- data = humfs_path(root, "data/");
- if(data == NULL)
- goto err_free_mount;
-
- if(CHOOSE_MODE(do_mmap, 0)){
- printk("humfs doesn't support mmap in tt mode\n");
- do_mmap = 0;
- }
-
- mount->data = data;
- mount->mmap = do_mmap;
-
- file_ops = do_mmap ? &humfs_mmap_file_ops : &humfs_no_mmap_file_ops;
- init_externfs(&mount->ext, file_ops);
-
- return(&mount->ext);
-
- err_free_mount:
- kfree(mount);
- err_free_root:
- kfree(root);
- err:
- return(NULL);
-}
-
-struct externfs_mount_ops humfs_mount_ops = {
- .init_file = humfs_init_file,
- .mount = mount_fs,
-};
-
-static int __init init_humfs(void)
-{
- return(register_externfs("humfs", &humfs_mount_ops));
-}
-
-static void __exit exit_humfs(void)
-{
- unregister_externfs("humfs");
-}
-
-__initcall(init_humfs);
-__exitcall(exit_humfs);
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
+++ /dev/null
-/*
- * Copyright (C) 2004 Jeff Dike (jdike@addtoit.com)
- * Licensed under the GPL
- */
-
-#include <linux/slab.h>
-#include <linux/init.h>
-#include "hostfs.h"
-#include "metadata.h"
-#include "kern_util.h"
-
-#define METADATA_FILE_PATH(meta) (meta)->root, "file_metadata"
-#define METADATA_DIR_PATH(meta) (meta)->root, "dir_metadata"
-
-struct meta_fs {
- struct humfs humfs;
- char *root;
-};
-
-struct meta_file {
- struct humfs_file humfs;
- struct file_handle fh;
-};
-
-static int meta_file_path(const char *path, struct meta_fs *meta,
- const char *path_out[])
-{
- const char *data_path[] = { meta->root, "data", path, NULL };
- char data_tmp[HOSTFS_BUFSIZE];
- char *data_file = get_path(data_path, data_tmp, sizeof(data_tmp));
-
- if(data_file == NULL)
- return(-ENOMEM);
-
- path_out[0] = meta->root;
- path_out[2] = path;
- if(os_file_type(data_file) == OS_TYPE_DIR){
- path_out[1] = "dir_metadata";
- path_out[3] = "metadata";
- path_out[4] = NULL;
- }
- else {
- path_out[1] = "file_metadata";
- path_out[3] = NULL;
- }
-
- return(0);
-}
-
-static int open_meta_file(const char *path, struct humfs *humfs,
- struct file_handle *fh)
-{
- struct meta_fs *meta = container_of(humfs, struct meta_fs, humfs);
- const char *meta_path[5];
- char meta_tmp[HOSTFS_BUFSIZE];
- char *meta_file;
- int err;
-
- err = meta_file_path(path, meta, meta_path);
- if(err)
- goto out;
-
- meta_file = get_path(meta_path, meta_tmp, sizeof(meta_tmp));
- if(meta_file == NULL)
- goto out;
-
- err = open_filehandle(meta_file, of_rdwr(OPENFLAGS()), 0, fh);
-
- out:
- return(err);
-}
-
-static char *meta_fs_name(struct inode *inode)
-{
- struct humfs *mount = inode_humfs_info(inode);
- struct meta_fs *meta = container_of(mount, struct meta_fs, humfs);
- const char *metadata_path[5];
- char tmp[HOSTFS_BUFSIZE], *name, *file;
-
- if(meta_file_path("", meta, metadata_path))
- return(NULL);
-
- file = get_path(metadata_path, tmp, sizeof(tmp));
- if(file == NULL)
- return(NULL);
-
- name = inode_name_prefix(inode, file);
-
- free_path(file, tmp);
- return(name);
-}
-
-static void metafs_invisible(struct humfs_file *hf)
-{
- struct meta_file *mf = container_of(hf, struct meta_file, humfs);
-
- not_reclaimable(&mf->fh);
-}
-
-static struct humfs_file *metafs_init_file(void)
-{
- struct meta_file *mf;
- int err = -ENOMEM;
-
- mf = kmalloc(sizeof(*mf), GFP_KERNEL);
- if(mf == NULL)
- return(ERR_PTR(err));
-
- return(&mf->humfs);
-}
-
-static int metafs_open_file(struct humfs_file *hf, const char *path,
- struct inode *inode, struct humfs *humfs)
-{
- struct meta_file *mf = container_of(hf, struct meta_file, humfs);
- int err;
-
- err = open_meta_file(path, humfs, &mf->fh);
- if(err)
- return(err);
-
- is_reclaimable(&mf->fh, meta_fs_name, inode);
-
- return(0);
-}
-
-static void metafs_close_file(struct humfs_file *hf)
-{
- struct meta_file *meta = container_of(hf, struct meta_file, humfs);
-
- close_file(&meta->fh);
- kfree(meta);
-}
-
-static int metafs_create_file(struct humfs_file *hf, const char *path,
- int mode, int uid, int gid, struct inode *inode,
- struct humfs *humfs)
-{
- struct meta_fs *meta = container_of(humfs, struct meta_fs, humfs);
- struct meta_file *mf = container_of(hf, struct meta_file, humfs);
- char tmp[HOSTFS_BUFSIZE];
- const char *metadata_path[] = { METADATA_FILE_PATH(meta), path, NULL };
- char *file = get_path(metadata_path, tmp, sizeof(tmp));
- char buf[sizeof("mmmm uuuuuuuuuu gggggggggg")];
- int err = -ENOMEM;
-
- if(file == NULL)
- goto out;
-
- err = open_filehandle(file, of_write(of_create(OPENFLAGS())), 0644,
- &mf->fh);
- if(err)
- goto out_free_path;
-
- if(inode != NULL)
- is_reclaimable(&mf->fh, meta_fs_name, inode);
-
- sprintf(buf, "%d %d %d\n", mode & S_IRWXUGO, uid, gid);
- err = write_file(&mf->fh, 0, buf, strlen(buf));
- if(err < 0)
- goto out_rm;
-
- free_path(file, tmp);
- return(0);
-
- out_rm:
- close_file(&mf->fh);
- os_remove_file(file);
- out_free_path:
- free_path(file, tmp);
- out:
- return(err);
-}
-
-static int metafs_create_link(const char *to, const char *from,
- struct humfs *humfs)
-{
- struct meta_fs *meta = container_of(humfs, struct meta_fs, humfs);
- const char *path_to[] = { METADATA_FILE_PATH(meta), to, NULL };
- const char *path_from[] = { METADATA_FILE_PATH(meta), from, NULL };
-
- return(host_link_file(path_to, path_from));
-}
-
-static int metafs_remove_file(const char *path, struct humfs *humfs)
-{
- struct meta_fs *meta = container_of(humfs, struct meta_fs, humfs);
- char tmp[HOSTFS_BUFSIZE];
- const char *metadata_path[] = { METADATA_FILE_PATH(meta), path, NULL };
- char *file = get_path(metadata_path, tmp, sizeof(tmp));
- int err = -ENOMEM;
-
- if(file == NULL)
- goto out;
-
- err = os_remove_file(file);
-
- out:
- free_path(file, tmp);
- return(err);
-}
-
-static int metafs_create_directory(const char *path, int mode, int uid,
- int gid, struct humfs *humfs)
-{
- struct meta_fs *meta = container_of(humfs, struct meta_fs, humfs);
- char tmp[HOSTFS_BUFSIZE];
- const char *dir_path[] = { METADATA_DIR_PATH(meta), path, NULL, NULL };
- const char *file_path[] = { METADATA_FILE_PATH(meta), path, NULL,
- NULL };
- char *file, dir_meta[sizeof("mmmm uuuuuuuuuu gggggggggg\n")];
- int err, fd;
-
- err = host_make_dir(dir_path, 0755);
- if(err)
- goto out;
-
- err = host_make_dir(file_path, 0755);
- if(err)
- goto out_rm;
-
- /* This to make the index independent of the number of elements in
- * METADATA_DIR_PATH().
- */
- dir_path[sizeof(dir_path) / sizeof(dir_path[0]) - 2] = "metadata";
-
- err = -ENOMEM;
- file = get_path(dir_path, tmp, sizeof(tmp));
- if(file == NULL)
- goto out;
-
- fd = os_open_file(file, of_create(of_rdwr(OPENFLAGS())), 0644);
- if(fd < 0){
- err = fd;
- goto out_free;
- }
-
- sprintf(dir_meta, "%d %d %d\n", mode & S_IRWXUGO, uid, gid);
- err = os_write_file(fd, dir_meta, strlen(dir_meta));
- if(err > 0)
- err = 0;
-
- os_close_file(fd);
-
- out_free:
- free_path(file, tmp);
- out_rm:
- host_remove_dir(dir_path);
- out:
- return(err);
-}
-
-static int metafs_remove_directory(const char *path, struct humfs *humfs)
-{
- struct meta_fs *meta = container_of(humfs, struct meta_fs, humfs);
- char tmp[HOSTFS_BUFSIZE], *file;
- const char *dir_path[] = { METADATA_DIR_PATH(meta), path, "metadata",
- NULL };
- const char *file_path[] = { METADATA_FILE_PATH(meta), path, NULL };
- char *slash;
- int err;
-
- err = -ENOMEM;
- file = get_path(dir_path, tmp, sizeof(tmp));
- if(file == NULL)
- goto out;
-
- err = os_remove_file(file);
- if(err)
- goto out_free;
-
- slash = strrchr(file, '/');
- if(slash == NULL){
- printk("remove_shadow_directory failed to find last slash\n");
- goto out_free;
- }
- *slash = '\0';
- err = os_remove_dir(file);
- free_path(file, tmp);
-
- file = get_path(file_path, tmp, sizeof(tmp));
- if(file == NULL)
- goto out;
-
- err = os_remove_dir(file);
- if(err)
- goto out_free;
-
- out:
- return(err);
- out_free:
- free_path(file, tmp);
- goto out;
-}
-
-static int metafs_make_node(const char *path, int mode, int uid, int gid,
- int type, int maj, int min, struct humfs *humfs)
-{
- struct meta_fs *meta = container_of(humfs, struct meta_fs, humfs);
- struct file_handle fh;
- char tmp[HOSTFS_BUFSIZE];
- const char *metadata_path[] = { METADATA_FILE_PATH(meta), path, NULL };
- int err;
- char buf[sizeof("mmmm uuuuuuuuuu gggggggggg x nnn mmm\n")], *file;
-
- sprintf(buf, "%d %d %d %c %d %d\n", mode & S_IRWXUGO, uid, gid, type,
- maj, min);
-
- err = -ENOMEM;
- file = get_path(metadata_path, tmp, sizeof(tmp));
- if(file == NULL)
- goto out;
-
- err = open_filehandle(file,
- of_create(of_rdwr(OPENFLAGS())), 0644, &fh);
- if(err)
- goto out_free;
-
- err = write_file(&fh, 0, buf, strlen(buf));
- if(err > 0)
- err = 0;
-
- close_file(&fh);
-
- out_free:
- free_path(file, tmp);
- out:
- return(err);
-}
-
-static int metafs_ownerships(const char *path, int *mode_out, int *uid_out,
- int *gid_out, char *type_out, int *maj_out,
- int *min_out, struct humfs *humfs)
-{
- struct file_handle fh;
- char buf[sizeof("mmmm uuuuuuuuuu gggggggggg x nnn mmm\n")];
- int err, n, mode, uid, gid, maj, min;
- char type;
-
- err = open_meta_file(path, humfs, &fh);
- if(err)
- goto out;
-
- err = os_read_file(fh.fd, buf, sizeof(buf) - 1);
- if(err < 0)
- goto out_close;
-
- buf[err] = '\0';
- err = 0;
-
- n = sscanf(buf, "%d %d %d %c %d %d", &mode, &uid, &gid, &type, &maj,
- &min);
- if(n == 3){
- maj = -1;
- min = -1;
- type = 0;
- err = 0;
- }
- else if(n != 6)
- err = -EINVAL;
-
- if(mode_out != NULL)
- *mode_out = mode;
- if(uid_out != NULL)
- *uid_out = uid;
- if(gid_out != NULL)
- *gid_out = uid;
- if(type_out != NULL)
- *type_out = type;
- if(maj_out != NULL)
- *maj_out = maj;
- if(min_out != NULL)
- *min_out = min;
-
- out_close:
- close_file(&fh);
- out:
- return(err);
-}
-
-static int metafs_change_ownerships(const char *path, int mode, int uid,
- int gid, struct humfs *humfs)
-{
- struct file_handle fh;
- char type;
- char buf[sizeof("mmmm uuuuuuuuuu gggggggggg x nnn mmm\n")];
- int err = -ENOMEM, old_mode, old_uid, old_gid, n, maj, min;
-
- err = open_meta_file(path, humfs, &fh);
- if(err)
- goto out;
-
- err = read_file(&fh, 0, buf, sizeof(buf) - 1);
- if(err < 0)
- goto out_close;
-
- buf[err] = '\0';
-
- n = sscanf(buf, "%d %d %d %c %d %d\n", &old_mode, &old_uid, &old_gid,
- &type, &maj, &min);
- if((n != 3) && (n != 6)){
- err = -EINVAL;
- goto out_close;
- }
-
- if(mode == -1)
- mode = old_mode;
- if(uid == -1)
- uid = old_uid;
- if(gid == -1)
- gid = old_gid;
-
- if(n == 3)
- sprintf(buf, "%d %d %d\n", mode & S_IRWXUGO, uid, gid);
- else
- sprintf(buf, "%d %d %d %c %d %d\n", mode & S_IRWXUGO, uid, gid,
- type, maj, min);
-
- err = write_file(&fh, 0, buf, strlen(buf));
- if(err > 0)
- err = 0;
-
- err = truncate_file(&fh, strlen(buf));
-
- out_close:
- close_file(&fh);
- out:
- return(err);
-}
-
-static int metafs_rename_file(const char *from, const char *to,
- struct humfs *humfs)
-{
- struct meta_fs *meta = container_of(humfs, struct meta_fs, humfs);
- const char *metadata_path_from[5], *metadata_path_to[5];
- int err;
-
- err = meta_file_path(from, meta, metadata_path_from);
- if(err)
- return(err);
-
- err = meta_file_path(to, meta, metadata_path_to);
- if(err)
- return(err);
-
- return(host_rename_file(metadata_path_from, metadata_path_to));
-}
-
-static struct humfs *metafs_init_mount(char *root)
-{
- struct meta_fs *meta;
- int err = -ENOMEM;
-
- meta = kmalloc(sizeof(*meta), GFP_KERNEL);
- if(meta == NULL)
- goto out;
-
- meta->root = uml_strdup(root);
- if(meta->root == NULL)
- goto out_free_meta;
-
- return(&meta->humfs);
-
- out_free_meta:
- kfree(meta);
- out:
- return(ERR_PTR(err));
-}
-
-static void metafs_free_mount(struct humfs *humfs)
-{
- struct meta_fs *meta = container_of(humfs, struct meta_fs, humfs);
-
- kfree(meta);
-}
-
-struct humfs_meta_ops hum_fs_meta_fs_ops = {
- .list = LIST_HEAD_INIT(hum_fs_meta_fs_ops.list),
- .name = "shadow_fs",
- .init_file = metafs_init_file,
- .open_file = metafs_open_file,
- .close_file = metafs_close_file,
- .ownerships = metafs_ownerships,
- .make_node = metafs_make_node,
- .create_file = metafs_create_file,
- .create_link = metafs_create_link,
- .remove_file = metafs_remove_file,
- .create_dir = metafs_create_directory,
- .remove_dir = metafs_remove_directory,
- .change_ownerships = metafs_change_ownerships,
- .rename_file = metafs_rename_file,
- .invisible = metafs_invisible,
- .init_mount = metafs_init_mount,
- .free_mount = metafs_free_mount,
-};
-
-static int __init init_meta_fs(void)
-{
- register_meta(&hum_fs_meta_fs_ops);
- return(0);
-}
-
-static void __exit exit_meta_fs(void)
-{
- unregister_meta(&hum_fs_meta_fs_ops);
-}
-
-__initcall(init_meta_fs);
-__exitcall(exit_meta_fs);
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
+++ /dev/null
-/*
- * Copyright (C) 2004 Piotr Neuman (sikkh@wp.pl) and
- * Jeff Dike (jdike@addtoit.com)
- * Licensed under the GPL
- */
-
-#ifndef __UM_FS_METADATA
-#define __UM_FS_METADATA
-
-#include "linux/fs.h"
-#include "linux/list.h"
-#include "os.h"
-#include "hostfs.h"
-
-struct humfs {
- struct externfs_data ext;
- __u64 used;
- __u64 total;
- char *data;
- int mmap;
- int direct;
- struct humfs_meta_ops *meta;
-};
-
-struct humfs_file {
- struct humfs *mount;
- struct file_handle data;
- struct externfs_inode ext;
-};
-
-struct humfs_meta_ops {
- struct list_head list;
- char *name;
- struct humfs_file *(*init_file)(void);
- int (*open_file)(struct humfs_file *hf, const char *path,
- struct inode *inode, struct humfs *humfs);
- int (*create_file)(struct humfs_file *hf, const char *path, int mode,
- int uid, int gid, struct inode *inode,
- struct humfs *humfs);
- void (*close_file)(struct humfs_file *humfs);
- int (*ownerships)(const char *path, int *mode_out, int *uid_out,
- int *gid_out, char *type_out, int *maj_out,
- int *min_out, struct humfs *humfs);
- int (*make_node)(const char *path, int mode, int uid, int gid,
- int type, int major, int minor, struct humfs *humfs);
- int (*create_link)(const char *to, const char *from,
- struct humfs *humfs);
- int (*remove_file)(const char *path, struct humfs *humfs);
- int (*create_dir)(const char *path, int mode, int uid, int gid,
- struct humfs *humfs);
- int (*remove_dir)(const char *path, struct humfs *humfs);
- int (*change_ownerships)(const char *path, int mode, int uid, int gid,
- struct humfs *humfs);
- int (*rename_file)(const char *from, const char *to,
- struct humfs *humfs);
- void (*invisible)(struct humfs_file *hf);
- struct humfs *(*init_mount)(char *root);
- void (*free_mount)(struct humfs *humfs);
-};
-
-extern void register_meta(struct humfs_meta_ops *ops);
-extern void unregister_meta(struct humfs_meta_ops *ops);
-
-extern char *humfs_path(char *dir, char *file);
-extern char *humfs_name(struct inode *inode, char *prefix);
-extern struct humfs *inode_humfs_info(struct inode *inode);
-
-#endif
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
loff_t len, vma_len;
int ret;
- if (vma->vm_pgoff & (HPAGE_SIZE / PAGE_SIZE - 1))
- return -EINVAL;
-
if (vma->vm_start & ~HPAGE_MASK)
return -EINVAL;
unsigned long v_length;
unsigned long v_offset;
- h_vm_pgoff = vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT);
+ h_vm_pgoff = vma->vm_pgoff << (HPAGE_SHIFT - PAGE_SHIFT);
+ v_length = vma->vm_end - vma->vm_start;
v_offset = (h_pgoff - h_vm_pgoff) << HPAGE_SHIFT;
+
/*
* Is this VMA fully outside the truncation point?
*/
if (h_vm_pgoff >= h_pgoff)
v_offset = 0;
- v_length = vma->vm_end - vma->vm_start;
-
zap_hugepage_range(vma,
vma->vm_start + v_offset,
v_length - v_offset);
struct file *hugetlb_zero_setup(size_t size)
{
- int error = -ENOMEM;
+ int error;
struct file *file;
struct inode *inode;
struct dentry *dentry, *root;
struct qstr quick_string;
char buf[16];
- if (!capable(CAP_IPC_LOCK))
+ if (!can_do_mlock())
return ERR_PTR(-EPERM);
if (!is_hugepage_mem_enough(size))
return ERR_PTR(-ENOMEM);
- if (!user_shm_lock(size, current->user))
- return ERR_PTR(-ENOMEM);
-
root = hugetlbfs_vfsmount->mnt_root;
snprintf(buf, 16, "%lu", hugetlbfs_counter());
quick_string.name = buf;
quick_string.hash = 0;
dentry = d_alloc(root, &quick_string);
if (!dentry)
- goto out_shm_unlock;
+ return ERR_PTR(-ENOMEM);
error = -ENFILE;
file = get_empty_filp();
put_filp(file);
out_dentry:
dput(dentry);
-out_shm_unlock:
- user_shm_unlock(size, current->user);
return ERR_PTR(error);
}
#include <linux/pagemap.h>
#include <linux/cdev.h>
#include <linux/bootmem.h>
-#include <linux/vs_base.h>
/*
* This is needed for the following functions:
struct address_space * const mapping = &inode->i_data;
inode->i_sb = sb;
+ if (sb->s_flags & MS_TAGXID)
+ inode->i_xid = current->xid;
+ else
+ inode->i_xid = 0; /* maybe xid -1 would be better? */
// inode->i_dqh = dqhget(sb->s_dqh);
-
- /* important because of inode slab reuse */
- inode->i_xid = 0;
inode->i_blkbits = sb->s_blocksize_bits;
inode->i_flags = 0;
atomic_set(&inode->i_count, 1);
inode->i_bdev = NULL;
inode->i_cdev = NULL;
inode->i_rdev = 0;
+ // inode->i_xid = 0; /* maybe not too wise ... */
inode->i_security = NULL;
inode->dirtied_when = 0;
if (security_inode_alloc(inode)) {
* When ctime_too is specified update the ctime too.
*/
-void inode_update_time(struct inode *inode, struct vfsmount *mnt, int ctime_too)
+void inode_update_time(struct inode *inode, int ctime_too)
{
struct timespec now;
int sync_it = 0;
if (IS_NOCMTIME(inode))
return;
- if (IS_RDONLY(inode) || MNT_IS_RDONLY(mnt))
+ if (IS_RDONLY(inode))
return;
now = current_kernel_time();
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/security.h>
-#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/vserver/inode.h>
#include <linux/vserver/xid.h>
error = vx_proc_ioctl(filp->f_dentry->d_inode, filp, cmd, arg);
break;
#endif
- case FIOC_SETIATTR:
- case FIOC_GETIATTR:
- /*
- * Verify that this filp is a file object,
- * not (say) a socket.
- */
- error = -ENOTTY;
- if (S_ISREG(filp->f_dentry->d_inode->i_mode) ||
- S_ISDIR(filp->f_dentry->d_inode->i_mode))
- error = vc_iattr_ioctl(filp->f_dentry,
- cmd, arg);
- break;
-
default:
error = -ENOTTY;
if (S_ISREG(filp->f_dentry->d_inode->i_mode))
jbd_lock_bh_state(bh);
spin_lock(&journal->j_list_lock);
- jh = journal_grab_journal_head(bh);
- if (!jh)
- goto zap_buffer_no_jh;
+ /*
+ * Now we have the locks, check again to see whether kjournald has
+ * taken the buffer off the transaction.
+ */
+ if (!buffer_jbd(bh))
+ goto zap_buffer;
+ jh = bh2jh(bh);
transaction = jh->b_transaction;
if (transaction == NULL) {
/* First case: not on any transaction. If it
spin_unlock(&journal->j_list_lock);
jbd_unlock_bh_state(bh);
spin_unlock(&journal->j_state_lock);
- journal_put_journal_head(jh);
return ret;
} else {
/* There is no currently-running transaction. So the
spin_unlock(&journal->j_list_lock);
jbd_unlock_bh_state(bh);
spin_unlock(&journal->j_state_lock);
- journal_put_journal_head(jh);
return ret;
} else {
/* The orphan record's transaction has
spin_unlock(&journal->j_list_lock);
jbd_unlock_bh_state(bh);
spin_unlock(&journal->j_state_lock);
- journal_put_journal_head(jh);
return 0;
} else {
/* Good, the buffer belongs to the running transaction.
}
zap_buffer:
- journal_put_journal_head(jh);
-zap_buffer_no_jh:
spin_unlock(&journal->j_list_lock);
jbd_unlock_bh_state(bh);
spin_unlock(&journal->j_state_lock);
int jffs_register_jffs_proc_dir(int mtd, struct jffs_control *c)
{
struct jffs_partition_dir *part_dir;
- struct proc_dir_entry *part_info = NULL;
- struct proc_dir_entry *part_layout = NULL;
- struct proc_dir_entry *part_root = NULL;
+ struct proc_dir_entry *part_info = 0;
+ struct proc_dir_entry *part_layout = 0;
+ struct proc_dir_entry *part_root = 0;
char name[10];
sprintf(name, "%d", mtd);
int jffs_unregister_jffs_proc_dir(struct jffs_control *c)
{
struct jffs_partition_dir *part_dir = jffs_part_dirs;
- struct jffs_partition_dir *prev_part_dir = NULL;
+ struct jffs_partition_dir *prev_part_dir = 0;
while (part_dir) {
if (part_dir->c == c) {
int count, int *eof, void *data)
{
struct jffs_control *c = (struct jffs_control *) data;
- struct jffs_fm *fm = NULL;
- struct jffs_fm *last_fm = NULL;
+ struct jffs_fm *fm = 0;
+ struct jffs_fm *last_fm = 0;
int len = 0;
/* Get the first item in the list */
#
-# Makefile for the Linux Journalling Flash File System v2 (JFFS2)
+# Makefile for the linux Journalling Flash FileSystem (JFFS) routines.
#
-# $Id: Makefile.common,v 1.6 2004/07/16 15:17:57 dwmw2 Exp $
+# $Id: Makefile,v 1.34 2002/03/08 11:27:59 dwmw2 Exp $
#
obj-$(CONFIG_JFFS2_FS) += jffs2.o
-jffs2-y := compr.o dir.o file.o ioctl.o nodelist.o malloc.o
+jffs2-y := compr.o compr_rubin.o compr_rtime.o compr_zlib.o
+jffs2-y += dir.o file.o ioctl.o nodelist.o malloc.o
jffs2-y += read.o nodemgmt.o readinode.o write.o scan.o gc.o
jffs2-y += symlink.o build.o erase.o background.o fs.o writev.o
jffs2-y += super.o
jffs2-$(CONFIG_JFFS2_FS_NAND) += wbuf.o
-jffs2-$(CONFIG_JFFS2_RUBIN) += compr_rubin.o
-jffs2-$(CONFIG_JFFS2_RTIME) += compr_rtime.o
-jffs2-$(CONFIG_JFFS2_ZLIB) += compr_zlib.o
*
* For licensing information, see the file 'LICENCE' in this directory.
*
- * $Id: background.c,v 1.49 2004/07/13 08:56:40 dwmw2 Exp $
+ * $Id: background.c,v 1.44 2003/10/08 13:29:55 dwmw2 Exp $
*
*/
static int jffs2_garbage_collect_thread(void *);
+static int thread_should_wake(struct jffs2_sb_info *c);
void jffs2_garbage_collect_trigger(struct jffs2_sb_info *c)
{
spin_lock(&c->erase_completion_lock);
- if (c->gc_task && jffs2_thread_should_wake(c))
+ if (c->gc_task && thread_should_wake(c))
send_sig(SIGHUP, c->gc_task, 1);
spin_unlock(&c->erase_completion_lock);
}
for (;;) {
allow_signal(SIGHUP);
- if (!jffs2_thread_should_wake(c)) {
+ if (!thread_should_wake(c)) {
set_current_state (TASK_INTERRUPTIBLE);
D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread sleeping...\n"));
- /* Yes, there's a race here; we checked jffs2_thread_should_wake()
- before setting current->state to TASK_INTERRUPTIBLE. But it doesn't
+ /* Yes, there's a race here; we checked thread_should_wake() before
+ setting current->state to TASK_INTERRUPTIBLE. But it doesn't
matter - We don't care if we miss a wakeup, because the GC thread
is only an optimisation anyway. */
schedule();
spin_unlock(&c->erase_completion_lock);
complete_and_exit(&c->gc_thread_exit, 0);
}
+
+static int thread_should_wake(struct jffs2_sb_info *c)
+{
+ int ret = 0;
+ uint32_t dirty;
+
+ if (c->unchecked_size) {
+ D1(printk(KERN_DEBUG "thread_should_wake(): unchecked_size %d, checked_ino #%d\n",
+ c->unchecked_size, c->checked_ino));
+ return 1;
+ }
+
+ /* dirty_size contains blocks on erase_pending_list
+ * those blocks are counted in c->nr_erasing_blocks.
+ * If one block is actually erased, it is not longer counted as dirty_space
+ * but it is counted in c->nr_erasing_blocks, so we add it and subtract it
+ * with c->nr_erasing_blocks * c->sector_size again.
+ * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks
+ * This helps us to force gc and pick eventually a clean block to spread the load.
+ */
+ dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size;
+
+ if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger &&
+ (dirty > c->nospc_dirty_size))
+ ret = 1;
+
+ D1(printk(KERN_DEBUG "thread_should_wake(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x: %s\n",
+ c->nr_free_blocks, c->nr_erasing_blocks, c->dirty_size, ret?"yes":"no"));
+
+ return ret;
+}
*
* For licensing information, see the file 'LICENCE' in this directory.
*
- * $Id: build.c,v 1.55 2003/10/28 17:02:44 dwmw2 Exp $
+ * $Id: build.c,v 1.52 2003/10/09 00:38:38 dwmw2 Exp $
*
*/
#include <linux/slab.h>
#include "nodelist.h"
-static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *, struct jffs2_inode_cache *, struct jffs2_full_dirent **);
+int jffs2_build_inode_pass1(struct jffs2_sb_info *, struct jffs2_inode_cache *);
+int jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *, struct jffs2_inode_cache *);
static inline struct jffs2_inode_cache *
first_inode_chain(int *i, struct jffs2_sb_info *c)
ic; \
ic = next_inode(&i, ic, (c)))
-
-static inline void jffs2_build_inode_pass1(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic)
-{
- struct jffs2_full_dirent *fd;
-
- D1(printk(KERN_DEBUG "jffs2_build_inode building directory inode #%u\n", ic->ino));
-
- /* For each child, increase nlink */
- for(fd = ic->scan_dents; fd; fd = fd->next) {
- struct jffs2_inode_cache *child_ic;
- if (!fd->ino)
- continue;
-
- /* XXX: Can get high latency here with huge directories */
-
- child_ic = jffs2_get_ino_cache(c, fd->ino);
- if (!child_ic) {
- printk(KERN_NOTICE "Eep. Child \"%s\" (ino #%u) of dir ino #%u doesn't exist!\n",
- fd->name, fd->ino, ic->ino);
- continue;
- }
-
- if (child_ic->nlink++ && fd->type == DT_DIR) {
- printk(KERN_NOTICE "Child dir \"%s\" (ino #%u) of dir ino #%u appears to be a hard link\n", fd->name, fd->ino, ic->ino);
- if (fd->ino == 1 && ic->ino == 1) {
- printk(KERN_NOTICE "This is mostly harmless, and probably caused by creating a JFFS2 image\n");
- printk(KERN_NOTICE "using a buggy version of mkfs.jffs2. Use at least v1.17.\n");
- }
- /* What do we do about it? */
- }
- D1(printk(KERN_DEBUG "Increased nlink for child \"%s\" (ino #%u)\n", fd->name, fd->ino));
- /* Can't free them. We might need them in pass 2 */
- }
-}
-
/* Scan plan:
- Scan physical nodes. Build map of inodes/dirents. Allocate inocaches as we go
- Scan directory tree from top down, setting nlink in inocaches
int ret;
int i;
struct jffs2_inode_cache *ic;
- struct jffs2_full_dirent *dead_fds = NULL;
/* First, scan the medium and build all the inode caches with
lists of physical nodes */
/* Now scan the directory tree, increasing nlink according to every dirent found. */
for_each_inode(i, c, ic) {
D1(printk(KERN_DEBUG "Pass 1: ino #%u\n", ic->ino));
-
- D1(BUG_ON(ic->ino > c->highest_ino));
-
- if (ic->scan_dents) {
- jffs2_build_inode_pass1(c, ic);
- cond_resched();
+ ret = jffs2_build_inode_pass1(c, ic);
+ if (ret) {
+ D1(printk(KERN_WARNING "Eep. jffs2_build_inode_pass1 for ino %d returned %d\n", ic->ino, ret));
+ return ret;
}
+ cond_resched();
}
D1(printk(KERN_DEBUG "Pass 1 complete\n"));
+ D1(jffs2_dump_block_lists(c));
/* Next, scan for inodes with nlink == 0 and remove them. If
they were directories, then decrement the nlink of their
children too, and repeat the scan. As that's going to be
a fairly uncommon occurrence, it's not so evil to do it this
way. Recursion bad. */
- D1(printk(KERN_DEBUG "Pass 2 starting\n"));
-
- for_each_inode(i, c, ic) {
- D1(printk(KERN_DEBUG "Pass 2: ino #%u, nlink %d, ic %p, nodes %p\n", ic->ino, ic->nlink, ic, ic->nodes));
- if (ic->nlink)
- continue;
+ do {
+ D1(printk(KERN_DEBUG "Pass 2 (re)starting\n"));
+ ret = 0;
+ for_each_inode(i, c, ic) {
+ D1(printk(KERN_DEBUG "Pass 2: ino #%u, nlink %d, ic %p, nodes %p\n", ic->ino, ic->nlink, ic, ic->nodes));
+ if (ic->nlink)
+ continue;
- jffs2_build_remove_unlinked_inode(c, ic, &dead_fds);
- cond_resched();
- }
-
- D1(printk(KERN_DEBUG "Pass 2a starting\n"));
-
- while (dead_fds) {
- struct jffs2_inode_cache *ic;
- struct jffs2_full_dirent *fd = dead_fds;
-
- dead_fds = fd->next;
+ /* XXX: Can get high latency here. Move the cond_resched() from the end of the loop? */
- ic = jffs2_get_ino_cache(c, fd->ino);
- D1(printk(KERN_DEBUG "Removing dead_fd ino #%u (\"%s\"), ic at %p\n", fd->ino, fd->name, ic));
+ ret = jffs2_build_remove_unlinked_inode(c, ic);
+ if (ret)
+ break;
+ /* -EAGAIN means the inode's nlink was zero, so we deleted it,
+ and furthermore that it had children and their nlink has now
+ gone to zero too. So we have to restart the scan. */
+ }
+ D1(jffs2_dump_block_lists(c));
- if (ic)
- jffs2_build_remove_unlinked_inode(c, ic, &dead_fds);
- jffs2_free_full_dirent(fd);
- }
+ cond_resched();
+
+ } while(ret == -EAGAIN);
D1(printk(KERN_DEBUG "Pass 2 complete\n"));
- /* Finally, we can scan again and free the dirent structs */
+ /* Finally, we can scan again and free the dirent nodes and scan_info structs */
for_each_inode(i, c, ic) {
struct jffs2_full_dirent *fd;
D1(printk(KERN_DEBUG "Pass 3: ino #%u, ic %p, nodes %p\n", ic->ino, ic, ic->nodes));
return ret;
}
-static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic, struct jffs2_full_dirent **dead_fds)
+int jffs2_build_inode_pass1(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic)
+{
+ struct jffs2_full_dirent *fd;
+
+ D1(printk(KERN_DEBUG "jffs2_build_inode building inode #%u\n", ic->ino));
+
+ if (ic->ino > c->highest_ino)
+ c->highest_ino = ic->ino;
+
+ /* For each child, increase nlink */
+ for(fd=ic->scan_dents; fd; fd = fd->next) {
+ struct jffs2_inode_cache *child_ic;
+ if (!fd->ino)
+ continue;
+
+ /* XXX: Can get high latency here with huge directories */
+
+ child_ic = jffs2_get_ino_cache(c, fd->ino);
+ if (!child_ic) {
+ printk(KERN_NOTICE "Eep. Child \"%s\" (ino #%u) of dir ino #%u doesn't exist!\n",
+ fd->name, fd->ino, ic->ino);
+ continue;
+ }
+
+ if (child_ic->nlink++ && fd->type == DT_DIR) {
+ printk(KERN_NOTICE "Child dir \"%s\" (ino #%u) of dir ino #%u appears to be a hard link\n", fd->name, fd->ino, ic->ino);
+ if (fd->ino == 1 && ic->ino == 1) {
+ printk(KERN_NOTICE "This is mostly harmless, and probably caused by creating a JFFS2 image\n");
+ printk(KERN_NOTICE "using a buggy version of mkfs.jffs2. Use at least v1.17.\n");
+ }
+ /* What do we do about it? */
+ }
+ D1(printk(KERN_DEBUG "Increased nlink for child \"%s\" (ino #%u)\n", fd->name, fd->ino));
+ /* Can't free them. We might need them in pass 2 */
+ }
+ return 0;
+}
+
+int jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic)
{
struct jffs2_raw_node_ref *raw;
struct jffs2_full_dirent *fd;
+ int ret = 0;
D1(printk(KERN_DEBUG "JFFS2: Removing ino #%u with nlink == zero.\n", ic->ino));
jffs2_free_full_dirent(fd);
continue;
}
-
- /* Reduce nlink of the child. If it's now zero, stick it on the
- dead_fds list to be cleaned up later. Else just free the fd */
-
+ jffs2_free_full_dirent(fd);
child_ic->nlink--;
-
- if (!child_ic->nlink) {
- D1(printk(KERN_DEBUG "Inode #%u (\"%s\") has now got zero nlink. Adding to dead_fds list.\n",
- fd->ino, fd->name));
- fd->next = *dead_fds;
- *dead_fds = fd;
- } else {
- D1(printk(KERN_DEBUG "Inode #%u (\"%s\") has now got nlink %d. Ignoring.\n",
- fd->ino, fd->name, child_ic->nlink));
- jffs2_free_full_dirent(fd);
- }
}
+ ret = -EAGAIN;
}
/*
We don't delete the inocache from the hash list and free it yet.
The erase code will do that, when all the nodes are completely gone.
*/
+
+ return ret;
}
static void jffs2_calc_trigger_levels(struct jffs2_sb_info *c)
* JFFS2 -- Journalling Flash File System, Version 2.
*
* Copyright (C) 2001-2003 Red Hat, Inc.
- * Created by Arjan van de Ven <arjanv@redhat.com>
*
- * Copyright (C) 2004 Ferenc Havasi <havasi@inf.u-szeged.hu>,
- * University of Szeged, Hungary
+ * Created by Arjan van de Ven <arjanv@redhat.com>
*
* For licensing information, see the file 'LICENCE' in this directory.
*
- * $Id: compr.c,v 1.41 2004/06/24 09:51:38 havasi Exp $
+ * $Id: compr.c,v 1.27 2003/10/04 08:33:06 dwmw2 Exp $
*
*/
-#include "compr.h"
-
-static spinlock_t jffs2_compressor_list_lock = SPIN_LOCK_UNLOCKED;
-
-/* Available compressors are on this list */
-static LIST_HEAD(jffs2_compressor_list);
-
-/* Actual compression mode */
-static int jffs2_compression_mode = JFFS2_COMPR_MODE_PRIORITY;
+#if defined(__KERNEL__) || defined (__ECOS)
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#else
+#define KERN_DEBUG
+#define KERN_NOTICE
+#define KERN_WARNING
+#define printk printf
+#include <stdio.h>
+#include <stdint.h>
+#include <errno.h>
+#endif
-void jffs2_set_compression_mode(int mode)
-{
- jffs2_compression_mode = mode;
-}
+#include <linux/jffs2.h>
-int jffs2_get_compression_mode(void)
-{
- return jffs2_compression_mode;
-}
+int jffs2_zlib_compress(unsigned char *data_in, unsigned char *cpage_out, uint32_t *sourcelen, uint32_t *dstlen);
+void jffs2_zlib_decompress(unsigned char *data_in, unsigned char *cpage_out, uint32_t srclen, uint32_t destlen);
+int jffs2_rtime_compress(unsigned char *data_in, unsigned char *cpage_out, uint32_t *sourcelen, uint32_t *dstlen);
+void jffs2_rtime_decompress(unsigned char *data_in, unsigned char *cpage_out, uint32_t srclen, uint32_t destlen);
+int jffs2_rubinmips_compress(unsigned char *data_in, unsigned char *cpage_out, uint32_t *sourcelen, uint32_t *dstlen);
+void jffs2_rubinmips_decompress(unsigned char *data_in, unsigned char *cpage_out, uint32_t srclen, uint32_t destlen);
+int jffs2_dynrubin_compress(unsigned char *data_in, unsigned char *cpage_out, uint32_t *sourcelen, uint32_t *dstlen);
+void jffs2_dynrubin_decompress(unsigned char *data_in, unsigned char *cpage_out, uint32_t srclen, uint32_t destlen);
-/* Statistics for blocks stored without compression */
-static uint32_t none_stat_compr_blocks=0,none_stat_decompr_blocks=0,none_stat_compr_size=0;
/* jffs2_compress:
* @data: Pointer to uncompressed data
- * @cdata: Pointer to returned pointer to buffer for compressed data
+ * @cdata: Pointer to buffer for compressed data
* @datalen: On entry, holds the amount of data available for compression.
* On exit, expected to hold the amount of data actually compressed.
* @cdatalen: On entry, holds the amount of space available for compressed
* data. On exit, expected to hold the actual size of the compressed
* data.
*
- * Returns: Lower byte to be stored with data indicating compression type used.
+ * Returns: Byte to be stored with data indicating compression type used.
* Zero is used to show that the data could not be compressed - the
* compressed version was actually larger than the original.
- * Upper byte will be used later. (soon)
*
* If the cdata buffer isn't large enough to hold all the uncompressed data,
* jffs2_compress should compress as much as will fit, and should set
* *datalen accordingly to show the amount of data which were compressed.
*/
-uint16_t jffs2_compress(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
- unsigned char *data_in, unsigned char **cpage_out,
- uint32_t *datalen, uint32_t *cdatalen)
+unsigned char jffs2_compress(unsigned char *data_in, unsigned char *cpage_out,
+ uint32_t *datalen, uint32_t *cdatalen)
{
- int ret = JFFS2_COMPR_NONE;
- int compr_ret;
- struct jffs2_compressor *this, *best=NULL;
- unsigned char *output_buf = NULL, *tmp_buf;
- uint32_t orig_slen, orig_dlen;
- uint32_t best_slen=0, best_dlen=0;
+ int ret;
- switch (jffs2_compression_mode) {
- case JFFS2_COMPR_MODE_NONE:
- break;
- case JFFS2_COMPR_MODE_PRIORITY:
- output_buf = kmalloc(*cdatalen,GFP_KERNEL);
- if (!output_buf) {
- printk(KERN_WARNING "JFFS2: No memory for compressor allocation. Compression failed.\n");
- goto out;
- }
- orig_slen = *datalen;
- orig_dlen = *cdatalen;
- spin_lock(&jffs2_compressor_list_lock);
- list_for_each_entry(this, &jffs2_compressor_list, list) {
- /* Skip decompress-only backwards-compatibility and disabled modules */
- if ((!this->compress)||(this->disabled))
- continue;
+ ret = jffs2_zlib_compress(data_in, cpage_out, datalen, cdatalen);
+ if (!ret) {
+ return JFFS2_COMPR_ZLIB;
+ }
+#if 0 /* Disabled 23/9/1. With zlib it hardly ever gets a look in */
+ ret = jffs2_dynrubin_compress(data_in, cpage_out, datalen, cdatalen);
+ if (!ret) {
+ return JFFS2_COMPR_DYNRUBIN;
+ }
+#endif
+#if 0 /* Disabled 26/2/1. Obsoleted by dynrubin */
+ ret = jffs2_rubinmips_compress(data_in, cpage_out, datalen, cdatalen);
+ if (!ret) {
+ return JFFS2_COMPR_RUBINMIPS;
+ }
+#endif
+ /* rtime does manage to recompress already-compressed data */
+ ret = jffs2_rtime_compress(data_in, cpage_out, datalen, cdatalen);
+ if (!ret) {
+ return JFFS2_COMPR_RTIME;
+ }
+#if 0
+ /* We don't need to copy. Let the caller special-case the COMPR_NONE case. */
+ /* If we get here, no compression is going to work */
+ /* But we might want to use the fragmentation part -- Arjan */
+ memcpy(cpage_out,data_in,min(*datalen,*cdatalen));
+ if (*datalen > *cdatalen)
+ *datalen = *cdatalen;
+#endif
+ return JFFS2_COMPR_NONE; /* We failed to compress */
- this->usecount++;
- spin_unlock(&jffs2_compressor_list_lock);
- *datalen = orig_slen;
- *cdatalen = orig_dlen;
- compr_ret = this->compress(data_in, output_buf, datalen, cdatalen, NULL);
- spin_lock(&jffs2_compressor_list_lock);
- this->usecount--;
- if (!compr_ret) {
- ret = this->compr;
- this->stat_compr_blocks++;
- this->stat_compr_orig_size += *datalen;
- this->stat_compr_new_size += *cdatalen;
- break;
- }
- }
- spin_unlock(&jffs2_compressor_list_lock);
- if (ret == JFFS2_COMPR_NONE) kfree(output_buf);
- break;
- case JFFS2_COMPR_MODE_SIZE:
- orig_slen = *datalen;
- orig_dlen = *cdatalen;
- spin_lock(&jffs2_compressor_list_lock);
- list_for_each_entry(this, &jffs2_compressor_list, list) {
- /* Skip decompress-only backwards-compatibility and disabled modules */
- if ((!this->compress)||(this->disabled))
- continue;
- /* Allocating memory for output buffer if necessary */
- if ((this->compr_buf_size<orig_dlen)&&(this->compr_buf)) {
- spin_unlock(&jffs2_compressor_list_lock);
- kfree(this->compr_buf);
- spin_lock(&jffs2_compressor_list_lock);
- this->compr_buf_size=0;
- this->compr_buf=NULL;
- }
- if (!this->compr_buf) {
- spin_unlock(&jffs2_compressor_list_lock);
- tmp_buf = kmalloc(orig_dlen,GFP_KERNEL);
- spin_lock(&jffs2_compressor_list_lock);
- if (!tmp_buf) {
- printk(KERN_WARNING "JFFS2: No memory for compressor allocation. (%d bytes)\n",orig_dlen);
- continue;
- }
- else {
- this->compr_buf = tmp_buf;
- this->compr_buf_size = orig_dlen;
- }
- }
- this->usecount++;
- spin_unlock(&jffs2_compressor_list_lock);
- *datalen = orig_slen;
- *cdatalen = orig_dlen;
- compr_ret = this->compress(data_in, this->compr_buf, datalen, cdatalen, NULL);
- spin_lock(&jffs2_compressor_list_lock);
- this->usecount--;
- if (!compr_ret) {
- if ((!best_dlen)||(best_dlen>*cdatalen)) {
- best_dlen = *cdatalen;
- best_slen = *datalen;
- best = this;
- }
- }
- }
- if (best_dlen) {
- *cdatalen = best_dlen;
- *datalen = best_slen;
- output_buf = best->compr_buf;
- best->compr_buf = NULL;
- best->compr_buf_size = 0;
- best->stat_compr_blocks++;
- best->stat_compr_orig_size += best_slen;
- best->stat_compr_new_size += best_dlen;
- ret = best->compr;
- }
- spin_unlock(&jffs2_compressor_list_lock);
- break;
- default:
- printk(KERN_ERR "JFFS2: unknow compression mode.\n");
- }
- out:
- if (ret == JFFS2_COMPR_NONE) {
- *cpage_out = data_in;
- *datalen = *cdatalen;
- none_stat_compr_blocks++;
- none_stat_compr_size += *datalen;
- }
- else {
- *cpage_out = output_buf;
- }
- return ret;
}
-int jffs2_decompress(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
- uint16_t comprtype, unsigned char *cdata_in,
+
+int jffs2_decompress(unsigned char comprtype, unsigned char *cdata_in,
unsigned char *data_out, uint32_t cdatalen, uint32_t datalen)
{
- struct jffs2_compressor *this;
- int ret;
-
- switch (comprtype & 0xff) {
+ switch (comprtype) {
case JFFS2_COMPR_NONE:
/* This should be special-cased elsewhere, but we might as well deal with it */
memcpy(data_out, cdata_in, datalen);
- none_stat_decompr_blocks++;
break;
+
case JFFS2_COMPR_ZERO:
memset(data_out, 0, datalen);
break;
- default:
- spin_lock(&jffs2_compressor_list_lock);
- list_for_each_entry(this, &jffs2_compressor_list, list) {
- if (comprtype == this->compr) {
- this->usecount++;
- spin_unlock(&jffs2_compressor_list_lock);
- ret = this->decompress(cdata_in, data_out, cdatalen, datalen, NULL);
- spin_lock(&jffs2_compressor_list_lock);
- if (ret) {
- printk(KERN_WARNING "Decompressor \"%s\" returned %d\n", this->name, ret);
- }
- else {
- this->stat_decompr_blocks++;
- }
- this->usecount--;
- spin_unlock(&jffs2_compressor_list_lock);
- return ret;
- }
- }
- printk(KERN_WARNING "JFFS2 compression type 0x%02x not avaiable.\n", comprtype);
- spin_unlock(&jffs2_compressor_list_lock);
- return -EIO;
- }
- return 0;
-}
-
-int jffs2_register_compressor(struct jffs2_compressor *comp)
-{
- struct jffs2_compressor *this;
-
- if (!comp->name) {
- printk(KERN_WARNING "NULL compressor name at registering JFFS2 compressor. Failed.\n");
- return -1;
- }
- comp->compr_buf_size=0;
- comp->compr_buf=NULL;
- comp->usecount=0;
- comp->stat_compr_orig_size=0;
- comp->stat_compr_new_size=0;
- comp->stat_compr_blocks=0;
- comp->stat_decompr_blocks=0;
- D1(printk(KERN_DEBUG "Registering JFFS2 compressor \"%s\"\n", comp->name));
-
- spin_lock(&jffs2_compressor_list_lock);
-
- list_for_each_entry(this, &jffs2_compressor_list, list) {
- if (this->priority < comp->priority) {
- list_add(&comp->list, this->list.prev);
- goto out;
- }
- }
- list_add_tail(&comp->list, &jffs2_compressor_list);
-out:
- D2(list_for_each_entry(this, &jffs2_compressor_list, list) {
- printk(KERN_DEBUG "Compressor \"%s\", prio %d\n", this->name, this->priority);
- })
-
- spin_unlock(&jffs2_compressor_list_lock);
-
- return 0;
-}
-
-int jffs2_unregister_compressor(struct jffs2_compressor *comp)
-{
- D2(struct jffs2_compressor *this;)
-
- D1(printk(KERN_DEBUG "Unregistering JFFS2 compressor \"%s\"\n", comp->name));
-
- spin_lock(&jffs2_compressor_list_lock);
-
- if (comp->usecount) {
- spin_unlock(&jffs2_compressor_list_lock);
- printk(KERN_WARNING "JFFS2: Compressor modul is in use. Unregister failed.\n");
- return -1;
- }
- list_del(&comp->list);
-
- D2(list_for_each_entry(this, &jffs2_compressor_list, list) {
- printk(KERN_DEBUG "Compressor \"%s\", prio %d\n", this->name, this->priority);
- })
- spin_unlock(&jffs2_compressor_list_lock);
- return 0;
-}
-
-#ifdef CONFIG_JFFS2_PROC
-
-#define JFFS2_STAT_BUF_SIZE 16000
-
-char *jffs2_list_compressors(void)
-{
- struct jffs2_compressor *this;
- char *buf, *act_buf;
-
- act_buf = buf = kmalloc(JFFS2_STAT_BUF_SIZE,GFP_KERNEL);
- list_for_each_entry(this, &jffs2_compressor_list, list) {
- act_buf += sprintf(act_buf, "%10s priority:%d ", this->name, this->priority);
- if ((this->disabled)||(!this->compress))
- act_buf += sprintf(act_buf,"disabled");
- else
- act_buf += sprintf(act_buf,"enabled");
- act_buf += sprintf(act_buf,"\n");
- }
- return buf;
-}
-
-char *jffs2_stats(void)
-{
- struct jffs2_compressor *this;
- char *buf, *act_buf;
-
- act_buf = buf = kmalloc(JFFS2_STAT_BUF_SIZE,GFP_KERNEL);
- act_buf += sprintf(act_buf,"JFFS2 compressor statistics:\n");
- act_buf += sprintf(act_buf,"%10s ","none");
- act_buf += sprintf(act_buf,"compr: %d blocks (%d) decompr: %d blocks\n", none_stat_compr_blocks,
- none_stat_compr_size, none_stat_decompr_blocks);
- spin_lock(&jffs2_compressor_list_lock);
- list_for_each_entry(this, &jffs2_compressor_list, list) {
- act_buf += sprintf(act_buf,"%10s ",this->name);
- if ((this->disabled)||(!this->compress))
- act_buf += sprintf(act_buf,"- ");
- else
- act_buf += sprintf(act_buf,"+ ");
- act_buf += sprintf(act_buf,"compr: %d blocks (%d/%d) decompr: %d blocks ", this->stat_compr_blocks,
- this->stat_compr_new_size, this->stat_compr_orig_size,
- this->stat_decompr_blocks);
- act_buf += sprintf(act_buf,"\n");
- }
- spin_unlock(&jffs2_compressor_list_lock);
-
- return buf;
-}
-
-char *jffs2_get_compression_mode_name(void)
-{
- switch (jffs2_compression_mode) {
- case JFFS2_COMPR_MODE_NONE:
- return "none";
- case JFFS2_COMPR_MODE_PRIORITY:
- return "priority";
- case JFFS2_COMPR_MODE_SIZE:
- return "size";
- }
- return "unkown";
-}
-
-int jffs2_set_compression_mode_name(const char *name)
-{
- if (!strcmp("none",name)) {
- jffs2_compression_mode = JFFS2_COMPR_MODE_NONE;
- return 0;
- }
- if (!strcmp("priority",name)) {
- jffs2_compression_mode = JFFS2_COMPR_MODE_PRIORITY;
- return 0;
- }
- if (!strcmp("size",name)) {
- jffs2_compression_mode = JFFS2_COMPR_MODE_SIZE;
- return 0;
- }
- return 1;
-}
-
-static int jffs2_compressor_Xable(const char *name, int disabled)
-{
- struct jffs2_compressor *this;
- spin_lock(&jffs2_compressor_list_lock);
- list_for_each_entry(this, &jffs2_compressor_list, list) {
- if (!strcmp(this->name, name)) {
- this->disabled = disabled;
- spin_unlock(&jffs2_compressor_list_lock);
- return 0;
- }
- }
- spin_unlock(&jffs2_compressor_list_lock);
- printk(KERN_WARNING "JFFS2: compressor %s not found.\n",name);
- return 1;
-}
-
-int jffs2_enable_compressor_name(const char *name)
-{
- return jffs2_compressor_Xable(name, 0);
-}
-
-int jffs2_disable_compressor_name(const char *name)
-{
- return jffs2_compressor_Xable(name, 1);
-}
-
-int jffs2_set_compressor_priority(const char *name, int priority)
-{
- struct jffs2_compressor *this,*comp;
- spin_lock(&jffs2_compressor_list_lock);
- list_for_each_entry(this, &jffs2_compressor_list, list) {
- if (!strcmp(this->name, name)) {
- this->priority = priority;
- comp = this;
- goto reinsert;
- }
- }
- spin_unlock(&jffs2_compressor_list_lock);
- printk(KERN_WARNING "JFFS2: compressor %s not found.\n",name);
- return 1;
-reinsert:
- /* list is sorted in the order of priority, so if
- we change it we have to reinsert it into the
- good place */
- list_del(&comp->list);
- list_for_each_entry(this, &jffs2_compressor_list, list) {
- if (this->priority < comp->priority) {
- list_add(&comp->list, this->list.prev);
- spin_unlock(&jffs2_compressor_list_lock);
- return 0;
- }
- }
- list_add_tail(&comp->list, &jffs2_compressor_list);
- spin_unlock(&jffs2_compressor_list_lock);
- return 0;
-}
-
-#endif
+ case JFFS2_COMPR_ZLIB:
+ jffs2_zlib_decompress(cdata_in, data_out, cdatalen, datalen);
+ break;
-void jffs2_free_comprbuf(unsigned char *comprbuf, unsigned char *orig)
-{
- if (orig != comprbuf)
- kfree(comprbuf);
-}
+ case JFFS2_COMPR_RTIME:
+ jffs2_rtime_decompress(cdata_in, data_out, cdatalen, datalen);
+ break;
-int jffs2_compressors_init(void)
-{
-/* Registering compressors */
-#ifdef CONFIG_JFFS2_ZLIB
- jffs2_zlib_init();
-#endif
-#ifdef CONFIG_JFFS2_RTIME
- jffs2_rtime_init();
-#endif
-#ifdef CONFIG_JFFS2_RUBIN
- jffs2_rubinmips_init();
- jffs2_dynrubin_init();
-#endif
-#ifdef CONFIG_JFFS2_LZARI
- jffs2_lzari_init();
-#endif
-#ifdef CONFIG_JFFS2_LZO
- jffs2_lzo_init();
-#endif
-/* Setting default compression mode */
-#ifdef CONFIG_JFFS2_CMODE_NONE
- jffs2_compression_mode = JFFS2_COMPR_MODE_NONE;
- D1(printk(KERN_INFO "JFFS2: default compression mode: none\n");)
-#else
-#ifdef CONFIG_JFFS2_CMODE_SIZE
- jffs2_compression_mode = JFFS2_COMPR_MODE_SIZE;
- D1(printk(KERN_INFO "JFFS2: default compression mode: size\n");)
+ case JFFS2_COMPR_RUBINMIPS:
+#if 0 /* Disabled 23/9/1 */
+ jffs2_rubinmips_decompress(cdata_in, data_out, cdatalen, datalen);
#else
- D1(printk(KERN_INFO "JFFS2: default compression mode: priority\n");)
+ printk(KERN_WARNING "JFFS2: Rubinmips compression encountered but support not compiled in!\n");
#endif
+ break;
+ case JFFS2_COMPR_DYNRUBIN:
+#if 1 /* Phase this one out */
+ jffs2_dynrubin_decompress(cdata_in, data_out, cdatalen, datalen);
+#else
+ printk(KERN_WARNING "JFFS2: Dynrubin compression encountered but support not compiled in!\n");
#endif
- return 0;
-}
+ break;
-int jffs2_compressors_exit(void)
-{
-/* Unregistering compressors */
-#ifdef CONFIG_JFFS2_LZO
- jffs2_lzo_exit();
-#endif
-#ifdef CONFIG_JFFS2_LZARI
- jffs2_lzari_exit();
-#endif
-#ifdef CONFIG_JFFS2_RUBIN
- jffs2_dynrubin_exit();
- jffs2_rubinmips_exit();
-#endif
-#ifdef CONFIG_JFFS2_RTIME
- jffs2_rtime_exit();
-#endif
-#ifdef CONFIG_JFFS2_ZLIB
- jffs2_zlib_exit();
-#endif
- return 0;
+ default:
+ printk(KERN_NOTICE "Unknown JFFS2 compression type 0x%02x\n", comprtype);
+ return -EIO;
+ }
+ return 0;
}
+++ /dev/null
-/*
- * JFFS2 -- Journalling Flash File System, Version 2.
- *
- * Copyright (C) 2004 Ferenc Havasi <havasi@inf.u-szeged.hu>,
- * University of Szeged, Hungary
- *
- * For licensing information, see the file 'LICENCE' in the
- * jffs2 directory.
- *
- * $Id: compr.h,v 1.6 2004/07/16 15:17:57 dwmw2 Exp $
- *
- */
-
-#ifndef __JFFS2_COMPR_H__
-#define __JFFS2_COMPR_H__
-
-#include <linux/kernel.h>
-#include <linux/vmalloc.h>
-#include <linux/list.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/jffs2.h>
-#include <linux/jffs2_fs_i.h>
-#include <linux/jffs2_fs_sb.h>
-#include "nodelist.h"
-
-#define JFFS2_RUBINMIPS_PRIORITY 10
-#define JFFS2_DYNRUBIN_PRIORITY 20
-#define JFFS2_LZARI_PRIORITY 30
-#define JFFS2_LZO_PRIORITY 40
-#define JFFS2_RTIME_PRIORITY 50
-#define JFFS2_ZLIB_PRIORITY 60
-
-#define JFFS2_RUBINMIPS_DISABLED /* RUBINs will be used only */
-#define JFFS2_DYNRUBIN_DISABLED /* for decompression */
-
-#define JFFS2_COMPR_MODE_NONE 0
-#define JFFS2_COMPR_MODE_PRIORITY 1
-#define JFFS2_COMPR_MODE_SIZE 2
-
-void jffs2_set_compression_mode(int mode);
-int jffs2_get_compression_mode(void);
-
-struct jffs2_compressor {
- struct list_head list;
- int priority; /* used by prirority comr. mode */
- char *name;
- char compr; /* JFFS2_COMPR_XXX */
- int (*compress)(unsigned char *data_in, unsigned char *cpage_out,
- uint32_t *srclen, uint32_t *destlen, void *model);
- int (*decompress)(unsigned char *cdata_in, unsigned char *data_out,
- uint32_t cdatalen, uint32_t datalen, void *model);
- int usecount;
- int disabled; /* if seted the compressor won't compress */
- unsigned char *compr_buf; /* used by size compr. mode */
- uint32_t compr_buf_size; /* used by size compr. mode */
- uint32_t stat_compr_orig_size;
- uint32_t stat_compr_new_size;
- uint32_t stat_compr_blocks;
- uint32_t stat_decompr_blocks;
-};
-
-int jffs2_register_compressor(struct jffs2_compressor *comp);
-int jffs2_unregister_compressor(struct jffs2_compressor *comp);
-
-int jffs2_compressors_init(void);
-int jffs2_compressors_exit(void);
-
-uint16_t jffs2_compress(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
- unsigned char *data_in, unsigned char **cpage_out,
- uint32_t *datalen, uint32_t *cdatalen);
-
-int jffs2_decompress(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
- uint16_t comprtype, unsigned char *cdata_in,
- unsigned char *data_out, uint32_t cdatalen, uint32_t datalen);
-
-void jffs2_free_comprbuf(unsigned char *comprbuf, unsigned char *orig);
-
-#ifdef CONFIG_JFFS2_PROC
-int jffs2_enable_compressor_name(const char *name);
-int jffs2_disable_compressor_name(const char *name);
-int jffs2_set_compression_mode_name(const char *mode_name);
-char *jffs2_get_compression_mode_name(void);
-int jffs2_set_compressor_priority(const char *mode_name, int priority);
-char *jffs2_list_compressors(void);
-char *jffs2_stats(void);
-#endif
-
-/* Compressor modules */
-/* These functions will be called by jffs2_compressors_init/exit */
-
-#ifdef CONFIG_JFFS2_RUBIN
-int jffs2_rubinmips_init(void);
-void jffs2_rubinmips_exit(void);
-int jffs2_dynrubin_init(void);
-void jffs2_dynrubin_exit(void);
-#endif
-#ifdef CONFIG_JFFS2_RTIME
-int jffs2_rtime_init(void);
-void jffs2_rtime_exit(void);
-#endif
-#ifdef CONFIG_JFFS2_ZLIB
-int jffs2_zlib_init(void);
-void jffs2_zlib_exit(void);
-#endif
-#ifdef CONFIG_JFFS2_LZARI
-int jffs2_lzari_init(void);
-void jffs2_lzari_exit(void);
-#endif
-#ifdef CONFIG_JFFS2_LZO
-int jffs2_lzo_init(void);
-void jffs2_lzo_exit(void);
-#endif
-
-#endif /* __JFFS2_COMPR_H__ */
*
* For licensing information, see the file 'LICENCE' in this directory.
*
- * $Id: compr_rtime.c,v 1.14 2004/06/23 16:34:40 havasi Exp $
+ * $Id: compr_rtime.c,v 1.11 2003/10/04 08:33:06 dwmw2 Exp $
*
*
* Very simple lz77-ish encoder.
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/string.h>
-#include <linux/jffs2.h>
-#include "compr.h"
/* _compress returns the compressed size, -1 if bigger */
int jffs2_rtime_compress(unsigned char *data_in, unsigned char *cpage_out,
- uint32_t *sourcelen, uint32_t *dstlen, void *model)
+ uint32_t *sourcelen, uint32_t *dstlen)
{
short positions[256];
int outpos = 0;
}
-int jffs2_rtime_decompress(unsigned char *data_in, unsigned char *cpage_out,
- uint32_t srclen, uint32_t destlen, void *model)
+void jffs2_rtime_decompress(unsigned char *data_in, unsigned char *cpage_out,
+ uint32_t srclen, uint32_t destlen)
{
short positions[256];
int outpos = 0;
outpos+=repeat;
}
}
- }
- return 0;
+ }
}
-static struct jffs2_compressor jffs2_rtime_comp = {
- .priority = JFFS2_RTIME_PRIORITY,
- .name = "rtime",
- .compr = JFFS2_COMPR_RTIME,
- .compress = &jffs2_rtime_compress,
- .decompress = &jffs2_rtime_decompress,
-#ifdef JFFS2_RTIME_DISABLED
- .disabled = 1,
-#else
- .disabled = 0,
-#endif
-};
-
-int jffs2_rtime_init(void)
-{
- return jffs2_register_compressor(&jffs2_rtime_comp);
-}
-void jffs2_rtime_exit(void)
-{
- jffs2_unregister_compressor(&jffs2_rtime_comp);
-}
*
* For licensing information, see the file 'LICENCE' in this directory.
*
- * $Id: compr_rubin.c,v 1.20 2004/06/23 16:34:40 havasi Exp $
+ * $Id: compr_rubin.c,v 1.17 2002/05/20 14:56:37 dwmw2 Exp $
*
*/
#include <linux/string.h>
#include <linux/types.h>
-#include <linux/jffs2.h>
#include "compr_rubin.h"
#include "histo_mips.h"
-#include "compr.h"
+
+
static void init_rubin(struct rubin_state *rs, int div, int *bits)
{
#if 0
/* _compress returns the compressed size, -1 if bigger */
int jffs2_rubinmips_compress(unsigned char *data_in, unsigned char *cpage_out,
- uint32_t *sourcelen, uint32_t *dstlen, void *model)
+ uint32_t *sourcelen, uint32_t *dstlen)
{
return rubin_do_compress(BIT_DIVIDER_MIPS, bits_mips, data_in, cpage_out, sourcelen, dstlen);
}
#endif
int jffs2_dynrubin_compress(unsigned char *data_in, unsigned char *cpage_out,
- uint32_t *sourcelen, uint32_t *dstlen, void *model)
+ uint32_t *sourcelen, uint32_t *dstlen)
{
int bits[8];
unsigned char histo[256];
}
-int jffs2_rubinmips_decompress(unsigned char *data_in, unsigned char *cpage_out,
- uint32_t sourcelen, uint32_t dstlen, void *model)
+void jffs2_rubinmips_decompress(unsigned char *data_in, unsigned char *cpage_out,
+ uint32_t sourcelen, uint32_t dstlen)
{
rubin_do_decompress(BIT_DIVIDER_MIPS, bits_mips, data_in, cpage_out, sourcelen, dstlen);
- return 0;
}
-int jffs2_dynrubin_decompress(unsigned char *data_in, unsigned char *cpage_out,
- uint32_t sourcelen, uint32_t dstlen, void *model)
+void jffs2_dynrubin_decompress(unsigned char *data_in, unsigned char *cpage_out,
+ uint32_t sourcelen, uint32_t dstlen)
{
int bits[8];
int c;
bits[c] = data_in[c];
rubin_do_decompress(256, bits, data_in+8, cpage_out, sourcelen-8, dstlen);
- return 0;
-}
-
-static struct jffs2_compressor jffs2_rubinmips_comp = {
- .priority = JFFS2_RUBINMIPS_PRIORITY,
- .name = "rubinmips",
- .compr = JFFS2_COMPR_DYNRUBIN,
- .compress = NULL, /*&jffs2_rubinmips_compress,*/
- .decompress = &jffs2_rubinmips_decompress,
-#ifdef JFFS2_RUBINMIPS_DISABLED
- .disabled = 1,
-#else
- .disabled = 0,
-#endif
-};
-
-int jffs2_rubinmips_init(void)
-{
- return jffs2_register_compressor(&jffs2_rubinmips_comp);
-}
-
-void jffs2_rubinmips_exit(void)
-{
- jffs2_unregister_compressor(&jffs2_rubinmips_comp);
-}
-
-static struct jffs2_compressor jffs2_dynrubin_comp = {
- .priority = JFFS2_DYNRUBIN_PRIORITY,
- .name = "dynrubin",
- .compr = JFFS2_COMPR_RUBINMIPS,
- .compress = jffs2_dynrubin_compress,
- .decompress = &jffs2_dynrubin_decompress,
-#ifdef JFFS2_DYNRUBIN_DISABLED
- .disabled = 1,
-#else
- .disabled = 0,
-#endif
-};
-
-int jffs2_dynrubin_init(void)
-{
- return jffs2_register_compressor(&jffs2_dynrubin_comp);
-}
-
-void jffs2_dynrubin_exit(void)
-{
- jffs2_unregister_compressor(&jffs2_dynrubin_comp);
}
*
* For licensing information, see the file 'LICENCE' in this directory.
*
- * $Id: compr_zlib.c,v 1.28 2004/06/23 16:34:40 havasi Exp $
+ * $Id: compr_zlib.c,v 1.24 2003/10/04 08:33:06 dwmw2 Exp $
*
*/
#include <linux/config.h>
#include <linux/kernel.h>
+#include <linux/vmalloc.h>
+#include <linux/init.h>
#include <linux/slab.h>
#include <linux/zlib.h>
#include <linux/zutil.h>
#include <asm/semaphore.h>
#include "nodelist.h"
-#include "compr.h"
/* Plan: call deflate() with avail_in == *sourcelen,
avail_out = *dstlen - 12 and flush == Z_FINISH.
static z_stream inf_strm, def_strm;
#ifdef __KERNEL__ /* Linux-only */
-#include <linux/vmalloc.h>
-#include <linux/init.h>
-
-static int __init alloc_workspaces(void)
+int __init jffs2_zlib_init(void)
{
def_strm.workspace = vmalloc(zlib_deflate_workspacesize());
if (!def_strm.workspace) {
return 0;
}
-static void free_workspaces(void)
+void jffs2_zlib_exit(void)
{
vfree(def_strm.workspace);
vfree(inf_strm.workspace);
}
-#else
-#define alloc_workspaces() (0)
-#define free_workspaces() do { } while(0)
#endif /* __KERNEL__ */
int jffs2_zlib_compress(unsigned char *data_in, unsigned char *cpage_out,
- uint32_t *sourcelen, uint32_t *dstlen, void *model)
+ uint32_t *sourcelen, uint32_t *dstlen)
{
int ret;
return ret;
}
-int jffs2_zlib_decompress(unsigned char *data_in, unsigned char *cpage_out,
- uint32_t srclen, uint32_t destlen, void *model)
+void jffs2_zlib_decompress(unsigned char *data_in, unsigned char *cpage_out,
+ uint32_t srclen, uint32_t destlen)
{
int ret;
int wbits = MAX_WBITS;
if (Z_OK != zlib_inflateInit2(&inf_strm, wbits)) {
printk(KERN_WARNING "inflateInit failed\n");
up(&inflate_sem);
- return 1;
+ return;
}
while((ret = zlib_inflate(&inf_strm, Z_FINISH)) == Z_OK)
}
zlib_inflateEnd(&inf_strm);
up(&inflate_sem);
- return 0;
-}
-
-static struct jffs2_compressor jffs2_zlib_comp = {
- .priority = JFFS2_ZLIB_PRIORITY,
- .name = "zlib",
- .compr = JFFS2_COMPR_ZLIB,
- .compress = &jffs2_zlib_compress,
- .decompress = &jffs2_zlib_decompress,
-#ifdef JFFS2_ZLIB_DISABLED
- .disabled = 1,
-#else
- .disabled = 0,
-#endif
-};
-
-int __init jffs2_zlib_init(void)
-{
- int ret;
-
- ret = alloc_workspaces();
- if (ret)
- return ret;
-
- ret = jffs2_register_compressor(&jffs2_zlib_comp);
- if (ret)
- free_workspaces();
-
- return ret;
-}
-
-void jffs2_zlib_exit(void)
-{
- jffs2_unregister_compressor(&jffs2_zlib_comp);
- free_workspaces();
}
*
* For licensing information, see the file 'LICENCE' in this directory.
*
- * $Id: erase.c,v 1.60 2004/06/30 17:26:15 dbrown Exp $
+ * $Id: erase.c,v 1.53 2003/10/08 17:22:54 dwmw2 Exp $
*
*/
#ifndef __ECOS
static void jffs2_erase_callback(struct erase_info *);
#endif
-static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset);
+static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
static void jffs2_free_all_node_refs(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
void jffs2_erase_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
int ret;
- uint32_t bad_offset;
#ifdef __ECOS
ret = jffs2_flash_erase(c, jeb);
if (!ret) {
instr->len = c->sector_size;
instr->callback = jffs2_erase_callback;
instr->priv = (unsigned long)(&instr[1]);
- instr->fail_addr = 0xffffffff;
((struct erase_priv_struct *)instr->priv)->jeb = jeb;
((struct erase_priv_struct *)instr->priv)->c = c;
+ /* NAND , read out the fail counter, if possible */
+ if (!jffs2_can_mark_obsolete(c))
+ jffs2_nand_read_failcnt(c,jeb);
+
ret = c->mtd->erase(c->mtd, instr);
if (!ret)
return;
- bad_offset = instr->fail_addr;
kfree(instr);
#endif /* __ECOS */
else
printk(KERN_WARNING "Erase at 0x%08x failed immediately: errno %d\n", jeb->offset, ret);
- jffs2_erase_failed(c, jeb, bad_offset);
+ jffs2_erase_failed(c, jeb);
}
-void jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count)
+void jffs2_erase_pending_blocks(struct jffs2_sb_info *c)
{
struct jffs2_eraseblock *jeb;
spin_unlock(&c->erase_completion_lock);
jffs2_mark_erased_block(c, jeb);
- if (!--count) {
- D1(printk(KERN_DEBUG "Count reached. jffs2_erase_pending_blocks leaving\n"));
- goto done;
- }
-
} else if (!list_empty(&c->erase_pending_list)) {
jeb = list_entry(c->erase_pending_list.next, struct jffs2_eraseblock, list);
D1(printk(KERN_DEBUG "Starting erase of pending block 0x%08x\n", jeb->offset));
}
spin_unlock(&c->erase_completion_lock);
- done:
D1(printk(KERN_DEBUG "jffs2_erase_pending_blocks completed\n"));
up(&c->erase_free_sem);
jffs2_erase_pending_trigger(c);
}
-static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset)
+static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
- /* For NAND, if the failure did not occur at the device level for a
- specific physical page, don't bother updating the bad block table. */
- if (jffs2_cleanmarker_oob(c) && (bad_offset != 0xffffffff)) {
- /* We had a device-level failure to erase. Let's see if we've
- failed too many times. */
- if (!jffs2_write_nand_badblock(c, jeb, bad_offset)) {
- /* We'd like to give this block another try. */
- spin_lock(&c->erase_completion_lock);
- list_del(&jeb->list);
- list_add(&jeb->list, &c->erase_pending_list);
- c->erasing_size -= c->sector_size;
- c->dirty_size += c->sector_size;
- jeb->dirty_size = c->sector_size;
- spin_unlock(&c->erase_completion_lock);
- return;
- }
- }
-
- spin_lock(&c->erase_completion_lock);
- c->erasing_size -= c->sector_size;
- c->bad_size += c->sector_size;
- list_del(&jeb->list);
- list_add(&jeb->list, &c->bad_list);
- c->nr_erasing_blocks--;
- spin_unlock(&c->erase_completion_lock);
- wake_up(&c->erase_wait);
+ spin_lock(&c->erase_completion_lock);
+ c->erasing_size -= c->sector_size;
+ c->bad_size += c->sector_size;
+ list_del(&jeb->list);
+ list_add(&jeb->list, &c->bad_list);
+ c->nr_erasing_blocks--;
+ spin_unlock(&c->erase_completion_lock);
+ wake_up(&c->erase_wait);
}
#ifndef __ECOS
if(instr->state != MTD_ERASE_DONE) {
printk(KERN_WARNING "Erase at 0x%08x finished, but state != MTD_ERASE_DONE. State is 0x%x instead.\n", instr->addr, instr->state);
- jffs2_erase_failed(priv->c, priv->jeb, instr->fail_addr);
+ jffs2_erase_failed(priv->c, priv->jeb);
} else {
jffs2_erase_succeeded(priv->c, priv->jeb);
}
jeb->last_node = NULL;
}
+void jffs2_erase_pending_trigger(struct jffs2_sb_info *c)
+{
+ OFNI_BS_2SFFJ(c)->s_dirt = 1;
+}
+
static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
struct jffs2_raw_node_ref *marker_ref = NULL;
unsigned char *ebuf;
size_t retlen;
int ret;
- uint32_t bad_offset;
if (!jffs2_cleanmarker_oob(c)) {
marker_ref = jffs2_alloc_raw_node_ref();
uint32_t readlen = min((uint32_t)PAGE_SIZE, jeb->offset + c->sector_size - ofs);
int i;
- bad_offset = ofs;
-
ret = jffs2_flash_read(c, ofs, readlen, &retlen, ebuf);
if (ret) {
printk(KERN_WARNING "Read of newly-erased block at 0x%08x failed: %d. Putting on bad_list\n", ofs, ret);
/* It's OK. We know it's properly aligned */
unsigned long datum = *(unsigned long *)(&ebuf[i]);
if (datum + 1) {
- bad_offset += i;
- printk(KERN_WARNING "Newly-erased block contained word 0x%lx at offset 0x%08x\n", datum, bad_offset);
+ printk(KERN_WARNING "Newly-erased block contained word 0x%lx at offset 0x%08x\n", datum, ofs + i);
bad:
if (!jffs2_cleanmarker_oob(c))
jffs2_free_raw_node_ref(marker_ref);
+ else
+ jffs2_write_nand_badblock( c ,jeb );
kfree(ebuf);
bad2:
spin_lock(&c->erase_completion_lock);
- /* Stick it on a list (any list) so
- erase_failed can take it right off
- again. Silly, but shouldn't happen
- often. */
- list_add(&jeb->list, &c->erasing_list);
+ c->erasing_size -= c->sector_size;
+ c->bad_size += c->sector_size;
+
+ list_add_tail(&jeb->list, &c->bad_list);
+ c->nr_erasing_blocks--;
spin_unlock(&c->erase_completion_lock);
- jffs2_erase_failed(c, jeb, bad_offset);
+ wake_up(&c->erase_wait);
return;
}
}
}
kfree(ebuf);
}
-
- bad_offset = jeb->offset;
-
+
/* Write the erase complete marker */
D1(printk(KERN_DEBUG "Writing erased marker to block at 0x%08x\n", jeb->offset));
if (jffs2_cleanmarker_oob(c)) {
.totlen = cpu_to_je32(c->cleanmarker_size)
};
- marker.hdr_crc = cpu_to_je32(crc32(0, &marker, sizeof(struct jffs2_unknown_node)-4));
+ marker.hdr_crc = cpu_to_je32(crc32(0, &marker, je32_to_cpu(marker.totlen) - 4));
- /* We only write the header; the rest was noise or padding anyway */
- ret = jffs2_flash_write(c, jeb->offset, sizeof(marker), &retlen, (char *)&marker);
+ ret = jffs2_flash_write(c, jeb->offset, je32_to_cpu(marker.totlen), &retlen, (char *)&marker);
if (ret) {
printk(KERN_WARNING "Write clean marker to block at 0x%08x failed: %d\n",
jeb->offset, ret);
goto bad2;
}
- if (retlen != sizeof(marker)) {
- printk(KERN_WARNING "Short write to newly-erased block at 0x%08x: Wanted %zd, got %zd\n",
- jeb->offset, sizeof(marker), retlen);
+ if (retlen != je32_to_cpu(marker.totlen)) {
+ printk(KERN_WARNING "Short write to newly-erased block at 0x%08x: Wanted %d, got %zd\n",
+ jeb->offset, je32_to_cpu(marker.totlen), retlen);
goto bad2;
}
marker_ref->next_in_ino = NULL;
marker_ref->next_phys = NULL;
marker_ref->flash_offset = jeb->offset | REF_NORMAL;
- marker_ref->__totlen = c->cleanmarker_size;
+ marker_ref->totlen = PAD(je32_to_cpu(marker.totlen));
jeb->first_node = jeb->last_node = marker_ref;
- jeb->free_size = c->sector_size - c->cleanmarker_size;
- jeb->used_size = c->cleanmarker_size;
+ jeb->free_size = c->sector_size - marker_ref->totlen;
+ jeb->used_size = marker_ref->totlen;
jeb->dirty_size = 0;
jeb->wasted_size = 0;
}
*
* For licensing information, see the file 'LICENCE' in this directory.
*
- * $Id: file.c,v 1.98 2004/03/19 16:41:09 dwmw2 Exp $
+ * $Id: file.c,v 1.96 2003/10/11 11:47:23 dwmw2 Exp $
*
*/
unsigned char *pg_buf;
int ret;
- D2(printk(KERN_DEBUG "jffs2_do_readpage_nolock(): ino #%lu, page at offset 0x%lx\n", inode->i_ino, pg->index << PAGE_CACHE_SHIFT));
+ D1(printk(KERN_DEBUG "jffs2_do_readpage_nolock(): ino #%lu, page at offset 0x%lx\n", inode->i_ino, pg->index << PAGE_CACHE_SHIFT));
if (!PageLocked(pg))
PAGE_BUG(pg);
flush_dcache_page(pg);
kunmap(pg);
- D2(printk(KERN_DEBUG "readpage finished\n"));
+ D1(printk(KERN_DEBUG "readpage finished\n"));
return 0;
}
struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
struct jffs2_raw_inode *ri;
- unsigned aligned_start = start & ~3;
int ret = 0;
uint32_t writtenlen = 0;
hurt to do it again. The alternative is ifdefs, which are ugly. */
kmap(pg);
- ret = jffs2_write_inode_range(c, f, ri, page_address(pg) + aligned_start,
- (pg->index << PAGE_CACHE_SHIFT) + aligned_start,
- end - aligned_start, &writtenlen);
+ ret = jffs2_write_inode_range(c, f, ri, page_address(pg) + start,
+ (pg->index << PAGE_CACHE_SHIFT) + start,
+ end - start, &writtenlen);
kunmap(pg);
/* There was an error writing. */
SetPageError(pg);
}
-
- /* Adjust writtenlen for the padding we did, so we don't confuse our caller */
- if (writtenlen < (start&3))
- writtenlen = 0;
- else
- writtenlen -= (start&3);
if (writtenlen) {
if (inode->i_size < (pg->index << PAGE_CACHE_SHIFT) + start + writtenlen) {
*
* For licensing information, see the file 'LICENCE' in this directory.
*
- * $Id: fs.c,v 1.46 2004/07/13 08:56:54 dwmw2 Exp $
+ * $Id: fs.c,v 1.32 2003/10/11 11:47:23 dwmw2 Exp $
*
*/
mdata = kmalloc(f->metadata->size, GFP_USER);
if (!mdata)
return -ENOMEM;
- ret = jffs2_read_dnode(c, f, f->metadata, mdata, 0, mdatalen);
+ ret = jffs2_read_dnode(c, f->metadata, mdata, 0, mdatalen);
if (ret) {
kfree(mdata);
return ret;
old_metadata = f->metadata;
- if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size)
+ if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size) {
+ vmtruncate(inode, iattr->ia_size);
jffs2_truncate_fraglist (c, &f->fragtree, iattr->ia_size);
+ }
if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) {
jffs2_add_full_dnode_to_inode(c, f, new_metadata);
up(&f->sem);
jffs2_complete_reservation(c);
- /* We have to do the vmtruncate() without f->sem held, since
- some pages may be locked and waiting for it in readpage().
- We are protected from a simultaneous write() extending i_size
- back past iattr->ia_size, because do_truncate() holds the
- generic inode semaphore. */
- if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size)
- vmtruncate(inode, iattr->ia_size);
-
return 0;
}
case S_IFCHR:
/* Read the device numbers from the media */
D1(printk(KERN_DEBUG "Reading device numbers from flash\n"));
- if (jffs2_read_dnode(c, f, f->metadata, (char *)&rdev, 0, sizeof(rdev)) < 0) {
+ if (jffs2_read_dnode(c, f->metadata, (char *)&rdev, 0, sizeof(rdev)) < 0) {
/* Eep */
printk(KERN_NOTICE "Read device numbers for inode %lu failed\n", (unsigned long)inode->i_ino);
up(&f->sem);
struct iattr iattr;
if (!(inode->i_state & I_DIRTY_DATASYNC)) {
- D2(printk(KERN_DEBUG "jffs2_dirty_inode() not calling setattr() for ino #%lu\n", inode->i_ino));
+ D1(printk(KERN_DEBUG "jffs2_dirty_inode() not calling setattr() for ino #%lu\n", inode->i_ino));
return;
}
/* We stop if it was running, then restart if it needs to.
This also catches the case where it was stopped and this
- is just a remount to restart it.
- Flush the writebuffer, if neccecary, else we loose it */
- if (!(sb->s_flags & MS_RDONLY)) {
+ is just a remount to restart it */
+ if (!(sb->s_flags & MS_RDONLY))
jffs2_stop_garbage_collect_thread(c);
- down(&c->alloc_sem);
- jffs2_flush_wbuf_pad(c);
- up(&c->alloc_sem);
- }
if (!(*flags & MS_RDONLY))
jffs2_start_garbage_collect_thread(c);
D1(printk(KERN_DEBUG "jffs2_write_super()\n"));
jffs2_garbage_collect_trigger(c);
- jffs2_erase_pending_blocks(c, 0);
+ jffs2_erase_pending_blocks(c);
jffs2_flush_wbuf_gc(c, 0);
}
c = JFFS2_SB_INFO(sb);
-#ifndef CONFIG_JFFS2_FS_NAND
- if (c->mtd->type == MTD_NANDFLASH) {
- printk(KERN_ERR "jffs2: Cannot operate on NAND flash unless jffs2 NAND support is compiled in.\n");
- return -EINVAL;
- }
-#endif
-
c->flash_size = c->mtd->size;
/*
* Check, if we have to concatenate physical blocks to larger virtual blocks
* to reduce the memorysize for c->blocks. (kmalloc allows max. 128K allocation)
*/
- c->sector_size = c->mtd->erasesize;
- blocks = c->flash_size / c->sector_size;
- while ((blocks * sizeof (struct jffs2_eraseblock)) > (128 * 1024)) {
+ blocks = c->flash_size / c->mtd->erasesize;
+ while ((blocks * sizeof (struct jffs2_eraseblock)) > (128 * 1024))
blocks >>= 1;
- c->sector_size <<= 1;
- }
- /*
- * Size alignment check
- */
- if ((c->sector_size * blocks) != c->flash_size) {
- c->flash_size = c->sector_size * blocks;
- printk(KERN_INFO "jffs2: Flash size not aligned to erasesize, reducing to %dKiB\n",
- c->flash_size / 1024);
- }
-
+ c->sector_size = c->flash_size / blocks;
if (c->sector_size != c->mtd->erasesize)
printk(KERN_INFO "jffs2: Erase block size too small (%dKiB). Using virtual blocks size (%dKiB) instead\n",
c->mtd->erasesize / 1024, c->sector_size / 1024);
c->cleanmarker_size = sizeof(struct jffs2_unknown_node);
/* Joern -- stick alignment for weird 8-byte-page flash here */
- /* NAND (or other bizarre) flash... do setup accordingly */
- ret = jffs2_flash_setup(c);
- if (ret)
- return ret;
+ if (jffs2_cleanmarker_oob(c)) {
+ /* NAND (or other bizarre) flash... do setup accordingly */
+ ret = jffs2_nand_flash_setup(c);
+ if (ret)
+ return ret;
+ }
c->inocache_list = kmalloc(INOCACHE_HASHSIZE * sizeof(struct jffs2_inode_cache *), GFP_KERNEL);
if (!c->inocache_list) {
out_inohash:
kfree(c->inocache_list);
out_wbuf:
- jffs2_flash_cleanup(c);
-
- return ret;
-}
-
-void jffs2_gc_release_inode(struct jffs2_sb_info *c,
- struct jffs2_inode_info *f)
-{
- iput(OFNI_EDONI_2SFFJ(f));
-}
-
-struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c,
- int inum, int nlink)
-{
- struct inode *inode;
- struct jffs2_inode_cache *ic;
- if (!nlink) {
- /* The inode has zero nlink but its nodes weren't yet marked
- obsolete. This has to be because we're still waiting for
- the final (close() and) iput() to happen.
-
- There's a possibility that the final iput() could have
- happened while we were contemplating. In order to ensure
- that we don't cause a new read_inode() (which would fail)
- for the inode in question, we use ilookup() in this case
- instead of iget().
-
- The nlink can't _become_ zero at this point because we're
- holding the alloc_sem, and jffs2_do_unlink() would also
- need that while decrementing nlink on any inode.
- */
- inode = ilookup(OFNI_BS_2SFFJ(c), inum);
- if (!inode) {
- D1(printk(KERN_DEBUG "ilookup() failed for ino #%u; inode is probably deleted.\n",
- inum));
-
- spin_lock(&c->inocache_lock);
- ic = jffs2_get_ino_cache(c, inum);
- if (!ic) {
- D1(printk(KERN_DEBUG "Inode cache for ino #%u is gone.\n", inum));
- spin_unlock(&c->inocache_lock);
- return NULL;
- }
- if (ic->state != INO_STATE_CHECKEDABSENT) {
- /* Wait for progress. Don't just loop */
- D1(printk(KERN_DEBUG "Waiting for ino #%u in state %d\n",
- ic->ino, ic->state));
- sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
- } else {
- spin_unlock(&c->inocache_lock);
- }
-
- return NULL;
- }
- } else {
- /* Inode has links to it still; they're not going away because
- jffs2_do_unlink() would need the alloc_sem and we have it.
- Just iget() it, and if read_inode() is necessary that's OK.
- */
- inode = iget(OFNI_BS_2SFFJ(c), inum);
- if (!inode)
- return ERR_PTR(-ENOMEM);
- }
- if (is_bad_inode(inode)) {
- printk(KERN_NOTICE "Eep. read_inode() failed for ino #%u. nlink %d\n",
- inum, nlink);
- /* NB. This will happen again. We need to do something appropriate here. */
- iput(inode);
- return ERR_PTR(-EIO);
- }
-
- return JFFS2_INODE_INFO(inode);
-}
-
-unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c,
- struct jffs2_inode_info *f,
- unsigned long offset,
- unsigned long *priv)
-{
- struct inode *inode = OFNI_EDONI_2SFFJ(f);
- struct page *pg;
-
- pg = read_cache_page(inode->i_mapping, offset >> PAGE_CACHE_SHIFT,
- (void *)jffs2_do_readpage_unlock, inode);
- if (IS_ERR(pg))
- return (void *)pg;
-
- *priv = (unsigned long)pg;
- return kmap(pg);
-}
-
-void jffs2_gc_release_page(struct jffs2_sb_info *c,
- unsigned char *ptr,
- unsigned long *priv)
-{
- struct page *pg = (void *)*priv;
-
- kunmap(pg);
- page_cache_release(pg);
-}
-
-int jffs2_flash_setup(struct jffs2_sb_info *c) {
- int ret = 0;
-
- if (jffs2_cleanmarker_oob(c)) {
- /* NAND flash... do setup accordingly */
- ret = jffs2_nand_flash_setup(c);
- if (ret)
- return ret;
- }
+ jffs2_nand_flash_cleanup(c);
- /* add setups for other bizarre flashes here... */
return ret;
}
-
-void jffs2_flash_cleanup(struct jffs2_sb_info *c) {
-
- if (jffs2_cleanmarker_oob(c)) {
- jffs2_nand_flash_cleanup(c);
- }
-
- /* add cleanups for other bizarre flashes here... */
-}
*
* For licensing information, see the file 'LICENCE' in this directory.
*
- * $Id: gc.c,v 1.136 2004/05/27 19:06:09 gleixner Exp $
+ * $Id: gc.c,v 1.114 2003/10/09 13:53:35 dwmw2 Exp $
*
*/
#include <linux/compiler.h>
#include <linux/stat.h>
#include "nodelist.h"
-#include "compr.h"
static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c,
struct jffs2_inode_cache *ic,
struct jffs2_inode_info *f, struct jffs2_full_dnode *fn,
uint32_t start, uint32_t end);
static int jffs2_garbage_collect_live(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
- struct jffs2_raw_node_ref *raw, struct jffs2_inode_info *f);
+ struct jffs2_raw_node_ref *raw, struct jffs2_inode_cache *ic);
/* Called with erase_completion_lock held */
static struct jffs2_eraseblock *jffs2_find_gc_block(struct jffs2_sb_info *c)
nextlist = &c->erasable_list;
} else {
/* Eep. All were empty */
- D1(printk(KERN_NOTICE "jffs2: No clean, dirty _or_ erasable blocks to GC from! Where are they all?\n"));
+ printk(KERN_NOTICE "jffs2: No clean, dirty _or_ erasable blocks to GC from! Where are they all?\n");
return NULL;
}
*/
int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
{
- struct jffs2_inode_info *f;
struct jffs2_inode_cache *ic;
struct jffs2_eraseblock *jeb;
struct jffs2_raw_node_ref *raw;
- int ret = 0, inum, nlink;
+ uint32_t inum;
+ int ret = 0;
if (down_interruptible(&c->alloc_sem))
return -EINTR;
ic->state = INO_STATE_CHECKING;
spin_unlock(&c->inocache_lock);
- D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass() triggering inode scan of ino#%u\n", ic->ino));
+ D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass() triggering inode scan of ino#%d\n", ic->ino));
ret = jffs2_do_crccheck_inode(c, ic);
if (ret)
jeb = jffs2_find_gc_block(c);
if (!jeb) {
- D1 (printk(KERN_NOTICE "jffs2: Couldn't find erase block to garbage collect!\n"));
+ printk(KERN_NOTICE "jffs2: Couldn't find erase block to garbage collect!\n");
spin_unlock(&c->erase_completion_lock);
up(&c->alloc_sem);
return -EIO;
while(ref_obsolete(raw)) {
D1(printk(KERN_DEBUG "Node at 0x%08x is obsolete... skipping\n", ref_offset(raw)));
- raw = raw->next_phys;
- if (unlikely(!raw)) {
+ jeb->gc_node = raw = raw->next_phys;
+ if (!raw) {
printk(KERN_WARNING "eep. End of raw list while still supposedly nodes to GC\n");
printk(KERN_WARNING "erase block at 0x%08x. free_size 0x%08x, dirty_size 0x%08x, used_size 0x%08x\n",
jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size);
- jeb->gc_node = raw;
spin_unlock(&c->erase_completion_lock);
up(&c->alloc_sem);
BUG();
}
}
- jeb->gc_node = raw;
-
D1(printk(KERN_DEBUG "Going to garbage collect node at 0x%08x\n", ref_offset(raw)));
-
if (!raw->next_in_ino) {
/* Inode-less node. Clean marker, snapshot or something like that */
/* FIXME: If it's something that needs to be copied, including something
up(&c->alloc_sem);
goto eraseit_lock;
}
-
- ic = jffs2_raw_ref_to_ic(raw);
-
- /* We need to hold the inocache. Either the erase_completion_lock or
- the inocache_lock are sufficient; we trade down since the inocache_lock
- causes less contention. */
- spin_lock(&c->inocache_lock);
+
+ inum = jffs2_raw_ref_to_inum(raw);
+ D1(printk(KERN_DEBUG "Inode number is #%u\n", inum));
spin_unlock(&c->erase_completion_lock);
- D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass collecting from block @0x%08x. Node @0x%08x(%d), ino #%u\n", jeb->offset, ref_offset(raw), ref_flags(raw), ic->ino));
+ D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass collecting from block @0x%08x. Node @0x%08x(%d), ino #%u\n", jeb->offset, ref_offset(raw), ref_flags(raw), inum));
/* Three possibilities:
1. Inode is already in-core. We must iget it and do proper
3. Inode is not in-core, node is not pristine. We must iget()
and take the slow path.
*/
+ spin_lock(&c->inocache_lock);
+ ic = jffs2_get_ino_cache(c, inum);
+
+ /* This should never fail unless I'm particularly stupid.
+ So we don't check before dereferencing it */
switch(ic->state) {
case INO_STATE_CHECKEDABSENT:
ic->state = INO_STATE_GC;
else {
D1(printk(KERN_DEBUG "Ino #%u is absent but node not REF_PRISTINE. Reading.\n",
- ic->ino));
+ inum));
}
break;
case INO_STATE_PRESENT:
- /* It's in-core. GC must iget() it. */
+ case INO_STATE_UNCHECKED:
+ /* It's in-core or hasn't been checked. GC must iget() it. */
break;
- case INO_STATE_UNCHECKED:
case INO_STATE_CHECKING:
- case INO_STATE_GC:
/* Should never happen. We should have finished checking
- by the time we actually start doing any GC, and since
- we're holding the alloc_sem, no other garbage collection
- can happen.
- */
- printk(KERN_CRIT "Inode #%u already in state %d in jffs2_garbage_collect_pass()!\n",
- ic->ino, ic->state);
- up(&c->alloc_sem);
- spin_unlock(&c->inocache_lock);
+ by the time we actually start doing any GC. */
BUG();
+
+ case INO_STATE_GC:
+ /* Should never happen. We are holding the alloc_sem,
+ no other garbage collection can happen. Note that we
+ do depend on this later when deciding to do a simple
+ node copy */
+ BUG();
+
case INO_STATE_READING:
/* Someone's currently trying to read it. We must wait for
them to finish and then go through the full iget() route
up(&c->alloc_sem);
D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass() waiting for ino #%u in state %d\n",
- ic->ino, ic->state));
+ inum, ic->state));
sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
/* And because we dropped the alloc_sem we must start again from the
beginning. Ponder chance of livelock here -- we're returning success
A: Small enough that I don't care :)
*/
return 0;
+
}
+ spin_unlock(&c->inocache_lock);
+
/* OK. Now if the inode is in state INO_STATE_GC, we are going to copy the
node intact, and we don't have to muck about with the fragtree etc.
because we know it's not in-core. If it _was_ in-core, we go through
all the iget() crap anyway */
if (ic->state == INO_STATE_GC) {
- spin_unlock(&c->inocache_lock);
-
ret = jffs2_garbage_collect_pristine(c, ic, raw);
+ jffs2_set_inocache_state(c, ic, INO_STATE_CHECKEDABSENT);
- spin_lock(&c->inocache_lock);
- ic->state = INO_STATE_CHECKEDABSENT;
- wake_up(&c->inocache_wq);
-
- if (ret != -EBADFD) {
- spin_unlock(&c->inocache_lock);
+ if (ret != -EBADFD)
goto release_sem;
- }
- /* Fall through if it wanted us to, with inocache_lock held */
+ /* Fall through if it wanted us to */
}
- /* Prevent the fairly unlikely race where the gcblock is
- entirely obsoleted by the final close of a file which had
- the only valid nodes in the block, followed by erasure,
- followed by freeing of the ic because the erased block(s)
- held _all_ the nodes of that inode.... never been seen but
- it's vaguely possible. */
-
- inum = ic->ino;
- nlink = ic->nlink;
- spin_unlock(&c->inocache_lock);
-
- f = jffs2_gc_fetch_inode(c, inum, nlink);
- if (IS_ERR(f))
- return PTR_ERR(f);
- if (!f)
- return 0;
-
- ret = jffs2_garbage_collect_live(c, jeb, raw, f);
-
- jffs2_gc_release_inode(c, f);
+ ret = jffs2_garbage_collect_live(c, jeb, raw, ic);
release_sem:
up(&c->alloc_sem);
return ret;
}
+
static int jffs2_garbage_collect_live(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
- struct jffs2_raw_node_ref *raw, struct jffs2_inode_info *f)
+ struct jffs2_raw_node_ref *raw, struct jffs2_inode_cache *ic)
{
+ struct jffs2_inode_info *f;
struct jffs2_node_frag *frag;
struct jffs2_full_dnode *fn = NULL;
struct jffs2_full_dirent *fd;
uint32_t start = 0, end = 0, nrfrags = 0;
+ struct inode *inode;
int ret = 0;
+ inode = iget(OFNI_BS_2SFFJ(c), ic->ino);
+ if (is_bad_inode(inode)) {
+ printk(KERN_NOTICE "Eep. read_inode() failed for ino #%u\n", ic->ino);
+ /* NB. This will happen again. We need to do something appropriate here. */
+ up(&c->alloc_sem);
+ iput(inode);
+ return -EIO;
+ }
+
+ f = JFFS2_INODE_INFO(inode);
down(&f->sem);
/* Now we have the lock for this inode. Check that it's still the one at the head
of the list. */
- spin_lock(&c->erase_completion_lock);
-
- if (c->gcblock != jeb) {
- spin_unlock(&c->erase_completion_lock);
- D1(printk(KERN_DEBUG "GC block is no longer gcblock. Restart\n"));
- goto upnout;
- }
if (ref_obsolete(raw)) {
- spin_unlock(&c->erase_completion_lock);
D1(printk(KERN_DEBUG "node to be GC'd was obsoleted in the meantime.\n"));
/* They'll call again */
goto upnout;
}
- spin_unlock(&c->erase_completion_lock);
-
/* OK. Looks safe. And nobody can get us now because we have the semaphore. Move the block */
if (f->metadata && f->metadata->raw == raw) {
fn = f->metadata;
if (frag->node && frag->node->raw == raw) {
fn = frag->node;
end = frag->ofs + frag->size;
+#if 1 /* Temporary debugging sanity checks, till we're ready to _trust_ the REF_PRISTINE flag stuff */
+ if (!nrfrags && ref_flags(fn->raw) == REF_PRISTINE) {
+ if (fn->frags > 1) {
+ printk(KERN_WARNING "REF_PRISTINE node at 0x%08x had %d frags. Tell dwmw2\n", ref_offset(raw), fn->frags);
+ mark_ref_normal(raw);
+ }
+ /* A hole node which isn't multi-page should be garbage-collected
+ and merged anyway, so we just check for the frag size here,
+ rather than mucking around with actually reading the node
+ and checking the compression type, which is the real way
+ to tell a hole node. */
+ if (frag->ofs & (PAGE_CACHE_SIZE-1) && frag_prev(frag) && frag_prev(frag)->size < PAGE_CACHE_SIZE) {
+ printk(KERN_WARNING "REF_PRISTINE node at 0x%08x had a previous non-hole frag in the same page. Tell dwmw2\n",
+ ref_offset(raw));
+ mark_ref_normal(raw);
+ }
+
+ if ((frag->ofs+frag->size) & (PAGE_CACHE_SIZE-1) && frag_next(frag) && frag_next(frag)->size < PAGE_CACHE_SIZE) {
+ printk(KERN_WARNING "REF_PRISTINE node at 0x%08x (%08x-%08x) had a following non-hole frag in the same page. Tell dwmw2\n",
+ ref_offset(raw), frag->ofs, frag->ofs+frag->size);
+ mark_ref_normal(raw);
+ }
+ }
+#endif
if (!nrfrags++)
start = frag->ofs;
if (nrfrags == frag->node->frags)
}
if (fn) {
if (ref_flags(raw) == REF_PRISTINE) {
- ret = jffs2_garbage_collect_pristine(c, f->inocache, raw);
+ ret = jffs2_garbage_collect_pristine(c, ic, raw);
if (!ret) {
/* Urgh. Return it sensibly. */
- frag->node->raw = f->inocache->nodes;
+ frag->node->raw = ic->nodes;
}
if (ret != -EBADFD)
goto upnout;
}
upnout:
up(&f->sem);
+ iput(inode);
return ret;
}
size_t retlen;
int ret;
uint32_t phys_ofs, alloclen;
- uint32_t crc, rawlen;
+ uint32_t crc;
int retried = 0;
D1(printk(KERN_DEBUG "Going to GC REF_PRISTINE node at 0x%08x\n", ref_offset(raw)));
- rawlen = ref_totlen(c, c->gcblock, raw);
-
/* Ask for a small amount of space (or the totlen if smaller) because we
don't want to force wastage of the end of a block if splitting would
work. */
- ret = jffs2_reserve_space_gc(c, min_t(uint32_t, sizeof(struct jffs2_raw_inode) + JFFS2_MIN_DATA_LEN,
- rawlen), &phys_ofs, &alloclen);
+ ret = jffs2_reserve_space_gc(c, min_t(uint32_t, sizeof(struct jffs2_raw_inode) + JFFS2_MIN_DATA_LEN, raw->totlen),
+ &phys_ofs, &alloclen);
if (ret)
return ret;
- if (alloclen < rawlen) {
+ if (alloclen < raw->totlen) {
/* Doesn't fit untouched. We'll go the old route and split it */
return -EBADFD;
}
- node = kmalloc(rawlen, GFP_KERNEL);
+ node = kmalloc(raw->totlen, GFP_KERNEL);
if (!node)
return -ENOMEM;
- ret = jffs2_flash_read(c, ref_offset(raw), rawlen, &retlen, (char *)node);
- if (!ret && retlen != rawlen)
+ ret = jffs2_flash_read(c, ref_offset(raw), raw->totlen, &retlen, (char *)node);
+ if (!ret && retlen != raw->totlen)
ret = -EIO;
if (ret)
goto out_node;
/* OK, all the CRCs are good; this node can just be copied as-is. */
retry:
nraw->flash_offset = phys_ofs;
- nraw->__totlen = rawlen;
+ nraw->totlen = raw->totlen;
nraw->next_phys = NULL;
- ret = jffs2_flash_write(c, phys_ofs, rawlen, &retlen, (char *)node);
+ ret = jffs2_flash_write(c, phys_ofs, raw->totlen, &retlen, (char *)node);
- if (ret || (retlen != rawlen)) {
+ if (ret || (retlen != raw->totlen)) {
printk(KERN_NOTICE "Write of %d bytes at 0x%08x failed. returned %d, retlen %zd\n",
- rawlen, phys_ofs, ret, retlen);
+ raw->totlen, phys_ofs, ret, retlen);
if (retlen) {
/* Doesn't belong to any inode */
nraw->next_in_ino = NULL;
ACCT_SANITY_CHECK(c,jeb);
D1(ACCT_PARANOIA_CHECK(jeb));
- ret = jffs2_reserve_space_gc(c, rawlen, &phys_ofs, &dummy);
+ ret = jffs2_reserve_space_gc(c, raw->totlen, &phys_ofs, &dummy);
if (!ret) {
D1(printk(KERN_DEBUG "Allocated space at 0x%08x to retry failed write.\n", phys_ofs));
printk(KERN_WARNING "kmalloc of mdata failed in jffs2_garbage_collect_metadata()\n");
return -ENOMEM;
}
- ret = jffs2_read_dnode(c, f, fn, mdata, 0, mdatalen);
+ ret = jffs2_read_dnode(c, fn, mdata, 0, mdatalen);
if (ret) {
printk(KERN_WARNING "read of old metadata failed in jffs2_garbage_collect_metadata(): %d\n", ret);
kfree(mdata);
delete a 'real' dirent with the same name that's still
somewhere else on the flash. */
if (!jffs2_can_mark_obsolete(c)) {
- struct jffs2_raw_dirent *rd;
+ struct jffs2_raw_dirent rd;
struct jffs2_raw_node_ref *raw;
int ret;
size_t retlen;
int name_len = strlen(fd->name);
uint32_t name_crc = crc32(0, fd->name, name_len);
- uint32_t rawlen = ref_totlen(c, jeb, fd->raw);
-
- rd = kmalloc(rawlen, GFP_KERNEL);
- if (!rd)
- return -ENOMEM;
+ char *namebuf = NULL;
/* Prevent the erase code from nicking the obsolete node refs while
we're looking at them. I really don't like this extra lock but
down(&c->erase_free_sem);
for (raw = f->inocache->nodes; raw != (void *)f->inocache; raw = raw->next_in_ino) {
-
/* We only care about obsolete ones */
if (!(ref_obsolete(raw)))
continue;
- /* Any dirent with the same name is going to have the same length... */
- if (ref_totlen(c, NULL, raw) != rawlen)
- continue;
-
/* Doesn't matter if there's one in the same erase block. We're going to
delete it too at the same time. */
if ((raw->flash_offset & ~(c->sector_size-1)) ==
(fd->raw->flash_offset & ~(c->sector_size-1)))
continue;
- D1(printk(KERN_DEBUG "Check potential deletion dirent at %08x\n", ref_offset(raw)));
-
- /* This is an obsolete node belonging to the same directory, and it's of the right
- length. We need to take a closer look...*/
- ret = jffs2_flash_read(c, ref_offset(raw), rawlen, &retlen, (char *)rd);
+ /* This is an obsolete node belonging to the same directory */
+ ret = jffs2_flash_read(c, ref_offset(raw), sizeof(struct jffs2_unknown_node), &retlen, (char *)&rd);
if (ret) {
- printk(KERN_WARNING "jffs2_g_c_deletion_dirent(): Read error (%d) reading obsolete node at %08x\n", ret, ref_offset(raw));
+ printk(KERN_WARNING "jffs2_g_c_deletion_dirent(): Read error (%d) reading header from obsolete node at %08x\n", ret, ref_offset(raw));
/* If we can't read it, we don't need to continue to obsolete it. Continue */
continue;
}
- if (retlen != rawlen) {
- printk(KERN_WARNING "jffs2_g_c_deletion_dirent(): Short read (%zd not %u) reading header from obsolete node at %08x\n",
- retlen, rawlen, ref_offset(raw));
+ if (retlen != sizeof(struct jffs2_unknown_node)) {
+ printk(KERN_WARNING "jffs2_g_c_deletion_dirent(): Short read (%zd not %zd) reading header from obsolete node at %08x\n",
+ retlen, sizeof(struct jffs2_unknown_node), ref_offset(raw));
continue;
}
+ if (je16_to_cpu(rd.nodetype) != JFFS2_NODETYPE_DIRENT ||
+ PAD(je32_to_cpu(rd.totlen)) != PAD(sizeof(rd) + name_len))
+ continue;
- if (je16_to_cpu(rd->nodetype) != JFFS2_NODETYPE_DIRENT)
+ /* OK, it's a dirent node, it's the right length. We have to take a
+ closer look at it... */
+ ret = jffs2_flash_read(c, ref_offset(raw), sizeof(rd), &retlen, (char *)&rd);
+ if (ret) {
+ printk(KERN_WARNING "jffs2_g_c_deletion_dirent(): Read error (%d) reading from obsolete node at %08x\n", ret, ref_offset(raw));
+ /* If we can't read it, we don't need to continue to obsolete it. Continue */
continue;
+ }
+ if (retlen != sizeof(rd)) {
+ printk(KERN_WARNING "jffs2_g_c_deletion_dirent(): Short read (%zd not %zd) reading from obsolete node at %08x\n",
+ retlen, sizeof(rd), ref_offset(raw));
+ continue;
+ }
/* If the name CRC doesn't match, skip */
- if (je32_to_cpu(rd->name_crc) != name_crc)
+ if (je32_to_cpu(rd.name_crc) != name_crc)
continue;
-
/* If the name length doesn't match, or it's another deletion dirent, skip */
- if (rd->nsize != name_len || !je32_to_cpu(rd->ino))
+ if (rd.nsize != name_len || !je32_to_cpu(rd.ino))
continue;
/* OK, check the actual name now */
- if (memcmp(rd->name, fd->name, name_len))
+ if (!namebuf) {
+ namebuf = kmalloc(name_len + 1, GFP_KERNEL);
+ if (!namebuf) {
+ up(&c->erase_free_sem);
+ return -ENOMEM;
+ }
+ }
+ /* We read the extra byte before it so it's a word-aligned read */
+ ret = jffs2_flash_read(c, (ref_offset(raw))+sizeof(rd)-1, name_len+1, &retlen, namebuf);
+ if (ret) {
+ printk(KERN_WARNING "jffs2_g_c_deletion_dirent(): Read error (%d) reading name from obsolete node at %08x\n", ret, ref_offset(raw));
+ /* If we can't read it, we don't need to continue to obsolete it. Continue */
+ continue;
+ }
+ if (retlen != name_len+1) {
+ printk(KERN_WARNING "jffs2_g_c_deletion_dirent(): Short read (%zd not %d) reading name from obsolete node at %08x\n",
+ retlen, name_len+1, ref_offset(raw));
+ continue;
+ }
+ if (memcmp(namebuf+1, fd->name, name_len))
continue;
/* OK. The name really does match. There really is still an older node on
the flash which our deletion dirent obsoletes. So we have to write out
a new deletion dirent to replace it */
- up(&c->erase_free_sem);
-
- D1(printk(KERN_DEBUG "Deletion dirent at %08x still obsoletes real dirent \"%s\" at %08x for ino #%u\n",
- ref_offset(fd->raw), fd->name, ref_offset(raw), je32_to_cpu(rd->ino)));
- kfree(rd);
+
+ if (namebuf)
+ kfree(namebuf);
+ up(&c->erase_free_sem);
return jffs2_garbage_collect_dirent(c, jeb, f, fd);
}
up(&c->erase_free_sem);
- kfree(rd);
+
+ if (namebuf)
+ kfree(namebuf);
}
/* No need for it any more. Just mark it obsolete and remove it from the list */
je32_to_cpu(ri.ino));
});
- /* This is a partially-overlapped hole node. Mark it REF_NORMAL not REF_PRISTINE */
- mark_ref_normal(new_fn->raw);
-
for (frag = jffs2_lookup_node_frag(&f->fragtree, fn->ofs);
frag; frag = frag_next(frag)) {
if (frag->ofs > fn->size + fn->ofs)
uint32_t alloclen, phys_ofs, offset, orig_end, orig_start;
int ret = 0;
unsigned char *comprbuf = NULL, *writebuf;
- unsigned long pg;
+ struct page *pg;
unsigned char *pg_ptr;
-
+ /* FIXME: */ struct inode *inode = OFNI_EDONI_2SFFJ(f);
+
memset(&ri, 0, sizeof(ri));
D1(printk(KERN_DEBUG "Writing replacement dnode for ino #%u from offset 0x%x to 0x%x\n",
* page OK. We'll actually write it out again in commit_write, which is a little
* suboptimal, but at least we're correct.
*/
- pg_ptr = jffs2_gc_fetch_page(c, f, start, &pg);
-
- if (IS_ERR(pg_ptr)) {
- printk(KERN_WARNING "read_cache_page() returned error: %ld\n", PTR_ERR(pg_ptr));
- return PTR_ERR(pg_ptr);
+#ifdef __ECOS
+ pg = read_cache_page(start >> PAGE_CACHE_SHIFT, (void *)jffs2_do_readpage_unlock, inode);
+#else
+ pg = read_cache_page(inode->i_mapping, start >> PAGE_CACHE_SHIFT, (void *)jffs2_do_readpage_unlock, inode);
+#endif
+ if (IS_ERR(pg)) {
+ printk(KERN_WARNING "read_cache_page() returned error: %ld\n", PTR_ERR(pg));
+ return PTR_ERR(pg);
}
+ pg_ptr = (char *)kmap(pg);
+ comprbuf = kmalloc(end - start, GFP_KERNEL);
offset = start;
while(offset < orig_end) {
uint32_t datalen;
uint32_t cdatalen;
- uint16_t comprtype = JFFS2_COMPR_NONE;
+ char comprtype = JFFS2_COMPR_NONE;
ret = jffs2_reserve_space_gc(c, sizeof(ri) + JFFS2_MIN_DATA_LEN, &phys_ofs, &alloclen);
writebuf = pg_ptr + (offset & (PAGE_CACHE_SIZE -1));
- comprtype = jffs2_compress(c, f, writebuf, &comprbuf, &datalen, &cdatalen);
-
+ if (comprbuf) {
+ comprtype = jffs2_compress(writebuf, comprbuf, &datalen, &cdatalen);
+ }
+ if (comprtype) {
+ writebuf = comprbuf;
+ } else {
+ datalen = cdatalen;
+ }
ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
ri.nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
ri.totlen = cpu_to_je32(sizeof(ri) + cdatalen);
ri.offset = cpu_to_je32(offset);
ri.csize = cpu_to_je32(cdatalen);
ri.dsize = cpu_to_je32(datalen);
- ri.compr = comprtype & 0xff;
- ri.usercompr = (comprtype >> 8) & 0xff;
+ ri.compr = comprtype;
ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8));
- ri.data_crc = cpu_to_je32(crc32(0, comprbuf, cdatalen));
+ ri.data_crc = cpu_to_je32(crc32(0, writebuf, cdatalen));
- new_fn = jffs2_write_dnode(c, f, &ri, comprbuf, cdatalen, phys_ofs, ALLOC_GC);
-
- jffs2_free_comprbuf(comprbuf, writebuf);
+ new_fn = jffs2_write_dnode(c, f, &ri, writebuf, cdatalen, phys_ofs, ALLOC_GC);
if (IS_ERR(new_fn)) {
printk(KERN_WARNING "Error writing new dnode: %ld\n", PTR_ERR(new_fn));
f->metadata = NULL;
}
}
+ if (comprbuf) kfree(comprbuf);
- jffs2_gc_release_page(c, pg_ptr, &pg);
+ kunmap(pg);
+ /* XXX: Does the page get freed automatically? */
+ /* AAA: Judging by the unmount getting stuck in __wait_on_page, nope. */
+ page_cache_release(pg);
return ret;
}
/*
* JFFS2 -- Journalling Flash File System, Version 2.
*
- * Copyright (C) 2001-2003 Red Hat, Inc.
+ * Copyright (C) 2001 Red Hat, Inc.
*
* Created by David Woodhouse <dwmw2@redhat.com>
*
* For licensing information, see the file 'LICENCE' in this directory.
*
- * $Id: ioctl.c,v 1.8 2003/10/28 16:16:28 dwmw2 Exp $
+ * $Id: ioctl.c,v 1.7 2003/10/04 08:33:06 dwmw2 Exp $
*
*/
{
/* Later, this will provide for lsattr.jffs2 and chattr.jffs2, which
will include compression support etc. */
- return -ENOTTY;
+ return -EINVAL;
}
*
* For licensing information, see the file 'LICENCE' in this directory.
*
- * $Id: malloc.c,v 1.27 2003/10/28 17:14:58 dwmw2 Exp $
+ * $Id: malloc.c,v 1.25 2003/10/04 08:33:06 dwmw2 Exp $
*
*/
*
* For licensing information, see the file 'LICENCE' in this directory.
*
- * $Id: nodelist.c,v 1.86 2003/10/31 15:37:51 dwmw2 Exp $
+ * $Id: nodelist.c,v 1.80 2003/10/04 08:33:06 dwmw2 Exp $
*
*/
/* Put a new tmp_dnode_info into the list, keeping the list in
order of increasing version
*/
-static void jffs2_add_tn_to_list(struct jffs2_tmp_dnode_info *tn, struct jffs2_tmp_dnode_info **list)
+void jffs2_add_tn_to_list(struct jffs2_tmp_dnode_info *tn, struct jffs2_tmp_dnode_info **list)
{
struct jffs2_tmp_dnode_info **prev = list;
cond_resched();
/* FIXME: point() */
- err = jffs2_flash_read(c, (ref_offset(ref)),
- min_t(uint32_t, ref_totlen(c, NULL, ref), sizeof(node)),
- &retlen, (void *)&node);
+ err = jffs2_flash_read(c, (ref_offset(ref)), min_t(uint32_t, ref->totlen, sizeof(node)), &retlen, (void *)&node);
if (err) {
printk(KERN_WARNING "error %d reading node at 0x%08x in get_inode_nodes()\n", err, ref_offset(ref));
goto free_out;
/* Check we've managed to read at least the common node header */
- if (retlen < min_t(uint32_t, ref_totlen(c, NULL, ref), sizeof(node.u))) {
+ if (retlen < min_t(uint32_t, ref->totlen, sizeof(node.u))) {
printk(KERN_WARNING "short read in get_inode_nodes()\n");
err = -EIO;
goto free_out;
/* If we've never checked the CRCs on this node, check them now. */
if (ref_flags(ref) == REF_UNCHECKED) {
- uint32_t crc, len;
+ uint32_t crc;
struct jffs2_eraseblock *jeb;
crc = crc32(0, &node, sizeof(node.i)-8);
/* Mark the node as having been checked and fix the accounting accordingly */
spin_lock(&c->erase_completion_lock);
jeb = &c->blocks[ref->flash_offset / c->sector_size];
- len = ref_totlen(c, jeb, ref);
-
- jeb->used_size += len;
- jeb->unchecked_size -= len;
- c->used_size += len;
- c->unchecked_size -= len;
+ jeb->used_size += ref->totlen;
+ jeb->unchecked_size -= ref->totlen;
+ c->used_size += ref->totlen;
+ c->unchecked_size -= ref->totlen;
/* If node covers at least a whole page, or if it starts at the
beginning of a page and runs to the end of the file, or if
default:
if (ref_flags(ref) == REF_UNCHECKED) {
struct jffs2_eraseblock *jeb;
- uint32_t len;
printk(KERN_ERR "Eep. Unknown node type %04x at %08x was marked REF_UNCHECKED\n",
je16_to_cpu(node.u.nodetype), ref_offset(ref));
/* Mark the node as having been checked and fix the accounting accordingly */
spin_lock(&c->erase_completion_lock);
jeb = &c->blocks[ref->flash_offset / c->sector_size];
- len = ref_totlen(c, jeb, ref);
-
- jeb->used_size += len;
- jeb->unchecked_size -= len;
- c->used_size += len;
- c->unchecked_size -= len;
+ jeb->used_size += ref->totlen;
+ jeb->unchecked_size -= ref->totlen;
+ c->used_size += ref->totlen;
+ c->unchecked_size -= ref->totlen;
mark_ref_normal(ref);
spin_unlock(&c->erase_completion_lock);
jffs2_free_node_frag(frag);
frag = parent;
-
- cond_resched();
}
}
*
* For licensing information, see the file 'LICENCE' in this directory.
*
- * $Id: nodelist.h,v 1.119 2004/05/26 12:28:12 gleixner Exp $
+ * $Id: nodelist.h,v 1.104 2003/10/08 11:45:11 dwmw2 Exp $
*
*/
#define D2(x)
#endif
-#define JFFS2_NATIVE_ENDIAN
-
-/* Note we handle mode bits conversion from JFFS2 (i.e. Linux) to/from
- whatever OS we're actually running on here too. */
-
-#if defined(JFFS2_NATIVE_ENDIAN)
-#define cpu_to_je16(x) ((jint16_t){x})
-#define cpu_to_je32(x) ((jint32_t){x})
-#define cpu_to_jemode(x) ((jmode_t){os_to_jffs2_mode(x)})
-
-#define je16_to_cpu(x) ((x).v16)
-#define je32_to_cpu(x) ((x).v32)
-#define jemode_to_cpu(x) (jffs2_to_os_mode((x).m))
-#elif defined(JFFS2_BIG_ENDIAN)
-#define cpu_to_je16(x) ((jint16_t){cpu_to_be16(x)})
-#define cpu_to_je32(x) ((jint32_t){cpu_to_be32(x)})
-#define cpu_to_jemode(x) ((jmode_t){cpu_to_be32(os_to_jffs2_mode(x))})
-
-#define je16_to_cpu(x) (be16_to_cpu(x.v16))
-#define je32_to_cpu(x) (be32_to_cpu(x.v32))
-#define jemode_to_cpu(x) (be32_to_cpu(jffs2_to_os_mode((x).m)))
-#elif defined(JFFS2_LITTLE_ENDIAN)
-#define cpu_to_je16(x) ((jint16_t){cpu_to_le16(x)})
-#define cpu_to_je32(x) ((jint32_t){cpu_to_le32(x)})
-#define cpu_to_jemode(x) ((jmode_t){cpu_to_le32(os_to_jffs2_mode(x))})
-
-#define je16_to_cpu(x) (le16_to_cpu(x.v16))
-#define je32_to_cpu(x) (le32_to_cpu(x.v32))
-#define jemode_to_cpu(x) (le32_to_cpu(jffs2_to_os_mode((x).m)))
-#else
-#error wibble
-#endif
-
/*
This is all we need to keep in-core for each raw node during normal
operation. As and when we do read_inode on a particular inode, we can
word so you know when you've got there :) */
struct jffs2_raw_node_ref *next_phys;
uint32_t flash_offset;
- uint32_t __totlen; /* This may die; use ref_totlen(c, jeb, ) below */
-};
-
+ uint32_t totlen;
+
/* flash_offset & 3 always has to be zero, because nodes are
always aligned at 4 bytes. So we have a couple of extra bits
- to play with, which indicate the node's status; see below: */
+ to play with. So we set the least significant bit to 1 to
+ signify that the node is obsoleted by later nodes.
+ */
#define REF_UNCHECKED 0 /* We haven't yet checked the CRC or built its inode */
#define REF_OBSOLETE 1 /* Obsolete, can be completely ignored */
#define REF_PRISTINE 2 /* Completely clean. GC without looking */
#define ref_offset(ref) ((ref)->flash_offset & ~3)
#define ref_obsolete(ref) (((ref)->flash_offset & 3) == REF_OBSOLETE)
#define mark_ref_normal(ref) do { (ref)->flash_offset = ref_offset(ref) | REF_NORMAL; } while(0)
+};
/*
Used for keeping track of deletion nodes &c, which can only be marked
BUG(); \
} \
if (ref_flags(ref2) == REF_UNCHECKED) \
- my_unchecked_size += ref_totlen(c, jeb, ref2); \
+ my_unchecked_size += ref2->totlen; \
else if (!ref_obsolete(ref2)) \
- my_used_size += ref_totlen(c, jeb, ref2); \
+ my_used_size += ref2->totlen; \
if (unlikely((!ref2->next_phys) != (ref2 == jeb->last_node))) { \
printk("ref for node at %p (phys %08x) has next_phys->%p (%08x), last_node->%p (phys %08x)\n", \
ref2, ref_offset(ref2), ref2->next_phys, ref_offset(ref2->next_phys), \
} \
} while(0)
-/* Calculate totlen from surrounding nodes or eraseblock */
-static inline uint32_t __ref_totlen(struct jffs2_sb_info *c,
- struct jffs2_eraseblock *jeb,
- struct jffs2_raw_node_ref *ref)
-{
- uint32_t ref_end;
-
- if (ref->next_phys)
- ref_end = ref_offset(ref->next_phys);
- else {
- if (!jeb)
- jeb = &c->blocks[ref->flash_offset / c->sector_size];
-
- /* Last node in block. Use free_space */
- BUG_ON(ref != jeb->last_node);
- ref_end = jeb->offset + c->sector_size - jeb->free_size;
- }
- return ref_end - ref_offset(ref);
-}
-
-static inline uint32_t ref_totlen(struct jffs2_sb_info *c,
- struct jffs2_eraseblock *jeb,
- struct jffs2_raw_node_ref *ref)
-{
- uint32_t ret;
-
- D1(if (jeb && jeb != &c->blocks[ref->flash_offset / c->sector_size]) {
- printk(KERN_CRIT "ref_totlen called with wrong block -- at 0x%08x instead of 0x%08x; ref 0x%08x\n",
- jeb->offset, c->blocks[ref->flash_offset / c->sector_size].offset, ref_offset(ref));
- BUG();
- })
-
-#if 1
- ret = ref->__totlen;
-#else
- /* This doesn't actually work yet */
- ret = __ref_totlen(c, jeb, ref);
- if (ret != ref->__totlen) {
- printk(KERN_CRIT "Totlen for ref at %p (0x%08x-0x%08x) miscalculated as 0x%x instead of %x\n",
- ref, ref_offset(ref), ref_offset(ref)+ref->__totlen,
- ret, ref->__totlen);
- if (!jeb)
- jeb = &c->blocks[ref->flash_offset / c->sector_size];
- paranoia_failed_dump(jeb);
- BUG();
- }
-#endif
- return ret;
-}
-
-
#define ALLOC_NORMAL 0 /* Normal allocation */
#define ALLOC_DELETION 1 /* Deletion node. Best to allow it */
#define ALLOC_GC 2 /* Space requested for GC. Give it or die */
#define PAD(x) (((x)+3)&~3)
-static inline struct jffs2_inode_cache *jffs2_raw_ref_to_ic(struct jffs2_raw_node_ref *raw)
+static inline int jffs2_raw_ref_to_inum(struct jffs2_raw_node_ref *raw)
{
while(raw->next_in_ino) {
raw = raw->next_in_ino;
}
- return ((struct jffs2_inode_cache *)raw);
+ return ((struct jffs2_inode_cache *)raw)->ino;
}
static inline struct jffs2_node_frag *frag_first(struct rb_root *root)
/* nodelist.c */
D1(void jffs2_print_frag_list(struct jffs2_inode_info *f));
void jffs2_add_fd_to_list(struct jffs2_sb_info *c, struct jffs2_full_dirent *new, struct jffs2_full_dirent **list);
+void jffs2_add_tn_to_list(struct jffs2_tmp_dnode_info *tn, struct jffs2_tmp_dnode_info **list);
int jffs2_get_inode_nodes(struct jffs2_sb_info *c, ino_t ino, struct jffs2_inode_info *f,
struct jffs2_tmp_dnode_info **tnp, struct jffs2_full_dirent **fdp,
uint32_t *highest_version, uint32_t *latest_mctime,
void rb_replace_node(struct rb_node *victim, struct rb_node *new, struct rb_root *root);
/* nodemgmt.c */
-int jffs2_thread_should_wake(struct jffs2_sb_info *c);
int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len, int prio);
int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len);
int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *new);
int jffs2_garbage_collect_pass(struct jffs2_sb_info *c);
/* read.c */
-int jffs2_read_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
- struct jffs2_full_dnode *fd, unsigned char *buf,
- int ofs, int len);
+int jffs2_read_dnode(struct jffs2_sb_info *c, struct jffs2_full_dnode *fd, unsigned char *buf, int ofs, int len);
int jffs2_read_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
unsigned char *buf, uint32_t offset, uint32_t len);
char *jffs2_getlink(struct jffs2_sb_info *c, struct jffs2_inode_info *f);
+
+/* compr.c */
+unsigned char jffs2_compress(unsigned char *data_in, unsigned char *cpage_out,
+ uint32_t *datalen, uint32_t *cdatalen);
+int jffs2_decompress(unsigned char comprtype, unsigned char *cdata_in,
+ unsigned char *data_out, uint32_t cdatalen, uint32_t datalen);
+
/* scan.c */
int jffs2_scan_medium(struct jffs2_sb_info *c);
void jffs2_rotate_lists(struct jffs2_sb_info *c);
/* erase.c */
void jffs2_erase_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
-void jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count);
+void jffs2_erase_pending_blocks(struct jffs2_sb_info *c);
+void jffs2_erase_pending_trigger(struct jffs2_sb_info *c);
#ifdef CONFIG_JFFS2_FS_NAND
/* wbuf.c */
int jffs2_flush_wbuf_pad(struct jffs2_sb_info *c);
int jffs2_check_nand_cleanmarker(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
+int jffs2_nand_read_failcnt(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
#endif
+/* compr_zlib.c */
+int jffs2_zlib_init(void);
+void jffs2_zlib_exit(void);
+
#endif /* __JFFS2_NODELIST_H__ */
*
* For licensing information, see the file 'LICENCE' in this directory.
*
- * $Id: nodemgmt.c,v 1.107 2003/11/26 15:30:58 dwmw2 Exp $
+ * $Id: nodemgmt.c,v 1.102 2003/10/08 17:21:19 dwmw2 Exp $
*
*/
if (list_empty(&c->free_list)) {
+ DECLARE_WAITQUEUE(wait, current);
+
if (!c->nr_erasing_blocks &&
!list_empty(&c->erasable_list)) {
struct jffs2_eraseblock *ejeb;
list_empty(&c->erasing_list)?"yes":"no", list_empty(&c->erase_pending_list)?"yes":"no");
return -ENOSPC;
}
-
+ /* Make sure this can't deadlock. Someone has to start the erases
+ of erase_pending blocks */
+#ifdef __ECOS
+ /* In eCos, we don't have a handy kernel thread doing the erases for
+ us. We do them ourselves right now. */
+ jffs2_erase_pending_blocks(c);
+#else
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&c->erase_wait, &wait);
+ D1(printk(KERN_DEBUG "Waiting for erases to complete. erasing_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n",
+ c->nr_erasing_blocks, list_empty(&c->erasable_list)?"yes":"no",
+ list_empty(&c->erasing_list)?"yes":"no", list_empty(&c->erase_pending_list)?"yes":"no"));
+ if (!list_empty(&c->erase_pending_list)) {
+ D1(printk(KERN_DEBUG "Triggering pending erases\n"));
+ jffs2_erase_pending_trigger(c);
+ }
spin_unlock(&c->erase_completion_lock);
- /* Don't wait for it; just erase one right now */
- jffs2_erase_pending_blocks(c, 1);
+ schedule();
+ remove_wait_queue(&c->erase_wait, &wait);
spin_lock(&c->erase_completion_lock);
-
+ if (signal_pending(current)) {
+ return -EINTR;
+ }
+#endif
/* An erase may have failed, decreasing the
amount of free space available. So we must
restart from the beginning */
int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *new)
{
struct jffs2_eraseblock *jeb;
- uint32_t len;
+ uint32_t len = new->totlen;
jeb = &c->blocks[new->flash_offset / c->sector_size];
- len = ref_totlen(c, jeb, new);
-
D1(printk(KERN_DEBUG "jffs2_add_physical_node_ref(): Node at 0x%x(%d), size 0x%x\n", ref_offset(new), ref_flags(new), len));
#if 1
if (jeb != c->nextblock || (ref_offset(new)) != jeb->offset + (c->sector_size - jeb->free_size)) {
spin_lock(&c->erase_completion_lock);
if (ref_flags(ref) == REF_UNCHECKED) {
- D1(if (unlikely(jeb->unchecked_size < ref_totlen(c, jeb, ref))) {
+ D1(if (unlikely(jeb->unchecked_size < ref->totlen)) {
printk(KERN_NOTICE "raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n",
- ref_totlen(c, jeb, ref), blocknr, ref->flash_offset, jeb->used_size);
+ ref->totlen, blocknr, ref->flash_offset, jeb->used_size);
BUG();
})
- D1(printk(KERN_DEBUG "Obsoleting previously unchecked node at 0x%08x of len %x: ", ref_offset(ref), ref_totlen(c, jeb, ref)));
- jeb->unchecked_size -= ref_totlen(c, jeb, ref);
- c->unchecked_size -= ref_totlen(c, jeb, ref);
+ D1(printk(KERN_DEBUG "Obsoleting previously unchecked node at 0x%08x of len %x: ", ref_offset(ref), ref->totlen));
+ jeb->unchecked_size -= ref->totlen;
+ c->unchecked_size -= ref->totlen;
} else {
- D1(if (unlikely(jeb->used_size < ref_totlen(c, jeb, ref))) {
+ D1(if (unlikely(jeb->used_size < ref->totlen)) {
printk(KERN_NOTICE "raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
- ref_totlen(c, jeb, ref), blocknr, ref->flash_offset, jeb->used_size);
+ ref->totlen, blocknr, ref->flash_offset, jeb->used_size);
BUG();
})
- D1(printk(KERN_DEBUG "Obsoleting node at 0x%08x of len %x: ", ref_offset(ref), ref_totlen(c, jeb, ref)));
- jeb->used_size -= ref_totlen(c, jeb, ref);
- c->used_size -= ref_totlen(c, jeb, ref);
+ D1(printk(KERN_DEBUG "Obsoleting node at 0x%08x of len %x: ", ref_offset(ref), ref->totlen));
+ jeb->used_size -= ref->totlen;
+ c->used_size -= ref->totlen;
}
// Take care, that wasted size is taken into concern
- if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + ref_totlen(c, jeb, ref))) && jeb != c->nextblock) {
+ if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + ref->totlen)) && jeb != c->nextblock) {
D1(printk("Dirtying\n"));
- addedsize = ref_totlen(c, jeb, ref);
- jeb->dirty_size += ref_totlen(c, jeb, ref);
- c->dirty_size += ref_totlen(c, jeb, ref);
+ addedsize = ref->totlen;
+ jeb->dirty_size += ref->totlen;
+ c->dirty_size += ref->totlen;
/* Convert wasted space to dirty, if not a bad block */
if (jeb->wasted_size) {
} else {
D1(printk("Wasting\n"));
addedsize = 0;
- jeb->wasted_size += ref_totlen(c, jeb, ref);
- c->wasted_size += ref_totlen(c, jeb, ref);
+ jeb->wasted_size += ref->totlen;
+ c->wasted_size += ref->totlen;
}
ref->flash_offset = ref_offset(ref) | REF_OBSOLETE;
if (jffs2_wbuf_dirty(c)) {
D1(printk(KERN_DEBUG "...and adding to erasable_pending_wbuf_list\n"));
list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list);
+#if 0 /* This check was added to allow us to find places where we added nodes to the lists
+ after dropping the alloc_sem, and it did that just fine. But it also caused us to
+ lock the alloc_sem in other places, like clear_inode(), when we wouldn't otherwise
+ have needed to. So I suspect it's outlived its usefulness. Thomas? */
+
+ /* We've changed the rules slightly. After
+ writing a node you now mustn't drop the
+ alloc_sem before you've finished all the
+ list management - this is so that when we
+ get here, we know that no other nodes have
+ been written, and the above check on wbuf
+ is valid - wbuf_len is nonzero IFF the node
+ which obsoletes this node is still in the
+ wbuf.
+
+ So we BUG() if that new rule is broken, to
+ make sure we catch it and fix it.
+ */
+ if (!down_trylock(&c->alloc_sem)) {
+ up(&c->alloc_sem);
+ printk(KERN_CRIT "jffs2_mark_node_obsolete() called with wbuf active but alloc_sem not locked!\n");
+ BUG();
+ }
+#endif
} else {
if (jiffies & 127) {
/* Most of the time, we just erase it immediately. Otherwise we
printk(KERN_WARNING "Short read from obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
return;
}
- if (PAD(je32_to_cpu(n.totlen)) != PAD(ref_totlen(c, jeb, ref))) {
- printk(KERN_WARNING "Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n", je32_to_cpu(n.totlen), ref_totlen(c, jeb, ref));
+ if (PAD(je32_to_cpu(n.totlen)) != PAD(ref->totlen)) {
+ printk(KERN_WARNING "Node totlen on flash (0x%08x) != totlen in node ref (0x%08x)\n", je32_to_cpu(n.totlen), ref->totlen);
return;
}
if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) {
- D1(printk(KERN_DEBUG "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n", ref_offset(ref), je16_to_cpu(n.nodetype)));
+ D1(printk(KERN_DEBUG "Node at 0x%08x was already marked obsolete (nodetype 0x%04x\n", ref_offset(ref), je16_to_cpu(n.nodetype)));
return;
}
/* XXX FIXME: This is ugly now */
}
}
#endif /* CONFIG_JFFS2_FS_DEBUG */
-
-int jffs2_thread_should_wake(struct jffs2_sb_info *c)
-{
- int ret = 0;
- uint32_t dirty;
-
- if (c->unchecked_size) {
- D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n",
- c->unchecked_size, c->checked_ino));
- return 1;
- }
-
- /* dirty_size contains blocks on erase_pending_list
- * those blocks are counted in c->nr_erasing_blocks.
- * If one block is actually erased, it is not longer counted as dirty_space
- * but it is counted in c->nr_erasing_blocks, so we add it and subtract it
- * with c->nr_erasing_blocks * c->sector_size again.
- * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks
- * This helps us to force gc and pick eventually a clean block to spread the load.
- */
- dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size;
-
- if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger &&
- (dirty > c->nospc_dirty_size))
- ret = 1;
-
- D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x: %s\n",
- c->nr_free_blocks, c->nr_erasing_blocks, c->dirty_size, ret?"yes":"no"));
-
- return ret;
-}
*
* For licensing information, see the file 'LICENCE' in this directory.
*
- * $Id: os-linux.h,v 1.47 2004/07/14 13:20:23 dwmw2 Exp $
+ * $Id: os-linux.h,v 1.37 2003/10/11 11:47:23 dwmw2 Exp $
*
*/
#define kstatfs statfs
#endif
-struct kstatfs;
-struct kvec;
-
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,2)
#define JFFS2_INODE_INFO(i) (list_entry(i, struct jffs2_inode_info, vfs_inode))
#define OFNI_EDONI_2SFFJ(f) (&(f)->vfs_inode)
#define JFFS2_F_I_ATIME(f) (OFNI_EDONI_2SFFJ(f)->i_atime)
#endif
+/* Hmmm. P'raps generic code should only ever see versions of signal
+ functions which do the locking automatically? */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,40) && !defined(__rh_config_h__)
+#define current_sig_lock current->sigmask_lock
+#else
+#define current_sig_lock current->sighand->siglock
+#endif
+
#define sleep_on_spinunlock(wq, s) \
do { \
DECLARE_WAITQUEUE(__wait, current); \
#define jffs2_flash_read(c, ofs, len, retlen, buf) ((c)->mtd->read((c)->mtd, ofs, len, retlen, buf))
#define jffs2_flush_wbuf_pad(c) ({ (void)(c), 0; })
#define jffs2_flush_wbuf_gc(c, i) ({ (void)(c), (void) i, 0; })
-#define jffs2_write_nand_badblock(c,jeb,bad_offset) (1)
+#define jffs2_nand_read_failcnt(c,jeb) do { ; } while(0)
+#define jffs2_write_nand_badblock(c,jeb) do { ; } while(0)
#define jffs2_nand_flash_setup(c) (0)
#define jffs2_nand_flash_cleanup(c) do {} while(0)
#define jffs2_wbuf_dirty(c) (0)
#define jffs2_flash_write_oob(c, ofs, len, retlen, buf) ((c)->mtd->write_oob((c)->mtd, ofs, len, retlen, buf))
#define jffs2_flash_read_oob(c, ofs, len, retlen, buf) ((c)->mtd->read_oob((c)->mtd, ofs, len, retlen, buf))
#define jffs2_wbuf_dirty(c) (!!(c)->wbuf_len)
+struct kstatfs;
+struct kvec;
/* wbuf.c */
int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *vecs, unsigned long count, loff_t to, size_t *retlen, uint32_t ino);
int jffs2_flash_write(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, const u_char *buf);
int jffs2_check_oob_empty(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,int mode);
int jffs2_check_nand_cleanmarker(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
-int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset);
+int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
void jffs2_wbuf_timeout(unsigned long data);
void jffs2_wbuf_process(void *data);
int jffs2_nand_flash_setup(struct jffs2_sb_info *c);
void jffs2_nand_flash_cleanup(struct jffs2_sb_info *c);
#endif /* NAND */
-/* erase.c */
-static inline void jffs2_erase_pending_trigger(struct jffs2_sb_info *c)
-{
- OFNI_BS_2SFFJ(c)->s_dirt = 1;
-}
-
/* background.c */
int jffs2_start_garbage_collect_thread(struct jffs2_sb_info *c);
void jffs2_stop_garbage_collect_thread(struct jffs2_sb_info *c);
void jffs2_write_super (struct super_block *);
int jffs2_remount_fs (struct super_block *, int *, char *);
int jffs2_do_fill_super(struct super_block *sb, void *data, int silent);
-void jffs2_gc_release_inode(struct jffs2_sb_info *c,
- struct jffs2_inode_info *f);
-struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c,
- int inum, int nlink);
-
-unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c,
- struct jffs2_inode_info *f,
- unsigned long offset,
- unsigned long *priv);
-void jffs2_gc_release_page(struct jffs2_sb_info *c,
- unsigned char *pg,
- unsigned long *priv);
-int jffs2_flash_setup(struct jffs2_sb_info *c);
-void jffs2_flash_cleanup(struct jffs2_sb_info *c);
-
/* writev.c */
int jffs2_flash_direct_writev(struct jffs2_sb_info *c, const struct kvec *vecs,
unsigned long count, loff_t to, size_t *retlen);
+/* super.c */
+
#endif /* __JFFS2_OS_LINUX_H__ */
*
* For licensing information, see the file 'LICENCE' in this directory.
*
- * $Id: read.c,v 1.36 2004/05/25 11:12:32 havasi Exp $
+ * $Id: read.c,v 1.34 2003/10/04 08:33:06 dwmw2 Exp $
*
*/
#include <linux/mtd/mtd.h>
#include <linux/compiler.h>
#include "nodelist.h"
-#include "compr.h"
-int jffs2_read_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
- struct jffs2_full_dnode *fd, unsigned char *buf,
- int ofs, int len)
+int jffs2_read_dnode(struct jffs2_sb_info *c, struct jffs2_full_dnode *fd, unsigned char *buf, int ofs, int len)
{
struct jffs2_raw_inode *ri;
size_t readlen;
if (ri->compr != JFFS2_COMPR_NONE) {
D2(printk(KERN_DEBUG "Decompress %d bytes from %p to %d bytes at %p\n",
je32_to_cpu(ri->csize), readbuf, je32_to_cpu(ri->dsize), decomprbuf));
- ret = jffs2_decompress(c, f, ri->compr | (ri->usercompr << 8), readbuf, decomprbuf, je32_to_cpu(ri->csize), je32_to_cpu(ri->dsize));
+ ret = jffs2_decompress(ri->compr, readbuf, decomprbuf, je32_to_cpu(ri->csize), je32_to_cpu(ri->dsize));
if (ret) {
printk(KERN_WARNING "Error: jffs2_decompress returned %d\n", ret);
goto out_decomprbuf;
D1(printk(KERN_DEBUG "Reading %d-%d from node at 0x%08x (%d)\n",
frag->ofs+fragofs, frag->ofs+fragofs+readlen,
ref_offset(frag->node->raw), ref_flags(frag->node->raw)));
- ret = jffs2_read_dnode(c, f, frag->node, buf, fragofs + frag->ofs - frag->node->ofs, readlen);
+ ret = jffs2_read_dnode(c, frag->node, buf, fragofs + frag->ofs - frag->node->ofs, readlen);
D2(printk(KERN_DEBUG "node read done\n"));
if (ret) {
D1(printk(KERN_DEBUG"jffs2_read_inode_range error %d\n",ret));
}
buf[f->metadata->size]=0;
- ret = jffs2_read_dnode(c, f, f->metadata, buf, 0, f->metadata->size);
+ ret = jffs2_read_dnode(c, f->metadata, buf, 0, f->metadata->size);
up(&f->sem);
*
* For licensing information, see the file 'LICENCE' in this directory.
*
- * $Id: readinode.c,v 1.113 2003/11/03 13:20:33 dwmw2 Exp $
+ * $Id: readinode.c,v 1.107 2003/10/04 08:33:06 dwmw2 Exp $
*
*/
printk(KERN_DEBUG "metadata at 0x%08x\n", ref_offset(f->metadata->raw));
}
}
-
-static int jffs2_sanitycheck_fragtree(struct jffs2_inode_info *f)
-{
- struct jffs2_node_frag *frag;
- int bitched = 0;
-
- for (frag = frag_first(&f->fragtree); frag; frag = frag_next(frag)) {
-
- struct jffs2_full_dnode *fn = frag->node;
- if (!fn || !fn->raw)
- continue;
-
- if (ref_flags(fn->raw) == REF_PRISTINE) {
-
- if (fn->frags > 1) {
- printk(KERN_WARNING "REF_PRISTINE node at 0x%08x had %d frags. Tell dwmw2\n", ref_offset(fn->raw), fn->frags);
- bitched = 1;
- }
- /* A hole node which isn't multi-page should be garbage-collected
- and merged anyway, so we just check for the frag size here,
- rather than mucking around with actually reading the node
- and checking the compression type, which is the real way
- to tell a hole node. */
- if (frag->ofs & (PAGE_CACHE_SIZE-1) && frag_prev(frag) && frag_prev(frag)->size < PAGE_CACHE_SIZE && frag_prev(frag)->node) {
- printk(KERN_WARNING "REF_PRISTINE node at 0x%08x had a previous non-hole frag in the same page. Tell dwmw2\n",
- ref_offset(fn->raw));
- bitched = 1;
- }
-
- if ((frag->ofs+frag->size) & (PAGE_CACHE_SIZE-1) && frag_next(frag) && frag_next(frag)->size < PAGE_CACHE_SIZE && frag_next(frag)->node) {
- printk(KERN_WARNING "REF_PRISTINE node at 0x%08x (%08x-%08x) had a following non-hole frag in the same page. Tell dwmw2\n",
- ref_offset(fn->raw), frag->ofs, frag->ofs+frag->size);
- bitched = 1;
- }
- }
- }
-
- if (bitched) {
- struct jffs2_node_frag *thisfrag;
-
- printk(KERN_WARNING "Inode is #%u\n", f->inocache->ino);
- thisfrag = frag_first(&f->fragtree);
- while (thisfrag) {
- if (!thisfrag->node) {
- printk("Frag @0x%x-0x%x; node-less hole\n",
- thisfrag->ofs, thisfrag->size + thisfrag->ofs);
- } else if (!thisfrag->node->raw) {
- printk("Frag @0x%x-0x%x; raw-less hole\n",
- thisfrag->ofs, thisfrag->size + thisfrag->ofs);
- } else {
- printk("Frag @0x%x-0x%x; raw at 0x%08x(%d) (0x%x-0x%x)\n",
- thisfrag->ofs, thisfrag->size + thisfrag->ofs,
- ref_offset(thisfrag->node->raw), ref_flags(thisfrag->node->raw),
- thisfrag->node->ofs, thisfrag->node->ofs+thisfrag->node->size);
- }
- thisfrag = frag_next(thisfrag);
- }
- }
- return bitched;
-}
#endif /* D1 */
static void jffs2_obsolete_node_frag(struct jffs2_sb_info *c, struct jffs2_node_frag *this)
mark_ref_normal(next->node->raw);
}
}
- D2(if (jffs2_sanitycheck_fragtree(f)) {
- printk(KERN_WARNING "Just added node %04x-%04x @0x%08x on flash, newfrag *%p\n",
- fn->ofs, fn->ofs+fn->size, ref_offset(fn->raw), newfrag);
- return 0;
- })
D2(jffs2_print_frag_list(f));
return 0;
}
}
}
spin_unlock(&c->inocache_lock);
-
if (!f->inocache && ino == 1) {
/* Special case - no root inode on medium */
f->inocache = jffs2_alloc_inode_cache();
fn = tn->fn;
if (f->metadata) {
- if (likely(tn->version >= mdata_ver)) {
+ if (tn->version > mdata_ver) {
D1(printk(KERN_DEBUG "Obsoleting old metadata at 0x%08x\n", ref_offset(f->metadata->raw)));
jffs2_mark_node_obsolete(c, f->metadata->raw);
jffs2_free_full_dnode(f->metadata);
mdata_ver = 0;
} else {
- /* This should never happen. */
- printk(KERN_WARNING "Er. New metadata at 0x%08x with ver %d is actually older than previous ver %d at 0x%08x\n",
- ref_offset(fn->raw), tn->version, mdata_ver, ref_offset(f->metadata->raw));
+ D1(printk(KERN_DEBUG "Er. New metadata at 0x%08x with ver %d is actually older than previous %d\n",
+ ref_offset(f->metadata->raw), tn->version, mdata_ver));
jffs2_mark_node_obsolete(c, fn->raw);
jffs2_free_full_dnode(fn);
- /* Fill in latest_node from the metadata, not this one we're about to free... */
- fn = f->metadata;
goto next_tn;
}
}
tn_list = tn->next;
jffs2_free_tmp_dnode_info(tn);
}
- D1(jffs2_sanitycheck_fragtree(f));
-
if (!fn) {
/* No data nodes for this inode. */
if (f->inocache->ino != 1) {
void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f)
{
struct jffs2_full_dirent *fd, *fds;
- int deleted;
+ /* I don't think we care about the potential race due to reading this
+ without f->sem. It can never get undeleted. */
+ int deleted = f->inocache && !f->inocache->nlink;
+
+ /* If it's a deleted inode, grab the alloc_sem. This prevents
+ jffs2_garbage_collect_pass() from deciding that it wants to
+ garbage collect one of the nodes we're just about to mark
+ obsolete -- by the time we drop alloc_sem and return, all
+ the nodes are marked obsolete, and jffs2_g_c_pass() won't
+ call iget() for the inode in question.
+
+ We also used to do this to keep the temporary BUG() in
+ jffs2_mark_node_obsolete() from triggering.
+ */
+ if(deleted)
+ down(&c->alloc_sem);
down(&f->sem);
- deleted = f->inocache && !f->inocache->nlink;
if (f->metadata) {
if (deleted)
jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
up(&f->sem);
+
+ if(deleted)
+ up(&c->alloc_sem);
}
*
* For licensing information, see the file 'LICENCE' in this directory.
*
- * $Id: scan.c,v 1.110 2004/06/17 17:15:31 gleixner Exp $
+ * $Id: scan.c,v 1.104 2003/10/11 14:52:48 dwmw2 Exp $
*
*/
#include <linux/kernel.h>
uint32_t hdr_crc, buf_ofs, buf_len;
int err;
int noise = 0;
+ int wasempty = 0;
+ uint32_t empty_start = 0;
#ifdef CONFIG_JFFS2_FS_NAND
int cleanmarkerfound = 0;
#endif
switch (ret) {
case 0: return cleanmarkerfound ? BLK_STATE_CLEANMARKER : BLK_STATE_ALLFF;
case 1: return BLK_STATE_ALLDIRTY;
+ case 2: return BLK_STATE_BADBLOCK; /* case 2/3 are paranoia checks */
+ case 3: return BLK_STATE_ALLDIRTY; /* Block has failed to erase min. once */
default: return ret;
}
}
noise = 10;
-scan_more:
while(ofs < jeb->offset + c->sector_size) {
D1(ACCT_PARANOIA_CHECK(jeb));
node = (struct jffs2_unknown_node *)&buf[ofs-buf_ofs];
if (*(uint32_t *)(&buf[ofs-buf_ofs]) == 0xffffffff) {
- uint32_t inbuf_ofs;
- uint32_t empty_start;
+ uint32_t inbuf_ofs = ofs - buf_ofs + 4;
+ uint32_t scanend;
empty_start = ofs;
ofs += 4;
+ /* If scanning empty space after only a cleanmarker, don't
+ bother scanning the whole block */
+ if (unlikely(empty_start == jeb->offset + c->cleanmarker_size &&
+ jeb->offset + EMPTY_SCAN_SIZE < buf_ofs + buf_len))
+ scanend = jeb->offset + EMPTY_SCAN_SIZE - buf_ofs;
+ else
+ scanend = buf_len;
+
D1(printk(KERN_DEBUG "Found empty flash at 0x%08x\n", ofs));
- more_empty:
- inbuf_ofs = ofs - buf_ofs;
- while (inbuf_ofs < buf_len) {
- if (*(uint32_t *)(&buf[inbuf_ofs]) != 0xffffffff) {
- printk(KERN_WARNING "Empty flash at 0x%08x ends at 0x%08x\n",
- empty_start, ofs);
- DIRTY_SPACE(ofs-empty_start);
- goto scan_more;
- }
+ while (inbuf_ofs < scanend) {
+ if (*(uint32_t *)(&buf[inbuf_ofs]) != 0xffffffff)
+ goto emptyends;
inbuf_ofs+=4;
ofs += 4;
}
/* Ran off end. */
- D1(printk(KERN_DEBUG "Empty flash to end of buffer at 0x%08x\n", ofs));
+ D1(printk(KERN_DEBUG "Empty flash ends normally at 0x%08x\n", ofs));
- /* If we're only checking the beginning of a block with a cleanmarker,
- bail now */
- if (buf_ofs == jeb->offset && jeb->used_size == PAD(c->cleanmarker_size) &&
- c->cleanmarker_size && !jeb->dirty_size && !jeb->first_node->next_in_ino) {
- D1(printk(KERN_DEBUG "%d bytes at start of block seems clean... assuming all clean\n", EMPTY_SCAN_SIZE));
+ if (buf_ofs == jeb->offset && jeb->used_size == PAD(c->cleanmarker_size) &&
+ c->cleanmarker_size && !jeb->first_node->next_in_ino && !jeb->dirty_size)
return BLK_STATE_CLEANMARKER;
- }
-
- /* See how much more there is to read in this eraseblock... */
- buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
- if (!buf_len) {
- /* No more to read. Break out of main loop without marking
- this range of empty space as dirty (because it's not) */
- D1(printk(KERN_DEBUG "Empty flash at %08x runs to end of block. Treating as free_space\n",
- empty_start));
- break;
- }
- D1(printk(KERN_DEBUG "Reading another 0x%x at 0x%08x\n", buf_len, ofs));
- err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
- if (err)
- return err;
- buf_ofs = ofs;
- goto more_empty;
+ wasempty = 1;
+ continue;
+ } else if (wasempty) {
+ emptyends:
+ printk(KERN_WARNING "Empty flash at 0x%08x ends at 0x%08x\n", empty_start, ofs);
+ DIRTY_SPACE(ofs-empty_start);
+ wasempty = 0;
+ continue;
}
if (ofs == jeb->offset && je16_to_cpu(node->magic) == KSAMTIB_CIGAM_2SFFJ) {
marker_ref->next_in_ino = NULL;
marker_ref->next_phys = NULL;
marker_ref->flash_offset = ofs | REF_NORMAL;
- marker_ref->__totlen = c->cleanmarker_size;
+ marker_ref->totlen = c->cleanmarker_size;
jeb->first_node = jeb->last_node = marker_ref;
USED_SPACE(PAD(c->cleanmarker_size));
}
if ((jeb->used_size + jeb->unchecked_size) == PAD(c->cleanmarker_size) && !jeb->dirty_size
- && (!jeb->first_node || !jeb->first_node->next_in_ino) )
+ && (!jeb->first_node || jeb->first_node->next_in_ino) )
return BLK_STATE_CLEANMARKER;
/* move blocks with max 4 byte dirty space to cleanlist */
if (ic)
return ic;
- if (ino > c->highest_ino)
- c->highest_ino = ino;
-
ic = jffs2_alloc_inode_cache();
if (!ic) {
printk(KERN_NOTICE "jffs2_scan_make_inode_cache(): allocation of inode cache failed\n");
ic->nodes = (void *)ic;
jffs2_add_ino_cache(c, ic);
if (ino == 1)
- ic->nlink = 1;
+ ic->nlink=1;
return ic;
}
/* Wheee. It worked */
raw->flash_offset = ofs | REF_UNCHECKED;
- raw->__totlen = PAD(je32_to_cpu(ri->totlen));
+ raw->totlen = PAD(je32_to_cpu(ri->totlen));
raw->next_phys = NULL;
raw->next_in_ino = ic->nodes;
return -ENOMEM;
}
- raw->__totlen = PAD(je32_to_cpu(rd->totlen));
+ raw->totlen = PAD(je32_to_cpu(rd->totlen));
raw->flash_offset = ofs | REF_PRISTINE;
raw->next_phys = NULL;
raw->next_in_ino = ic->nodes;
*
* For licensing information, see the file 'LICENCE' in this directory.
*
- * $Id: super.c,v 1.97 2004/07/16 15:17:57 dwmw2 Exp $
+ * $Id: super.c,v 1.90 2003/10/11 11:47:23 dwmw2 Exp $
*
*/
#include <linux/mtd/mtd.h>
#include <linux/ctype.h>
#include <linux/namei.h>
-#include "compr.h"
#include "nodelist.h"
static void jffs2_put_super(struct super_block *);
jffs2_free_ino_caches(c);
jffs2_free_raw_node_refs(c);
kfree(c->blocks);
- jffs2_flash_cleanup(c);
+ jffs2_nand_flash_cleanup(c);
kfree(c->inocache_list);
if (c->mtd->sync)
c->mtd->sync(c->mtd);
int ret;
printk(KERN_INFO "JFFS2 version 2.2."
-#ifdef CONFIG_JFFS2_FS_NAND
+#ifdef CONFIG_FS_JFFS2_NAND
" (NAND)"
#endif
" (C) 2001-2003 Red Hat, Inc.\n");
printk(KERN_ERR "JFFS2 error: Failed to initialise inode cache\n");
return -ENOMEM;
}
- ret = jffs2_compressors_init();
+ ret = jffs2_zlib_init();
if (ret) {
- printk(KERN_ERR "JFFS2 error: Failed to initialise compressors\n");
+ printk(KERN_ERR "JFFS2 error: Failed to initialise zlib workspaces\n");
goto out;
}
ret = jffs2_create_slab_caches();
if (ret) {
printk(KERN_ERR "JFFS2 error: Failed to initialise slab caches\n");
- goto out_compressors;
+ goto out_zlib;
}
ret = register_filesystem(&jffs2_fs_type);
if (ret) {
out_slab:
jffs2_destroy_slab_caches();
- out_compressors:
- jffs2_compressors_exit();
+ out_zlib:
+ jffs2_zlib_exit();
out:
return ret;
}
{
unregister_filesystem(&jffs2_fs_type);
jffs2_destroy_slab_caches();
- jffs2_compressors_exit();
+ jffs2_zlib_exit();
kmem_cache_destroy(jffs2_inode_cachep);
}
*
* For licensing information, see the file 'LICENCE' in this directory.
*
- * $Id: symlink.c,v 1.13 2004/07/13 08:59:04 dwmw2 Exp $
+ * $Id: symlink.c,v 1.12 2003/10/04 08:33:07 dwmw2 Exp $
*
*/
* JFFS2 -- Journalling Flash File System, Version 2.
*
* Copyright (C) 2001-2003 Red Hat, Inc.
- * Copyright (C) 2004 Thomas Gleixner <tglx@linutronix.de>
*
* Created by David Woodhouse <dwmw2@redhat.com>
- * Modified debugged and enhanced by Thomas Gleixner <tglx@linutronix.de>
*
* For licensing information, see the file 'LICENCE' in this directory.
*
- * $Id: wbuf.c,v 1.70 2004/07/13 08:58:25 dwmw2 Exp $
+ * $Id: wbuf.c,v 1.53 2003/10/11 11:46:09 dwmw2 Exp $
*
*/
#endif
/* max. erase failures before we mark a block bad */
-#define MAX_ERASE_FAILURES 2
+#define MAX_ERASE_FAILURES 5
/* two seconds timeout for timed wbuf-flushing */
#define WBUF_FLUSH_TIMEOUT 2 * HZ
first_raw = &jeb->first_node;
while (*first_raw &&
(ref_obsolete(*first_raw) ||
- (ref_offset(*first_raw)+ref_totlen(c, jeb, *first_raw)) < c->wbuf_ofs)) {
+ (ref_offset(*first_raw) + (*first_raw)->totlen) < c->wbuf_ofs)) {
D1(printk(KERN_DEBUG "Skipping node at 0x%08x(%d)-0x%08x which is either before 0x%08x or obsolete\n",
ref_offset(*first_raw), ref_flags(*first_raw),
- (ref_offset(*first_raw) + ref_totlen(c, jeb, *first_raw)),
+ (ref_offset(*first_raw) + (*first_raw)->totlen),
c->wbuf_ofs));
first_raw = &(*first_raw)->next_phys;
}
}
start = ref_offset(*first_raw);
- end = ref_offset(*first_raw) + ref_totlen(c, jeb, *first_raw);
+ end = ref_offset(*first_raw) + (*first_raw)->totlen;
/* Find the last node to be recovered */
raw = first_raw;
while ((*raw)) {
if (!ref_obsolete(*raw))
- end = ref_offset(*raw) + ref_totlen(c, jeb, *raw);
+ end = ref_offset(*raw) + (*raw)->totlen;
raw = &(*raw)->next_phys;
}
return;
raw2->flash_offset = ofs | REF_OBSOLETE;
- raw2->__totlen = ref_totlen(c, jeb, *first_raw);
+ raw2->totlen = (*first_raw)->totlen;
raw2->next_phys = NULL;
raw2->next_in_ino = NULL;
raw = first_raw;
while (*raw) {
- uint32_t rawlen = ref_totlen(c, jeb, *raw);
-
D1(printk(KERN_DEBUG "Refiling block of %08x at %08x(%d) to %08x\n",
- rawlen, ref_offset(*raw), ref_flags(*raw), ofs));
+ (*raw)->totlen, ref_offset(*raw), ref_flags(*raw), ofs));
if (ref_obsolete(*raw)) {
/* Shouldn't really happen much */
- new_jeb->dirty_size += rawlen;
- new_jeb->free_size -= rawlen;
- c->dirty_size += rawlen;
+ new_jeb->dirty_size += (*raw)->totlen;
+ new_jeb->free_size -= (*raw)->totlen;
+ c->dirty_size += (*raw)->totlen;
} else {
- new_jeb->used_size += rawlen;
- new_jeb->free_size -= rawlen;
- jeb->dirty_size += rawlen;
- jeb->used_size -= rawlen;
- c->dirty_size += rawlen;
+ new_jeb->used_size += (*raw)->totlen;
+ new_jeb->free_size -= (*raw)->totlen;
+ jeb->dirty_size += (*raw)->totlen;
+ jeb->used_size -= (*raw)->totlen;
+ c->dirty_size += (*raw)->totlen;
}
- c->free_size -= rawlen;
+ c->free_size -= (*raw)->totlen;
(*raw)->flash_offset = ofs | ref_flags(*raw);
- ofs += rawlen;
+ ofs += (*raw)->totlen;
new_jeb->last_node = *raw;
raw = &(*raw)->next_phys;
padnode->nodetype = cpu_to_je16(JFFS2_NODETYPE_PADDING);
padnode->totlen = cpu_to_je32(c->wbuf_pagesize - c->wbuf_len);
padnode->hdr_crc = cpu_to_je32(crc32(0, padnode, sizeof(*padnode)-4));
- } else {
- /* Pad with JFFS2_DIRTY_BITMASK */
- memset(c->wbuf + c->wbuf_len, 0, c->wbuf_pagesize - c->wbuf_len);
}
}
/* else jffs2_flash_writev has actually filled in the rest of the
return ret;
}
- spin_lock(&c->erase_completion_lock);
-
- /* Adjust free size of the block if we padded. */
- if (pad) {
- struct jffs2_eraseblock *jeb;
-
- jeb = &c->blocks[c->wbuf_ofs / c->sector_size];
-
- D1(printk(KERN_DEBUG "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n",
- (jeb==c->nextblock)?"next":"", jeb->offset));
-
+ /* Adjust the free size of the next block only if called from fsync! */
+ if (pad == 2) {
+ D1(printk(KERN_DEBUG "jffs2_flush_wbuf() adjusting free_size of c->nextblock\n"));
+ spin_lock(&c->erase_completion_lock);
+ if (!c->nextblock)
+ BUG();
/* wbuf_pagesize - wbuf_len is the amount of space that's to be
padded. If there is less free space in the block than that,
something screwed up */
- if (jeb->free_size < (c->wbuf_pagesize - c->wbuf_len)) {
+ if (c->nextblock->free_size < (c->wbuf_pagesize - c->wbuf_len)) {
printk(KERN_CRIT "jffs2_flush_wbuf(): Accounting error. wbuf at 0x%08x has 0x%03x bytes, 0x%03x left.\n",
c->wbuf_ofs, c->wbuf_len, c->wbuf_pagesize-c->wbuf_len);
printk(KERN_CRIT "jffs2_flush_wbuf(): But free_size for block at 0x%08x is only 0x%08x\n",
- jeb->offset, jeb->free_size);
+ c->nextblock->offset, c->nextblock->free_size);
BUG();
}
- jeb->free_size -= (c->wbuf_pagesize - c->wbuf_len);
+ c->nextblock->free_size -= (c->wbuf_pagesize - c->wbuf_len);
c->free_size -= (c->wbuf_pagesize - c->wbuf_len);
- jeb->wasted_size += (c->wbuf_pagesize - c->wbuf_len);
+ c->nextblock->wasted_size += (c->wbuf_pagesize - c->wbuf_len);
c->wasted_size += (c->wbuf_pagesize - c->wbuf_len);
+ spin_unlock(&c->erase_completion_lock);
}
/* Stick any now-obsoleted blocks on the erase_pending_list */
+ spin_lock(&c->erase_completion_lock);
jffs2_refile_wbuf_blocks(c);
jffs2_clear_wbuf_ino_list(c);
spin_unlock(&c->erase_completion_lock);
old_wbuf_ofs = c->wbuf_ofs;
old_wbuf_len = c->wbuf_len;
- if (c->unchecked_size) {
- /* GC won't make any progress for a while */
- D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() padding. Not finished checking\n"));
- ret = __jffs2_flush_wbuf(c, 2);
- } else while (old_wbuf_len &&
- old_wbuf_ofs == c->wbuf_ofs) {
+ while (old_wbuf_len &&
+ old_wbuf_ofs == c->wbuf_ofs) {
up(&c->alloc_sem);
size_t retlen;
int oob_size;
- /* allocate a buffer for all oob data in this sector */
oob_size = c->mtd->oobsize;
+
+ /* allocate a buffer for all oob data in this sector */
len = 4 * oob_size;
buf = kmalloc(len, GFP_KERNEL);
if (!buf) {
goto out;
}
- /* Special check for first page */
- for(i = 0; i < oob_size ; i++) {
- /* Yeah, we know about the cleanmarker. */
- if (mode && i >= c->fsdata_pos &&
- i < c->fsdata_pos + c->fsdata_len)
- continue;
-
- if (buf[i] != 0xFF) {
- D2(printk(KERN_DEBUG "Found %02x at %x in OOB for %08x\n",
- buf[page+i], page+i, jeb->offset));
- ret = 1;
+ /* Special check for first two pages */
+ for (page = 0; page < 2 * oob_size; page += oob_size) {
+ /* Check for bad block marker */
+ if (buf[page+c->badblock_pos] != 0xff) {
+ D1(printk(KERN_WARNING "jffs2_check_oob_empty(): Bad or failed block at %08x\n",jeb->offset));
+ /* Return 2 for bad and 3 for failed block
+ bad goes to list_bad and failed to list_erase */
+ ret = (!page) ? 2 : 3;
goto out;
}
- }
+ for(i = 0; i < oob_size ; i++) {
+ /* Yeah, we know about the cleanmarker. */
+ if (mode && i >= c->fsdata_pos &&
+ i < c->fsdata_pos+c->fsdata_len)
+ continue;
+
+ if (buf[page+i] != 0xFF) {
+ D2(printk(KERN_DEBUG "Found %02x at %x in OOB for %08x\n",
+ buf[page+i], page+i, jeb->offset));
+ ret = 1;
+ goto out;
+ }
+ }
+ /* Only the first page can contain a cleanmarker! */
+ mode = 0;
+ }
/* we know, we are aligned :) */
- for (page = oob_size; page < len; page += sizeof(long)) {
+ for (; page < len; page += sizeof(long)) {
unsigned long dat = *(unsigned long *)(&buf[page]);
if(dat != -1) {
ret = 1;
int jffs2_check_nand_cleanmarker (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
struct jffs2_unknown_node n;
- unsigned char buf[2 * NAND_MAX_OOBSIZE];
+ unsigned char buf[32];
unsigned char *p;
int ret, i, cnt, retval = 0;
size_t retlen, offset;
/* Loop through the physical blocks */
for (cnt = 0; cnt < (c->sector_size / c->mtd->erasesize); cnt++) {
- /* Check first if the block is bad. */
- if (c->mtd->block_isbad (c->mtd, offset)) {
- D1 (printk (KERN_WARNING "jffs2_check_nand_cleanmarker(): Bad block at %08x\n", jeb->offset));
- return 2;
- }
/*
* We read oob data from page 0 and 1 of the block.
* page 0 contains cleanmarker and badblock info
return -EIO;
}
+ /* Check for bad block marker */
+ if (buf[c->badblock_pos] != 0xff) {
+ D1 (printk (KERN_WARNING "jffs2_check_nand_cleanmarker(): Bad block at %08x (has %02x %02x in badblock_pos %d\n",
+ jeb->offset, buf[c->badblock_pos], buf[c->badblock_pos + oob_size], c->badblock_pos));
+ return 2;
+ }
+
+ /* Check for failure counter in the second page */
+ if (buf[c->badblock_pos + oob_size] != 0xff) {
+ D1 (printk (KERN_WARNING "jffs2_check_nand_cleanmarker(): Block marked as failed at %08x, fail count:%d\n", jeb->offset, buf[c->badblock_pos + oob_size]));
+ return 3;
+ }
+
/* Check cleanmarker only on the first physical block */
if (!cnt) {
n.magic = cpu_to_je16 (JFFS2_MAGIC_BITMASK);
}
/*
- * On NAND we try to mark this block bad. If the block was erased more
- * than MAX_ERASE_FAILURES we mark it finaly bad.
+ * We try to get the failure count of this block.
+ */
+int jffs2_nand_read_failcnt(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) {
+
+ unsigned char buf[16];
+ int ret;
+ size_t retlen;
+ int oob_size;
+
+ oob_size = c->mtd->oobsize;
+
+ ret = c->mtd->read_oob(c->mtd, jeb->offset + c->mtd->oobblock, oob_size , &retlen, buf);
+
+ if (ret) {
+ D1(printk(KERN_WARNING "jffs2_nand_read_failcnt(): Read OOB failed %d for block at %08x\n", ret, jeb->offset));
+ return ret;
+ }
+
+ if (retlen < oob_size) {
+ D1(printk(KERN_WARNING "jffs2_nand_read_failcnt(): Read OOB return short read (%zd bytes not %d) for block at %08x\n", retlen, oob_size, jeb->offset));
+ return -EIO;
+ }
+
+ jeb->bad_count = buf[c->badblock_pos];
+ return 0;
+}
+
+/*
+ * On NAND we try to mark this block bad. We try to write how often
+ * the block was erased and mark it finally bad, if the count
+ * is > MAX_ERASE_FAILURES. We read this information on mount!
+ * jeb->bad_count contains the count before this erase.
* Don't care about failures. This block remains on the erase-pending
* or badblock list as long as nobody manipulates the flash with
* a bootloader or something like that.
*/
-int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset)
+int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
+ unsigned char buf = 0x0;
int ret;
+ size_t retlen;
/* if the count is < max, we try to write the counter to the 2nd page oob area */
- if( ++jeb->bad_count < MAX_ERASE_FAILURES)
- return 0;
-
- if (!c->mtd->block_markbad)
- return 1; // What else can we do?
-
- D1(printk(KERN_WARNING "jffs2_write_nand_badblock(): Marking bad block at %08x\n", bad_offset));
- ret = c->mtd->block_markbad(c->mtd, bad_offset);
+ if( ++jeb->bad_count < MAX_ERASE_FAILURES) {
+ buf = (unsigned char)jeb->bad_count;
+ c->badblock_pos += c->mtd->oobblock;
+ }
+
+ ret = jffs2_flash_write_oob(c, jeb->offset + c->badblock_pos, 1, &retlen, &buf);
if (ret) {
D1(printk(KERN_WARNING "jffs2_write_nand_badblock(): Write failed for block at %08x: error %d\n", jeb->offset, ret));
return ret;
}
- return 1;
+ if (retlen != 1) {
+ D1(printk(KERN_WARNING "jffs2_write_nand_badblock(): Short write for block at %08x: %zd not 1\n", jeb->offset, retlen));
+ return ret;
+ }
+ return 0;
}
+#define JFFS2_OOB_ECCPOS0 0
+#define JFFS2_OOB_ECCPOS1 1
+#define JFFS2_OOB_ECCPOS2 2
+#define JFFS2_OOB_ECCPOS3 3
+#define JFFS2_OOB_ECCPOS4 6
+#define JFFS2_OOB_ECCPOS5 7
+
+#define NAND_JFFS2_OOB8_FSDAPOS 6
+#define NAND_JFFS2_OOB16_FSDAPOS 8
+#define NAND_JFFS2_OOB8_FSDALEN 2
#define NAND_JFFS2_OOB16_FSDALEN 8
+static struct nand_oobinfo jffs2_oobinfo_swecc = {
+ .useecc = 1,
+ .eccpos = {JFFS2_OOB_ECCPOS0, JFFS2_OOB_ECCPOS1, JFFS2_OOB_ECCPOS2,
+ JFFS2_OOB_ECCPOS3, JFFS2_OOB_ECCPOS4, JFFS2_OOB_ECCPOS5}
+};
+
static struct nand_oobinfo jffs2_oobinfo_docecc = {
- .useecc = MTD_NANDECC_PLACE,
- .eccbytes = 6,
+ .useecc = 1,
.eccpos = {0,1,2,3,4,5}
};
-int jffs2_nand_set_oobinfo(struct jffs2_sb_info *c)
-{
- struct nand_oobinfo *oinfo = &c->mtd->oobinfo;
-
- /* Do this only, if we have an oob buffer */
- if (!c->mtd->oobsize)
- return 0;
-
- /* Cleanmarker is out-of-band, so inline size zero */
- c->cleanmarker_size = 0;
-
- /* Should we use autoplacement ? */
- if (oinfo && oinfo->useecc == MTD_NANDECC_AUTOPLACE) {
- D1(printk(KERN_DEBUG "JFFS2 using autoplace on NAND\n"));
- /* Get the position of the free bytes */
- if (!oinfo->oobfree[0][0]) {
- printk (KERN_WARNING "jffs2_nand_set_oobinfo(): Eeep. Autoplacement selected and no empty space in oob\n");
- return -ENOSPC;
- }
- c->fsdata_pos = oinfo->oobfree[0][0];
- c->fsdata_len = oinfo->oobfree[0][1];
- if (c->fsdata_len > 8)
- c->fsdata_len = 8;
- } else {
- /* This is just a legacy fallback and should go away soon */
- switch(c->mtd->ecctype) {
- case MTD_ECC_RS_DiskOnChip:
- printk(KERN_WARNING "JFFS2 using DiskOnChip hardware ECC without autoplacement. Fix it!\n");
- c->oobinfo = &jffs2_oobinfo_docecc;
- c->fsdata_pos = 6;
- c->fsdata_len = NAND_JFFS2_OOB16_FSDALEN;
- c->badblock_pos = 15;
- break;
-
- default:
- D1(printk(KERN_DEBUG "JFFS2 on NAND. No autoplacment info found\n"));
- return -EINVAL;
- }
- }
- return 0;
-}
int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
{
- int res;
+ /* Cleanmarker is out-of-band, so inline size zero */
+ c->cleanmarker_size = 0;
/* Initialise write buffer */
c->wbuf_pagesize = c->mtd->oobblock;
c->wbuf_ofs = 0xFFFFFFFF;
-
+ /* FIXME: If we had a generic way of describing the hardware's
+ use of OOB area, we could perhaps make this generic too. */
+ switch(c->mtd->ecctype) {
+ case MTD_ECC_SW:
+ D1(printk(KERN_DEBUG "JFFS2 using software ECC\n"));
+ c->oobinfo = &jffs2_oobinfo_swecc;
+ if (c->mtd->oobsize == 8) {
+ c->fsdata_pos = NAND_JFFS2_OOB8_FSDAPOS;
+ c->fsdata_len = NAND_JFFS2_OOB8_FSDALEN;
+ } else {
+ c->fsdata_pos = NAND_JFFS2_OOB16_FSDAPOS;
+ c->fsdata_len = NAND_JFFS2_OOB16_FSDALEN;
+ }
+ c->badblock_pos = NAND_BADBLOCK_POS;
+ break;
+
+ case MTD_ECC_RS_DiskOnChip:
+ D1(printk(KERN_DEBUG "JFFS2 using DiskOnChip hardware ECC\n"));
+ c->oobinfo = &jffs2_oobinfo_docecc;
+ c->fsdata_pos = 6;
+ c->fsdata_len = NAND_JFFS2_OOB16_FSDALEN;
+ c->badblock_pos = 15;
+ break;
+
+ default:
+ printk("JFFS2 doesn't yet know how to handle ECC type %d\n",
+ c->mtd->ecctype);
+ return -EINVAL;
+ }
+
c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
if (!c->wbuf)
return -ENOMEM;
- res = jffs2_nand_set_oobinfo(c);
-
#ifdef BREAKME
if (!brokenbuf)
brokenbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
}
memset(brokenbuf, 0xdb, c->wbuf_pagesize);
#endif
- return res;
+ return 0;
}
void jffs2_nand_flash_cleanup(struct jffs2_sb_info *c)
*
* For licensing information, see the file 'LICENCE' in this directory.
*
- * $Id: write.c,v 1.85 2004/07/13 08:58:25 dwmw2 Exp $
+ * $Id: write.c,v 1.75 2003/10/08 11:45:11 dwmw2 Exp $
*
*/
#include <linux/pagemap.h>
#include <linux/mtd/mtd.h>
#include "nodelist.h"
-#include "compr.h"
int jffs2_do_new_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, uint32_t mode, struct jffs2_raw_inode *ri)
memset(ic, 0, sizeof(*ic));
+ init_MUTEX_LOCKED(&f->sem);
f->inocache = ic;
f->inocache->nlink = 1;
f->inocache->nodes = (struct jffs2_raw_node_ref *)f->inocache;
fn->raw = raw;
raw->flash_offset = flash_ofs;
- raw->__totlen = PAD(sizeof(*ri)+datalen);
+ raw->totlen = PAD(sizeof(*ri)+datalen);
raw->next_phys = NULL;
ret = jffs2_flash_writev(c, vecs, cnt, flash_ofs, &retlen,
fd->raw = raw;
raw->flash_offset = flash_ofs;
- raw->__totlen = PAD(sizeof(*rd)+namelen);
+ raw->totlen = PAD(sizeof(*rd)+namelen);
raw->next_phys = NULL;
ret = jffs2_flash_writev(c, vecs, 2, flash_ofs, &retlen,
- (alloc_mode==ALLOC_GC)?0:je32_to_cpu(rd->pino));
+ (alloc_mode==ALLOC_GC)?0:fd->ino);
if (ret || (retlen != sizeof(*rd) + namelen)) {
printk(KERN_NOTICE "Write of %zd bytes at 0x%08x failed. returned %d, retlen %zd\n",
sizeof(*rd)+namelen, flash_ofs, ret, retlen);
while(writelen) {
struct jffs2_full_dnode *fn;
unsigned char *comprbuf = NULL;
- uint16_t comprtype = JFFS2_COMPR_NONE;
+ unsigned char comprtype = JFFS2_COMPR_NONE;
uint32_t phys_ofs, alloclen;
uint32_t datalen, cdatalen;
int retried = 0;
break;
}
down(&f->sem);
- datalen = min_t(uint32_t, writelen, PAGE_CACHE_SIZE - (offset & (PAGE_CACHE_SIZE-1)));
- cdatalen = min_t(uint32_t, alloclen - sizeof(*ri), datalen);
+ datalen = writelen;
+ cdatalen = min_t(uint32_t, alloclen - sizeof(*ri), writelen);
- comprtype = jffs2_compress(c, f, buf, &comprbuf, &datalen, &cdatalen);
+ comprbuf = kmalloc(cdatalen, GFP_KERNEL);
+ if (comprbuf) {
+ comprtype = jffs2_compress(buf, comprbuf, &datalen, &cdatalen);
+ }
+ if (comprtype == JFFS2_COMPR_NONE) {
+ /* Either compression failed, or the allocation of comprbuf failed */
+ if (comprbuf)
+ kfree(comprbuf);
+ comprbuf = buf;
+ datalen = cdatalen;
+ }
+ /* Now comprbuf points to the data to be written, be it compressed or not.
+ comprtype holds the compression type, and comprtype == JFFS2_COMPR_NONE means
+ that the comprbuf doesn't need to be kfree()d.
+ */
ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
ri->nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
ri->offset = cpu_to_je32(offset);
ri->csize = cpu_to_je32(cdatalen);
ri->dsize = cpu_to_je32(datalen);
- ri->compr = comprtype & 0xff;
- ri->usercompr = (comprtype >> 8 ) & 0xff;
+ ri->compr = comprtype;
ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
ri->data_crc = cpu_to_je32(crc32(0, comprbuf, cdatalen));
fn = jffs2_write_dnode(c, f, ri, comprbuf, cdatalen, phys_ofs, ALLOC_NORETRY);
- jffs2_free_comprbuf(comprbuf, buf);
+ if (comprtype != JFFS2_COMPR_NONE)
+ kfree(comprbuf);
if (IS_ERR(fn)) {
ret = PTR_ERR(fn);
uint32_t alloclen, phys_ofs;
int ret;
- if (1 /* alternative branch needs testing */ ||
- !jffs2_can_mark_obsolete(c)) {
- /* We can't mark stuff obsolete on the medium. We need to write a deletion dirent */
-
- rd = jffs2_alloc_raw_dirent();
- if (!rd)
- return -ENOMEM;
-
- ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, ALLOC_DELETION);
- if (ret) {
- jffs2_free_raw_dirent(rd);
- return ret;
- }
-
- down(&dir_f->sem);
+ rd = jffs2_alloc_raw_dirent();
+ if (!rd)
+ return -ENOMEM;
- /* Build a deletion node */
- rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
- rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT);
- rd->totlen = cpu_to_je32(sizeof(*rd) + namelen);
- rd->hdr_crc = cpu_to_je32(crc32(0, rd, sizeof(struct jffs2_unknown_node)-4));
-
- rd->pino = cpu_to_je32(dir_f->inocache->ino);
- rd->version = cpu_to_je32(++dir_f->highest_version);
- rd->ino = cpu_to_je32(0);
- rd->mctime = cpu_to_je32(get_seconds());
- rd->nsize = namelen;
- rd->type = DT_UNKNOWN;
- rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8));
- rd->name_crc = cpu_to_je32(crc32(0, name, namelen));
-
- fd = jffs2_write_dirent(c, dir_f, rd, name, namelen, phys_ofs, ALLOC_DELETION);
-
+ ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, ALLOC_DELETION);
+ if (ret) {
jffs2_free_raw_dirent(rd);
+ return ret;
+ }
- if (IS_ERR(fd)) {
- jffs2_complete_reservation(c);
- up(&dir_f->sem);
- return PTR_ERR(fd);
- }
-
- /* File it. This will mark the old one obsolete. */
- jffs2_add_fd_to_list(c, fd, &dir_f->dents);
- up(&dir_f->sem);
- } else {
- struct jffs2_full_dirent **prev = &dir_f->dents;
- uint32_t nhash = full_name_hash(name, namelen);
+ down(&dir_f->sem);
- down(&dir_f->sem);
+ /* Build a deletion node */
+ rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
+ rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT);
+ rd->totlen = cpu_to_je32(sizeof(*rd) + namelen);
+ rd->hdr_crc = cpu_to_je32(crc32(0, rd, sizeof(struct jffs2_unknown_node)-4));
- while ((*prev) && (*prev)->nhash <= nhash) {
- if ((*prev)->nhash == nhash &&
- !memcmp((*prev)->name, name, namelen) &&
- !(*prev)->name[namelen]) {
- struct jffs2_full_dirent *this = *prev;
+ rd->pino = cpu_to_je32(dir_f->inocache->ino);
+ rd->version = cpu_to_je32(++dir_f->highest_version);
+ rd->ino = cpu_to_je32(0);
+ rd->mctime = cpu_to_je32(get_seconds());
+ rd->nsize = namelen;
+ rd->type = DT_UNKNOWN;
+ rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8));
+ rd->name_crc = cpu_to_je32(crc32(0, name, namelen));
- D1(printk(KERN_DEBUG "Marking old dirent node (ino #%u) @%08x obsolete\n",
- this->ino, ref_offset(this->raw)));
+ fd = jffs2_write_dirent(c, dir_f, rd, name, namelen, phys_ofs, ALLOC_DELETION);
+
+ jffs2_free_raw_dirent(rd);
- *prev = this->next;
- jffs2_mark_node_obsolete(c, (this->raw));
- jffs2_free_full_dirent(this);
- break;
- }
- prev = &((*prev)->next);
- }
+ if (IS_ERR(fd)) {
+ jffs2_complete_reservation(c);
up(&dir_f->sem);
+ return PTR_ERR(fd);
}
+ /* File it. This will mark the old one obsolete. */
+ jffs2_add_fd_to_list(c, fd, &dir_f->dents);
+
+ up(&dir_f->sem);
+
/* dead_f is NULL if this was a rename not a real unlink */
/* Also catch the !f->inocache case, where there was a dirent
pointing to an inode which didn't exist. */
*
* For licensing information, see the file 'LICENCE' in this directory.
*
- * $Id: writev.c,v 1.5 2004/07/13 08:58:25 dwmw2 Exp $
+ * $Id: writev.c,v 1.4 2003/10/04 08:33:07 dwmw2 Exp $
*
*/
/*
* Nobody gets write access to a read-only fs.
*/
- if ((IS_RDONLY(inode) || (nd && MNT_IS_RDONLY(nd->mnt))) &&
+ if (IS_RDONLY(inode) &&
(S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)))
return -EROFS;
uid = le32_to_cpu(dip->di_uid);
gid = le32_to_cpu(dip->di_gid);
- ip->i_uid = INOXID_UID(XID_TAG(ip), uid, gid);
- ip->i_gid = INOXID_GID(XID_TAG(ip), uid, gid);
- ip->i_xid = INOXID_XID(XID_TAG(ip), uid, gid, 0);
+ ip->i_uid = INOXID_UID(uid, gid);
+ ip->i_gid = INOXID_GID(uid, gid);
+ ip->i_xid = INOXID_XID(uid, gid, 0);
ip->i_size = le64_to_cpu(dip->di_size);
ip->i_atime.tv_sec = le32_to_cpu(dip->di_atime.tv_sec);
dip->di_nblocks = cpu_to_le64(PBLK2LBLK(ip->i_sb, ip->i_blocks));
dip->di_nlink = cpu_to_le32(ip->i_nlink);
- uid = XIDINO_UID(XID_TAG(ip), ip->i_uid, ip->i_xid);
- gid = XIDINO_GID(XID_TAG(ip), ip->i_gid, ip->i_xid);
+ uid = XIDINO_UID(ip->i_uid, ip->i_xid);
+ gid = XIDINO_GID(ip->i_gid, ip->i_xid);
dip->di_uid = cpu_to_le32(uid);
dip->di_gid = cpu_to_le32(gid);
/*
#include <linux/pagemap.h>
#include <linux/mount.h>
#include <linux/vfs.h>
-#include <asm/uaccess.h>
int simple_getattr(struct vfsmount *mnt, struct dentry *dentry,
struct kstat *stat)
mntput(mnt);
}
-ssize_t simple_read_from_buffer(void __user *to, size_t count, loff_t *ppos,
- const void *from, size_t available)
-{
- loff_t pos = *ppos;
- if (pos < 0)
- return -EINVAL;
- if (pos >= available)
- return 0;
- if (count > available - pos)
- count = available - pos;
- if (copy_to_user(to, from + pos, count))
- return -EFAULT;
- *ppos = pos + count;
- return count;
-}
-
EXPORT_SYMBOL(dcache_dir_close);
EXPORT_SYMBOL(dcache_dir_lseek);
EXPORT_SYMBOL(dcache_dir_open);
EXPORT_SYMBOL(simple_statfs);
EXPORT_SYMBOL(simple_sync_file);
EXPORT_SYMBOL(simple_unlink);
-EXPORT_SYMBOL(simple_read_from_buffer);
*
* Initial implementation of mandatory locks. SunOS turned out to be
* a rotten model, so I implemented the "obvious" semantics.
- * See 'Documentation/mandatory.txt' for details.
+ * See 'linux/Documentation/mandatory.txt' for details.
* Andy Walker (andy@lysaker.kvaerner.no), April 06, 1996.
*
* Don't allow mandatory locks on mmap()'ed files. Added simple functions to
bh = bh->b_this_page;
} while (bh != head);
- /*
- * we cannot drop the bh if the page is not uptodate
- * or a concurrent readpage would fail to serialize with the bh
- * and it would read from disk before we reach the platter.
- */
- if (buffer_heads_over_limit && PageUptodate(page))
+ if (buffer_heads_over_limit)
try_to_free_buffers(page);
}
{
umode_t mode = inode->i_mode;
- /* Prevent vservers from escaping chroot() barriers */
- if (IS_BARRIER(inode) && !vx_check(0, VX_ADMIN))
+ if (IS_BARRIER(inode) && !vx_check(0, VX_ADMIN|VX_WATCH))
return -EACCES;
if (mask & MAY_WRITE) {
return -EACCES;
}
+static inline int xid_permission(struct inode *inode)
+{
+ if (inode->i_xid == 0)
+ return 0;
+ if (vx_check(inode->i_xid, VX_ADMIN|VX_WATCH|VX_IDENT))
+ return 0;
+ return -EACCES;
+}
+
int permission(struct inode * inode,int mask, struct nameidata *nd)
{
int retval;
int submask;
- umode_t mode = inode->i_mode;
/* Ordinary permission routines do not understand MAY_APPEND. */
submask = mask & ~MAY_APPEND;
- if (nd && (mask & MAY_WRITE) && MNT_IS_RDONLY(nd->mnt) &&
- (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)))
- return -EROFS;
-
+ if ((retval = xid_permission(inode)))
+ return retval;
if (inode->i_op && inode->i_op->permission)
retval = inode->i_op->permission(inode, submask, nd);
else
{
struct path next;
struct inode *inode;
- int err, atomic;
+ int err;
unsigned int lookup_flags = nd->flags;
-
- atomic = (lookup_flags & LOOKUP_ATOMIC);
-
+
while (*name=='/')
name++;
if (!*name)
if (err < 0)
break;
}
- err = -EWOULDBLOCKIO;
- if (atomic)
- break;
nd->flags |= LOOKUP_CONTINUE;
/* This does the actual lookups.. */
err = do_lookup(nd, &this, &next);
if (err < 0)
break;
}
- err = -EWOULDBLOCKIO;
- if (atomic)
- break;
err = do_lookup(nd, &this, &next);
if (err)
break;
return permission(dir,MAY_WRITE | MAY_EXEC, nd);
}
-static inline int mnt_may_create(struct vfsmount *mnt, struct inode *dir, struct dentry *child) {
- if (child->d_inode)
- return -EEXIST;
- if (IS_DEADDIR(dir))
- return -ENOENT;
- if (mnt->mnt_flags & MNT_RDONLY)
- return -EROFS;
- return 0;
-}
-
-static inline int mnt_may_unlink(struct vfsmount *mnt, struct inode *dir, struct dentry *child) {
- if (!child->d_inode)
- return -ENOENT;
- if (mnt->mnt_flags & MNT_RDONLY)
- return -EROFS;
- return 0;
-}
-
/*
* Special case: O_CREAT|O_EXCL implies O_NOFOLLOW for security
* reasons.
if (f & O_DIRECTORY)
retval |= LOOKUP_DIRECTORY;
- if (f & O_ATOMICLOOKUP)
- retval |= LOOKUP_ATOMIC;
return retval;
}
return -EACCES;
flag &= ~O_TRUNC;
- } else if ((IS_RDONLY(inode) || (nd && MNT_IS_RDONLY(nd->mnt)))
- && (flag & FMODE_WRITE))
+ } else if (IS_RDONLY(inode) && (flag & FMODE_WRITE))
return -EROFS;
/*
* An append-only file must be opened in append mode for writing.
struct dentry *lookup_create(struct nameidata *nd, int is_dir)
{
struct dentry *dentry;
- int error;
down(&nd->dentry->d_inode->i_sem);
- error = -EEXIST;
+ dentry = ERR_PTR(-EEXIST);
if (nd->last_type != LAST_NORM)
- goto out;
+ goto fail;
nd->flags &= ~LOOKUP_PARENT;
dentry = lookup_hash(&nd->last, nd->dentry);
if (IS_ERR(dentry))
- goto ret;
- error = mnt_may_create(nd->mnt, nd->dentry->d_inode, dentry);
- if (error)
goto fail;
- error = -ENOENT;
if (!is_dir && nd->last.name[nd->last.len] && !dentry->d_inode)
- goto fail;
-ret:
+ goto enoent;
return dentry;
-fail:
+enoent:
dput(dentry);
-out:
- return ERR_PTR(error);
+ dentry = ERR_PTR(-ENOENT);
+fail:
+ return dentry;
}
int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
dentry = lookup_hash(&nd.last, nd.dentry);
error = PTR_ERR(dentry);
if (!IS_ERR(dentry)) {
- error = mnt_may_unlink(nd.mnt, nd.dentry->d_inode, dentry);
- if (error)
- goto exit2;
error = vfs_rmdir(nd.dentry->d_inode, dentry);
- exit2:
dput(dentry);
}
up(&nd.dentry->d_inode->i_sem);
/* Why not before? Because we want correct error value */
if (nd.last.name[nd.last.len])
goto slashes;
- error = mnt_may_unlink(nd.mnt, nd.dentry->d_inode, dentry);
- if (error)
- goto exit2;
inode = dentry->d_inode;
if (inode)
atomic_inc(&inode->i_count);
error = path_lookup(to, LOOKUP_PARENT, &nd);
if (error)
goto out;
- /*
- * We allow hard-links to be created to a bind-mount as long
- * as the bind-mount is not read-only. Checking for cross-dev
- * links is subsumed by the superblock check in vfs_link().
- */
- error = -EROFS;
- if (MNT_IS_RDONLY(old_nd.mnt))
+ error = -EXDEV;
+ if (old_nd.mnt != nd.mnt)
goto out_release;
new_dentry = lookup_create(&nd, 0);
error = PTR_ERR(new_dentry);
error = -EINVAL;
if (old_dentry == trap)
goto exit4;
- error = -EROFS;
- if (MNT_IS_RDONLY(newnd.mnt))
- goto exit4;
new_dentry = lookup_hash(&newnd.last, new_dir);
error = PTR_ERR(new_dentry);
if (IS_ERR(new_dentry))
struct vfsmount *mnt = v;
int err = 0;
static struct proc_fs_info {
- int s_flag;
- int mnt_flag;
- char *set_str;
- char *unset_str;
+ int flag;
+ char *str;
} fs_info[] = {
- { MS_RDONLY, MNT_RDONLY, "ro", "rw" },
- { MS_SYNCHRONOUS, 0, ",sync", NULL },
- { MS_DIRSYNC, 0, ",dirsync", NULL },
- { MS_MANDLOCK, 0, ",mand", NULL },
- { MS_NOATIME, MNT_NOATIME, ",noatime", NULL },
- { MS_NODIRATIME, MNT_NODIRATIME, ",nodiratime", NULL },
- { MS_TAGXID, MS_TAGXID, ",tagxid", NULL },
- { 0, MNT_NOSUID, ",nosuid", NULL },
- { 0, MNT_NODEV, ",nodev", NULL },
- { 0, MNT_NOEXEC, ",noexec", NULL },
- { 0, 0, NULL, NULL }
+ { MS_SYNCHRONOUS, ",sync" },
+ { MS_DIRSYNC, ",dirsync" },
+ { MS_MANDLOCK, ",mand" },
+ { MS_NOATIME, ",noatime" },
+ { MS_NODIRATIME, ",nodiratime" },
+ { 0, NULL }
};
- struct proc_fs_info *p;
- unsigned long s_flags = mnt->mnt_sb->s_flags;
- int mnt_flags = mnt->mnt_flags;
+ static struct proc_fs_info mnt_info[] = {
+ { MNT_NOSUID, ",nosuid" },
+ { MNT_NODEV, ",nodev" },
+ { MNT_NOEXEC, ",noexec" },
+ { 0, NULL }
+ };
+ struct proc_fs_info *fs_infop;
if (vx_flags(VXF_HIDE_MOUNT, 0))
return 0;
seq_path(m, mnt, mnt->mnt_root, " \t\n\\");
seq_putc(m, ' ');
mangle(m, mnt->mnt_sb->s_type->name);
- seq_putc(m, ' ');
- for (p = fs_info; (p->s_flag | p->mnt_flag) ; p++) {
- if ((s_flags & p->s_flag) || (mnt_flags & p->mnt_flag)) {
- if (p->set_str)
- seq_puts(m, p->set_str);
- } else {
- if (p->unset_str)
- seq_puts(m, p->unset_str);
- }
+ seq_puts(m, mnt->mnt_sb->s_flags & MS_RDONLY ? " ro" : " rw");
+ for (fs_infop = fs_info; fs_infop->flag; fs_infop++) {
+ if (mnt->mnt_sb->s_flags & fs_infop->flag)
+ seq_puts(m, fs_infop->str);
+ }
+ for (fs_infop = mnt_info; fs_infop->flag; fs_infop++) {
+ if (mnt->mnt_flags & fs_infop->flag)
+ seq_puts(m, fs_infop->str);
}
if (mnt->mnt_sb->s_op->show_options)
err = mnt->mnt_sb->s_op->show_options(m, mnt);
/*
* do loopback mount.
*/
-static int do_loopback(struct nameidata *nd, char *old_name, unsigned long flags, int mnt_flags)
+static int do_loopback(struct nameidata *nd, char *old_name, int recurse)
{
struct nameidata old_nd;
struct vfsmount *mnt = NULL;
- int recurse = flags & MS_REC;
int err = mount_is_safe(nd);
-
if (err)
return err;
if (!old_name || !*old_name)
spin_unlock(&vfsmount_lock);
} else
mntput(mnt);
- mnt->mnt_flags = mnt_flags;
}
up_write(¤t->namespace->sem);
((char *)data_page)[PAGE_SIZE - 1] = 0;
/* Separate the per-mountpoint flags */
- if (flags & MS_RDONLY)
- mnt_flags |= MNT_RDONLY;
if (flags & MS_NOSUID)
mnt_flags |= MNT_NOSUID;
if (flags & MS_NODEV)
mnt_flags |= MNT_NODEV;
if (flags & MS_NOEXEC)
mnt_flags |= MNT_NOEXEC;
- if (flags & MS_NOATIME)
- mnt_flags |= MNT_NOATIME;
- if (flags & MS_NODIRATIME)
- mnt_flags |= MNT_NODIRATIME;
flags &= ~(MS_NOSUID|MS_NOEXEC|MS_NODEV|MS_ACTIVE);
if (vx_ccaps(VXC_SECURE_MOUNT))
retval = do_remount(&nd, flags & ~MS_REMOUNT, mnt_flags,
data_page);
else if (flags & MS_BIND)
- retval = do_loopback(&nd, dev_name, flags, mnt_flags);
+ retval = do_loopback(&nd, dev_name, flags & MS_REC);
else if (flags & MS_MOVE)
retval = do_move_mount(&nd, dev_name);
else
struct namespace *new_ns;
struct vfsmount *rootmnt = NULL, *pwdmnt = NULL, *altrootmnt = NULL;
struct fs_struct *fs = tsk->fs;
- struct vfsmount *p, *q;
if (!namespace)
return 0;
list_add_tail(&new_ns->list, &new_ns->root->mnt_list);
spin_unlock(&vfsmount_lock);
- /*
- * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
- * as belonging to new namespace. We have already acquired a private
- * fs_struct, so tsk->fs->lock is not needed.
- */
- p = namespace->root;
- q = new_ns->root;
- while (p) {
- q->mnt_namespace = new_ns;
- if (fs) {
+ /* Second pass: switch the tsk->fs->* elements */
+ if (fs) {
+ struct vfsmount *p, *q;
+ write_lock(&fs->lock);
+
+ p = namespace->root;
+ q = new_ns->root;
+ while (p) {
if (p == fs->rootmnt) {
rootmnt = p;
fs->rootmnt = mntget(q);
altrootmnt = p;
fs->altrootmnt = mntget(q);
}
+ p = next_mnt(p, namespace->root);
+ q = next_mnt(q, new_ns->root);
}
- p = next_mnt(p, namespace->root);
- q = next_mnt(q, new_ns->root);
+ write_unlock(&fs->lock);
}
up_write(&tsk->namespace->sem);
*ppos = pos;
- if (!IS_RDONLY(inode) || (file && MNT_IS_RDONLY(file->f_vfsmnt))) {
+ if (!IS_RDONLY(inode)) {
inode->i_atime = CURRENT_TIME;
}
#endif
}
if (!result)
- result = inode_setattr(inode, attr);
+ inode_setattr(inode, attr);
out:
unlock_kernel();
return result;
if (((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff
> (1U << (32 - PAGE_SHIFT)))
return -EFBIG;
- if (!IS_RDONLY(inode) || (file && MNT_IS_RDONLY(file->f_vfsmnt))) {
+ if (!IS_RDONLY(inode)) {
inode->i_atime = CURRENT_TIME;
}
#include "ncpsign_kernel.h"
-static int _recv(struct socket *sock, void *buf, int size, unsigned flags)
+static int _recv(struct socket *sock, unsigned char *ubuf, int size,
+ unsigned flags)
{
- struct msghdr msg = {NULL, };
- struct kvec iov = {buf, size};
- return kernel_recvmsg(sock, &msg, &iov, 1, size, flags);
-}
+ struct iovec iov;
+ struct msghdr msg;
-static inline int do_send(struct socket *sock, struct kvec *vec, int count,
- int len, unsigned flags)
-{
- struct msghdr msg = { .msg_flags = flags };
- return kernel_sendmsg(sock, &msg, vec, count, len);
+ iov.iov_base = ubuf;
+ iov.iov_len = size;
+
+ msg.msg_name = NULL;
+ msg.msg_namelen = 0;
+ msg.msg_control = NULL;
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+
+ return sock_recvmsg(sock, &msg, size, flags);
}
-static int _send(struct socket *sock, const void *buff, int len)
+static inline int _send(struct socket *sock, const void *buff, int len)
{
- struct kvec vec;
- vec.iov_base = (void *) buff;
- vec.iov_len = len;
- return do_send(sock, &vec, 1, len, 0);
+ struct iovec iov;
+ struct msghdr msg;
+
+ iov.iov_base = (void *) buff;
+ iov.iov_len = len;
+
+ msg.msg_name = NULL;
+ msg.msg_namelen = 0;
+ msg.msg_control = NULL;
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+ msg.msg_flags = 0;
+
+ return sock_sendmsg(sock, &msg, len);
}
struct ncp_request_reply {
size_t datalen;
int result;
enum { RQ_DONE, RQ_INPROGRESS, RQ_QUEUED, RQ_IDLE } status;
- struct kvec* tx_ciov;
+ struct iovec* tx_ciov;
size_t tx_totallen;
size_t tx_iovlen;
- struct kvec tx_iov[3];
+ struct iovec tx_iov[3];
u_int16_t tx_type;
u_int32_t sign[6];
};
-void ncp_tcp_data_ready(struct sock *sk, int len)
-{
+void ncp_tcp_data_ready(struct sock *sk, int len) {
struct ncp_server *server = sk->sk_user_data;
server->data_ready(sk, len);
schedule_work(&server->rcv.tq);
}
-void ncp_tcp_error_report(struct sock *sk)
-{
+void ncp_tcp_error_report(struct sock *sk) {
struct ncp_server *server = sk->sk_user_data;
server->error_report(sk);
schedule_work(&server->rcv.tq);
}
-void ncp_tcp_write_space(struct sock *sk)
-{
+void ncp_tcp_write_space(struct sock *sk) {
struct ncp_server *server = sk->sk_user_data;
/* We do not need any locking: we first set tx.creq, and then we do sendmsg,
not vice versa... */
server->write_space(sk);
- if (server->tx.creq)
+ if (server->tx.creq) {
schedule_work(&server->tx.tq);
+ }
}
-void ncpdgram_timeout_call(unsigned long v)
-{
+void ncpdgram_timeout_call(unsigned long v) {
struct ncp_server *server = (void*)v;
schedule_work(&server->timeout_tq);
}
-static inline void ncp_finish_request(struct ncp_request_reply *req, int result)
-{
+static inline void ncp_finish_request(struct ncp_request_reply *req, int result) {
req->result = result;
req->status = RQ_DONE;
wake_up_all(&req->wq);
}
-static void __abort_ncp_connection(struct ncp_server *server, struct ncp_request_reply *aborted, int err)
-{
+static void __abort_ncp_connection(struct ncp_server *server, struct ncp_request_reply *aborted, int err) {
struct ncp_request_reply *req;
ncp_invalidate_conn(server);
}
}
-static inline int get_conn_number(struct ncp_reply_header *rp)
-{
+static inline int get_conn_number(struct ncp_reply_header *rp) {
return rp->conn_low | (rp->conn_high << 8);
}
-static inline void __ncp_abort_request(struct ncp_server *server, struct ncp_request_reply *req, int err)
-{
+static inline void __ncp_abort_request(struct ncp_server *server, struct ncp_request_reply *req, int err) {
/* If req is done, we got signal, but we also received answer... */
switch (req->status) {
case RQ_IDLE:
}
}
-static inline void ncp_abort_request(struct ncp_server *server, struct ncp_request_reply *req, int err)
-{
+static inline void ncp_abort_request(struct ncp_server *server, struct ncp_request_reply *req, int err) {
down(&server->rcv.creq_sem);
__ncp_abort_request(server, req, err);
up(&server->rcv.creq_sem);
}
-static inline void __ncptcp_abort(struct ncp_server *server)
-{
+static inline void __ncptcp_abort(struct ncp_server *server) {
__abort_ncp_connection(server, NULL, 0);
}
-static int ncpdgram_send(struct socket *sock, struct ncp_request_reply *req)
-{
- struct kvec vec[3];
+static int ncpdgram_send(struct socket *sock, struct ncp_request_reply *req) {
+ struct msghdr msg;
+ struct iovec iov[3];
+
/* sock_sendmsg updates iov pointers for us :-( */
- memcpy(vec, req->tx_ciov, req->tx_iovlen * sizeof(vec[0]));
- return do_send(sock, vec, req->tx_iovlen,
- req->tx_totallen, MSG_DONTWAIT);
+ memcpy(iov, req->tx_ciov, req->tx_iovlen * sizeof(iov[0]));
+ msg.msg_name = NULL;
+ msg.msg_namelen = 0;
+ msg.msg_control = NULL;
+ msg.msg_iov = iov;
+ msg.msg_iovlen = req->tx_iovlen;
+ msg.msg_flags = MSG_DONTWAIT;
+ return sock_sendmsg(sock, &msg, req->tx_totallen);
}
-static void __ncptcp_try_send(struct ncp_server *server)
-{
+static void __ncptcp_try_send(struct ncp_server *server) {
struct ncp_request_reply *rq;
- struct kvec *iov;
- struct kvec iovc[3];
+ struct msghdr msg;
+ struct iovec* iov;
+ struct iovec iovc[3];
int result;
rq = server->tx.creq;
- if (!rq)
+ if (!rq) {
return;
+ }
/* sock_sendmsg updates iov pointers for us :-( */
memcpy(iovc, rq->tx_ciov, rq->tx_iovlen * sizeof(iov[0]));
- result = do_send(server->ncp_sock, iovc, rq->tx_iovlen,
- rq->tx_totallen, MSG_NOSIGNAL | MSG_DONTWAIT);
-
- if (result == -EAGAIN)
+ msg.msg_name = NULL;
+ msg.msg_namelen = 0;
+ msg.msg_control = NULL;
+ msg.msg_iov = iovc;
+ msg.msg_iovlen = rq->tx_iovlen;
+ msg.msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT;
+ result = sock_sendmsg(server->ncp_sock, &msg, rq->tx_totallen);
+ if (result == -EAGAIN) {
return;
-
+ }
if (result < 0) {
printk(KERN_ERR "ncpfs: tcp: Send failed: %d\n", result);
__ncp_abort_request(server, rq, result);
rq->tx_ciov = iov;
}
-static inline void ncp_init_header(struct ncp_server *server, struct ncp_request_reply *req, struct ncp_request_header *h)
-{
+static inline void ncp_init_header(struct ncp_server *server, struct ncp_request_reply *req, struct ncp_request_header *h) {
req->status = RQ_INPROGRESS;
h->conn_low = server->connection;
h->conn_high = server->connection >> 8;
h->sequence = ++server->sequence;
}
-static void ncpdgram_start_request(struct ncp_server *server, struct ncp_request_reply *req)
-{
+static void ncpdgram_start_request(struct ncp_server *server, struct ncp_request_reply *req) {
size_t signlen;
struct ncp_request_header* h;
#define NCP_TCP_XMIT_VERSION (1)
#define NCP_TCP_RCVD_MAGIC (0x744E6350)
-static void ncptcp_start_request(struct ncp_server *server, struct ncp_request_reply *req)
-{
+static void ncptcp_start_request(struct ncp_server *server, struct ncp_request_reply *req) {
size_t signlen;
struct ncp_request_header* h;
__ncptcp_try_send(server);
}
-static inline void __ncp_start_request(struct ncp_server *server, struct ncp_request_reply *req)
-{
+static inline void __ncp_start_request(struct ncp_server *server, struct ncp_request_reply *req) {
if (server->ncp_sock->type == SOCK_STREAM)
ncptcp_start_request(server, req);
else
ncpdgram_start_request(server, req);
}
-static int ncp_add_request(struct ncp_server *server, struct ncp_request_reply *req)
-{
+static int ncp_add_request(struct ncp_server *server, struct ncp_request_reply *req) {
down(&server->rcv.creq_sem);
if (!ncp_conn_valid(server)) {
up(&server->rcv.creq_sem);
return 0;
}
-static void __ncp_next_request(struct ncp_server *server)
-{
+static void __ncp_next_request(struct ncp_server *server) {
struct ncp_request_reply *req;
server->rcv.creq = NULL;
__ncp_start_request(server, req);
}
-static void info_server(struct ncp_server *server, unsigned int id, const void * data, size_t len)
-{
+static void info_server(struct ncp_server *server, unsigned int id, const void * data, size_t len) {
if (server->info_sock) {
- struct kvec iov[2];
+ struct iovec iov[2];
+ struct msghdr msg;
__u32 hdr[2];
hdr[0] = cpu_to_be32(len + 8);
iov[1].iov_base = (void *) data;
iov[1].iov_len = len;
- do_send(server->info_sock, iov, 2, len + 8, MSG_NOSIGNAL);
+ msg.msg_name = NULL;
+ msg.msg_namelen = 0;
+ msg.msg_control = NULL;
+ msg.msg_iov = iov;
+ msg.msg_iovlen = 2;
+ msg.msg_flags = MSG_NOSIGNAL;
+
+ sock_sendmsg(server->info_sock, &msg, len + 8);
}
}
-void ncpdgram_rcv_proc(void *s)
-{
+static void __ncpdgram_rcv_proc(void *s) {
struct ncp_server *server = s;
struct socket* sock;
struct ncp_reply_header reply;
int result;
- result = _recv(sock, &reply, sizeof(reply), MSG_PEEK | MSG_DONTWAIT);
+ result = _recv(sock, (void*)&reply, sizeof(reply), MSG_PEEK | MSG_DONTWAIT);
if (result < 0) {
break;
}
up(&server->rcv.creq_sem);
}
drop:;
- _recv(sock, &reply, sizeof(reply), MSG_DONTWAIT);
+ _recv(sock, (void*)&reply, sizeof(reply), MSG_DONTWAIT);
}
}
-static void __ncpdgram_timeout_proc(struct ncp_server *server)
-{
+void ncpdgram_rcv_proc(void *s) {
+ mm_segment_t fs;
+ struct ncp_server *server = s;
+
+ fs = get_fs();
+ set_fs(get_ds());
+ __ncpdgram_rcv_proc(server);
+ set_fs(fs);
+}
+
+static void __ncpdgram_timeout_proc(struct ncp_server *server) {
/* If timer is pending, we are processing another request... */
if (!timer_pending(&server->timeout_tm)) {
struct ncp_request_reply* req;
}
}
-void ncpdgram_timeout_proc(void *s)
-{
+void ncpdgram_timeout_proc(void *s) {
+ mm_segment_t fs;
struct ncp_server *server = s;
+
+ fs = get_fs();
+ set_fs(get_ds());
down(&server->rcv.creq_sem);
__ncpdgram_timeout_proc(server);
up(&server->rcv.creq_sem);
+ set_fs(fs);
}
-static inline void ncp_init_req(struct ncp_request_reply* req)
-{
+static inline void ncp_init_req(struct ncp_request_reply* req) {
init_waitqueue_head(&req->wq);
req->status = RQ_IDLE;
}
-static int do_tcp_rcv(struct ncp_server *server, void *buffer, size_t len)
-{
+static int do_tcp_rcv(struct ncp_server *server, void *buffer, size_t len) {
int result;
if (buffer) {
return result;
}
-static int __ncptcp_rcv_proc(struct ncp_server *server)
-{
+static int __ncptcp_rcv_proc(struct ncp_server *server) {
/* We have to check the result, so store the complete header */
while (1) {
int result;
}
}
-void ncp_tcp_rcv_proc(void *s)
-{
+void ncp_tcp_rcv_proc(void *s) {
+ mm_segment_t fs;
struct ncp_server *server = s;
+ fs = get_fs();
+ set_fs(get_ds());
down(&server->rcv.creq_sem);
__ncptcp_rcv_proc(server);
up(&server->rcv.creq_sem);
+ set_fs(fs);
+ return;
}
-void ncp_tcp_tx_proc(void *s)
-{
+void ncp_tcp_tx_proc(void *s) {
+ mm_segment_t fs;
struct ncp_server *server = s;
+ fs = get_fs();
+ set_fs(get_ds());
down(&server->rcv.creq_sem);
__ncptcp_try_send(server);
up(&server->rcv.creq_sem);
+ set_fs(fs);
+ return;
}
static int do_ncp_rpc_call(struct ncp_server *server, int size,
ncp_init_req(&req);
req.reply_buf = reply_buf;
req.datalen = max_reply_size;
- req.tx_iov[1].iov_base = server->packet;
+ req.tx_iov[1].iov_base = (void *) server->packet;
req.tx_iov[1].iov_len = size;
req.tx_iovlen = 1;
req.tx_totallen = size;
return -EIO;
}
{
+ mm_segment_t fs;
sigset_t old_set;
unsigned long mask, flags;
recalc_sigpending();
spin_unlock_irqrestore(¤t->sighand->siglock, flags);
+ fs = get_fs();
+ set_fs(get_ds());
+
result = do_ncp_rpc_call(server, size, reply, max_reply_size);
+ set_fs(fs);
+
spin_lock_irqsave(¤t->sighand->siglock, flags);
current->blocked = old_set;
recalc_sigpending();
if (nd->flags & LOOKUP_DIRECTORY)
return 0;
/* Are we trying to write to a read only partition? */
- if ((IS_RDONLY(dir) || (nd && MNT_IS_RDONLY(nd->mnt))) &&
- (nd->intent.open.flags & (O_CREAT|O_TRUNC|FMODE_WRITE)))
+ if (IS_RDONLY(dir) && (nd->intent.open.flags & (O_CREAT|O_TRUNC|FMODE_WRITE)))
return 0;
return 1;
}
* Nobody gets write access to a read-only fs.
*
*/
- if ((IS_RDONLY(inode) || (nd && MNT_IS_RDONLY(nd->mnt))) &&
+ if (IS_RDONLY(inode) &&
(S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)))
return -EROFS;
#define NFSDBG_FACILITY NFSDBG_FILE
+static long nfs_file_fcntl(int fd, unsigned int cmd,
+ unsigned long arg, struct file *filp);
static int nfs_file_open(struct inode *, struct file *);
static int nfs_file_release(struct inode *, struct file *);
static int nfs_file_mmap(struct file *, struct vm_area_struct *);
static ssize_t nfs_file_write(struct kiocb *, const char __user *, size_t, loff_t);
static int nfs_file_flush(struct file *);
static int nfs_fsync(struct file *, struct dentry *dentry, int datasync);
-static int nfs_check_flags(int flags);
struct file_operations nfs_file_operations = {
.llseek = remote_llseek,
.fsync = nfs_fsync,
.lock = nfs_lock,
.sendfile = nfs_file_sendfile,
- .check_flags = nfs_check_flags,
+ .fcntl = nfs_file_fcntl,
};
struct inode_operations nfs_file_inode_operations = {
# define IS_SWAPFILE(inode) (0)
#endif
-static int nfs_check_flags(int flags)
+#define nfs_invalid_flags (O_APPEND | O_DIRECT)
+
+/*
+ * Check for special cases that NFS doesn't support, and
+ * pass the rest to the generic fcntl function.
+ */
+static long
+nfs_file_fcntl(int fd, unsigned int cmd,
+ unsigned long arg, struct file *filp)
{
- if ((flags & (O_APPEND | O_DIRECT)) == (O_APPEND | O_DIRECT))
- return -EINVAL;
+ switch (cmd) {
+ case F_SETFL:
+ if ((filp->f_flags & nfs_invalid_flags) == nfs_invalid_flags)
+ return -EINVAL;
+ break;
+ default:
+ break;
+ }
- return 0;
+ return generic_file_fcntl(fd, cmd, arg, filp);
}
/*
{
struct nfs_server *server = NFS_SERVER(inode);
int (*open)(struct inode *, struct file *);
- int res;
+ int res = 0;
- res = nfs_check_flags(filp->f_flags);
- if (res)
- return res;
+ if ((filp->f_flags & nfs_invalid_flags) == nfs_invalid_flags)
+ return -EINVAL;
lock_kernel();
/* Do NFSv4 open() call */
}
static ssize_t
-nfs_file_sendfile(struct file *f
+nfs_file_sendfile(struct file *filp, loff_t *ppos, size_t count,
+ read_actor_t actor, void *target)
+{
+ struct dentry *dentry = filp->f_dentry;
+ struct inode *inode = dentry->d_inode;
+ ssize_t res;
+
+ dfprintk(VFS, "nfs: sendfile(%s/%s, %lu@%Lu)\n",
+ dentry->d_parent->d_name.name, dentry->d_name.name,
+ (unsigned long) count, (unsigned long long) *ppos);
+
+ res = nfs_revalidate_inode(NFS_SERVER(inode), inode);
+ if (!res)
+ res = generic_file_sendfile(filp, ppos, count, actor, target);
+ return res;
+}
+
+static int
+nfs_file_mmap(struct file * file, struct vm_area_struct * vma)
+{
+ struct dentry *dentry = file->f_dentry;
+ struct inode *inode = dentry->d_inode;
+ int status;
+
+ dfprintk(VFS, "nfs: mmap(%s/%s)\n",
+ dentry->d_parent->d_name.name, dentry->d_name.name);
+
+ status = nfs_revalidate_inode(NFS_SERVER(inode), inode);
+ if (!status)
+ status = generic_file_mmap(file, vma);
+ return status;
+}
+
+/*
+ * Flush any dirty pages for this process, and check for write errors.
+ * The return status from this call provides a reliable indication of
+ * whether any write errors occurred for this process.
+ */
+static int
+nfs_fsync(struct file *file, struct dentry *dentry, int datasync)
+{
+ struct inode *inode = dentry->d_inode;
+ int status;
+
+ dfprintk(VFS, "nfs: fsync(%s/%ld)\n", inode->i_sb->s_id, inode->i_ino);
+
+ lock_kernel();
+ status = nfs_wb_all(inode);
+ if (!status) {
+ status = file->f_error;
+ file->f_error = 0;
+ }
+ unlock_kernel();
+ return status;
+}
+
+/*
+ * This does the "real" work of the write. The generic routine has
+ * allocated the page, locked it, done all the page alignment stuff
+ * calculations etc. Now we should just copy the data from user
+ * space and write it back to the real medium..
+ *
+ * If the writer ends up delaying the write, the writer needs to
+ * increment the page use counts until he is done with the page.
+ */
+static int nfs_prepare_write(struct file *file, struct page *page, unsigned offset, unsigned to)
+{
+ return nfs_flush_incompatible(file, page);
+}
+
+static int nfs_commit_write(struct file *file, struct page *page, unsigned offset, unsigned to)
+{
+ long status;
+
+ lock_kernel();
+ status = nfs_updatepage(file, page, offset, to-offset);
+ unlock_kernel();
+ return status;
+}
+
+struct address_space_operations nfs_file_aops = {
+ .readpage = nfs_readpage,
+ .readpages = nfs_readpages,
+ .set_page_dirty = __set_page_dirty_nobuffers,
+ .writepage = nfs_writepage,
+ .writepages = nfs_writepages,
+ .prepare_write = nfs_prepare_write,
+ .commit_write = nfs_commit_write,
+#ifdef CONFIG_NFS_DIRECTIO
+ .direct_IO = nfs_direct_IO,
+#endif
+};
+
+/*
+ * Write to a file (through the page cache).
+ */
+static ssize_t
+nfs_file_write(struct kiocb *iocb, const char __user *buf, size_t count, loff_t pos)
+{
+ struct dentry * dentry = iocb->ki_filp->f_dentry;
+ struct inode * inode = dentry->d_inode;
+ ssize_t result;
+
+#ifdef CONFIG_NFS_DIRECTIO
+ if (iocb->ki_filp->f_flags & O_DIRECT)
+ return nfs_file_direct_write(iocb, buf, count, pos);
+#endif
+
+ dfprintk(VFS, "nfs: write(%s/%s(%ld), %lu@%lu)\n",
+ dentry->d_parent->d_name.name, dentry->d_name.name,
+ inode->i_ino, (unsigned long) count, (unsigned long) pos);
+
+ result = -EBUSY;
+ if (IS_SWAPFILE(inode))
+ goto out_swapfile;
+ result = nfs_revalidate_inode(NFS_SERVER(inode), inode);
+ if (result)
+ goto out;
+
+ result = count;
+ if (!count)
+ goto out;
+
+ result = generic_file_aio_write(iocb, buf, count, pos);
+out:
+ return result;
+
+out_swapfile:
+ printk(KERN_INFO "NFS: attempt to write to active swap file!\n");
+ goto out;
+}
+
+/*
+ * Lock a (portion of) a file
+ */
+int
+nfs_lock(struct file *filp, int cmd, struct file_lock *fl)
+{
+ struct inode * inode = filp->f_mapping->host;
+ int status = 0;
+ int status2;
+
+ dprintk("NFS: nfs_lock(f=%s/%ld, t=%x, fl=%x, r=%Ld:%Ld)\n",
+ inode->i_sb->s_id, inode->i_ino,
+ fl->fl_type, fl->fl_flags,
+ (long long)fl->fl_start, (long long)fl->fl_end);
+
+ if (!inode)
+ return -EINVAL;
+
+ /* No mandatory locks over NFS */
+ if ((inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
+ return -ENOLCK;
+
+ if (NFS_PROTO(inode)->version != 4) {
+ /* Fake OK code if mounted without NLM support */
+ if (NFS_SERVER(inode)->flags & NFS_MOUNT_NONLM) {
if (IS_GETLK(cmd))
status = LOCK_USE_CLNT;
goto out_ok;
#include <linux/mount.h>
#include <linux/nfs_idmap.h>
#include <linux/vfs.h>
-#include <linux/vserver/xid.h>
#include <asm/system.h>
#include <asm/uaccess.h>
}
server->backing_dev_info.ra_pages = server->rpages * NFS_MAX_READAHEAD;
- if (server->flags & NFS_MOUNT_TAGXID)
- sb->s_flags |= MS_TAGXID;
-
sb->s_maxbytes = fsinfo.maxfilesize;
if (sb->s_maxbytes > MAX_LFS_FILESIZE)
sb->s_maxbytes = MAX_LFS_FILESIZE;
clnt->cl_intr = (server->flags & NFS_MOUNT_INTR) ? 1 : 0;
clnt->cl_softrtry = (server->flags & NFS_MOUNT_SOFT) ? 1 : 0;
clnt->cl_droppriv = (server->flags & NFS_MOUNT_BROKEN_SUID) ? 1 : 0;
- clnt->cl_tagxid = (server->flags & NFS_MOUNT_TAGXID) ? 1 : 0;
clnt->cl_chatty = 1;
return clnt;
{ NFS_MOUNT_NOAC, ",noac", "" },
{ NFS_MOUNT_NONLM, ",nolock", ",lock" },
{ NFS_MOUNT_BROKEN_SUID, ",broken_suid", "" },
- { NFS_MOUNT_TAGXID, ",tagxid", "" },
{ 0, NULL, NULL }
};
struct proc_nfs_info *nfs_infop;
nfsi->change_attr = fattr->change_attr;
inode->i_size = nfs_size_to_loff_t(fattr->size);
inode->i_nlink = fattr->nlink;
- inode->i_uid = INOXID_UID(XID_TAG(inode), fattr->uid, fattr->gid);
- inode->i_gid = INOXID_GID(XID_TAG(inode), fattr->uid, fattr->gid);
- inode->i_xid = INOXID_XID(XID_TAG(inode), fattr->uid, fattr->gid, 0);
- /* maybe fattr->xid someday */
+ inode->i_uid = fattr->uid;
+ inode->i_gid = fattr->gid;
if (fattr->valid & (NFS_ATTR_FATTR_V3 | NFS_ATTR_FATTR_V4)) {
/*
* report the blocks in 512byte units
out:
return inode;
-/*
-fail_dlim:
- make_bad_inode(inode);
- iput(inode);
- inode = NULL;
-*/
+
out_no_inode:
printk("nfs_fhget: iget failed\n");
goto out;
}
-#define NFS_VALID_ATTRS (ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_XID|ATTR_SIZE|\
- ATTR_ATIME|ATTR_ATIME_SET|ATTR_MTIME|ATTR_MTIME_SET)
+#define NFS_VALID_ATTRS (ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_SIZE|ATTR_ATIME|ATTR_ATIME_SET|ATTR_MTIME|ATTR_MTIME_SET)
int
nfs_setattr(struct dentry *dentry, struct iattr *attr)
inode->i_uid = attr->ia_uid;
if ((attr->ia_valid & ATTR_GID) != 0)
inode->i_gid = attr->ia_gid;
- if ((attr->ia_valid & ATTR_XID) != 0)
- inode->i_xid = attr->ia_xid;
if ((attr->ia_valid & ATTR_SIZE) != 0) {
inode->i_size = attr->ia_size;
vmtruncate(inode, attr->ia_size);
struct nfs_inode *nfsi = NFS_I(inode);
loff_t cur_size, new_isize;
int data_unstable;
- uid_t uid;
- gid_t gid;
- xid_t xid = 0;
/* Are we in the process of updating data on the server? */
data_unstable = nfs_caches_unstable(inode);
} else if (S_ISREG(inode->i_mode) && new_isize > cur_size)
nfsi->flags |= NFS_INO_INVALID_ATTR;
- uid = INOXID_UID(XID_TAG(inode), fattr->uid, fattr->gid);
- gid = INOXID_GID(XID_TAG(inode), fattr->uid, fattr->gid);
- xid = INOXID_XID(XID_TAG(inode), fattr->uid, fattr->gid, 0);
-
/* Have any file permissions changed? */
if ((inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO)
- || inode->i_uid != uid
- || inode->i_gid != gid
- || inode->i_xid != xid)
+ || inode->i_uid != fattr->uid
+ || inode->i_gid != fattr->gid)
nfsi->flags |= NFS_INO_INVALID_ATTR;
/* Has the link count changed? */
unsigned int invalid = 0;
loff_t cur_isize;
int data_unstable;
- uid_t uid;
- gid_t gid;
- xid_t xid = 0;
dfprintk(VFS, "NFS: %s(%s/%ld ct=%d info=0x%x)\n",
__FUNCTION__, inode->i_sb->s_id, inode->i_ino,
memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime));
memcpy(&inode->i_atime, &fattr->atime, sizeof(inode->i_atime));
- uid = INOXID_UID(XID_TAG(inode), fattr->uid, fattr->gid);
- gid = INOXID_GID(XID_TAG(inode), fattr->uid, fattr->gid);
- xid = INOXID_XID(XID_TAG(inode), fattr->uid, fattr->gid, 0);
-
if ((inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO) ||
- inode->i_uid != uid ||
- inode->i_gid != gid ||
- inode->i_xid != xid) {
+ inode->i_uid != fattr->uid ||
+ inode->i_gid != fattr->gid) {
struct rpc_cred **cred = &NFS_I(inode)->cache_access.cred;
if (*cred) {
put_rpccred(*cred);
inode->i_mode = fattr->mode;
inode->i_nlink = fattr->nlink;
- inode->i_uid = uid;
- inode->i_gid = gid;
- inode->i_xid = xid;
+ inode->i_uid = fattr->uid;
+ inode->i_gid = fattr->gid;
if (fattr->valid & (NFS_ATTR_FATTR_V3 | NFS_ATTR_FATTR_V4)) {
/*
static int
nfs_xdr_readres(struct rpc_rqst *req, u32 *p, struct nfs_readres *res)
{
- struct kvec *iov = req->rq_rcv_buf.head;
+ struct iovec *iov = req->rq_rcv_buf.head;
int status, count, recvd, hdrlen;
if ((status = ntohl(*p++)))
nfs_xdr_readdirres(struct rpc_rqst *req, u32 *p, void *dummy)
{
struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
- struct kvec *iov = rcvbuf->head;
+ struct iovec *iov = rcvbuf->head;
struct page **page;
int hdrlen, recvd;
int status, nr;
nfs_xdr_readlinkres(struct rpc_rqst *req, u32 *p, void *dummy)
{
struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
- struct kvec *iov = rcvbuf->head;
+ struct iovec *iov = rcvbuf->head;
unsigned int hdrlen;
u32 *strlen, len;
char *string;
#include <linux/nfs.h>
#include <linux/nfs3.h>
#include <linux/nfs_fs.h>
-#include <linux/vserver/xid.h>
#define NFSDBG_FACILITY NFSDBG_XDR
}
static inline u32 *
-xdr_encode_sattr(u32 *p, struct iattr *attr, int tagxid)
+xdr_encode_sattr(u32 *p, struct iattr *attr)
{
if (attr->ia_valid & ATTR_MODE) {
*p++ = xdr_one;
} else {
*p++ = xdr_zero;
}
- if (attr->ia_valid & ATTR_UID ||
- (tagxid && (attr->ia_valid & ATTR_XID))) {
+ if (attr->ia_valid & ATTR_UID) {
*p++ = xdr_one;
- *p++ = htonl(XIDINO_UID(tagxid, attr->ia_uid, attr->ia_xid));
+ *p++ = htonl(attr->ia_uid);
} else {
*p++ = xdr_zero;
}
- if (attr->ia_valid & ATTR_GID ||
- (tagxid && (attr->ia_valid & ATTR_XID))) {
+ if (attr->ia_valid & ATTR_GID) {
*p++ = xdr_one;
- *p++ = htonl(XIDINO_GID(tagxid, attr->ia_gid, attr->ia_xid));
+ *p++ = htonl(attr->ia_gid);
} else {
*p++ = xdr_zero;
}
nfs3_xdr_sattrargs(struct rpc_rqst *req, u32 *p, struct nfs3_sattrargs *args)
{
p = xdr_encode_fhandle(p, args->fh);
- p = xdr_encode_sattr(p, args->sattr,
- req->rq_task->tk_client->cl_tagxid);
+ p = xdr_encode_sattr(p, args->sattr);
*p++ = htonl(args->guard);
if (args->guard)
p = xdr_encode_time3(p, &args->guardtime);
*p++ = args->verifier[0];
*p++ = args->verifier[1];
} else
- p = xdr_encode_sattr(p, args->sattr,
- req->rq_task->tk_client->cl_tagxid);
+ p = xdr_encode_sattr(p, args->sattr);
req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
return 0;
{
p = xdr_encode_fhandle(p, args->fh);
p = xdr_encode_array(p, args->name, args->len);
- p = xdr_encode_sattr(p, args->sattr,
- req->rq_task->tk_client->cl_tagxid);
+ p = xdr_encode_sattr(p, args->sattr);
req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
return 0;
}
{
p = xdr_encode_fhandle(p, args->fromfh);
p = xdr_encode_array(p, args->fromname, args->fromlen);
- p = xdr_encode_sattr(p, args->sattr,
- req->rq_task->tk_client->cl_tagxid);
+ p = xdr_encode_sattr(p, args->sattr);
p = xdr_encode_array(p, args->topath, args->tolen);
req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
return 0;
p = xdr_encode_fhandle(p, args->fh);
p = xdr_encode_array(p, args->name, args->len);
*p++ = htonl(args->type);
- p = xdr_encode_sattr(p, args->sattr,
- req->rq_task->tk_client->cl_tagxid);
+ p = xdr_encode_sattr(p, args->sattr);
if (args->type == NF3CHR || args->type == NF3BLK) {
*p++ = htonl(MAJOR(args->rdev));
*p++ = htonl(MINOR(args->rdev));
nfs3_xdr_readdirres(struct rpc_rqst *req, u32 *p, struct nfs3_readdirres *res)
{
struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
- struct kvec *iov = rcvbuf->head;
+ struct iovec *iov = rcvbuf->head;
struct page **page;
int hdrlen, recvd;
int status, nr;
nfs3_xdr_readlinkres(struct rpc_rqst *req, u32 *p, struct nfs_fattr *fattr)
{
struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
- struct kvec *iov = rcvbuf->head;
+ struct iovec *iov = rcvbuf->head;
unsigned int hdrlen;
u32 *strlen, len;
char *string;
static int
nfs3_xdr_readres(struct rpc_rqst *req, u32 *p, struct nfs_readres *res)
{
- struct kvec *iov = req->rq_rcv_buf.head;
+ struct iovec *iov = req->rq_rcv_buf.head;
int status, count, ocount, recvd, hdrlen;
status = ntohl(*p++);
WRITE32(FATTR4_WORD0_FILEID);
WRITE32(0);
- /* set up reply kvec
+ /* set up reply iovec
* toplevel_status + taglen + rescount + OP_PUTFH + status
* + OP_READDIR + status + verifer(2) = 9
*/
RESERVE_SPACE(4);
WRITE32(OP_READLINK);
- /* set up reply kvec
+ /* set up reply iovec
* toplevel_status + taglen + rescount + OP_PUTFH + status
* + OP_READLINK + status = 7
*/
if (status)
goto out;
- /* set up reply kvec
+ /* set up reply iovec
* toplevel status + taglen=0 + rescount + OP_PUTFH + status
* + OP_READ + status + eof + datalen = 9
*/
static int decode_read(struct xdr_stream *xdr, struct rpc_rqst *req, struct nfs_readres *res)
{
- struct kvec *iov = req->rq_rcv_buf.head;
+ struct iovec *iov = req->rq_rcv_buf.head;
uint32_t *p;
uint32_t count, eof, recvd, hdrlen;
int status;
{
struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
struct page *page = *rcvbuf->pages;
- struct kvec *iov = rcvbuf->head;
+ struct iovec *iov = rcvbuf->head;
unsigned int nr, pglen = rcvbuf->page_len;
uint32_t *end, *entry, *p, *kaddr;
uint32_t len, attrlen, word;
static int decode_readlink(struct xdr_stream *xdr, struct rpc_rqst *req)
{
struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
- struct kvec *iov = rcvbuf->head;
+ struct iovec *iov = rcvbuf->head;
uint32_t *strlen;
unsigned int hdrlen, len;
char *string;
#include <linux/root_dev.h>
#include <net/ipconfig.h>
#include <linux/parser.h>
-#include <linux/vs_cvirt.h>
/* Define this to allow debugging output */
#undef NFSROOT_DEBUG
Opt_soft, Opt_hard, Opt_intr,
Opt_nointr, Opt_posix, Opt_noposix, Opt_cto, Opt_nocto, Opt_ac,
Opt_noac, Opt_lock, Opt_nolock, Opt_v2, Opt_v3, Opt_udp, Opt_tcp,
- Opt_broken_suid, Opt_tagxid,
+ Opt_broken_suid,
/* Error token */
Opt_err
};
{Opt_tcp, "proto=tcp"},
{Opt_tcp, "tcp"},
{Opt_broken_suid, "broken_suid"},
- {Opt_tagxid, "tagxid"},
{Opt_err, NULL}
};
case Opt_broken_suid:
nfs_data.flags |= NFS_MOUNT_BROKEN_SUID;
break;
- case Opt_tagxid:
- nfs_data.flags |= NFS_MOUNT_TAGXID;
- break;
default :
return 0;
}
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svcauth.h>
#include <linux/nfsd/nfsd.h>
-#include <linux/vserver/xid.h>
#define CAP_NFSD_MASK (CAP_FS_MASK|CAP_TO_MASK(CAP_SYS_RESOURCE))
}
if (cred->cr_uid != (uid_t) -1)
- current->fsuid = INOXID_UID(1, cred->cr_uid, cred->cr_gid);
+ current->fsuid = cred->cr_uid;
else
current->fsuid = exp->ex_anon_uid;
if (cred->cr_gid != (gid_t) -1)
- current->fsgid = INOXID_GID(1, cred->cr_uid, cred->cr_gid);
+ current->fsgid = cred->cr_gid;
else
current->fsgid = exp->ex_anon_gid;
-
- current->xid = INOXID_XID(1, cred->cr_uid, cred->cr_gid, 0);
if (!cred->cr_group_info)
return -ENOMEM;
ret = set_current_groups(cred->cr_group_info);
- if (INOXID_UID(1, cred->cr_uid, cred->cr_gid)) {
+ if ((cred->cr_uid)) {
cap_t(current->cap_effective) &= ~CAP_NFSD_MASK;
} else {
cap_t(current->cap_effective) |= (CAP_NFSD_MASK &
#include <linux/sunrpc/svc.h>
#include <linux/nfsd/nfsd.h>
#include <linux/nfsd/xdr3.h>
-#include <linux/vserver/xid.h>
#define NFSDDBG_FACILITY NFSDDBG_XDR
static inline u32 *
encode_fh(u32 *p, struct svc_fh *fhp)
{
- unsigned int size = fhp->fh_handle.fh_size;
+ int size = fhp->fh_handle.fh_size;
*p++ = htonl(size);
if (size) p[XDR_QUADLEN(size)-1]=0;
memcpy(p, &fhp->fh_handle.fh_base, size);
decode_sattr3(u32 *p, struct iattr *iap)
{
u32 tmp;
- uid_t uid = 0;
- gid_t gid = 0;
iap->ia_valid = 0;
}
if (*p++) {
iap->ia_valid |= ATTR_UID;
- uid = ntohl(*p++);
+ iap->ia_uid = ntohl(*p++);
}
if (*p++) {
iap->ia_valid |= ATTR_GID;
- gid = ntohl(*p++);
+ iap->ia_gid = ntohl(*p++);
}
- iap->ia_uid = INOXID_UID(1, uid, gid);
- iap->ia_gid = INOXID_GID(1, uid, gid);
- iap->ia_xid = INOXID_XID(1, uid, gid, 0);
if (*p++) {
u64 newsize;
*p++ = htonl(nfs3_ftypes[(stat.mode & S_IFMT) >> 12]);
*p++ = htonl((u32) stat.mode);
*p++ = htonl((u32) stat.nlink);
- *p++ = htonl((u32) nfsd_ruid(rqstp,
- XIDINO_UID(XID_TAG(dentry->d_inode), stat.uid, stat.xid)));
- *p++ = htonl((u32) nfsd_rgid(rqstp,
- XIDINO_GID(XID_TAG(dentry->d_inode), stat.gid, stat.xid)));
+ *p++ = htonl((u32) nfsd_ruid(rqstp, stat.uid));
+ *p++ = htonl((u32) nfsd_rgid(rqstp, stat.gid));
if (S_ISLNK(stat.mode) && stat.size > NFS3_MAXPATHLEN) {
p = xdr_encode_hyper(p, (u64) NFS3_MAXPATHLEN);
} else {
nfs3svc_decode_readargs(struct svc_rqst *rqstp, u32 *p,
struct nfsd3_readargs *args)
{
- unsigned int len;
+ int len;
int v,pn;
if (!(p = decode_fh(p, &args->fh))
if (len > NFSSVC_MAXBLKSIZE)
len = NFSSVC_MAXBLKSIZE;
- /* set up the kvec */
+ /* set up the iovec */
v=0;
while (len > 0) {
pn = rqstp->rq_resused;
svc_take_page(rqstp);
args->vec[v].iov_base = page_address(rqstp->rq_respages[pn]);
args->vec[v].iov_len = len < PAGE_SIZE? len : PAGE_SIZE;
- len -= args->vec[v].iov_len;
v++;
+ len -= PAGE_SIZE;
}
args->vlen = v;
return xdr_argsize_check(rqstp, p);
nfs3svc_decode_writeargs(struct svc_rqst *rqstp, u32 *p,
struct nfsd3_writeargs *args)
{
- unsigned int len, v, hdr;
+ int len, v;
if (!(p = decode_fh(p, &args->fh))
|| !(p = xdr_decode_hyper(p, &args->offset)))
args->stable = ntohl(*p++);
len = args->len = ntohl(*p++);
- hdr = (void*)p - rqstp->rq_arg.head[0].iov_base;
- if (rqstp->rq_arg.len < len + hdr)
- return 0;
-
args->vec[0].iov_base = (void*)p;
- args->vec[0].iov_len = rqstp->rq_arg.head[0].iov_len - hdr;
+ args->vec[0].iov_len = rqstp->rq_arg.head[0].iov_len -
+ (((void*)p) - rqstp->rq_arg.head[0].iov_base);
if (len > NFSSVC_MAXBLKSIZE)
len = NFSSVC_MAXBLKSIZE;
nfs3svc_decode_symlinkargs(struct svc_rqst *rqstp, u32 *p,
struct nfsd3_symlinkargs *args)
{
- unsigned int len;
+ int len;
int avail;
char *old, *new;
- struct kvec *vec;
+ struct iovec *vec;
if (!(p = decode_fh(p, &args->ffh))
|| !(p = decode_filename(p, &args->fname, &args->flen))
*/
svc_take_page(rqstp);
len = ntohl(*p++);
- if (len == 0 || len > NFS3_MAXPATHLEN || len >= PAGE_SIZE)
+ if (len <= 0 || len > NFS3_MAXPATHLEN || len >= PAGE_SIZE)
return 0;
args->tname = new = page_address(rqstp->rq_respages[rqstp->rq_resused-1]);
args->tlen = len;
if (share_access & NFS4_SHARE_ACCESS_WRITE) {
status = get_write_access(filp->f_dentry->d_inode);
- if (status)
+ if (!status)
+ filp->f_mode = FMODE_WRITE;
+ else
return nfserrno(status);
- filp->f_mode = (filp->f_mode | FMODE_WRITE) & ~FMODE_READ;
}
return nfs_ok;
}
{
if (share_access & NFS4_SHARE_ACCESS_WRITE) {
put_write_access(filp->f_dentry->d_inode);
- filp->f_mode = (filp->f_mode | FMODE_READ) & ~FMODE_WRITE;
+ filp->f_mode = FMODE_READ;
}
}
#include <linux/nfsd/state.h>
#include <linux/nfsd/xdr4.h>
#include <linux/nfsd_idmap.h>
-#include <linux/vserver/xid.h>
#define NFSDDBG_FACILITY NFSDDBG_XDR
WRITE32(stat.nlink);
}
if (bmval1 & FATTR4_WORD1_OWNER) {
- status = nfsd4_encode_user(rqstp,
- XIDINO_UID(XID_TAG(dentry->d_inode),
- stat.uid, stat.xid), &p, &buflen);
+ status = nfsd4_encode_user(rqstp, stat.uid, &p, &buflen);
if (status == nfserr_resource)
goto out_resource;
if (status)
goto out;
}
if (bmval1 & FATTR4_WORD1_OWNER_GROUP) {
- status = nfsd4_encode_group(rqstp,
- XIDINO_GID(XID_TAG(dentry->d_inode),
- stat.gid, stat.xid), &p, &buflen);
+ status = nfsd4_encode_group(rqstp, stat.gid, &p, &buflen);
if (status == nfserr_resource)
goto out_resource;
if (status)
/*
* All that remains is to write the tag and operation count...
*/
- struct kvec *iov;
+ struct iovec *iov;
p = resp->tagp;
*p++ = htonl(resp->taglen);
memcpy(p, resp->tag, resp->taglen);
static struct svc_cacherep * nfscache;
static int cache_disabled = 1;
-static int nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
+static int nfsd_cache_append(struct svc_rqst *rqstp, struct iovec *vec);
/*
* locking for the reply cache:
nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, u32 *statp)
{
struct svc_cacherep *rp;
- struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
+ struct iovec *resv = &rqstp->rq_res.head[0], *cachv;
int len;
if (!(rp = rqstp->rq_cacherep) || cache_disabled)
* keep a refcount....
*/
static int
-nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
+nfsd_cache_append(struct svc_rqst *rqstp, struct iovec *data)
{
- struct kvec *vec = &rqstp->rq_res.head[0];
+ struct iovec *vec = &rqstp->rq_res.head[0];
if (vec->iov_len + data->iov_len > PAGE_SIZE) {
printk(KERN_WARNING "nfsd: cached reply too large (%Zd).\n",
#include <linux/nfsd/nfsd.h>
#include <linux/nfsd/xdr.h>
#include <linux/mm.h>
-#include <linux/vserver/xid.h>
#define NFSDDBG_FACILITY NFSDDBG_XDR
decode_sattr(u32 *p, struct iattr *iap)
{
u32 tmp, tmp1;
- uid_t uid = 0;
- gid_t gid = 0;
iap->ia_valid = 0;
}
if ((tmp = ntohl(*p++)) != (u32)-1) {
iap->ia_valid |= ATTR_UID;
- uid = tmp;
+ iap->ia_uid = tmp;
}
if ((tmp = ntohl(*p++)) != (u32)-1) {
iap->ia_valid |= ATTR_GID;
- gid = tmp;
+ iap->ia_gid = tmp;
}
- iap->ia_uid = INOXID_UID(1, uid, gid);
- iap->ia_gid = INOXID_GID(1, uid, gid);
- iap->ia_xid = INOXID_XID(1, uid, gid, 0);
if ((tmp = ntohl(*p++)) != (u32)-1) {
iap->ia_valid |= ATTR_SIZE;
iap->ia_size = tmp;
*p++ = htonl(nfs_ftypes[type >> 12]);
*p++ = htonl((u32) stat.mode);
*p++ = htonl((u32) stat.nlink);
- *p++ = htonl((u32) nfsd_ruid(rqstp,
- XIDINO_UID(XID_TAG(dentry->d_inode), stat.uid, stat.xid)));
- *p++ = htonl((u32) nfsd_rgid(rqstp,
- XIDINO_GID(XID_TAG(dentry->d_inode), stat.gid, stat.xid)));
+ *p++ = htonl((u32) nfsd_ruid(rqstp, stat.uid));
+ *p++ = htonl((u32) nfsd_rgid(rqstp, stat.gid));
if (S_ISLNK(type) && stat.size > NFS_MAXPATHLEN) {
*p++ = htonl(NFS_MAXPATHLEN);
nfssvc_decode_readargs(struct svc_rqst *rqstp, u32 *p,
struct nfsd_readargs *args)
{
- unsigned int len;
+ int len;
int v,pn;
if (!(p = decode_fh(p, &args->fh)))
return 0;
svc_take_page(rqstp);
args->vec[v].iov_base = page_address(rqstp->rq_respages[pn]);
args->vec[v].iov_len = len < PAGE_SIZE?len:PAGE_SIZE;
- len -= args->vec[v].iov_len;
v++;
+ len -= PAGE_SIZE;
}
args->vlen = v;
return xdr_argsize_check(rqstp, p);
nfssvc_decode_writeargs(struct svc_rqst *rqstp, u32 *p,
struct nfsd_writeargs *args)
{
- unsigned int len;
+ int len;
int v;
if (!(p = decode_fh(p, &args->fh)))
return 0;
*/
int
nfsd_read(struct svc_rqst *rqstp, struct svc_fh *fhp, loff_t offset,
- struct kvec *vec, int vlen, unsigned long *count)
+ struct iovec *vec, int vlen, unsigned long *count)
{
struct raparms *ra;
mm_segment_t oldfs;
} else {
oldfs = get_fs();
set_fs(KERNEL_DS);
- err = vfs_readv(&file, (struct iovec __user *)vec, vlen, &offset);
+ err = vfs_readv(&file, vec, vlen, &offset);
set_fs(oldfs);
}
*/
int
nfsd_write(struct svc_rqst *rqstp, struct svc_fh *fhp, loff_t offset,
- struct kvec *vec, int vlen,
+ struct iovec *vec, int vlen,
unsigned long cnt, int *stablep)
{
struct svc_export *exp;
/* Write the data. */
oldfs = get_fs(); set_fs(KERNEL_DS);
- err = vfs_writev(&file, (struct iovec __user *)vec, vlen, &offset);
+ err = vfs_writev(&file, vec, vlen, &offset);
set_fs(oldfs);
if (err >= 0) {
nfsdstats.io_write += cnt;
*/
if (!(acc & MAY_LOCAL_ACCESS))
if (acc & (MAY_WRITE | MAY_SATTR | MAY_TRUNC)) {
- if (EX_RDONLY(exp) || IS_RDONLY(inode)
- || (exp && MNT_IS_RDONLY(exp->ex_mnt)))
+ if (EX_RDONLY(exp) || IS_RDONLY(inode))
return nfserr_rofs;
if (/* (acc & MAY_WRITE) && */ IS_IMMUTABLE(inode))
return nfserr_perm;
ntfs_debug("Entering, cb_size = 0x%x.", cb_size);
do_next_sb:
- ntfs_debug("Beginning sub-block at offset = 0x%zx in the cb.",
+ ntfs_debug("Beginning sub-block at offset = 0x%x in the cb.",
cb - cb_start);
/*
* Have we reached the end of the compression block or the end of the
* or signals an error (both covered by the rc test).
*/
for (;; ie = (INDEX_ENTRY*)((u8*)ie + le16_to_cpu(ie->length))) {
- ntfs_debug("In index root, offset 0x%zx.", (u8*)ie - (u8*)ir);
+ ntfs_debug("In index root, offset 0x%x.", (u8*)ie - (u8*)ir);
/* Bounds checks. */
if (unlikely((u8*)ie < (u8*)ir || (u8*)ie +
sizeof(INDEX_ENTRY_HEADER) > index_end ||
goto read_partial_upcase_page;
}
vol->upcase_len = ino->i_size >> UCHAR_T_SIZE_BITS;
- ntfs_debug("Read %llu bytes from $UpCase (expected %zu bytes).",
+ ntfs_debug("Read %llu bytes from $UpCase (expected %u bytes).",
ino->i_size, 64 * 1024 * sizeof(ntfschar));
iput(ino);
down(&ntfs_lock);
if (retval == 0 && buf->f_frsize == 0)
buf->f_frsize = buf->f_bsize;
}
- if (!vx_check(0, VX_ADMIN|VX_WATCH))
- vx_vsi_statfs(sb, buf);
}
return retval;
}
goto dput_and_out;
error = -EROFS;
- if (IS_RDONLY(inode) || MNT_IS_RDONLY(nd.mnt))
+ if (IS_RDONLY(inode))
goto dput_and_out;
error = -EPERM;
inode = nd.dentry->d_inode;
error = -EROFS;
- if (IS_RDONLY(inode) || MNT_IS_RDONLY(nd.mnt))
+ if (IS_RDONLY(inode))
goto dput_and_out;
/* Don't worry, the checks are done in inode_change_ok() */
inode = nd.dentry->d_inode;
error = -EROFS;
- if (IS_RDONLY(inode) || MNT_IS_RDONLY(nd.mnt))
+ if (IS_RDONLY(inode))
goto dput_and_out;
/* Don't worry, the checks are done in inode_change_ok() */
if (!res) {
res = permission(nd.dentry->d_inode, mode, &nd);
/* SuS v2 requires we report a read only fs too */
- if(!res && (mode & S_IWOTH)
- && (IS_RDONLY(nd.dentry->d_inode) || MNT_IS_RDONLY(nd.mnt))
+ if(!res && (mode & S_IWOTH) && IS_RDONLY(nd.dentry->d_inode)
&& !special_file(nd.dentry->d_inode->i_mode))
res = -EROFS;
path_release(&nd);
dentry = file->f_dentry;
inode = dentry->d_inode;
+ err = -EPERM;
+ if (IS_BARRIER(inode) && !vx_check(0, VX_ADMIN))
+ goto out_putf;
err = -EROFS;
- if (IS_RDONLY(inode) || (file && MNT_IS_RDONLY(file->f_vfsmnt)))
+ if (IS_RDONLY(inode))
goto out_putf;
err = -EPERM;
if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
goto out;
inode = nd.dentry->d_inode;
+ error = -EPERM;
+ if (IS_BARRIER(inode) && !vx_check(0, VX_ADMIN))
+ goto dput_and_out;
+
error = -EROFS;
- if (IS_RDONLY(inode) || MNT_IS_RDONLY(nd.mnt))
+ if (IS_RDONLY(inode))
goto dput_and_out;
error = -EPERM;
return error;
}
-static int chown_common(struct dentry *dentry, struct vfsmount *mnt,
- uid_t user, gid_t group)
+static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
{
struct inode * inode;
int error;
goto out;
}
error = -EROFS;
- if (IS_RDONLY(inode) || MNT_IS_RDONLY(mnt))
+ if (IS_RDONLY(inode))
goto out;
error = -EPERM;
if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
error = user_path_walk(filename, &nd);
if (!error) {
- error = chown_common(nd.dentry, nd.mnt, user, group);
+ error = chown_common(nd.dentry, user, group);
path_release(&nd);
}
return error;
error = user_path_walk_link(filename, &nd);
if (!error) {
- error = chown_common(nd.dentry, nd.mnt, user, group);
+ error = chown_common(nd.dentry, user, group);
path_release(&nd);
}
return error;
file = fget(fd);
if (file) {
- error = chown_common(file->f_dentry, file->f_vfsmnt, user, group);
+ error = chown_common(file->f_dentry, user, group);
fput(file);
}
return error;
if (!f)
goto cleanup_dentry;
f->f_flags = flags;
- f->f_mode = ((flags+1) & O_ACCMODE) | FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE;
+ f->f_mode = (flags+1) & O_ACCMODE;
inode = dentry->d_inode;
if (f->f_mode & FMODE_WRITE) {
error = get_write_access(inode);
FD_SET(fd, files->open_fds);
FD_CLR(fd, files->close_on_exec);
files->next_fd = fd + 1;
- // vx_openfd_inc(fd);
+ vx_openfd_inc(fd);
#if 1
/* Sanity check */
if (files->fd[fd] != NULL) {
__FD_CLR(fd, files->open_fds);
if (fd < files->next_fd)
files->next_fd = fd;
- // vx_openfd_dec(fd);
}
void fastcall put_unused_fd(unsigned int fd)
FD_CLR(fd, files->close_on_exec);
__put_unused_fd(files, fd);
spin_unlock(&files->file_lock);
+ vx_openfd_dec(fd);
return filp_close(filp, files);
out_unlock:
}
EXPORT_SYMBOL(generic_file_open);
-
-/*
- * This is used by subsystems that don't want seekable
- * file descriptors
- */
-int nonseekable_open(struct inode *inode, struct file *filp)
-{
- filp->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
- return 0;
-}
-
-EXPORT_SYMBOL(nonseekable_open);
static struct dentry *openpromfs_lookup(struct inode *, struct dentry *dentry, struct nameidata *nd);
static int openpromfs_unlink (struct inode *, struct dentry *dentry);
-static ssize_t nodenum_read(struct file *file, char __user *buf,
+static ssize_t nodenum_read(struct file *file, char *buf,
size_t count, loff_t *ppos)
{
struct inode *inode = file->f_dentry->d_inode;
return count;
}
-static ssize_t property_read(struct file *filp, char __user *buf,
+static ssize_t property_read(struct file *filp, char *buf,
size_t count, loff_t *ppos)
{
struct inode *inode = filp->f_dentry->d_inode;
i = ((u32)(long)inode->u.generic_ip) >> 16;
if ((u16)((long)inode->u.generic_ip) == aliases) {
if (i >= aliases_nodes)
- p = NULL;
+ p = 0;
else
p = alias_names [i];
} else
return -EIO;
op->value [k] = 0;
if (k) {
- for (s = NULL, p = op->value; p < op->value + k; p++) {
+ for (s = 0, p = op->value; p < op->value + k; p++) {
if ((*p >= ' ' && *p <= '~') || *p == '\n') {
op->flag |= OPP_STRING;
s = p;
return count;
}
-static ssize_t property_write(struct file *filp, const char __user *buf,
+static ssize_t property_write(struct file *filp, const char *buf,
size_t count, loff_t *ppos)
{
int i, j, k;
if (filp->f_pos >= 0xffffff || count >= 0xffffff)
return -EINVAL;
if (!filp->private_data) {
- i = property_read (filp, NULL, 0, NULL);
+ i = property_read (filp, NULL, 0, 0);
if (i)
return i;
}
mask &= mask2;
if (mask) {
*first &= ~mask;
- *first |= simple_strtoul (tmp, NULL, 16);
+ *first |= simple_strtoul (tmp, 0, 16);
op->flag |= OPP_DIRTY;
}
} else {
for (j = 0; j < first_off; j++)
mask >>= 1;
*q &= ~mask;
- *q |= simple_strtoul (tmp,NULL,16);
+ *q |= simple_strtoul (tmp,0,16);
}
buf += 9;
} else if ((q == last - 1) && last_cnt
for (j = 0; j < 8 - last_cnt; j++)
mask <<= 1;
*q &= ~mask;
- *q |= simple_strtoul (tmp, NULL, 16);
+ *q |= simple_strtoul (tmp, 0, 16);
buf += last_cnt;
} else {
char tchars[17]; /* XXX yuck... */
if (copy_from_user(tchars, buf, 16))
return -EFAULT;
- *q = simple_strtoul (tchars, NULL, 16);
+ *q = simple_strtoul (tchars, 0, 16);
buf += 9;
}
}
if (disk->fops->revalidate_disk)
disk->fops->revalidate_disk(disk);
if (!get_capacity(disk) || !(state = check_partition(disk, bdev)))
- return -EIO;
+ return res;
for (p = 1; p < state->limit; p++) {
sector_t size = state->parts[p].size;
sector_t from = state->parts[p].from;
#endif
}
kfree(state);
- return 0;
+ return res;
}
unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p)
struct iovec *iov = (struct iovec *)_iov;
size_t total_len;
+ /* pread is not allowed on pipes. */
+ if (unlikely(ppos != &filp->f_pos))
+ return -ESPIPE;
+
total_len = iov_length(iov, nr_segs);
/* Null read succeeds. */
if (unlikely(total_len == 0))
struct iovec *iov = (struct iovec *)_iov;
size_t total_len;
+ /* pwrite is not allowed on pipes. */
+ if (unlikely(ppos != &filp->f_pos))
+ return -ESPIPE;
+
total_len = iov_length(iov, nr_segs);
/* Null write succeeds. */
if (unlikely(total_len == 0))
kill_fasync(PIPE_FASYNC_READERS(*inode), SIGIO, POLL_IN);
}
if (ret > 0)
- inode_update_time(inode, filp->f_vfsmnt, 1); /* mtime and ctime */
+ inode_update_time(inode, 1); /* mtime and ctime */
return ret;
}
f1->f_pos = f2->f_pos = 0;
f1->f_flags = O_RDONLY;
f1->f_op = &read_pipe_fops;
- f1->f_mode = FMODE_READ;
+ f1->f_mode = 1;
f1->f_version = 0;
/* write file */
f2->f_flags = O_WRONLY;
f2->f_op = &write_pipe_fops;
- f2->f_mode = FMODE_WRITE;
+ f2->f_mode = 2;
f2->f_version = 0;
fd_install(i, f1);
return error;
}
-EXPORT_SYMBOL_GPL(do_pipe);
-
/*
* pipefs should _never_ be mounted by userland - too much of security hassle,
* no real gain from having the whole whorehouse mounted. So we don't need
TASK_INTERRUPTIBLE |
TASK_UNINTERRUPTIBLE |
TASK_ZOMBIE |
- TASK_DEAD |
TASK_STOPPED |
TASK_ONHOLD);
const char **p = &task_state_array[0];
{
struct group_info *group_info;
int g;
- pid_t pid, ppid, tgid;
+ pid_t ppid;
read_lock(&tasklist_lock);
- tgid = vx_map_tgid(current->vx_info, p->tgid);
- pid = vx_map_tgid(current->vx_info, p->pid);
ppid = vx_map_tgid(current->vx_info, p->real_parent->pid);
buffer += sprintf(buffer,
"State:\t%s\n"
"Gid:\t%d\t%d\t%d\t%d\n",
get_task_state(p),
(p->sleep_avg/1024)*100/(1020000000/1024),
- tgid, pid, p->pid ? ppid : 0,
+ p->tgid,
+ p->pid, p->pid ? ppid : 0,
p->pid && p->ptrace ? p->parent->pid : 0,
p->uid, p->euid, p->suid, p->fsuid,
p->gid, p->egid, p->sgid, p->fsgid);
sigset_t sigign, sigcatch;
char state;
int res;
- pid_t pid, ppid, pgid = -1, sid = -1;
+ pid_t ppid, pgid = -1, sid = -1;
int num_threads = 0;
struct mm_struct *mm;
unsigned long long start_time;
if (bias_jiffies > task->start_time)
bias_jiffies = task->start_time;
}
- pid = vx_map_tgid(task->vx_info, task->pid);
mm = task->mm;
if(mm)
res = sprintf(buffer,"%d (%s) %c %d %d %d %d %d %lu %lu \
%lu %lu %lu %lu %lu %ld %ld %ld %ld %d %ld %llu %lu %ld %lu %lu %lu %lu %lu \
%lu %lu %lu %lu %lu %lu %lu %lu %d %d %lu %lu\n",
- pid,
+ task->pid,
task->comm,
state,
ppid,
res = sprintf(buffer,"%u %llu %llu %u %llu %u %llu\n",
get_delay(task,runs),
- (unsigned long long)get_delay(task,runcpu_total),
- (unsigned long long)get_delay(task,waitcpu_total),
+ get_delay(task,runcpu_total),
+ get_delay(task,waitcpu_total),
get_delay(task,num_iowaits),
- (unsigned long long)get_delay(task,iowait_total),
+ get_delay(task,iowait_total),
get_delay(task,num_memwaits),
- (unsigned long long)get_delay(task,mem_iowait_total)
+ get_delay(task,mem_iowait_total)
);
return res;
}
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/ptrace.h>
+#include <linux/vs_context.h>
#include <linux/vs_network.h>
#include <linux/vs_cvirt.h>
+
/*
* For hysterical raisins we keep the same inumbers as in the old procfs.
* Feel free to change the macro below - just keep the range distinct from
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/idr.h>
-#include <linux/namei.h>
#include <linux/vs_base.h>
#include <linux/vserver/inode.h>
+#include <linux/namei.h>
#include <asm/uaccess.h>
#include <asm/bitops.h>
error = -EINVAL;
inode = proc_get_inode(dir->i_sb, ino, de);
- inode->i_xid = vx_current_xid();
break;
}
}
child = NULL;
while ((child = of_get_next_child(np, child))) {
p = strrchr(child->full_name, '/');
- if (!p)
+ if (p == 0)
p = child->full_name;
else
++p;
lastp = &al->next;
}
of_node_put(child);
- *lastp = NULL;
+ *lastp = 0;
de->subdir = list;
}
struct device_node *root;
if ( !have_of )
return;
- proc_device_tree = proc_mkdir("device-tree", NULL);
+ proc_device_tree = proc_mkdir("device-tree", 0);
if (proc_device_tree == 0)
return;
root = of_find_node_by_path("/");
// create the default set of magic files
clstype = (RCFS_I(dentry->d_inode))->core->classtype;
rcfs_create_magic(dentry, &(((struct rcfs_magf *)clstype->mfdesc)[1]),
- clstype->mfcount - 3);
+ clstype->mfcount - 2);
return retval;
*resstr = NULL; \
\
if (!options) \
- return 0; \
+ return -EINVAL; \
\
while ((p = strsep(&options, ",")) != NULL) { \
substring_t args[MAX_OPT_ARGS]; \
switch (token) { \
case FUNC ## _res_type: \
*resstr = match_strdup(args); \
- if (!strcmp(#FUNC, "config")) { \
- char *str = p + strlen(p) + 1; \
- *otherstr = kmalloc(strlen(str) + 1, \
- GFP_KERNEL); \
- if (*otherstr == NULL) { \
- kfree(*resstr); \
- *resstr = NULL; \
- return 0; \
- } else { \
- strcpy(*otherstr, str); \
- return 1; \
- } \
- } \
break; \
case FUNC ## _str: \
*otherstr = match_strdup(args); \
break; \
default: \
- return 0; \
+ return -EINVAL; \
} \
} \
- return (*resstr != NULL); \
+ if (*resstr) \
+ return 0; \
+ return -EINVAL; \
}
#define MAGIC_WRITE(FUNC,CLSTYPEFUN) \
EXPORT_SYMBOL(FUNC ## _fileops);
/******************************************************************************
- * Shared function used by Target / Reclassify
+ * Target
*
+ * pseudo file for manually reclassifying members to a class
*
*****************************************************************************/
#define TARGET_MAX_INPUT_SIZE 100
static ssize_t
-target_reclassify_write(struct file *file, const char __user * buf,
- size_t count, loff_t * ppos, int manual)
+target_write(struct file *file, const char __user * buf,
+ size_t count, loff_t * ppos)
{
struct rcfs_inode_info *ri = RCFS_I(file->f_dentry->d_inode);
char *optbuf;
clstype = ri->core->classtype;
if (clstype->forced_reclassify)
- rc = (*clstype->forced_reclassify) (manual ? ri->core: NULL, optbuf);
+ rc = (*clstype->forced_reclassify) (ri->core, optbuf);
up(&(ri->vfs_inode.i_sem));
kfree(optbuf);
}
-/******************************************************************************
- * Target
- *
- * pseudo file for manually reclassifying members to a class
- *
- *****************************************************************************/
-
-static ssize_t
-target_write(struct file *file, const char __user * buf,
- size_t count, loff_t * ppos)
-{
- return target_reclassify_write(file,buf,count,ppos,1);
-}
-
struct file_operations target_fileops = {
.write = target_write,
};
EXPORT_SYMBOL(target_fileops);
-/******************************************************************************
- * Reclassify
- *
- * pseudo file for reclassification of an object through CE
- *
- *****************************************************************************/
-
-static ssize_t
-reclassify_write(struct file *file, const char __user * buf,
- size_t count, loff_t * ppos)
-{
- return target_reclassify_write(file,buf,count,ppos,0);
-}
-
-struct file_operations reclassify_fileops = {
- .write = reclassify_write,
-};
-
-EXPORT_SYMBOL(reclassify_fileops);
-
/******************************************************************************
* Config
*
static match_table_t config_tokens = {
{config_res_type, "res=%s"},
+ {config_str, "config=%s"},
{config_err, NULL},
};
}
}
- printk(KERN_DEBUG "Set %s shares to %d %d %d %d\n",
+ printk(KERN_ERR "Set %s shares to %d %d %d %d\n",
resname,
newshares.my_guarantee,
newshares.my_limit,
return -EINVAL;
rootdesc = &mfdesc[0];
- printk(KERN_DEBUG "allocating classtype root <%s>\n", rootdesc->name);
+ printk("allocating classtype root <%s>\n", rootdesc->name);
dentry = rcfs_create_internal(rcfs_rootde, rootdesc, 0);
if (!dentry) {
.i_op = &my_iops,
.i_fop = &target_fileops,
},
- {
- .name = "reclassify",
- .mode = RCFS_DEFAULT_FILE_MODE,
- .i_op = &my_iops,
- .i_fop = &reclassify_fileops,
- },
};
struct rcfs_magf sock_magf[] = {
clstype = ckrm_classtypes[i];
if (clstype == NULL)
continue;
- printk(KERN_DEBUG "A non null classtype\n");
+ printk("A non null classtype\n");
if ((rc = rcfs_register_classtype(clstype)))
continue; // could return with an error too
#define TC_FILE_MODE (S_IFREG | S_IRUGO | S_IWUSR)
-#define NR_TCROOTMF 7
+#define NR_TCROOTMF 6
struct rcfs_magf tc_rootdesc[NR_TCROOTMF] = {
/* First entry must be root */
{
.i_fop = &shares_fileops,
.i_op = &rcfs_file_inode_operations,
},
- // Reclassify and Config should be made available only at the
- // root level. Make sure they are the last two entries, as
- // rcfs_mkdir depends on it
- {
- .name = "reclassify",
- .mode = TC_FILE_MODE,
- .i_fop = &reclassify_fileops,
- .i_op = &rcfs_file_inode_operations,
- },
+ // Config should be made available only at the root level
+ // Make sure this is the last entry, as rcfs_mkdir depends on it
{
.name = "config",
.mode = TC_FILE_MODE,
{
loff_t (*fn)(struct file *, loff_t, int);
- fn = no_llseek;
- if (file->f_mode & FMODE_LSEEK) {
- fn = default_llseek;
- if (file->f_op && file->f_op->llseek)
- fn = file->f_op->llseek;
- }
+ fn = default_llseek;
+ if (file->f_op && file->f_op->llseek)
+ fn = file->f_op->llseek;
return fn(file, offset, origin);
}
EXPORT_SYMBOL(vfs_llseek);
EXPORT_SYMBOL(vfs_write);
-static inline loff_t file_pos_read(struct file *file)
-{
- return file->f_pos;
-}
-
-static inline void file_pos_write(struct file *file, loff_t pos)
-{
- file->f_pos = pos;
-}
-
asmlinkage ssize_t sys_read(unsigned int fd, char __user * buf, size_t count)
{
struct file *file;
file = fget_light(fd, &fput_needed);
if (file) {
- loff_t pos = file_pos_read(file);
- ret = vfs_read(file, buf, count, &pos);
- file_pos_write(file, pos);
+ ret = vfs_read(file, buf, count, &file->f_pos);
fput_light(file, fput_needed);
}
file = fget_light(fd, &fput_needed);
if (file) {
- loff_t pos = file_pos_read(file);
- ret = vfs_write(file, buf, count, &pos);
- file_pos_write(file, pos);
+ ret = vfs_write(file, buf, count, &file->f_pos);
fput_light(file, fput_needed);
}
file = fget_light(fd, &fput_needed);
if (file) {
- ret = -ESPIPE;
- if (file->f_mode & FMODE_PREAD)
- ret = vfs_read(file, buf, count, &pos);
+ ret = vfs_read(file, buf, count, &pos);
fput_light(file, fput_needed);
}
file = fget_light(fd, &fput_needed);
if (file) {
- ret = -ESPIPE;
- if (file->f_mode & FMODE_PWRITE)
- ret = vfs_write(file, buf, count, &pos);
+ ret = vfs_write(file, buf, count, &pos);
fput_light(file, fput_needed);
}
file = fget_light(fd, &fput_needed);
if (file) {
- loff_t pos = file_pos_read(file);
- ret = vfs_readv(file, vec, vlen, &pos);
- file_pos_write(file, pos);
+ ret = vfs_readv(file, vec, vlen, &file->f_pos);
fput_light(file, fput_needed);
}
file = fget_light(fd, &fput_needed);
if (file) {
- loff_t pos = file_pos_read(file);
- ret = vfs_writev(file, vec, vlen, &pos);
- file_pos_write(file, pos);
+ ret = vfs_writev(file, vec, vlen, &file->f_pos);
fput_light(file, fput_needed);
}
goto fput_in;
if (!in_file->f_op || !in_file->f_op->sendfile)
goto fput_in;
- retval = -ESPIPE;
if (!ppos)
ppos = &in_file->f_pos;
- else
- if (!(in_file->f_mode & FMODE_PREAD))
- goto fput_in;
retval = locks_verify_area(FLOCK_VERIFY_READ, in_inode, in_file, *ppos, count);
if (retval)
goto fput_in;
if (res)
goto out;
- inode_update_time(inode, file->f_vfsmnt, 1); /* Both mtime and ctime */
+ inode_update_time(inode, 1); /* Both mtime and ctime */
// Ok, we are done with all the checks.
REISERFS_I(inode)->i_attrs = sd_v2_attrs( sd );
sd_attrs_to_i_attrs( sd_v2_attrs( sd ), inode );
}
- inode->i_uid = INOXID_UID(XID_TAG(inode), uid, gid);
- inode->i_gid = INOXID_GID(XID_TAG(inode), uid, gid);
- inode->i_xid = INOXID_XID(XID_TAG(inode), uid, gid, 0);
+ inode->i_uid = INOXID_UID(uid, gid);
+ inode->i_gid = INOXID_GID(uid, gid);
+ inode->i_xid = INOXID_XID(uid, gid, 0);
pathrelse (path);
if (S_ISREG (inode->i_mode)) {
static void inode2sd (void * sd, struct inode * inode, loff_t size)
{
struct stat_data * sd_v2 = (struct stat_data *)sd;
- uid_t uid = XIDINO_UID(XID_TAG(inode), inode->i_uid, inode->i_xid);
- gid_t gid = XIDINO_GID(XID_TAG(inode), inode->i_gid, inode->i_xid);
+ uid_t uid = XIDINO_UID(inode->i_uid, inode->i_xid);
+ gid_t gid = XIDINO_GID(inode->i_gid, inode->i_xid);
__u16 flags;
set_sd_v2_uid(sd_v2, uid );
flags &= REISERFS_FL_USER_VISIBLE;
return put_user(flags, (int __user *) arg);
case REISERFS_IOC_SETFLAGS: {
- if (IS_RDONLY(inode) ||
- (filp && MNT_IS_RDONLY(filp->f_vfsmnt)))
+ if (IS_RDONLY(inode))
return -EROFS;
if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
return -EFAULT;
oldflags = REISERFS_I(inode) -> i_attrs;
- if ( ( (oldflags & REISERFS_IMMUTABLE_FL) ||
- ( (flags ^ oldflags) &
- (REISERFS_IMMUTABLE_FL | REISERFS_IUNLINK_FL |
- REISERFS_APPEND_FL) ) ) &&
- !capable( CAP_LINUX_IMMUTABLE ) )
+ if ( (oldflags & REISERFS_IMMUTABLE_FL) || ( ( (flags ^ oldflags) &
+ (REISERFS_IMMUTABLE_FL | REISERFS_IUNLINK_FL | REISERFS_APPEND_FL)) &&
+ !capable( CAP_LINUX_IMMUTABLE ) ) )
return -EPERM;
if( ( flags & REISERFS_NOTAIL_FL ) &&
case REISERFS_IOC_SETVERSION:
if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
return -EPERM;
- if (IS_RDONLY(inode) ||
- (filp && MNT_IS_RDONLY(filp->f_vfsmnt)))
+ if (IS_RDONLY(inode))
return -EROFS;
if (get_user(inode->i_generation, (int __user *) arg))
return -EFAULT;
{"noattrs", .clrmask = 1<<REISERFS_ATTRS},
{"user_xattr", .setmask = 1<<REISERFS_XATTRS_USER},
{"nouser_xattr",.clrmask = 1<<REISERFS_XATTRS_USER},
- {"tagxid", .setmask = 1<<REISERFS_TAGXID},
#ifdef CONFIG_REISERFS_FS_POSIX_ACL
{"acl", .setmask = 1<<REISERFS_POSIXACL},
{"noacl", .clrmask = 1<<REISERFS_POSIXACL},
{
umode_t mode = inode->i_mode;
- /* Prevent vservers from escaping chroot() barriers */
- if (IS_BARRIER(inode) && !vx_check(0, VX_ADMIN))
- return -EACCES;
-
if (mask & MAY_WRITE) {
/*
* Nobody gets write access to a read-only fs.
*/
- if ((IS_RDONLY(inode) || (nd && MNT_IS_RDONLY(nd->mnt))) &&
+ if (IS_RDONLY(inode) &&
(S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)))
return -EROFS;
}
#endif
} else {
-#ifdef CONFIG_REISERFS_FS_POSIX_ACL
check_groups:
-#endif
if (in_group_p(inode->i_gid))
mode >>= 3;
}
if (((mode & mask & (MAY_READ|MAY_WRITE|MAY_EXEC)) == mask))
return 0;
-#ifdef CONFIG_REISERFS_FS_POSIX_ACL
check_capabilities:
-#endif
/*
* Read/write DACs are always overridable.
* Executable DACs are overridable if at least one exec bit is set.
sema_init(&p->sem, 1);
p->op = op;
file->private_data = p;
-
- /* SEQ files support lseek, but not pread/pwrite */
- file->f_mode &= ~(FMODE_PREAD | FMODE_PWRITE);
return 0;
}
EXPORT_SYMBOL(seq_open);
void *p;
int err = 0;
+ if (ppos != &file->f_pos)
+ return -EPIPE;
+
down(&m->sem);
/* grab buffer if we didn't have one */
if (!m->buf) {
int rq_bytes_sent;
int rq_iovlen;
- struct kvec rq_iov[4];
+ struct iovec rq_iov[4];
int (*rq_setup_read) (struct smb_request *);
void (*rq_callback) (struct smb_request *);
static int
_recvfrom(struct socket *socket, unsigned char *ubuf, int size, unsigned flags)
{
- struct kvec iov = {ubuf, size};
- struct msghdr msg = {.msg_flags = flags};
- msg.msg_flags |= MSG_DONTWAIT | MSG_NOSIGNAL;
- return kernel_recvmsg(socket, &msg, &iov, 1, size, msg.msg_flags);
+ struct iovec iov;
+ struct msghdr msg;
+ mm_segment_t fs;
+
+ fs = get_fs();
+ set_fs(get_ds());
+ flags |= MSG_DONTWAIT | MSG_NOSIGNAL;
+
+ msg.msg_flags = flags;
+ msg.msg_name = NULL;
+ msg.msg_namelen = 0;
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+ msg.msg_control = NULL;
+ iov.iov_base = ubuf;
+ iov.iov_len = size;
+
+ size = sock_recvmsg(socket, &msg, size, flags);
+
+ set_fs(fs);
+ return size;
}
/*
}
/*
- * Adjust the kvec to move on 'n' bytes (from nfs/sunrpc)
+ * Adjust the iovec to move on 'n' bytes (from nfs/sunrpc)
*/
static int
-smb_move_iov(struct kvec **data, size_t *num, struct kvec *vec, unsigned amount)
+smb_move_iov(struct msghdr *msg, struct iovec *niv, unsigned amount)
{
- struct kvec *iv = *data;
+ struct iovec *iv = msg->msg_iov;
int i;
int len;
/*
- * Eat any sent kvecs
+ * Eat any sent iovecs
*/
while (iv->iov_len <= amount) {
amount -= iv->iov_len;
iv++;
- (*num)--;
+ msg->msg_iovlen--;
}
/*
* And chew down the partial one
*/
- vec[0].iov_len = iv->iov_len-amount;
- vec[0].iov_base =((unsigned char *)iv->iov_base)+amount;
+ niv[0].iov_len = iv->iov_len-amount;
+ niv[0].iov_base =((unsigned char *)iv->iov_base)+amount;
iv++;
- len = vec[0].iov_len;
+ len = niv[0].iov_len;
/*
* And copy any others
*/
- for (i = 1; i < *num; i++) {
- vec[i] = *iv++;
- len += vec[i].iov_len;
+ for (i = 1; i < msg->msg_iovlen; i++) {
+ niv[i] = *iv++;
+ len += niv[i].iov_len;
}
- *data = vec;
+ msg->msg_iov = niv;
return len;
}
{
struct socket *sock;
unsigned int flags;
- struct kvec iov;
+ struct iovec iov;
struct msghdr msg;
+ mm_segment_t fs;
int rlen = smb_len(server->header) - server->smb_read + 4;
int result = -EIO;
- if (rlen > PAGE_SIZE)
- rlen = PAGE_SIZE;
-
sock = server_sock(server);
if (!sock)
goto out;
if (sock->sk->sk_state != TCP_ESTABLISHED)
goto out;
+ fs = get_fs();
+ set_fs(get_ds());
+
flags = MSG_DONTWAIT | MSG_NOSIGNAL;
iov.iov_base = drop_buffer;
iov.iov_len = PAGE_SIZE;
msg.msg_flags = flags;
msg.msg_name = NULL;
msg.msg_namelen = 0;
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
msg.msg_control = NULL;
- result = kernel_recvmsg(sock, &msg, &iov, 1, rlen, flags);
+ if (rlen > PAGE_SIZE)
+ rlen = PAGE_SIZE;
+
+ result = sock_recvmsg(sock, &msg, rlen, flags);
+
+ set_fs(fs);
VERBOSE("read: %d\n", result);
if (result < 0) {
{
struct socket *sock;
unsigned int flags;
- struct kvec iov[4];
- struct kvec *p = req->rq_iov;
- size_t num = req->rq_iovlen;
+ struct iovec iov[4];
struct msghdr msg;
+ mm_segment_t fs;
int rlen;
int result = -EIO;
if (sock->sk->sk_state != TCP_ESTABLISHED)
goto out;
+ fs = get_fs();
+ set_fs(get_ds());
+
flags = MSG_DONTWAIT | MSG_NOSIGNAL;
msg.msg_flags = flags;
msg.msg_name = NULL;
msg.msg_namelen = 0;
+ msg.msg_iov = req->rq_iov;
+ msg.msg_iovlen = req->rq_iovlen;
msg.msg_control = NULL;
/* Dont repeat bytes and count available bufferspace */
- rlen = smb_move_iov(&p, &num, iov, req->rq_bytes_recvd);
+ rlen = smb_move_iov(&msg, iov, req->rq_bytes_recvd);
if (req->rq_rlen < rlen)
rlen = req->rq_rlen;
- result = kernel_recvmsg(sock, &msg, p, num, rlen, flags);
+ result = sock_recvmsg(sock, &msg, rlen, flags);
+
+ set_fs(fs);
VERBOSE("read: %d\n", result);
if (result < 0) {
int
smb_send_request(struct smb_request *req)
{
+ mm_segment_t fs;
struct smb_sb_info *server = req->rq_server;
struct socket *sock;
- struct msghdr msg = {.msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT};
+ struct msghdr msg;
int slen = req->rq_slen - req->rq_bytes_sent;
int result = -EIO;
- struct kvec iov[4];
- struct kvec *p = req->rq_iov;
- size_t num = req->rq_iovlen;
+ struct iovec iov[4];
sock = server_sock(server);
if (!sock)
if (sock->sk->sk_state != TCP_ESTABLISHED)
goto out;
+ msg.msg_name = NULL;
+ msg.msg_namelen = 0;
+ msg.msg_control = NULL;
+ msg.msg_controllen = 0;
+ msg.msg_iov = req->rq_iov;
+ msg.msg_iovlen = req->rq_iovlen;
+ msg.msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT;
+
/* Dont repeat bytes */
if (req->rq_bytes_sent)
- smb_move_iov(&p, &num, iov, req->rq_bytes_sent);
+ smb_move_iov(&msg, iov, req->rq_bytes_sent);
- result = kernel_sendmsg(sock, &msg, p, num, slen);
+ fs = get_fs();
+ set_fs(get_ds());
+ result = sock_sendmsg(sock, &msg, slen);
+ set_fs(fs);
if (result >= 0) {
req->rq_bytes_sent += result;
stat->nlink = inode->i_nlink;
stat->uid = inode->i_uid;
stat->gid = inode->i_gid;
- stat->xid = inode->i_xid;
stat->rdev = inode->i_rdev;
stat->atime = inode->i_atime;
stat->mtime = inode->i_mtime;
#include <linux/vfs.h>
#include <linux/writeback.h> /* for the emergency remount stuff */
#include <linux/idr.h>
-#include <linux/devpts_fs.h>
-#include <linux/proc_fs.h>
#include <asm/uaccess.h>
sb = type->get_sb(type, flags, name, data);
if (IS_ERR(sb))
goto out_free_secdata;
-
- error = -EPERM;
- if (!capable(CAP_SYS_ADMIN) && !sb->s_bdev &&
- (sb->s_magic != PROC_SUPER_MAGIC) &&
- (sb->s_magic != DEVPTS_SUPER_MAGIC))
- goto out_sb;
-
error = security_sb_kern_mount(sb, secdata);
if (error)
goto out_sb;
#include "sysfs.h"
+/* Random magic number */
+#define SYSFS_MAGIC 0x62656572
struct vfsmount *sysfs_mount;
struct super_block * sysfs_sb = NULL;
sb->s_blocksize = PAGE_CACHE_SIZE;
sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
- sb->s_magic = SYSFS_SUPER_MAGIC;
+ sb->s_magic = SYSFS_MAGIC;
sb->s_op = &sysfs_ops;
sysfs_sb = sb;
#define LINUX_XFLAG_APPEND 0x00000020 /* writes to file may only append */
#define LINUX_XFLAG_NODUMP 0x00000040 /* do not dump file */
#define LINUX_XFLAG_NOATIME 0x00000080 /* do not update atime */
-#define LINUX_XFLAG_BARRIER 0x00004000 /* chroot() barrier */
-#define LINUX_XFLAG_IUNLINK 0x00008000 /* Immutable unlink */
STATIC unsigned int
xfs_merge_ioc_xflags(
if (di_flags & XFS_DIFLAG_IMMUTABLE)
flags |= LINUX_XFLAG_IMMUTABLE;
- if (di_flags & XFS_DIFLAG_IUNLINK)
- flags |= LINUX_XFLAG_IUNLINK;
if (di_flags & XFS_DIFLAG_APPEND)
flags |= LINUX_XFLAG_APPEND;
if (di_flags & XFS_DIFLAG_SYNC)
int error;
int attr_flags;
unsigned int flags;
- unsigned int old_flags;
switch (cmd) {
case XFS_IOC_FSGETXATTR: {
attr_flags = 0;
if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
attr_flags |= ATTR_NONBLOCK;
-
+
va.va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE;
va.va_xflags = fa.fsx_xflags;
va.va_extsize = fa.fsx_extsize;
return 0;
}
-STATIC int
-linvfs_setattr_flags(
- vattr_t *vap,
- unsigned int flags)
-{
- unsigned int oldflags, newflags;
-
- oldflags = vap->va_xflags;
- newflags = oldflags & ~(XFS_XFLAG_IMMUTABLE |
- XFS_XFLAG_IUNLINK | XFS_XFLAG_BARRIER);
- if (flags & ATTR_FLAG_IMMUTABLE)
- newflags |= XFS_XFLAG_IMMUTABLE;
- if (flags & ATTR_FLAG_IUNLINK)
- newflags |= XFS_XFLAG_IUNLINK;
- if (flags & ATTR_FLAG_BARRIER)
- newflags |= XFS_XFLAG_BARRIER;
-
- if (oldflags ^ newflags)
- vap->va_xflags = newflags;
- return 0;
-}
-
STATIC int
linvfs_setattr(
struct dentry *dentry,
flags |= ATTR_NONBLOCK;
#endif
- if (ia_valid & ATTR_ATTR_FLAG) {
- vattr.va_mask |= XFS_AT_XFLAGS;
- linvfs_setattr_flags(&vattr, attr->ia_attr_flags);
- }
-
VOP_SETATTR(vp, &vattr, flags, NULL, error);
if (error)
return -error;
inode->i_flags |= S_IMMUTABLE;
else
inode->i_flags &= ~S_IMMUTABLE;
- if (ip->i_d.di_flags & XFS_DIFLAG_IUNLINK)
- inode->i_flags |= S_IUNLINK;
- else
- inode->i_flags &= ~S_IUNLINK;
- if (ip->i_d.di_flags & XFS_DIFLAG_BARRIER)
- inode->i_flags |= S_BARRIER;
- else
- inode->i_flags &= ~S_BARRIER;
if (ip->i_d.di_flags & XFS_DIFLAG_APPEND)
inode->i_flags |= S_APPEND;
else
int write,
struct file *filp,
void *buffer,
- size_t *lenp,
- loff_t *ppos)
+ size_t *lenp)
{
int c, ret, *valp = ctl->data;
__uint32_t vn_active;
- ret = proc_dointvec_minmax(ctl, write, filp, buffer, lenp, ppos);
+ ret = proc_dointvec_minmax(ctl, write, filp, buffer, lenp);
if (!ret && write && *valp) {
printk("XFS Clearing xfsstats\n");
inode->i_flags |= S_IMMUTABLE;
else
inode->i_flags &= ~S_IMMUTABLE;
- if (va.va_xflags & XFS_XFLAG_IUNLINK)
- inode->i_flags |= S_IUNLINK;
- else
- inode->i_flags &= ~S_IUNLINK;
- if (va.va_xflags & XFS_XFLAG_BARRIER)
- inode->i_flags |= S_BARRIER;
- else
- inode->i_flags &= ~S_BARRIER;
if (va.va_xflags & XFS_XFLAG_APPEND)
inode->i_flags |= S_APPEND;
else
#define O_DIRECTORY 0100000 /* must be a directory */
#define O_NOFOLLOW 0200000 /* don't follow links */
#define O_LARGEFILE 0400000 /* will be set by the kernel on every open */
-#define O_ATOMICLOOKUP 01000000 /* do atomic file lookup */
#define O_DIRECT 02000000 /* direct disk access - should check with OSF/1 */
#define O_NOATIME 04000000
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-#define devmem_is_allowed(x) 1
#endif /* __KERNEL__ */
#endif /* _ALPHA_PAGE_H */
{INR_OPEN, INR_OPEN}, /* RLIMIT_NOFILE */ \
{LONG_MAX, LONG_MAX}, /* RLIMIT_AS */ \
{LONG_MAX, LONG_MAX}, /* RLIMIT_NPROC */ \
- {32768, 32768 }, /* RLIMIT_MEMLOCK */ \
+ {PAGE_SIZE, PAGE_SIZE}, /* RLIMIT_MEMLOCK */ \
{LONG_MAX, LONG_MAX}, /* RLIMIT_LOCKS */ \
{MAX_SIGPENDING, MAX_SIGPENDING}, /* RLIMIT_SIGPENDING */ \
{MQ_BYTES_MAX, MQ_BYTES_MAX}, /* RLIMIT_MSGQUEUE */ \
#define __get_user_check(x,ptr,size,segment) \
({ \
long __gu_err = -EFAULT, __gu_val = 0; \
- const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
+ const __typeof__(*(ptr)) *__gu_addr = (ptr); \
__chk_user_ptr(ptr); \
if (__access_ok((unsigned long)__gu_addr,size,segment)) { \
__gu_err = 0; \
#define __put_user_check(x,ptr,size,segment) \
({ \
long __pu_err = -EFAULT; \
- __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
+ __typeof__(*(ptr)) *__pu_addr = (ptr); \
__chk_user_ptr(ptr); \
if (__access_ok((unsigned long)__pu_addr,size,segment)) { \
__pu_err = 0; \
--- /dev/null
+/* include/asm-arm/arch-lh7a40x/ide.h
+ *
+ * Copyright (C) 2004 Logic Product Development
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __ASM_ARCH_IDE_H
+#define __ASM_ARCH_IDE_H
+
+#if defined (CONFIG_MACH_LPD7A400) || defined (CONFIG_MACH_LPD7A404)
+
+/* This implementation of ide.h only applies to the LPD CardEngines.
+ * Thankfully, there is less to do for the KEV.
+ */
+
+#include <linux/config.h>
+#include <asm/irq.h>
+#include <asm/hardware.h>
+#include <asm/arch/registers.h>
+
+#define IDE_REG_LINE (1<<12) /* A12 drives !REG */
+#define IDE_ALT_LINE (1<<11) /* Unused A11 allows non-overlapping regions */
+#define IDE_CONTROLREG_OFFSET (0xe)
+
+void lpd7a40x_hwif_ioops (struct hwif_s* hwif);
+
+static __inline__ void ide_init_hwif_ports (hw_regs_t *hw, int data_port,
+ int ctrl_port, int *irq)
+{
+ ide_ioreg_t reg;
+ int i;
+ int regincr = 1;
+
+ memset (hw, 0, sizeof (*hw));
+
+ reg = (ide_ioreg_t) data_port;
+
+ for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) {
+ hw->io_ports[i] = reg;
+ reg += regincr;
+ }
+
+ hw->io_ports[IDE_CONTROL_OFFSET] = (ide_ioreg_t) ctrl_port;
+
+ if (irq)
+ *irq = IDE_NO_IRQ;
+}
+
+static __inline__ void ide_init_default_hwifs (void)
+{
+ hw_regs_t hw;
+ struct hwif_s* hwif;
+
+ ide_init_hwif_ports (&hw,
+ CF_VIRT + IDE_REG_LINE,
+ CF_VIRT + IDE_REG_LINE + IDE_ALT_LINE
+ + IDE_CONTROLREG_OFFSET,
+ NULL);
+
+ ide_register_hw (&hw, &hwif);
+ lpd7a40x_hwif_ioops (hwif); /* Override IO routines */
+}
+#endif
+
+#endif
extern unsigned long s3c2410_hclk;
extern unsigned long s3c2410_fclk;
-/* external functions for GPIO support
- *
- * These allow various different clients to access the same GPIO
- * registers without conflicting. If your driver only owns the entire
- * GPIO register, then it is safe to ioremap/__raw_{read|write} to it.
-*/
-
-/* s3c2410_gpio_cfgpin
- *
- * set the configuration of the given pin to the value passed.
- *
- * eg:
- * s3c2410_gpio_cfgpin(S3C2410_GPA0, S3C2410_GPA0_ADDR0);
- * s3c2410_gpio_cfgpin(S3C2410_GPE8, S3C2410_GPE8_SDDAT1);
-*/
-
-extern void s3c2410_gpio_cfgpin(unsigned int pin, unsigned int function);
-
-/* s3c2410_gpio_pullup
- *
- * configure the pull-up control on the given pin
- *
- * to = 1 => disable the pull-up
- * 0 => enable the pull-up
- *
- * eg;
- *
- * s3c2410_gpio_pullup(S3C2410_GPB0, 0);
- * s3c2410_gpio_pullup(S3C2410_GPE8, 0);
-*/
-
-extern void s3c2410_gpio_pullup(unsigned int pin, unsigned int to);
-
-extern void s3c2410_gpio_setpin(unsigned int pin, unsigned int to);
-
#endif /* __ASSEMBLY__ */
#include <asm/sizes.h>
/* linux/include/asm/hardware/s3c2410/
*
- * Copyright (c) 2003,2004 Simtec Electronics <linux@simtec.co.uk>
- * http://www.simtec.co.uk/products/SWLINUX/
+ * Copyright (c) 2003 Simtec Electronics <linux@simtec.co.uk>
+ * http://www.simtec.co.uk/products/SWLINUX/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* 19-06-2003 BJD Created file
* 23-06-2003 BJD Updated GSTATUS registers
* 12-03-2004 BJD Updated include protection
- * 20-07-2004 BJD Added GPIO pin numbers, added Port A definitions
*/
#ifndef __ASM_ARCH_REGS_GPIO_H
#define __ASM_ARCH_REGS_GPIO_H "$Id: gpio.h,v 1.5 2003/05/19 12:51:08 ben Exp $"
-#define S3C2410_GPIONO(bank,offset) ((bank) + (offset))
-
-#define S3C2410_GPIO_BANKA (32*0)
-#define S3C2410_GPIO_BANKB (32*1)
-#define S3C2410_GPIO_BANKC (32*2)
-#define S3C2410_GPIO_BANKD (32*3)
-#define S3C2410_GPIO_BANKE (32*4)
-#define S3C2410_GPIO_BANKF (32*5)
-#define S3C2410_GPIO_BANKG (32*6)
-#define S3C2410_GPIO_BANKH (32*7)
-
-#define S3C2410_GPIO_BASE(pin) ((((pin) & ~31) >> 1) + S3C2410_VA_GPIO)
-#define S3C2410_GPIO_OFFSET(pin) ((pin) & 31)
-
-/* general configuration options */
-
-#define S3C2410_GPIO_LEAVE (0xFFFFFFFF)
-
/* configure GPIO ports A..G */
#define S3C2410_GPIOREG(x) ((x) + S3C2410_VA_GPIO)
#define S3C2410_GPACON S3C2410_GPIOREG(0x00)
#define S3C2410_GPADAT S3C2410_GPIOREG(0x04)
-#define S3C2410_GPA0 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 0)
-#define S3C2410_GPA0_OUT (0<<0)
-#define S3C2410_GPA0_ADDR0 (1<<0)
-
-#define S3C2410_GPA1 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 1)
-#define S3C2410_GPA1_OUT (0<<1)
-#define S3C2410_GPA1_ADDR16 (1<<1)
-
-#define S3C2410_GPA2 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 2)
-#define S3C2410_GPA2_OUT (0<<2)
-#define S3C2410_GPA2_ADDR17 (1<<2)
-
-#define S3C2410_GPA3 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 3)
-#define S3C2410_GPA3_OUT (0<<3)
-#define S3C2410_GPA3_ADDR18 (1<<3)
-
-#define S3C2410_GPA4 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 4)
-#define S3C2410_GPA4_OUT (0<<4)
-#define S3C2410_GPA4_ADDR19 (1<<4)
-
-#define S3C2410_GPA5 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 5)
-#define S3C2410_GPA5_OUT (0<<5)
-#define S3C2410_GPA5_ADDR20 (1<<5)
-
-#define S3C2410_GPA6 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 6)
-#define S3C2410_GPA6_OUT (0<<6)
-#define S3C2410_GPA6_ADDR21 (1<<6)
-
-#define S3C2410_GPA7 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 7)
-#define S3C2410_GPA7_OUT (0<<7)
-#define S3C2410_GPA7_ADDR22 (1<<7)
-
-#define S3C2410_GPA8 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 8)
-#define S3C2410_GPA8_OUT (0<<8)
-#define S3C2410_GPA8_ADDR23 (1<<8)
-
-#define S3C2410_GPA9 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 9)
-#define S3C2410_GPA9_OUT (0<<9)
-#define S3C2410_GPA9_ADDR24 (1<<9)
-
-#define S3C2410_GPA10 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 10)
-#define S3C2410_GPA10_OUT (0<<10)
-#define S3C2410_GPA10_ADDR25 (1<<10)
-
-#define S3C2410_GPA11 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 11)
-#define S3C2410_GPA11_OUT (0<<11)
-#define S3C2410_GPA11_ADDR26 (1<<11)
-
-#define S3C2410_GPA12 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 12)
-#define S3C2410_GPA12_OUT (0<<12)
-#define S3C2410_GPA12_nGCS1 (1<<12)
-
-#define S3C2410_GPA13 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 13)
-#define S3C2410_GPA13_OUT (0<<13)
-#define S3C2410_GPA13_nGCS2 (1<<13)
-
-#define S3C2410_GPA14 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 14)
-#define S3C2410_GPA14_OUT (0<<14)
-#define S3C2410_GPA14_nGCS3 (1<<14)
-
-#define S3C2410_GPA15 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 15)
-#define S3C2410_GPA15_OUT (0<<15)
-#define S3C2410_GPA15_nGCS4 (1<<15)
-
-#define S3C2410_GPA16 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 16)
-#define S3C2410_GPA16_OUT (0<<16)
-#define S3C2410_GPA16_nGCS5 (1<<16)
-
-#define S3C2410_GPA17 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 17)
-#define S3C2410_GPA17_OUT (0<<17)
-#define S3C2410_GPA17_CLE (1<<17)
-
-#define S3C2410_GPA18 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 18)
-#define S3C2410_GPA18_OUT (0<<18)
-#define S3C2410_GPA18_ALE (1<<18)
-
-#define S3C2410_GPA19 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 19)
-#define S3C2410_GPA19_OUT (0<<19)
-#define S3C2410_GPA19_nFWE (1<<19)
-
-#define S3C2410_GPA20 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 20)
-#define S3C2410_GPA20_OUT (0<<20)
-#define S3C2410_GPA20_nFRE (1<<20)
-
-#define S3C2410_GPA21 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 21)
-#define S3C2410_GPA21_OUT (0<<21)
-#define S3C2410_GPA21_nRSTOUT (1<<21)
-
-#define S3C2410_GPA22 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 22)
-#define S3C2410_GPA22_OUT (0<<22)
-#define S3C2410_GPA22_nFCE (1<<22)
-
/* 0x08 and 0x0c are reserved */
/* GPB is 10 IO pins, each configured by 2 bits each in GPBCON.
/* no i/o pin in port b can have value 3! */
-#define S3C2410_GPB0 S3C2410_GPIONO(S3C2410_GPIO_BANKB, 0)
#define S3C2410_GPB0_INP (0x00 << 0)
#define S3C2410_GPB0_OUTP (0x01 << 0)
#define S3C2410_GPB0_TOUT0 (0x02 << 0)
-#define S3C2410_GPB1 S3C2410_GPIONO(S3C2410_GPIO_BANKB, 1)
#define S3C2410_GPB1_INP (0x00 << 2)
#define S3C2410_GPB1_OUTP (0x01 << 2)
#define S3C2410_GPB1_TOUT1 (0x02 << 2)
-#define S3C2410_GPB2 S3C2410_GPIONO(S3C2410_GPIO_BANKB, 2)
#define S3C2410_GPB2_INP (0x00 << 4)
#define S3C2410_GPB2_OUTP (0x01 << 4)
#define S3C2410_GPB2_TOUT2 (0x02 << 4)
-#define S3C2410_GPB3 S3C2410_GPIONO(S3C2410_GPIO_BANKB, 3)
#define S3C2410_GPB3_INP (0x00 << 6)
#define S3C2410_GPB3_OUTP (0x01 << 6)
#define S3C2410_GPB3_TOUT3 (0x02 << 6)
-#define S3C2410_GPB4 S3C2410_GPIONO(S3C2410_GPIO_BANKB, 4)
#define S3C2410_GPB4_INP (0x00 << 8)
#define S3C2410_GPB4_OUTP (0x01 << 8)
#define S3C2410_GPB4_TCLK0 (0x02 << 8)
#define S3C2410_GPB4_MASK (0x03 << 8)
-#define S3C2410_GPB5 S3C2410_GPIONO(S3C2410_GPIO_BANKB, 5)
#define S3C2410_GPB5_INP (0x00 << 10)
#define S3C2410_GPB5_OUTP (0x01 << 10)
#define S3C2410_GPB5_nXBACK (0x02 << 10)
-#define S3C2410_GPB6 S3C2410_GPIONO(S3C2410_GPIO_BANKB, 6)
#define S3C2410_GPB6_INP (0x00 << 12)
#define S3C2410_GPB6_OUTP (0x01 << 12)
#define S3C2410_GPB6_nXBREQ (0x02 << 12)
-#define S3C2410_GPB7 S3C2410_GPIONO(S3C2410_GPIO_BANKB, 7)
#define S3C2410_GPB7_INP (0x00 << 14)
#define S3C2410_GPB7_OUTP (0x01 << 14)
#define S3C2410_GPB7_nXDACK1 (0x02 << 14)
-#define S3C2410_GPB8 S3C2410_GPIONO(S3C2410_GPIO_BANKB, 8)
#define S3C2410_GPB8_INP (0x00 << 16)
#define S3C2410_GPB8_OUTP (0x01 << 16)
#define S3C2410_GPB8_nXDREQ1 (0x02 << 16)
-#define S3C2410_GPB9 S3C2410_GPIONO(S3C2410_GPIO_BANKB, 9)
#define S3C2410_GPB9_INP (0x00 << 18)
#define S3C2410_GPB9_OUTP (0x01 << 18)
#define S3C2410_GPB9_nXDACK0 (0x02 << 18)
-#define S3C2410_GPB10 S3C2410_GPIONO(S3C2410_GPIO_BANKB, 10)
-#define S3C2410_GPB10_INP (0x00 << 18)
-#define S3C2410_GPB10_OUTP (0x01 << 18)
+#define S3C2410_GPB10_INP (0x00 << 18)
+#define S3C2410_GPB10_OUTP (0x01 << 18)
#define S3C2410_GPB10_nXDRE0 (0x02 << 18)
/* Port C consits of 16 GPIO/Special function
#define S3C2410_GPCDAT S3C2410_GPIOREG(0x24)
#define S3C2410_GPCUP S3C2410_GPIOREG(0x28)
-#define S3C2410_GPC0 S3C2410_GPIONO(S3C2410_GPIO_BANKC, 0)
#define S3C2410_GPC0_INP (0x00 << 0)
#define S3C2410_GPC0_OUTP (0x01 << 0)
#define S3C2410_GPC0_LEND (0x02 << 0)
-#define S3C2410_GPC1 S3C2410_GPIONO(S3C2410_GPIO_BANKC, 1)
#define S3C2410_GPC1_INP (0x00 << 2)
#define S3C2410_GPC1_OUTP (0x01 << 2)
#define S3C2410_GPC1_VCLK (0x02 << 2)
-#define S3C2410_GPC2 S3C2410_GPIONO(S3C2410_GPIO_BANKC, 2)
#define S3C2410_GPC2_INP (0x00 << 4)
#define S3C2410_GPC2_OUTP (0x01 << 4)
#define S3C2410_GPC2_VLINE (0x02 << 4)
-#define S3C2410_GPC3 S3C2410_GPIONO(S3C2410_GPIO_BANKC, 3)
#define S3C2410_GPC3_INP (0x00 << 6)
#define S3C2410_GPC3_OUTP (0x01 << 6)
#define S3C2410_GPC3_VFRAME (0x02 << 6)
-#define S3C2410_GPC4 S3C2410_GPIONO(S3C2410_GPIO_BANKC, 4)
#define S3C2410_GPC4_INP (0x00 << 8)
#define S3C2410_GPC4_OUTP (0x01 << 8)
#define S3C2410_GPC4_VM (0x02 << 8)
-#define S3C2410_GPC5 S3C2410_GPIONO(S3C2410_GPIO_BANKC, 5)
#define S3C2410_GPC5_INP (0x00 << 10)
#define S3C2410_GPC5_OUTP (0x01 << 10)
#define S3C2410_GPC5_LCDVF0 (0x02 << 10)
-#define S3C2410_GPC6 S3C2410_GPIONO(S3C2410_GPIO_BANKC, 6)
#define S3C2410_GPC6_INP (0x00 << 12)
#define S3C2410_GPC6_OUTP (0x01 << 12)
#define S3C2410_GPC6_LCDVF1 (0x02 << 12)
-#define S3C2410_GPC7 S3C2410_GPIONO(S3C2410_GPIO_BANKC, 7)
#define S3C2410_GPC7_INP (0x00 << 14)
#define S3C2410_GPC7_OUTP (0x01 << 14)
#define S3C2410_GPC7_LCDVF2 (0x02 << 14)
-#define S3C2410_GPC8 S3C2410_GPIONO(S3C2410_GPIO_BANKC, 8)
#define S3C2410_GPC8_INP (0x00 << 16)
#define S3C2410_GPC8_OUTP (0x01 << 16)
#define S3C2410_GPC8_VD0 (0x02 << 16)
-#define S3C2410_GPC9 S3C2410_GPIONO(S3C2410_GPIO_BANKC, 9)
#define S3C2410_GPC9_INP (0x00 << 18)
#define S3C2410_GPC9_OUTP (0x01 << 18)
#define S3C2410_GPC9_VD1 (0x02 << 18)
-#define S3C2410_GPC10 S3C2410_GPIONO(S3C2410_GPIO_BANKC, 10)
#define S3C2410_GPC10_INP (0x00 << 20)
#define S3C2410_GPC10_OUTP (0x01 << 20)
#define S3C2410_GPC10_VD2 (0x02 << 20)
-#define S3C2410_GPC11 S3C2410_GPIONO(S3C2410_GPIO_BANKC, 11)
#define S3C2410_GPC11_INP (0x00 << 22)
#define S3C2410_GPC11_OUTP (0x01 << 22)
#define S3C2410_GPC11_VD3 (0x02 << 22)
-#define S3C2410_GPC12 S3C2410_GPIONO(S3C2410_GPIO_BANKC, 12)
#define S3C2410_GPC12_INP (0x00 << 24)
#define S3C2410_GPC12_OUTP (0x01 << 24)
#define S3C2410_GPC12_VD4 (0x02 << 24)
-#define S3C2410_GPC13 S3C2410_GPIONO(S3C2410_GPIO_BANKC, 13)
#define S3C2410_GPC13_INP (0x00 << 26)
#define S3C2410_GPC13_OUTP (0x01 << 26)
#define S3C2410_GPC13_VD5 (0x02 << 26)
-#define S3C2410_GPC14 S3C2410_GPIONO(S3C2410_GPIO_BANKC, 14)
#define S3C2410_GPC14_INP (0x00 << 28)
#define S3C2410_GPC14_OUTP (0x01 << 28)
#define S3C2410_GPC14_VD6 (0x02 << 28)
-#define S3C2410_GPC15 S3C2410_GPIONO(S3C2410_GPIO_BANKC, 15)
#define S3C2410_GPC15_INP (0x00 << 30)
#define S3C2410_GPC15_OUTP (0x01 << 30)
#define S3C2410_GPC15_VD7 (0x02 << 30)
#define S3C2410_GPDDAT S3C2410_GPIOREG(0x34)
#define S3C2410_GPDUP S3C2410_GPIOREG(0x38)
-#define S3C2410_GPD0 S3C2410_GPIONO(S3C2410_GPIO_BANKD, 0)
#define S3C2410_GPD0_INP (0x00 << 0)
#define S3C2410_GPD0_OUTP (0x01 << 0)
#define S3C2410_GPD0_VD8 (0x02 << 0)
-#define S3C2410_GPD1 S3C2410_GPIONO(S3C2410_GPIO_BANKD, 1)
#define S3C2410_GPD1_INP (0x00 << 2)
#define S3C2410_GPD1_OUTP (0x01 << 2)
#define S3C2410_GPD1_VD9 (0x02 << 2)
-#define S3C2410_GPD2 S3C2410_GPIONO(S3C2410_GPIO_BANKD, 2)
#define S3C2410_GPD2_INP (0x00 << 4)
#define S3C2410_GPD2_OUTP (0x01 << 4)
#define S3C2410_GPD2_VD10 (0x02 << 4)
-#define S3C2410_GPD3 S3C2410_GPIONO(S3C2410_GPIO_BANKD, 3)
#define S3C2410_GPD3_INP (0x00 << 6)
#define S3C2410_GPD3_OUTP (0x01 << 6)
#define S3C2410_GPD3_VD11 (0x02 << 6)
-#define S3C2410_GPD4 S3C2410_GPIONO(S3C2410_GPIO_BANKD, 4)
#define S3C2410_GPD4_INP (0x00 << 8)
#define S3C2410_GPD4_OUTP (0x01 << 8)
#define S3C2410_GPD4_VD12 (0x02 << 8)
-#define S3C2410_GPD5 S3C2410_GPIONO(S3C2410_GPIO_BANKD, 5)
#define S3C2410_GPD5_INP (0x00 << 10)
#define S3C2410_GPD5_OUTP (0x01 << 10)
#define S3C2410_GPD5_VD13 (0x02 << 10)
-#define S3C2410_GPD6 S3C2410_GPIONO(S3C2410_GPIO_BANKD, 6)
#define S3C2410_GPD6_INP (0x00 << 12)
#define S3C2410_GPD6_OUTP (0x01 << 12)
#define S3C2410_GPD6_VD14 (0x02 << 12)
-#define S3C2410_GPD7 S3C2410_GPIONO(S3C2410_GPIO_BANKD, 7)
#define S3C2410_GPD7_INP (0x00 << 14)
#define S3C2410_GPD7_OUTP (0x01 << 14)
#define S3C2410_GPD7_VD15 (0x02 << 14)
-#define S3C2410_GPD8 S3C2410_GPIONO(S3C2410_GPIO_BANKD, 8)
#define S3C2410_GPD8_INP (0x00 << 16)
#define S3C2410_GPD8_OUTP (0x01 << 16)
#define S3C2410_GPD8_VD16 (0x02 << 16)
-#define S3C2410_GPD9 S3C2410_GPIONO(S3C2410_GPIO_BANKD, 9)
#define S3C2410_GPD9_INP (0x00 << 18)
#define S3C2410_GPD9_OUTP (0x01 << 18)
#define S3C2410_GPD9_VD17 (0x02 << 18)
-#define S3C2410_GPD10 S3C2410_GPIONO(S3C2410_GPIO_BANKD, 10)
#define S3C2410_GPD10_INP (0x00 << 20)
#define S3C2410_GPD10_OUTP (0x01 << 20)
#define S3C2410_GPD10_VD18 (0x02 << 20)
-#define S3C2410_GPD11 S3C2410_GPIONO(S3C2410_GPIO_BANKD, 11)
#define S3C2410_GPD11_INP (0x00 << 22)
#define S3C2410_GPD11_OUTP (0x01 << 22)
#define S3C2410_GPD11_VD19 (0x02 << 22)
-#define S3C2410_GPD12 S3C2410_GPIONO(S3C2410_GPIO_BANKD, 12)
#define S3C2410_GPD12_INP (0x00 << 24)
#define S3C2410_GPD12_OUTP (0x01 << 24)
#define S3C2410_GPD12_VD20 (0x02 << 24)
-#define S3C2410_GPD13 S3C2410_GPIONO(S3C2410_GPIO_BANKD, 13)
#define S3C2410_GPD13_INP (0x00 << 26)
#define S3C2410_GPD13_OUTP (0x01 << 26)
#define S3C2410_GPD13_VD21 (0x02 << 26)
-#define S3C2410_GPD14 S3C2410_GPIONO(S3C2410_GPIO_BANKD, 14)
#define S3C2410_GPD14_INP (0x00 << 28)
#define S3C2410_GPD14_OUTP (0x01 << 28)
#define S3C2410_GPD14_VD22 (0x02 << 28)
-#define S3C2410_GPD15 S3C2410_GPIONO(S3C2410_GPIO_BANKD, 15)
#define S3C2410_GPD15_INP (0x00 << 30)
#define S3C2410_GPD15_OUTP (0x01 << 30)
#define S3C2410_GPD15_VD23 (0x02 << 30)
#define S3C2410_GPEDAT S3C2410_GPIOREG(0x44)
#define S3C2410_GPEUP S3C2410_GPIOREG(0x48)
-#define S3C2410_GPE0 S3C2410_GPIONO(S3C2410_GPIO_BANKE, 0)
#define S3C2410_GPE0_INP (0x00 << 0)
#define S3C2410_GPE0_OUTP (0x01 << 0)
#define S3C2410_GPE0_I2SLRCK (0x02 << 0)
#define S3C2410_GPE0_MASK (0x03 << 0)
-#define S3C2410_GPE1 S3C2410_GPIONO(S3C2410_GPIO_BANKE, 1)
#define S3C2410_GPE1_INP (0x00 << 2)
#define S3C2410_GPE1_OUTP (0x01 << 2)
#define S3C2410_GPE1_I2SSCLK (0x02 << 2)
#define S3C2410_GPE1_MASK (0x03 << 2)
-#define S3C2410_GPE2 S3C2410_GPIONO(S3C2410_GPIO_BANKE, 2)
#define S3C2410_GPE2_INP (0x00 << 4)
#define S3C2410_GPE2_OUTP (0x01 << 4)
#define S3C2410_GPE2_CDCLK (0x02 << 4)
-#define S3C2410_GPE3 S3C2410_GPIONO(S3C2410_GPIO_BANKE, 3)
#define S3C2410_GPE3_INP (0x00 << 6)
#define S3C2410_GPE3_OUTP (0x01 << 6)
#define S3C2410_GPE3_I2SSDI (0x02 << 6)
#define S3C2410_GPE3_MASK (0x03 << 6)
-#define S3C2410_GPE4 S3C2410_GPIONO(S3C2410_GPIO_BANKE, 4)
#define S3C2410_GPE4_INP (0x00 << 8)
#define S3C2410_GPE4_OUTP (0x01 << 8)
#define S3C2410_GPE4_I2SSDO (0x02 << 8)
#define S3C2410_GPE4_MASK (0x03 << 8)
-#define S3C2410_GPE5 S3C2410_GPIONO(S3C2410_GPIO_BANKE, 5)
#define S3C2410_GPE5_INP (0x00 << 10)
#define S3C2410_GPE5_OUTP (0x01 << 10)
#define S3C2410_GPE5_SDCLK (0x02 << 10)
-#define S3C2410_GPE6 S3C2410_GPIONO(S3C2410_GPIO_BANKE, 6)
#define S3C2410_GPE6_INP (0x00 << 12)
#define S3C2410_GPE6_OUTP (0x01 << 12)
#define S3C2410_GPE6_SDCLK (0x02 << 12)
-#define S3C2410_GPE7 S3C2410_GPIONO(S3C2410_GPIO_BANKE, 7)
#define S3C2410_GPE7_INP (0x00 << 14)
#define S3C2410_GPE7_OUTP (0x01 << 14)
#define S3C2410_GPE7_SDCMD (0x02 << 14)
-#define S3C2410_GPE8 S3C2410_GPIONO(S3C2410_GPIO_BANKE, 8)
#define S3C2410_GPE8_INP (0x00 << 16)
#define S3C2410_GPE8_OUTP (0x01 << 16)
#define S3C2410_GPE8_SDDAT1 (0x02 << 16)
-#define S3C2410_GPE9 S3C2410_GPIONO(S3C2410_GPIO_BANKE, 9)
#define S3C2410_GPE9_INP (0x00 << 18)
#define S3C2410_GPE9_OUTP (0x01 << 18)
#define S3C2410_GPE9_SDDAT2 (0x02 << 18)
-#define S3C2410_GPE10 S3C2410_GPIONO(S3C2410_GPIO_BANKE, 10)
#define S3C2410_GPE10_INP (0x00 << 20)
#define S3C2410_GPE10_OUTP (0x01 << 20)
#define S3C2410_GPE10_SDDAT3 (0x02 << 20)
-#define S3C2410_GPE11 S3C2410_GPIONO(S3C2410_GPIO_BANKE, 11)
#define S3C2410_GPE11_INP (0x00 << 22)
#define S3C2410_GPE11_OUTP (0x01 << 22)
#define S3C2410_GPE11_SPIMISO0 (0x02 << 22)
-#define S3C2410_GPE12 S3C2410_GPIONO(S3C2410_GPIO_BANKE, 12)
#define S3C2410_GPE12_INP (0x00 << 24)
#define S3C2410_GPE12_OUTP (0x01 << 24)
#define S3C2410_GPE12_SPIMOSI0 (0x02 << 24)
-#define S3C2410_GPE13 S3C2410_GPIONO(S3C2410_GPIO_BANKE, 13)
#define S3C2410_GPE13_INP (0x00 << 26)
#define S3C2410_GPE13_OUTP (0x01 << 26)
#define S3C2410_GPE13_SPICLK0 (0x02 << 26)
-#define S3C2410_GPE14 S3C2410_GPIONO(S3C2410_GPIO_BANKE, 14)
#define S3C2410_GPE14_INP (0x00 << 28)
#define S3C2410_GPE14_OUTP (0x01 << 28)
#define S3C2410_GPE14_IICSCL (0x02 << 28)
#define S3C2410_GPE14_MASK (0x03 << 28)
-#define S3C2410_GPE15 S3C2410_GPIONO(S3C2410_GPIO_BANKE, 15)
#define S3C2410_GPE15_INP (0x00 << 30)
#define S3C2410_GPE15_OUTP (0x01 << 30)
#define S3C2410_GPE15_IICSDA (0x02 << 30)
#define S3C2410_GPFDAT S3C2410_GPIOREG(0x54)
#define S3C2410_GPFUP S3C2410_GPIOREG(0x58)
-#define S3C2410_GPF0 S3C2410_GPIONO(S3C2410_GPIO_BANKF, 0)
+
#define S3C2410_GPF0_INP (0x00 << 0)
#define S3C2410_GPF0_OUTP (0x01 << 0)
#define S3C2410_GPF0_EINT0 (0x02 << 0)
-#define S3C2410_GPF1 S3C2410_GPIONO(S3C2410_GPIO_BANKF, 1)
#define S3C2410_GPF1_INP (0x00 << 2)
#define S3C2410_GPF1_OUTP (0x01 << 2)
#define S3C2410_GPF1_EINT1 (0x02 << 2)
-#define S3C2410_GPF2 S3C2410_GPIONO(S3C2410_GPIO_BANKF, 2)
#define S3C2410_GPF2_INP (0x00 << 4)
#define S3C2410_GPF2_OUTP (0x01 << 4)
#define S3C2410_GPF2_EINT2 (0x02 << 4)
-#define S3C2410_GPF3 S3C2410_GPIONO(S3C2410_GPIO_BANKF, 3)
#define S3C2410_GPF3_INP (0x00 << 6)
#define S3C2410_GPF3_OUTP (0x01 << 6)
#define S3C2410_GPF3_EINT3 (0x02 << 6)
-#define S3C2410_GPF4 S3C2410_GPIONO(S3C2410_GPIO_BANKF, 4)
#define S3C2410_GPF4_INP (0x00 << 8)
#define S3C2410_GPF4_OUTP (0x01 << 8)
#define S3C2410_GPF4_EINT4 (0x02 << 8)
-#define S3C2410_GPF5 S3C2410_GPIONO(S3C2410_GPIO_BANKF, 5)
#define S3C2410_GPF5_INP (0x00 << 10)
#define S3C2410_GPF5_OUTP (0x01 << 10)
#define S3C2410_GPF5_EINT5 (0x02 << 10)
-#define S3C2410_GPF6 S3C2410_GPIONO(S3C2410_GPIO_BANKF, 6)
#define S3C2410_GPF6_INP (0x00 << 12)
#define S3C2410_GPF6_OUTP (0x01 << 12)
#define S3C2410_GPF6_EINT6 (0x02 << 12)
-#define S3C2410_GPF7 S3C2410_GPIONO(S3C2410_GPIO_BANKF, 7)
#define S3C2410_GPF7_INP (0x00 << 14)
#define S3C2410_GPF7_OUTP (0x01 << 14)
#define S3C2410_GPF7_EINT7 (0x02 << 14)
#define S3C2410_GPGDAT S3C2410_GPIOREG(0x64)
#define S3C2410_GPGUP S3C2410_GPIOREG(0x68)
-#define S3C2410_GPG0 S3C2410_GPIONO(S3C2410_GPIO_BANKG, 0)
#define S3C2410_GPG0_INP (0x00 << 0)
#define S3C2410_GPG0_OUTP (0x01 << 0)
#define S3C2410_GPG0_EINT8 (0x02 << 0)
-#define S3C2410_GPG1 S3C2410_GPIONO(S3C2410_GPIO_BANKG, 1)
#define S3C2410_GPG1_INP (0x00 << 2)
#define S3C2410_GPG1_OUTP (0x01 << 2)
#define S3C2410_GPG1_EINT9 (0x02 << 2)
-#define S3C2410_GPG2 S3C2410_GPIONO(S3C2410_GPIO_BANKG, 2)
#define S3C2410_GPG2_INP (0x00 << 4)
#define S3C2410_GPG2_OUTP (0x01 << 4)
#define S3C2410_GPG2_EINT10 (0x02 << 4)
-#define S3C2410_GPG3 S3C2410_GPIONO(S3C2410_GPIO_BANKG, 3)
#define S3C2410_GPG3_INP (0x00 << 6)
#define S3C2410_GPG3_OUTP (0x01 << 6)
#define S3C2410_GPG3_EINT11 (0x02 << 6)
-#define S3C2410_GPG4 S3C2410_GPIONO(S3C2410_GPIO_BANKG, 4)
#define S3C2410_GPG4_INP (0x00 << 8)
#define S3C2410_GPG4_OUTP (0x01 << 8)
#define S3C2410_GPG4_EINT12 (0x02 << 8)
#define S3C2410_GPG4_LCDPWREN (0x03 << 8)
-#define S3C2410_GPG5 S3C2410_GPIONO(S3C2410_GPIO_BANKG, 5)
#define S3C2410_GPG5_INP (0x00 << 10)
#define S3C2410_GPG5_OUTP (0x01 << 10)
#define S3C2410_GPG5_EINT13 (0x02 << 10)
#define S3C2410_GPG5_SPIMISO1 (0x03 << 10)
-#define S3C2410_GPG6 S3C2410_GPIONO(S3C2410_GPIO_BANKG, 6)
#define S3C2410_GPG6_INP (0x00 << 12)
#define S3C2410_GPG6_OUTP (0x01 << 12)
#define S3C2410_GPG6_EINT14 (0x02 << 12)
#define S3C2410_GPG6_SPIMOSI1 (0x03 << 12)
-#define S3C2410_GPG7 S3C2410_GPIONO(S3C2410_GPIO_BANKG, 7)
#define S3C2410_GPG7_INP (0x00 << 14)
#define S3C2410_GPG7_OUTP (0x01 << 14)
#define S3C2410_GPG7_EINT15 (0x02 << 14)
#define S3C2410_GPG7_SPICLK1 (0x03 << 14)
-#define S3C2410_GPG8 S3C2410_GPIONO(S3C2410_GPIO_BANKG, 8)
#define S3C2410_GPG8_INP (0x00 << 16)
#define S3C2410_GPG8_OUTP (0x01 << 16)
#define S3C2410_GPG8_EINT16 (0x02 << 16)
-#define S3C2410_GPG9 S3C2410_GPIONO(S3C2410_GPIO_BANKG, 9)
#define S3C2410_GPG9_INP (0x00 << 18)
#define S3C2410_GPG9_OUTP (0x01 << 18)
#define S3C2410_GPG9_EINT17 (0x02 << 18)
-#define S3C2410_GPG10 S3C2410_GPIONO(S3C2410_GPIO_BANKG, 10)
#define S3C2410_GPG10_INP (0x00 << 20)
#define S3C2410_GPG10_OUTP (0x01 << 20)
#define S3C2410_GPG10_EINT18 (0x02 << 20)
-#define S3C2410_GPG10 S3C2410_GPIONO(S3C2410_GPIO_BANKG, 10)
#define S3C2410_GPG11_INP (0x00 << 22)
#define S3C2410_GPG11_OUTP (0x01 << 22)
#define S3C2410_GPG11_EINT19 (0x02 << 22)
#define S3C2410_GPG11_TCLK1 (0x03 << 22)
-#define S3C2410_GPG10 S3C2410_GPIONO(S3C2410_GPIO_BANKG, 10)
#define S3C2410_GPG12_INP (0x00 << 24)
#define S3C2410_GPG12_OUTP (0x01 << 24)
#define S3C2410_GPG12_EINT18 (0x02 << 24)
#define S3C2410_GPG12_XMON (0x03 << 24)
-#define S3C2410_GPG10 S3C2410_GPIONO(S3C2410_GPIO_BANKG, 10)
#define S3C2410_GPG13_INP (0x00 << 26)
#define S3C2410_GPG13_OUTP (0x01 << 26)
#define S3C2410_GPG13_EINT18 (0x02 << 26)
#define S3C2410_GPG13_nXPON (0x03 << 26)
-#define S3C2410_GPG10 S3C2410_GPIONO(S3C2410_GPIO_BANKG, 10)
#define S3C2410_GPG14_INP (0x00 << 28)
#define S3C2410_GPG14_OUTP (0x01 << 28)
#define S3C2410_GPG14_EINT18 (0x02 << 28)
#define S3C2410_GPG14_YMON (0x03 << 28)
-#define S3C2410_GPG10 S3C2410_GPIONO(S3C2410_GPIO_BANKG, 10)
#define S3C2410_GPG15_INP (0x00 << 30)
#define S3C2410_GPG15_OUTP (0x01 << 30)
#define S3C2410_GPG15_EINT18 (0x02 << 30)
#define S3C2410_GPHDAT S3C2410_GPIOREG(0x74)
#define S3C2410_GPHUP S3C2410_GPIOREG(0x78)
-#define S3C2410_GPH0 S3C2410_GPIONO(S3C2410_GPIO_BANKH, 0)
#define S3C2410_GPH0_INP (0x00 << 0)
#define S3C2410_GPH0_OUTP (0x01 << 0)
#define S3C2410_GPH0_nCTS0 (0x02 << 0)
-#define S3C2410_GPH1 S3C2410_GPIONO(S3C2410_GPIO_BANKH, 1)
#define S3C2410_GPH1_INP (0x00 << 2)
#define S3C2410_GPH1_OUTP (0x01 << 2)
#define S3C2410_GPH1_nRTS0 (0x02 << 2)
-#define S3C2410_GPH2 S3C2410_GPIONO(S3C2410_GPIO_BANKH, 2)
#define S3C2410_GPH2_INP (0x00 << 4)
#define S3C2410_GPH2_OUTP (0x01 << 4)
#define S3C2410_GPH2_TXD0 (0x02 << 4)
-#define S3C2410_GPH3 S3C2410_GPIONO(S3C2410_GPIO_BANKH, 3)
#define S3C2410_GPH3_INP (0x00 << 6)
#define S3C2410_GPH3_OUTP (0x01 << 6)
#define S3C2410_GPH3_RXD0 (0x02 << 6)
-#define S3C2410_GPH4 S3C2410_GPIONO(S3C2410_GPIO_BANKH, 4)
#define S3C2410_GPH4_INP (0x00 << 8)
#define S3C2410_GPH4_OUTP (0x01 << 8)
#define S3C2410_GPH4_TXD1 (0x02 << 8)
-#define S3C2410_GPH5 S3C2410_GPIONO(S3C2410_GPIO_BANKH, 5)
#define S3C2410_GPH5_INP (0x00 << 10)
#define S3C2410_GPH5_OUTP (0x01 << 10)
#define S3C2410_GPH5_RXD1 (0x02 << 10)
-#define S3C2410_GPH6 S3C2410_GPIONO(S3C2410_GPIO_BANKH, 6)
#define S3C2410_GPH6_INP (0x00 << 12)
#define S3C2410_GPH6_OUTP (0x01 << 12)
#define S3C2410_GPH6_TXD2 (0x02 << 12)
#define S3C2410_GPH6_nRTS1 (0x03 << 12)
-#define S3C2410_GPH7 S3C2410_GPIONO(S3C2410_GPIO_BANKH, 7)
#define S3C2410_GPH7_INP (0x00 << 14)
#define S3C2410_GPH7_OUTP (0x01 << 14)
#define S3C2410_GPH7_RXD2 (0x02 << 14)
#define S3C2410_GPH7_nCTS1 (0x03 << 14)
-#define S3C2410_GPH8 S3C2410_GPIONO(S3C2410_GPIO_BANKH, 8)
#define S3C2410_GPH8_INP (0x00 << 16)
#define S3C2410_GPH8_OUTP (0x01 << 16)
#define S3C2410_GPH8_UCLK (0x02 << 16)
-#define S3C2410_GPH9 S3C2410_GPIONO(S3C2410_GPIO_BANKH, 9)
-#define S3C2410_GPH9_INP (0x00 << 18)
-#define S3C2410_GPH9_OUTP (0x01 << 18)
-#define S3C2410_GPH9_CLKOUT0 (0x02 << 18)
+#define S3C2410_GPH9_INP (0x00 << 18)
+#define S3C2410_GPH9_OUTP (0x01 << 18)
+#define S3C2410_GPH9_CLKOUT0 (0x02 << 18)
-#define S3C2410_GPH10 S3C2410_GPIONO(S3C2410_GPIO_BANKH, 10)
-#define S3C2410_GPH10_INP (0x00 << 20)
-#define S3C2410_GPH10_OUTP (0x01 << 20)
-#define S3C2410_GPH10_CLKOUT1 (0x02 << 20)
+#define S3C2410_GPH10_INP (0x00 << 20)
+#define S3C2410_GPH10_OUTP (0x01 << 20)
+#define S3C2410_GPH10_CLKOUT1 (0x02 << 20)
/* miscellaneous control */
extern int _find_first_zero_bit_be(void * p, unsigned size);
extern int _find_next_zero_bit_be(void * p, int size, int offset);
extern int _find_first_bit_be(const unsigned long *p, unsigned size);
-extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
+extern int _find_next_bit_be(unsigned long *p, int size, int offset);
/*
* The __* form of bitops are non-atomic and may be reordered.
* Start addresses are inclusive and end addresses are exclusive;
* start addresses should be rounded down, end addresses up.
*
- * See Documentation/cachetlb.txt for more information.
+ * See linux/Documentation/cachetlb.txt for more information.
* Please note that the implementation of these, and the required
* effects are cache-type (VIVT/VIPT/PIPT) specific.
*
#define MAX_HWIFS 4
#endif
-#if defined(CONFIG_ARCH_SA1100)
+#if defined(CONFIG_ARCH_LH7A40X) || defined(CONFIG_ARCH_SA1100)
# include <asm/arch/ide.h> /* obsolete + broken */
#endif
-#if !defined(CONFIG_ARCH_L7200)
+#if !defined(CONFIG_ARCH_L7200) && !defined(CONFIG_ARCH_LH7A40X)
# define IDE_ARCH_OBSOLETE_INIT
# ifdef CONFIG_ARCH_CLPS7500
# define ide_default_io_ctl(base) ((base) + 0x206) /* obsolete */
# else
# define ide_default_io_ctl(base) (0)
# endif
-#endif /* !ARCH_L7200 */
+#endif /* !ARCH_L7200 && !ARCH_LH7A40X */
#define __ide_mm_insw(port,addr,len) readsw(port,addr,len)
#define __ide_mm_insl(port,addr,len) readsl(port,addr,len)
* ioremap and friends.
*
* ioremap takes a PCI memory address, as specified in
- * Documentation/IO-mapping.txt.
+ * linux/Documentation/IO-mapping.txt.
*/
extern void * __ioremap(unsigned long, size_t, unsigned long, unsigned long);
extern void __iounmap(void *addr);
* See arch/arm/kernel/sys-arm.c for ugly details..
*/
struct ipc_kludge {
- struct msgbuf __user *msgp;
+ struct msgbuf *msgp;
long msgtyp;
};
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-#define devmem_is_allowed(x) 1
-
#endif /* __KERNEL__ */
#endif
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ 0, 0 }, \
{ INR_OPEN, INR_OPEN }, \
- { 32768, 32768 }, \
+ { PAGE_SIZE, PAGE_SIZE }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ MAX_SIGPENDING, MAX_SIGPENDING}, \
* published by the Free Software Foundation.
*
* Structure passed to kernel to tell it about the
- * hardware it's running on. See Documentation/arm/Setup
+ * hardware it's running on. See linux/Documentation/arm/Setup
* for more info.
*/
#ifndef __ASMARM_SETUP_H
#define SIG_SETMASK 2 /* for setting the signal mask */
/* Type of a signal handler. */
-typedef void __signalfn_t(int);
-typedef __signalfn_t __user *__sighandler_t;
-
-typedef void __restorefn_t(void);
-typedef __restorefn_t __user *__sigrestore_t;
+typedef void (*__sighandler_t)(int);
#define SIG_DFL ((__sighandler_t)0) /* default signal handling */
#define SIG_IGN ((__sighandler_t)1) /* ignore signal */
__sighandler_t sa_handler;
old_sigset_t sa_mask;
unsigned long sa_flags;
- __sigrestore_t sa_restorer;
+ void (*sa_restorer)(void);
};
struct sigaction {
__sighandler_t sa_handler;
unsigned long sa_flags;
- __sigrestore_t sa_restorer;
+ void (*sa_restorer)(void);
sigset_t sa_mask; /* mask last for extensibility */
};
#endif /* __KERNEL__ */
typedef struct sigaltstack {
- void __user *ss_sp;
+ void *ss_sp;
int ss_flags;
size_t ss_size;
} stack_t;
/* We use 33-bit arithmetic here... */
#define __range_ok(addr,size) ({ \
unsigned long flag, sum; \
- __chk_user_ptr(addr); \
__asm__("adds %1, %2, %3; sbcccs %1, %1, %0; movcc %0, #0" \
: "=&r" (flag), "=&r" (sum) \
: "r" (addr), "Ir" (size), "0" (current_thread_info()->addr_limit) \
#define get_user(x,p) \
({ \
- const register typeof(*(p)) __user *__p asm("r0") = (p);\
+ const register typeof(*(p)) *__p asm("r0") = (p); \
register typeof(*(p)) __r1 asm("r1"); \
register int __e asm("r0"); \
switch (sizeof(*(__p))) { \
do { \
unsigned long __gu_addr = (unsigned long)(ptr); \
unsigned long __gu_val; \
- __chk_user_ptr(ptr); \
switch (sizeof(*(ptr))) { \
case 1: __get_user_asm_byte(__gu_val,__gu_addr,err); break; \
case 2: __get_user_asm_half(__gu_val,__gu_addr,err); break; \
#define put_user(x,p) \
({ \
const register typeof(*(p)) __r1 asm("r1") = (x); \
- const register typeof(*(p)) __user *__p asm("r0") = (p);\
+ const register typeof(*(p)) *__p asm("r0") = (p); \
register int __e asm("r0"); \
switch (sizeof(*(__p))) { \
case 1: \
do { \
unsigned long __pu_addr = (unsigned long)(ptr); \
__typeof__(*(ptr)) __pu_val = (x); \
- __chk_user_ptr(ptr); \
switch (sizeof(*(ptr))) { \
case 1: __put_user_asm_byte(__pu_val,__pu_addr,err); break; \
case 2: __put_user_asm_half(__pu_val,__pu_addr,err); break; \
* ioremap and friends.
*
* ioremap takes a PCI memory address, as specified in
- * Documentation/IO-mapping.txt.
+ * linux/Documentation/IO-mapping.txt.
*/
extern void * __ioremap(unsigned long, size_t, unsigned long, unsigned long);
extern void __iounmap(void *addr);
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-#define devmem_is_allowed(x) 1
-
#endif /* __KERNEL__ */
#endif
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ 0, 0 }, \
{ INR_OPEN, INR_OPEN }, \
- { 32768, 32768 }, \
+ { PAGE_SIZE, PAGE_SIZE }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ MAX_SIGPENDING, MAX_SIGPENDING}, \
* published by the Free Software Foundation.
*
* Structure passed to kernel to tell it about the
- * hardware it's running on. See Documentation/arm/Setup
+ * hardware it's running on. See linux/Documentation/arm/Setup
* for more info.
*/
#ifndef __ASMARM_SETUP_H
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-#define devmem_is_allowed(x) 1
-
#endif /* __KERNEL__ */
#endif /* _CRIS_PAGE_H */
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ 0, 0 }, \
{ INR_OPEN, INR_OPEN }, \
- { 32768, 32768 }, \
+ { PAGE_SIZE, PAGE_SIZE }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ MAX_SIGPENDING, MAX_SIGPENDING }, \
#define page_test_and_clear_young(page) (0)
#endif
-#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
-#define pgd_offset_gate(mm, addr) pgd_offset(mm, addr)
-#endif
-
#endif /* _ASM_GENERIC_PGTABLE_H */
#include <linux/config.h>
#include <linux/swap.h>
-#include <linux/vs_memory.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
+#include <linux/vs_memory.h>
/*
* For UP we don't need to worry about TLB flush
#endif /* __ASSEMBLY__ */
-#define devmem_is_allowed(x) 1
-
#endif /* __KERNEL__ */
#endif /* _H8300_PAGE_H */
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ 0, 0 }, \
{ INR_OPEN, INR_OPEN }, \
- { 32768, 32768 }, \
+ { PAGE_SIZE, PAGE_SIZE }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ MAX_SIGPENDING, MAX_SIGPENDING }, \
+++ /dev/null
-/*
- * Kernel header file for Linux crash dumps.
- *
- * Created by: Matt Robinson (yakker@sgi.com)
- *
- * Copyright 1999 Silicon Graphics, Inc. All rights reserved.
- *
- * This code is released under version 2 of the GNU GPL.
- */
-
-/* This header file holds the architecture specific crash dump header */
-#ifndef _ASM_DUMP_H
-#define _ASM_DUMP_H
-
-/* necessary header files */
-#include <asm/ptrace.h>
-#include <asm/page.h>
-#include <linux/threads.h>
-#include <linux/mm.h>
-
-/* definitions */
-#define DUMP_ASM_MAGIC_NUMBER 0xdeaddeadULL /* magic number */
-#define DUMP_ASM_VERSION_NUMBER 0x3 /* version number */
-
-/* max number of cpus */
-#define DUMP_MAX_NUM_CPUS 32
-
-/*
- * Structure: __dump_header_asm
- * Function: This is the header for architecture-specific stuff. It
- * follows right after the dump header.
- */
-struct __dump_header_asm {
- /* the dump magic number -- unique to verify dump is valid */
- u64 dha_magic_number;
-
- /* the version number of this dump */
- u32 dha_version;
-
- /* the size of this header (in case we can't read it) */
- u32 dha_header_size;
-
- /* the esp for i386 systems */
- u32 dha_esp;
-
- /* the eip for i386 systems */
- u32 dha_eip;
-
- /* the dump registers */
- struct pt_regs dha_regs;
-
- /* smp specific */
- u32 dha_smp_num_cpus;
- u32 dha_dumping_cpu;
- struct pt_regs dha_smp_regs[DUMP_MAX_NUM_CPUS];
- u32 dha_smp_current_task[DUMP_MAX_NUM_CPUS];
- u32 dha_stack[DUMP_MAX_NUM_CPUS];
- u32 dha_stack_ptr[DUMP_MAX_NUM_CPUS];
-} __attribute__((packed));
-
-#ifdef __KERNEL__
-
-extern struct __dump_header_asm dump_header_asm;
-
-#ifdef CONFIG_SMP
-extern cpumask_t irq_affinity[];
-extern int (*dump_ipi_function_ptr)(struct pt_regs *);
-extern void dump_send_ipi(void);
-#else
-#define dump_send_ipi() do { } while(0)
-#endif
-
-static inline void get_current_regs(struct pt_regs *regs)
-{
- __asm__ __volatile__("movl %%ebx,%0" : "=m"(regs->ebx));
- __asm__ __volatile__("movl %%ecx,%0" : "=m"(regs->ecx));
- __asm__ __volatile__("movl %%edx,%0" : "=m"(regs->edx));
- __asm__ __volatile__("movl %%esi,%0" : "=m"(regs->esi));
- __asm__ __volatile__("movl %%edi,%0" : "=m"(regs->edi));
- __asm__ __volatile__("movl %%ebp,%0" : "=m"(regs->ebp));
- __asm__ __volatile__("movl %%eax,%0" : "=m"(regs->eax));
- __asm__ __volatile__("movl %%esp,%0" : "=m"(regs->esp));
- __asm__ __volatile__("movw %%ss, %%ax;" :"=a"(regs->xss));
- __asm__ __volatile__("movw %%cs, %%ax;" :"=a"(regs->xcs));
- __asm__ __volatile__("movw %%ds, %%ax;" :"=a"(regs->xds));
- __asm__ __volatile__("movw %%es, %%ax;" :"=a"(regs->xes));
- __asm__ __volatile__("pushfl; popl %0" :"=m"(regs->eflags));
- regs->eip = (unsigned long)current_text_addr();
-}
-
-#endif /* __KERNEL__ */
-
-#endif /* _ASM_DUMP_H */
#define AT_SYSINFO_EHDR 33
#ifdef __KERNEL__
+/* child inherits the personality of the parent */
#define SET_PERSONALITY(ex, ibcs2) do { } while (0)
-/*
- * An executable for which elf_read_implies_exec() returns TRUE will
- * have the READ_IMPLIES_EXEC personality flag set automatically.
- */
-#define elf_read_implies_exec_binary(ex, have_pt_gnu_stack) (!(have_pt_gnu_stack))
-
extern int dump_task_regs (struct task_struct *, elf_gregset_t *);
extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *);
extern int dump_task_extended_fpu (struct task_struct *, struct user_fxsr_struct *);
#define O_DIRECTORY 0200000 /* must be a directory */
#define O_NOFOLLOW 0400000 /* don't follow links */
#define O_NOATIME 01000000
-#define O_ATOMICLOOKUP 02000000 /* do atomic file lookup */
#define F_DUPFD 0 /* dup */
#define F_GETFD 1 /* get close_on_exec */
#ifdef CONFIG_X86_IO_APIC
-#ifdef CONFIG_PCI_MSI
+#ifdef CONFIG_PCI_USE_VECTOR
static inline int use_pci_vector(void) {return 1;}
static inline void disable_edge_ioapic_vector(unsigned int vector) { }
static inline void mask_and_ack_level_ioapic_vector(unsigned int vector) { }
KM_SOFTIRQ1,
KM_NETDUMP,
KM_UNUSED,
- KM_TYPE_NR,
- KM_DUMP
+ KM_TYPE_NR
};
#endif
#define INVALIDATE_TLB_VECTOR 0xfd
#define RESCHEDULE_VECTOR 0xfc
#define CALL_FUNCTION_VECTOR 0xfb
-#define DUMP_VECTOR 0xfa
#define THERMAL_APIC_VECTOR 0xf0
/*
#ifndef _ASM_IRQ_VECTORS_LIMITS_H
#define _ASM_IRQ_VECTORS_LIMITS_H
-#ifdef CONFIG_PCI_MSI
+#ifdef CONFIG_PCI_USE_VECTOR
#define NR_IRQS FIRST_SYSTEM_VECTOR
#define NR_IRQ_VECTORS NR_IRQS
#else
return order;
}
-extern int devmem_is_allowed(unsigned long pagenr);
-
#endif /* __ASSEMBLY__ */
#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
-#define VM_DATA_DEFAULT_FLAGS \
- (VM_READ | VM_WRITE | \
- ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-
-
+#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#endif /* __KERNEL__ */
#define check_pgt_cache() do { } while (0)
+#define HAVE_ARCH_UNMAPPED_AREA 1
+
#endif /* _I386_PGALLOC_H */
*/
#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE/3)
+#define SHLIB_BASE 0x00111000
+
#define __HAVE_ARCH_ALIGN_STACK
extern unsigned long arch_align_stack(unsigned long sp);
-#define HAVE_ARCH_PICK_MMAP_LAYOUT
+#define __HAVE_ARCH_MMAP_TOP
+extern unsigned long mmap_top(void);
/*
* Size of io_bitmap, covering ports 0 to 0x3ff.
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ 0, 0 }, \
{ INR_OPEN, INR_OPEN }, \
- { 32768, 32768 }, \
+ { PAGE_SIZE, PAGE_SIZE }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ MAX_SIGPENDING, MAX_SIGPENDING }, \
extern cpumask_t cpu_sibling_map[];
extern void smp_flush_tlb(void);
-extern void dump_send_ipi(void);
extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs);
extern void smp_invalidate_rcv(void); /* Process an NMI */
extern void (*mtrr_hook) (void);
*/
#if !defined(IN_STRING_C)
-#define __HAVE_ARCH_STRCPY
static inline char * strcpy(char * dest,const char *src)
{
int d0, d1, d2;
return dest;
}
-#define __HAVE_ARCH_STRNCPY
static inline char * strncpy(char * dest,const char *src,size_t count)
{
int d0, d1, d2, d3;
return count;
}
-#define __HAVE_ARCH_STRCAT
static inline char * strcat(char * dest,const char * src)
{
int d0, d1, d2, d3;
return dest;
}
-#define __HAVE_ARCH_STRNCAT
static inline char * strncat(char * dest,const char * src,size_t count)
{
int d0, d1, d2, d3;
return dest;
}
-#define __HAVE_ARCH_STRCMP
static inline int strcmp(const char * cs,const char * ct)
{
int d0, d1;
return __res;
}
-#define __HAVE_ARCH_STRNCMP
static inline int strncmp(const char * cs,const char * ct,size_t count)
{
register int __res;
return __res;
}
-#define __HAVE_ARCH_STRCHR
static inline char * strchr(const char * s, int c)
{
int d0;
return __res;
}
-#define __HAVE_ARCH_STRRCHR
static inline char * strrchr(const char * s, int c)
{
int d0, d1;
#include <linux/thread_info.h>
#include <linux/prefetch.h>
#include <linux/string.h>
+#include <linux/compiler.h>
#include <asm/page.h>
#define VERIFY_READ 0
#define __put_user_check(x,ptr,size) \
({ \
long __pu_err = -EFAULT; \
- __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
+ __typeof__(*(ptr)) *__pu_addr = (ptr); \
might_sleep(); \
if (access_ok(VERIFY_WRITE,__pu_addr,size)) \
__put_user_size((x),__pu_addr,(size),__pu_err,-EFAULT); \
: "m"(__m(addr)), "i"(errret), "0"(err))
-unsigned long __copy_to_user_ll(void __user *to, const void *from, unsigned long n);
-unsigned long __copy_from_user_ll(void *to, const void __user *from, unsigned long n);
+unsigned long __must_check __copy_to_user_ll(void __user *to, const void *from, unsigned long n);
+unsigned long __must_check __copy_from_user_ll(void *to, const void __user *from, unsigned long n);
/*
* Here we special-case 1, 2 and 4-byte copy_*_user invocations. On a fault
* Returns number of bytes that could not be copied.
* On success, this will be zero.
*/
-static inline unsigned long
+static inline unsigned long __must_check
__direct_copy_to_user(void __user *to, const void *from, unsigned long n)
{
if (__builtin_constant_p(n)) {
* If some data could not be copied, this function will pad the copied
* data to the requested size using zero bytes.
*/
-static inline unsigned long
+static inline unsigned long __must_check
__direct_copy_from_user(void *to, const void __user *from, unsigned long n)
{
if (__builtin_constant_p(n)) {
* Returns number of bytes that could not be copied.
* On success, this will be zero.
*/
-static inline unsigned long
+static inline unsigned long __must_check
direct_copy_to_user(void __user *to, const void *from, unsigned long n)
{
might_sleep();
* If some data could not be copied, this function will pad the copied
* data to the requested size using zero bytes.
*/
-static inline unsigned long
+static inline unsigned long __must_check
direct_copy_from_user(void *to, const void __user *from, unsigned long n)
{
might_sleep();
#define __NR_mq_notify (__NR_mq_open+4)
#define __NR_mq_getsetattr (__NR_mq_open+5)
#define __NR_sys_kexec_load 283
-#define __NR_ioprio_set 284
-#define __NR_ioprio_get 285
-#define NR_syscalls 286
+#define NR_syscalls 284
-#ifndef __KERNEL_SYSCALLS_NO_ERRNO__
/* user-visible error numbers are in the range -1 - -124: see <asm-i386/errno.h> */
#define __syscall_return(type, res) \
return (type) (res); \
} while (0)
-#else
-# define __syscall_return(type, res) return (type) (res)
-#endif
-
/* XXX - _foo needs to be __foo, while __NR_bar could be _NR_bar. */
#define _syscall0(type,name) \
type name(void) \
* won't be any messing with the stack from main(), but we define
* some others too.
*/
+static inline _syscall0(pid_t,setsid)
+static inline _syscall3(int,write,int,fd,const char *,buf,off_t,count)
+static inline _syscall3(int,read,int,fd,char *,buf,off_t,count)
+static inline _syscall3(off_t,lseek,int,fd,off_t,offset,int,count)
+static inline _syscall1(int,dup,int,fd)
static inline _syscall3(int,execve,const char *,file,char **,argv,char **,envp)
+static inline _syscall3(int,open,const char *,file,int,flag,int,mode)
+static inline _syscall1(int,close,int,fd)
+static inline _syscall3(pid_t,waitpid,pid_t,pid,int *,wait_stat,int,options)
asmlinkage int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount);
asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
#ifdef CONFIG_IA64_CYCLONE
extern int use_cyclone;
-extern void __init cyclone_setup(void);
+extern int __init cyclone_setup(char*);
#else /* CONFIG_IA64_CYCLONE */
#define use_cyclone 0
-static inline void cyclone_setup(void)
+static inline void cyclone_setup(char* s)
{
printk(KERN_ERR "Cyclone Counter: System not configured"
" w/ CONFIG_IA64_CYCLONE.\n");
#define AT_SYSINFO_EHDR 33
#ifdef __KERNEL__
-#define SET_PERSONALITY(ex, ibcs2) set_personality(PER_LINUX)
-#define elf_read_implies_exec(ex, have_pt_gnu_stack) \
- (!(have_pt_gnu_stack) && ((ex).e_flags & EF_IA_64_LINUX_EXECUTABLE_STACK) != 0)
+struct elf64_hdr;
+extern void ia64_set_personality (struct elf64_hdr *elf_ex, int ibcs2_interpreter);
+#define SET_PERSONALITY(ex, ibcs2) ia64_set_personality(&(ex), ibcs2)
struct task_struct;
#define O_DIRECTORY 0200000 /* must be a directory */
#define O_NOFOLLOW 0400000 /* don't follow links */
#define O_NOATIME 01000000
-#define O_ATOMICLOOKUP 02000000 /* do atomic file lookup */
#define F_DUPFD 0 /* dup */
#define F_GETFD 1 /* get close_on_exec */
typedef void ia64_mv_global_tlb_purge_t (unsigned long, unsigned long, unsigned long);
typedef void ia64_mv_tlb_migrate_finish_t (struct mm_struct *);
typedef struct irq_desc *ia64_mv_irq_desc (unsigned int);
-typedef u8 ia64_mv_irq_to_vector (unsigned int);
-typedef unsigned int ia64_mv_local_vector_to_irq (u8);
+typedef u8 ia64_mv_irq_to_vector (u8);
+typedef unsigned int ia64_mv_local_vector_to_irq (u8 vector);
/* DMA-mapping interface: */
typedef void ia64_mv_dma_init (void);
#ifdef CONFIG_IA64_DIG
/* Max 8 Nodes */
#define NODES_SHIFT 3
-#elif defined(CONFIG_IA64_HP_ZX1)
-/* Max 32 Nodes */
-#define NODES_SHIFT 5
#elif defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
/* Max 256 Nodes */
#define NODES_SHIFT 8
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC | \
- (((current->personality & READ_IMPLIES_EXEC) != 0) \
+ (((current->thread.flags & IA64_THREAD_XSTACK) != 0) \
? VM_EXEC : 0))
-#define devmem_is_allowed(x) 1
-
#endif /* _ASM_IA64_PAGE_H */
static inline pgd_t*
pgd_alloc_one_fast (struct mm_struct *mm)
{
- unsigned long *ret = NULL;
+ unsigned long *ret = pgd_quicklist;
- preempt_disable();
-
- ret = pgd_quicklist;
if (likely(ret != NULL)) {
pgd_quicklist = (unsigned long *)(*ret);
ret[0] = 0;
--pgtable_cache_size;
} else
ret = NULL;
-
- preempt_enable();
-
return (pgd_t *) ret;
}
static inline void
pgd_free (pgd_t *pgd)
{
- preempt_disable();
*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
pgd_quicklist = (unsigned long *) pgd;
++pgtable_cache_size;
- preempt_enable();
}
static inline void
static inline pmd_t*
pmd_alloc_one_fast (struct mm_struct *mm, unsigned long addr)
{
- unsigned long *ret = NULL;
+ unsigned long *ret = (unsigned long *)pmd_quicklist;
- preempt_disable();
-
- ret = (unsigned long *)pmd_quicklist;
if (likely(ret != NULL)) {
pmd_quicklist = (unsigned long *)(*ret);
ret[0] = 0;
--pgtable_cache_size;
}
-
- preempt_enable();
-
return (pmd_t *)ret;
}
static inline void
pmd_free (pmd_t *pmd)
{
- preempt_disable();
*(unsigned long *)pmd = (unsigned long) pmd_quicklist;
pmd_quicklist = (unsigned long *) pmd;
++pgtable_cache_size;
- preempt_enable();
}
#define __pmd_free_tlb(tlb, pmd) pmd_free(pmd)
#define pgd_offset_k(addr) \
(init_mm.pgd + (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)))
-/* Look up a pgd entry in the gate area. On IA-64, the gate-area
- resides in the kernel-mapped segment, hence we use pgd_offset_k()
- here. */
-#define pgd_offset_gate(mm, addr) pgd_offset_k(addr)
-
/* Find an entry in the second-level page table.. */
#define pmd_offset(dir,addr) \
((pmd_t *) pgd_page(*(dir)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTEP_MKDIRTY
#define __HAVE_ARCH_PTE_SAME
-#define __HAVE_ARCH_PGD_OFFSET_GATE
#include <asm-generic/pgtable.h>
#endif /* _ASM_IA64_PGTABLE_H */
#define _ASM_IA64_PROCESSOR_H
/*
- * Copyright (C) 1998-2004 Hewlett-Packard Co
+ * Copyright (C) 1998-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
* Stephane Eranian <eranian@hpl.hp.com>
* Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
/* bit 5 is currently unused */
#define IA64_THREAD_FPEMU_NOPRINT (__IA64_UL(1) << 6) /* don't log any fpswa faults */
#define IA64_THREAD_FPEMU_SIGFPE (__IA64_UL(1) << 7) /* send a SIGFPE for fpswa faults */
+#define IA64_THREAD_XSTACK (__IA64_UL(1) << 8) /* stack executable by default? */
#define IA64_THREAD_UAC_SHIFT 3
#define IA64_THREAD_UAC_MASK (IA64_THREAD_UAC_NOPRINT | IA64_THREAD_UAC_SIGBUS)
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ 0, 0 }, \
{ INR_OPEN, INR_OPEN }, \
- { 32768, 32768 }, \
+ { PAGE_SIZE, PAGE_SIZE }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ MAX_SIGPENDING, MAX_SIGPENDING }, \
#ifndef _ASM_SN_SN2_IO_H
#define _ASM_SN_SN2_IO_H
-#include <linux/compiler.h>
-#include <asm/intrinsics.h>
-extern void * sn_io_addr(unsigned long port) __attribute_const__; /* Forward definition */
+extern void * sn_io_addr(unsigned long port); /* Forward definition */
extern void sn_mmiob(void); /* Forward definition */
+#include <asm/intrinsics.h>
#define __sn_mf_a() ia64_mfa()
* Specify the minimum PROM revsion required for this kernel.
* Note that they're stored in hex format...
*/
-#define SN_SAL_MIN_MAJOR 0x3 /* SN2 kernels need at least PROM 3.40 */
-#define SN_SAL_MIN_MINOR 0x40
+#define SN_SAL_MIN_MAJOR 0x1 /* SN2 kernels need at least PROM 1.0 */
+#define SN_SAL_MIN_MINOR 0x0
u64 ia64_sn_probe_io_slot(long paddr, long size, void *data_ptr);
#define __NR_syslog 1117
#define __NR_setitimer 1118
#define __NR_getitimer 1119
-#define __NR_tux 1120 /* was __NR_old_stat */
+/* 1120 was __NR_old_stat */
/* 1121 was __NR_old_lstat */
/* 1122 was __NR_old_fstat */
#define __NR_vhangup 1123
#define __set_bit(nr,vaddr) set_bit(nr,vaddr)
-static inline void __constant_set_bit(int nr, volatile unsigned long *vaddr)
+static inline void __constant_set_bit(int nr, unsigned long *vaddr)
{
char *p = (char *)vaddr + (nr ^ 31) / 8;
__asm__ __volatile__ ("bset %1,%0"
: "+m" (*p) : "di" (nr & 7));
}
-static inline void __generic_set_bit(int nr, volatile unsigned long *vaddr)
+static inline void __generic_set_bit(int nr, unsigned long *vaddr)
{
__asm__ __volatile__ ("bfset %1{%0:#1}"
: : "d" (nr^31), "o" (*vaddr) : "memory");
__generic_clear_bit(nr, vaddr))
#define __clear_bit(nr,vaddr) clear_bit(nr,vaddr)
-static inline void __constant_clear_bit(int nr, volatile unsigned long *vaddr)
+static inline void __constant_clear_bit(int nr, unsigned long *vaddr)
{
char *p = (char *)vaddr + (nr ^ 31) / 8;
__asm__ __volatile__ ("bclr %1,%0"
: "+m" (*p) : "di" (nr & 7));
}
-static inline void __generic_clear_bit(int nr, volatile unsigned long *vaddr)
+static inline void __generic_clear_bit(int nr, unsigned long *vaddr)
{
__asm__ __volatile__ ("bfclr %1{%0:#1}"
: : "d" (nr^31), "o" (*vaddr) : "memory");
#ifndef __M68K_HARDIRQ_H
#define __M68K_HARDIRQ_H
-#include <linux/config.h>
#include <linux/threads.h>
#include <linux/cache.h>
struct fp_ext temp[2];
};
-#ifdef FPU_EMU_DEBUG
+#if FPU_EMU_DEBUG
extern unsigned int fp_debugprint;
#define dprint(bit, fmt, args...) ({ \
#define _MOTOROLA_PGALLOC_H
#include <asm/tlb.h>
-#include <asm/tlbflush.h>
extern pmd_t *get_pointer_table(void);
extern int free_pointer_table(pmd_t *);
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-#define devmem_is_allowed(x) 1
-
#endif /* __KERNEL__ */
#endif /* _M68K_PAGE_H */
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ 0, 0 }, \
{ INR_OPEN, INR_OPEN }, \
- { 32768, 32768 }, \
+ { PAGE_SIZE, PAGE_SIZE }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ MAX_SIGPENDING, MAX_SIGPENDING }, \
atomic_t count;
atomic_t waking;
wait_queue_head_t wait;
-#ifdef WAITQUEUE_DEBUG
+#if WAITQUEUE_DEBUG
long __magic;
#endif
};
-#ifdef WAITQUEUE_DEBUG
+#if WAITQUEUE_DEBUG
# define __SEM_DEBUG_INIT(name) \
, (long)&(name).__magic
#else
{
register struct semaphore *sem1 __asm__ ("%a1") = sem;
-#ifdef WAITQUEUE_DEBUG
+#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
might_sleep();
register struct semaphore *sem1 __asm__ ("%a1") = sem;
register int result __asm__ ("%d0");
-#ifdef WAITQUEUE_DEBUG
+#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
might_sleep();
register struct semaphore *sem1 __asm__ ("%a1") = sem;
register int result __asm__ ("%d0");
-#ifdef WAITQUEUE_DEBUG
+#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
{
register struct semaphore *sem1 __asm__ ("%a1") = sem;
-#ifdef WAITQUEUE_DEBUG
+#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
__free_page(page);
}
-#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
+static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *page)
+{
+ tlb_remove_page(tlb, page);
+}
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
#endif /* __ASSEMBLY__ */
-#define devmem_is_allowed(x) 1
-
#endif /* __KERNEL__ */
#endif /* _M68KNOMMU_PAGE_H */
#define WANT_PAGE_VIRTUAL
#endif
-#define devmem_is_allowed(x) 1
-
#endif /* _ASM_PAGE_H */
#ifdef __LP64__
#define LDREG ldd
#define STREG std
-#define LDREGX ldd,s
#define LDREGM ldd,mb
#define STREGM std,ma
#define RP_OFFSET 16
#else
#define LDREG ldw
#define STREG stw
-#define LDREGX ldwx,s
#define LDREGM ldwm
#define STREGM stwm
#define RP_OFFSET 20
#define FRAME_SIZE 64
#endif
-#ifdef CONFIG_PA20
-#define BL b,l
-#else
-#define BL bl
-#endif
-
#ifdef __ASSEMBLY__
#ifdef __LP64__
depd,z \r, 63-\sa, 64-\sa, \t
.endm
- /* Shift Right - note the r and t can NOT be the same! */
- .macro shr r, sa, t
- extru \r, 31-\sa, 32-\sa, \t
- .endm
-
- /* pa20w version of shift right */
- .macro shrd r, sa, t
- extrd,u \r, 63-\sa, 64-\sa, \t
- .endm
-
/* load 32-bit 'value' into 'reg' compensating for the ldil
* sign-extension when running in wide mode.
* WARNING!! neither 'value' nor 'reg' can be expressions
#define smp_mb__before_clear_bit() smp_mb()
#define smp_mb__after_clear_bit() smp_mb()
-static __inline__ void set_bit(int nr, volatile unsigned long * address)
+static __inline__ void set_bit(int nr, void * address)
{
unsigned long mask;
unsigned long *addr = (unsigned long *) address;
atomic_spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);
}
-static __inline__ void __set_bit(int nr, volatile unsigned long * address)
+static __inline__ void __set_bit(int nr, void * address)
{
unsigned long mask;
unsigned long *addr = (unsigned long *) address;
*addr |= mask;
}
-static __inline__ void clear_bit(int nr, volatile unsigned long * address)
+static __inline__ void clear_bit(int nr, void * address)
{
unsigned long mask;
unsigned long *addr = (unsigned long *) address;
atomic_spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);
}
-static __inline__ void __clear_bit(unsigned long nr, volatile unsigned long * address)
+static __inline__ void __clear_bit(unsigned long nr, volatile void * address)
{
unsigned long mask;
unsigned long *addr = (unsigned long *) address;
*addr &= ~mask;
}
-static __inline__ void change_bit(int nr, volatile unsigned long * address)
+static __inline__ void change_bit(int nr, void * address)
{
unsigned long mask;
unsigned long *addr = (unsigned long *) address;
atomic_spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);
}
-static __inline__ void __change_bit(int nr, volatile unsigned long * address)
+static __inline__ void __change_bit(int nr, void * address)
{
unsigned long mask;
unsigned long *addr = (unsigned long *) address;
*addr ^= mask;
}
-static __inline__ int test_and_set_bit(int nr, volatile unsigned long * address)
+static __inline__ int test_and_set_bit(int nr, void * address)
{
unsigned long mask;
unsigned long *addr = (unsigned long *) address;
return oldbit;
}
-static __inline__ int __test_and_set_bit(int nr, volatile unsigned long * address)
+static __inline__ int __test_and_set_bit(int nr, void * address)
{
unsigned long mask;
unsigned long *addr = (unsigned long *) address;
return oldbit;
}
-static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * address)
+static __inline__ int test_and_clear_bit(int nr, void * address)
{
unsigned long mask;
unsigned long *addr = (unsigned long *) address;
return oldbit;
}
-static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long * address)
+static __inline__ int __test_and_clear_bit(int nr, void * address)
{
unsigned long mask;
unsigned long *addr = (unsigned long *) address;
return oldbit;
}
-static __inline__ int test_and_change_bit(int nr, volatile unsigned long * address)
+static __inline__ int test_and_change_bit(int nr, void * address)
{
unsigned long mask;
unsigned long *addr = (unsigned long *) address;
return oldbit;
}
-static __inline__ int __test_and_change_bit(int nr, volatile unsigned long * address)
+static __inline__ int __test_and_change_bit(int nr, void * address)
{
unsigned long mask;
unsigned long *addr = (unsigned long *) address;
return oldbit;
}
-static __inline__ int test_bit(int nr, const volatile unsigned long *address)
+static __inline__ int test_bit(int nr, const void *address)
{
unsigned long mask;
- const unsigned long *addr = (const unsigned long *)address;
+ unsigned long *addr = (unsigned long *) address;
addr += (nr >> SHIFT_PER_LONG);
mask = 1L << CHOP_SHIFTCOUNT(nr);
* unlikely to be set. It's guaranteed that at least one of the 140
* bits is cleared.
*/
-static inline int sched_find_first_bit(const unsigned long *b)
+static inline int sched_find_first_bit(unsigned long *b)
{
#ifndef __LP64__
if (unlikely(b[0]))
#define find_first_zero_bit(addr, size) \
find_next_zero_bit((addr), (size), 0)
-static __inline__ unsigned long find_next_zero_bit(const void * addr, unsigned long size, unsigned long offset)
+static __inline__ unsigned long find_next_zero_bit(void * addr, unsigned long size, unsigned long offset)
{
- const unsigned long * p = ((unsigned long *) addr) + (offset >> SHIFT_PER_LONG);
+ unsigned long * p = ((unsigned long *) addr) + (offset >> SHIFT_PER_LONG);
unsigned long result = offset & ~(BITS_PER_LONG-1);
unsigned long tmp;
return result + ffz(tmp);
}
-static __inline__ unsigned long find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset)
+static __inline__ unsigned long find_next_bit(unsigned long *addr, unsigned long size, unsigned long offset)
{
- const unsigned long *p = addr + (offset >> 6);
+ unsigned long *p = addr + (offset >> 6);
unsigned long result = offset & ~(BITS_PER_LONG-1);
unsigned long tmp;
* disabling interrupts.
*/
#ifdef __LP64__
-#define ext2_set_bit(nr, addr) test_and_set_bit((nr) ^ 0x38, (unsigned long *)addr)
-#define ext2_set_bit_atomic(l,nr,addr) test_and_set_bit((nr) ^ 0x38, (unsigned long *)addr)
-#define ext2_clear_bit(nr, addr) test_and_clear_bit((nr) ^ 0x38, (unsigned long *)addr)
-#define ext2_clear_bit_atomic(l,nr,addr) test_and_clear_bit((nr) ^ 0x38, (unsigned long *)addr)
+#define ext2_set_bit(nr, addr) test_and_set_bit((nr) ^ 0x38, addr)
+#define ext2_set_bit_atomic(l,nr,addr) test_and_set_bit((nr) ^ 0x38, addr)
+#define ext2_clear_bit(nr, addr) test_and_clear_bit((nr) ^ 0x38, addr)
+#define ext2_clear_bit_atomic(l,nr,addr) test_and_clear_bit((nr) ^ 0x38, addr)
#else
-#define ext2_set_bit(nr, addr) test_and_set_bit((nr) ^ 0x18, (unsigned long *)addr)
-#define ext2_set_bit_atomic(l,nr,addr) test_and_set_bit((nr) ^ 0x18, (unsigned long *)addr)
-#define ext2_clear_bit(nr, addr) test_and_clear_bit((nr) ^ 0x18, (unsigned long *)addr)
-#define ext2_clear_bit_atomic(l,nr,addr) test_and_clear_bit((nr) ^ 0x18, (unsigned long *)addr)
+#define ext2_set_bit(nr, addr) test_and_set_bit((nr) ^ 0x18, addr)
+#define ext2_set_bit_atomic(l,nr,addr) test_and_set_bit((nr) ^ 0x18, addr)
+#define ext2_clear_bit(nr, addr) test_and_clear_bit((nr) ^ 0x18, addr)
+#define ext2_clear_bit_atomic(l,nr,addr) test_and_clear_bit((nr) ^ 0x18, addr)
#endif
#endif /* __KERNEL__ */
#endif
}
-extern void flush_dcache_page(struct page *page);
+extern void __flush_dcache_page(struct page *page);
+
+static inline void flush_dcache_page(struct page *page)
+{
+ struct address_space *mapping = page_mapping(page);
+
+ if (mapping && !mapping_mapped(mapping)) {
+ set_bit(PG_dcache_dirty, &page->flags);
+ } else {
+ __flush_dcache_page(page);
+ }
+}
#define flush_dcache_mmap_lock(mapping) \
spin_lock_irq(&(mapping)->tree_lock)
/* Simple function to work out if we have an existing address translation
* for a user space vma. */
-static inline pte_t *__translation_exists(struct mm_struct *mm,
- unsigned long addr)
+static inline int translation_exists(struct vm_area_struct *vma,
+ unsigned long addr)
{
- pgd_t *pgd = pgd_offset(mm, addr);
+ pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
pmd_t *pmd;
pte_t *pte;
if(pgd_none(*pgd))
- return NULL;
+ return 0;
pmd = pmd_offset(pgd, addr);
if(pmd_none(*pmd) || pmd_bad(*pmd))
- return NULL;
+ return 0;
pte = pte_offset_map(pmd, addr);
/* The PA flush mappings show up as pte_none, but they're
* valid none the less */
if(pte_none(*pte) && ((pte_val(*pte) & _PAGE_FLUSH) == 0))
- return NULL;
- return pte;
+ return 0;
+ return 1;
}
-#define translation_exists(vma, addr) __translation_exists((vma)->vm_mm, addr)
/* Private function to flush a page from the cache of a non-current
#include <linux/mm.h>
#include <linux/config.h>
#include <asm/cacheflush.h>
-#include <asm/scatterlist.h>
/* See Documentation/DMA-mapping.txt */
struct hppa_dma_ops {
#define HPHW_IOA 12
#define HPHW_BRIDGE 13
#define HPHW_FABRIC 14
-#define HPHW_MC 15
#define HPHW_FAULTY 31
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt
+/*
+ * Change "struct page" to physical address.
+ */
+#define page_to_phys(page) (((page) - mem_map) << PAGE_SHIFT)
+
/* Memory mapped IO */
extern void * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
#ifndef _PARISC_MMZONE_H
#define _PARISC_MMZONE_H
-#ifdef CONFIG_DISCONTIGMEM
-
-#define MAX_PHYSMEM_RANGES 8 /* Fix the size for now (current known max is 3) */
-extern int npmem_ranges;
-
struct node_map_data {
pg_data_t pg_data;
+ struct page *adj_node_mem_map;
};
extern struct node_map_data node_data[];
+extern unsigned char *chunkmap;
-#define NODE_DATA(nid) (&node_data[nid].pg_data)
-
-/*
- * Given a kernel address, find the home node of the underlying memory.
- */
-#define kvaddr_to_nid(kaddr) pfn_to_nid(__pa(kaddr) >> PAGE_SHIFT)
-
-#define node_mem_map(nid) (NODE_DATA(nid)->node_mem_map)
-#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
-#define node_end_pfn(nid) \
-({ \
- pg_data_t *__pgdat = NODE_DATA(nid); \
- __pgdat->node_start_pfn + __pgdat->node_spanned_pages; \
-})
-#define node_localnr(pfn, nid) ((pfn) - node_start_pfn(nid))
-
-#define local_mapnr(kvaddr) \
-({ \
- unsigned long __pfn = __pa(kvaddr) >> PAGE_SHIFT; \
- (__pfn - node_start_pfn(pfn_to_nid(__pfn))); \
-})
-
-#define pfn_to_page(pfn) \
-({ \
- unsigned long __pfn = (pfn); \
- int __node = pfn_to_nid(__pfn); \
- &node_mem_map(__node)[node_localnr(__pfn,__node)]; \
-})
-
-#define page_to_pfn(pg) \
-({ \
- struct page *__page = pg; \
- struct zone *__zone = page_zone(__page); \
- BUG_ON(__zone == NULL); \
- (unsigned long)(__page - __zone->zone_mem_map) \
- + __zone->zone_start_pfn; \
-})
+#define BADCHUNK ((unsigned char)0xff)
+#define CHUNKSZ (256*1024*1024)
+#define CHUNKSHIFT 28
+#define CHUNKMASK (~(CHUNKSZ - 1))
+#define CHUNKNUM(paddr) ((paddr) >> CHUNKSHIFT)
-/* We have these possible memory map layouts:
- * Astro: 0-3.75, 67.75-68, 4-64
- * zx1: 0-1, 257-260, 4-256
- * Stretch (N-class): 0-2, 4-32, 34-xxx
- */
-
-/* Since each 1GB can only belong to one region (node), we can create
- * an index table for pfn to nid lookup; each entry in pfnnid_map
- * represents 1GB, and contains the node that the memory belongs to. */
-
-#define PFNNID_SHIFT (30 - PAGE_SHIFT)
-#define PFNNID_MAP_MAX 512 /* support 512GB */
-extern unsigned char pfnnid_map[PFNNID_MAP_MAX];
-
-#ifndef __LP64__
-#define pfn_is_io(pfn) ((pfn & (0xf0000000UL >> PAGE_SHIFT)) == (0xf0000000UL >> PAGE_SHIFT))
-#else
-/* io can be 0xf0f0f0f0f0xxxxxx or 0xfffffffff0000000 */
-#define pfn_is_io(pfn) ((pfn & (0xf000000000000000UL >> PAGE_SHIFT)) == (0xf000000000000000UL >> PAGE_SHIFT))
-#endif
-
-static inline int pfn_to_nid(unsigned long pfn)
-{
- unsigned int i;
- unsigned char r;
-
- if (unlikely(pfn_is_io(pfn)))
- return 0;
-
- i = pfn >> PFNNID_SHIFT;
- BUG_ON(i >= sizeof(pfnnid_map) / sizeof(pfnnid_map[0]));
- r = pfnnid_map[i];
- BUG_ON(r == 0xff);
+#define NODE_DATA(nid) (&node_data[nid].pg_data)
+#define NODE_MEM_MAP(nid) (NODE_DATA(nid)->node_mem_map)
+#define ADJ_NODE_MEM_MAP(nid) (node_data[nid].adj_node_mem_map)
- return (int)r;
-}
+#define phys_to_page(paddr) \
+ (ADJ_NODE_MEM_MAP(chunkmap[CHUNKNUM((paddr))]) \
+ + ((paddr) >> PAGE_SHIFT))
-static inline int pfn_valid(int pfn)
-{
- int nid = pfn_to_nid(pfn);
+#define virt_to_page(kvaddr) phys_to_page(__pa(kvaddr))
- if (nid >= 0)
- return (pfn < node_end_pfn(nid));
- return 0;
-}
+/* This is kind of bogus, need to investigate performance of doing it right */
+#define VALID_PAGE(page) ((page - mem_map) < max_mapnr)
-#else /* !CONFIG_DISCONTIGMEM */
-#define MAX_PHYSMEM_RANGES 1
-#endif
-#endif /* _PARISC_MMZONE_H */
+#endif /* !_PARISC_MMZONE_H */
+++ /dev/null
-#ifndef _ASM_MAX_NUMNODES_H
-#define _ASM_MAX_NUMNODES_H
-
-#include <linux/config.h>
-
-/* Max 8 Nodes */
-#define NODES_SHIFT 3
-
-#endif /* _ASM_MAX_NUMNODES_H */
#else
#define pte_flags(x) ((x).flags)
#endif
-
-/* These do not work lvalues, so make sure we don't use them as such. */
-#define pmd_val(x) ((x).pmd + 0)
-#define pgd_val(x) ((x).pgd + 0)
+#define pmd_val(x) ((x).pmd)
+#define pgd_val(x) ((x).pgd)
#define pgprot_val(x) ((x).pgprot)
-#define __pmd_val_set(x,n) (x).pmd = (n)
-#define __pgd_val_set(x,n) (x).pgd = (n)
-
#define __pte(x) ((pte_t) { (x) } )
#define __pmd(x) ((pmd_t) { (x) } )
#define __pgd(x) ((pgd_t) { (x) } )
return order;
}
+#ifdef __LP64__
+#define MAX_PHYSMEM_RANGES 8 /* Fix the size for now (current known max is 3) */
+#else
+#define MAX_PHYSMEM_RANGES 1 /* First range is only range that fits in 32 bits */
+#endif
+
typedef struct __physmem_range {
unsigned long start_pfn;
unsigned long pages; /* PAGE_SIZE pages */
#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
-#ifndef CONFIG_DISCONTIGMEM
#define pfn_to_page(pfn) (mem_map + (pfn))
#define page_to_pfn(page) ((unsigned long)((page) - mem_map))
#define pfn_valid(pfn) ((pfn) < max_mapnr)
-#endif /* CONFIG_DISCONTIGMEM */
-
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
-#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
-#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+#ifndef CONFIG_DISCONTIGMEM
+#define virt_to_page(kaddr) (mem_map + (__pa(kaddr) >> PAGE_SHIFT))
+#define VALID_PAGE(page) ((page - mem_map) < max_mapnr)
+#endif /* !CONFIG_DISCONTIGMEM */
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-#define devmem_is_allowed(x) 1
-
#endif /* __KERNEL__ */
#endif /* _PARISC_PAGE_H */
*/
#define PCI_MAX_BUSSES 256
+/* [soapbox on]
+** Who the hell can develop stuff without ASSERT or VASSERT?
+** No one understands all the modules across all platforms.
+** For linux add another dimension - processor architectures.
+**
+** This should be a standard/global macro used liberally
+** in all code. Every respectable engineer I know in HP
+** would support this argument. - grant
+** [soapbox off]
+*/
+#ifdef PCI_DEBUG
+#define ASSERT(expr) \
+ if(!(expr)) { \
+ printk("\n%s:%d: Assertion " #expr " failed!\n", \
+ __FILE__, __LINE__); \
+ panic(#expr); \
+ }
+#else
+#define ASSERT(expr)
+#endif
+
+
/*
** pci_hba_data (aka H2P_OBJECT in HP/UX)
**
#define OSTAT_RUN 6
#define OSTAT_ON 7
+#ifdef __LP64__
+/* PDC PAT CELL */
+#define PDC_PAT_CELL 64L /* Interface for gaining and
+ * manipulating cell state within PD */
+#define PDC_PAT_CELL_GET_NUMBER 0L /* Return Cell number */
+#define PDC_PAT_CELL_GET_INFO 1L /* Returns info about Cell */
+#define PDC_PAT_CELL_MODULE 2L /* Returns info about Module */
+#define PDC_PAT_CELL_SET_ATTENTION 9L /* Set Cell Attention indicator */
+#define PDC_PAT_CELL_NUMBER_TO_LOC 10L /* Cell Number -> Location */
+#define PDC_PAT_CELL_WALK_FABRIC 11L /* Walk the Fabric */
+#define PDC_PAT_CELL_GET_RDT_SIZE 12L /* Return Route Distance Table Sizes */
+#define PDC_PAT_CELL_GET_RDT 13L /* Return Route Distance Tables */
+#define PDC_PAT_CELL_GET_LOCAL_PDH_SZ 14L /* Read Local PDH Buffer Size*/
+#define PDC_PAT_CELL_SET_LOCAL_PDH 15L /* Write Local PDH Buffer */
+#define PDC_PAT_CELL_GET_REMOTE_PDH_SZ 16L /* Return Remote PDH Buffer Size */
+#define PDC_PAT_CELL_GET_REMOTE_PDH 17L /* Read Remote PDH Buffer */
+#define PDC_PAT_CELL_GET_DBG_INFO 128L /* Return DBG Buffer Info */
+#define PDC_PAT_CELL_CHANGE_ALIAS 129L /* Change Non-Equivalent Alias Checking */
+
+/*
+** Arg to PDC_PAT_CELL_MODULE memaddr[4]
+**
+** Addresses on the Merced Bus != all Runway Bus addresses.
+** This is intended for programming SBA/LBA chips range registers.
+*/
+#define IO_VIEW 0UL
+#define PA_VIEW 1UL
+
+/* PDC_PAT_CELL_MODULE entity type values */
+#define PAT_ENTITY_CA 0 /* central agent */
+#define PAT_ENTITY_PROC 1 /* processor */
+#define PAT_ENTITY_MEM 2 /* memory controller */
+#define PAT_ENTITY_SBA 3 /* system bus adapter */
+#define PAT_ENTITY_LBA 4 /* local bus adapter */
+#define PAT_ENTITY_PBC 5 /* processor bus converter */
+#define PAT_ENTITY_XBC 6 /* crossbar fabric connect */
+#define PAT_ENTITY_RC 7 /* fabric interconnect */
+
+/* PDC_PAT_CELL_MODULE address range type values */
+#define PAT_PBNUM 0 /* PCI Bus Number */
+#define PAT_LMMIO 1 /* < 4G MMIO Space */
+#define PAT_GMMIO 2 /* > 4G MMIO Space */
+#define PAT_NPIOP 3 /* Non Postable I/O Port Space */
+#define PAT_PIOP 4 /* Postable I/O Port Space */
+#define PAT_AHPA 5 /* Additional HPA Space */
+#define PAT_UFO 6 /* HPA Space (UFO for Mariposa) */
+#define PAT_GNIP 7 /* GNI Reserved Space */
+
+
+/* PDC PAT CHASSIS LOG */
+#define PDC_PAT_CHASSIS_LOG 65L /* Platform logging & forward
+ ** progress functions */
+#define PDC_PAT_CHASSIS_WRITE_LOG 0L /* Write Log Entry */
+#define PDC_PAT_CHASSIS_READ_LOG 1L /* Read Log Entry */
+
+
+/* PDC PAT CPU */
+#define PDC_PAT_CPU 67L /* Interface to CPU configuration
+ * within the protection domain */
+#define PDC_PAT_CPU_INFO 0L /* Return CPU config info */
+#define PDC_PAT_CPU_DELETE 1L /* Delete CPU */
+#define PDC_PAT_CPU_ADD 2L /* Add CPU */
+#define PDC_PAT_CPU_GET_NUMBER 3L /* Return CPU Number */
+#define PDC_PAT_CPU_GET_HPA 4L /* Return CPU HPA */
+#define PDC_PAT_CPU_STOP 5L /* Stop CPU */
+#define PDC_PAT_CPU_RENDEZVOUS 6L /* Rendezvous CPU */
+#define PDC_PAT_CPU_GET_CLOCK_INFO 7L /* Return CPU Clock info */
+#define PDC_PAT_CPU_GET_RENDEZVOUS_STATE 8L /* Return Rendezvous State */
+#define PDC_PAT_CPU_PLUNGE_FABRIC 128L /* Plunge Fabric */
+#define PDC_PAT_CPU_UPDATE_CACHE_CLEANSING 129L /* Manipulate Cache
+ * Cleansing Mode */
+
+/* PDC PAT EVENT */
+#define PDC_PAT_EVENT 68L /* Interface to Platform Events */
+#define PDC_PAT_EVENT_GET_CAPS 0L /* Get Capabilities */
+#define PDC_PAT_EVENT_SET_MODE 1L /* Set Notification Mode */
+#define PDC_PAT_EVENT_SCAN 2L /* Scan Event */
+#define PDC_PAT_EVENT_HANDLE 3L /* Handle Event */
+#define PDC_PAT_EVENT_GET_NB_CALL 4L /* Get Non-Blocking call Args*/
+
+/* PDC PAT HPMC */
+#define PDC_PAT_HPMC 70L /* Cause processor to go into spin
+ ** loop, and wait for wake up from
+ ** Monarch Processor */
+#define PDC_PAT_HPMC_RENDEZ_CPU 0L /* go into spin loop */
+#define PDC_PAT_HPMC_SET_PARAMS 1L /* Allows OS to specify intr which PDC
+ * will use to interrupt OS during machine
+ * check rendezvous */
+
+/* parameters for PDC_PAT_HPMC_SET_PARAMS */
+#define HPMC_SET_PARAMS_INTR 1L /* Rendezvous Interrupt */
+#define HPMC_SET_PARAMS_WAKE 2L /* Wake up processor */
+
+/* PDC PAT IO */
+#define PDC_PAT_IO 71L /* On-line services for I/O modules */
+#define PDC_PAT_IO_GET_SLOT_STATUS 5L /* Get Slot Status Info */
+#define PDC_PAT_IO_GET_LOC_FROM_HARDWARE 6L /* Get Physical Location from */
+ /* Hardware Path */
+#define PDC_PAT_IO_GET_HARDWARE_FROM_LOC 7L /* Get Hardware Path from
+ * Physical Location */
+#define PDC_PAT_IO_GET_PCI_CONFIG_FROM_HW 11L /* Get PCI Configuration
+ * Address from Hardware Path */
+#define PDC_PAT_IO_GET_HW_FROM_PCI_CONFIG 12L /* Get Hardware Path
+ * from PCI Configuration Address */
+#define PDC_PAT_IO_READ_HOST_BRIDGE_INFO 13L /* Read Host Bridge State Info */
+#define PDC_PAT_IO_CLEAR_HOST_BRIDGE_INFO 14L /* Clear Host Bridge State Info*/
+#define PDC_PAT_IO_GET_PCI_ROUTING_TABLE_SIZE 15L /* Get PCI INT Routing Table
+ * Size */
+#define PDC_PAT_IO_GET_PCI_ROUTING_TABLE 16L /* Get PCI INT Routing Table */
+#define PDC_PAT_IO_GET_HINT_TABLE_SIZE 17L /* Get Hint Table Size */
+#define PDC_PAT_IO_GET_HINT_TABLE 18L /* Get Hint Table */
+#define PDC_PAT_IO_PCI_CONFIG_READ 19L /* PCI Config Read */
+#define PDC_PAT_IO_PCI_CONFIG_WRITE 20L /* PCI Config Write */
+#define PDC_PAT_IO_GET_NUM_IO_SLOTS 21L /* Get Number of I/O Bay Slots in
+ * Cabinet */
+#define PDC_PAT_IO_GET_LOC_IO_SLOTS 22L /* Get Physical Location of I/O */
+ /* Bay Slots in Cabinet */
+#define PDC_PAT_IO_BAY_STATUS_INFO 28L /* Get I/O Bay Slot Status Info */
+#define PDC_PAT_IO_GET_PROC_VIEW 29L /* Get Processor view of IO address */
+#define PDC_PAT_IO_PROG_SBA_DIR_RANGE 30L /* Program directed range */
+
+/* PDC PAT MEM */
+#define PDC_PAT_MEM 72L /* Manage memory page deallocation */
+#define PDC_PAT_MEM_PD_INFO 0L /* Return PDT info for PD */
+#define PDC_PAT_MEM_PD_CLEAR 1L /* Clear PDT for PD */
+#define PDC_PAT_MEM_PD_READ 2L /* Read PDT entries for PD */
+#define PDC_PAT_MEM_PD_RESET 3L /* Reset clear bit for PD */
+#define PDC_PAT_MEM_CELL_INFO 5L /* Return PDT info For Cell */
+#define PDC_PAT_MEM_CELL_CLEAR 6L /* Clear PDT For Cell */
+#define PDC_PAT_MEM_CELL_READ 7L /* Read PDT entries For Cell */
+#define PDC_PAT_MEM_CELL_RESET 8L /* Reset clear bit For Cell */
+#define PDC_PAT_MEM_SETGM 9L /* Set Golden Memory value */
+#define PDC_PAT_MEM_ADD_PAGE 10L /* ADDs a page to the cell */
+#define PDC_PAT_MEM_ADDRESS 11L /* Get Physical Location From*/
+ /* Memory Address */
+#define PDC_PAT_MEM_GET_TXT_SIZE 12L /* Get Formatted Text Size */
+#define PDC_PAT_MEM_GET_PD_TXT 13L /* Get PD Formatted Text */
+#define PDC_PAT_MEM_GET_CELL_TXT 14L /* Get Cell Formatted Text */
+#define PDC_PAT_MEM_RD_STATE_INFO 15L /* Read Mem Module State Info*/
+#define PDC_PAT_MEM_CLR_STATE_INFO 16L /*Clear Mem Module State Info*/
+#define PDC_PAT_MEM_CLEAN_RANGE 128L /*Clean Mem in specific range*/
+#define PDC_PAT_MEM_GET_TBL_SIZE 131L /* Get Memory Table Size */
+#define PDC_PAT_MEM_GET_TBL 132L /* Get Memory Table */
+
+/* PDC PAT NVOLATILE */
+#define PDC_PAT_NVOLATILE 73L /* Access Non-Volatile Memory*/
+#define PDC_PAT_NVOLATILE_READ 0L /* Read Non-Volatile Memory */
+#define PDC_PAT_NVOLATILE_WRITE 1L /* Write Non-Volatile Memory */
+#define PDC_PAT_NVOLATILE_GET_SIZE 2L /* Return size of NVM */
+#define PDC_PAT_NVOLATILE_VERIFY 3L /* Verify contents of NVM */
+#define PDC_PAT_NVOLATILE_INIT 4L /* Initialize NVM */
+
+/* PDC PAT PD */
+#define PDC_PAT_PD 74L /* Protection Domain Info */
+#define PDC_PAT_PD_GET_ADDR_MAP 0L /* Get Address Map */
+
+/* PDC_PAT_PD_GET_ADDR_MAP entry types */
+#define PAT_MEMORY_DESCRIPTOR 1
+
+/* PDC_PAT_PD_GET_ADDR_MAP memory types */
+#define PAT_MEMTYPE_MEMORY 0
+#define PAT_MEMTYPE_FIRMWARE 4
+
+/* PDC_PAT_PD_GET_ADDR_MAP memory usage */
+#define PAT_MEMUSE_GENERAL 0
+#define PAT_MEMUSE_GI 128
+#define PAT_MEMUSE_GNI 129
+#endif /* __LP64__ */
+
#ifndef __ASSEMBLY__
#include <linux/types.h>
#define PDC_TYPE_SYSTEM_MAP 1 /* 32-bit, but supports PDC_SYSTEM_MAP */
#define PDC_TYPE_SNAKE 2 /* Doesn't support SYSTEM_MAP */
+#ifdef CONFIG_PARISC64
+#define is_pdc_pat() (PDC_TYPE_PAT == pdc_type)
+#else
+#define is_pdc_pat() (0)
+#endif
+
struct pdc_chassis_info { /* for PDC_CHASSIS_INFO */
unsigned long actcnt; /* actual number of bytes returned */
unsigned long maxcnt; /* maximum number of bytes that could be returned */
#ifdef __LP64__
cc_padW:32,
#endif
- cc_alias: 4, /* alias boundaries for virtual addresses */
+ cc_alias:4, /* alias boundaries for virtual addresses */
cc_block: 4, /* to determine most efficient stride */
cc_line : 3, /* maximum amount written back as a result of store (multiple of 16 bytes) */
- cc_shift: 2, /* how much to shift cc_block left */
+ cc_pad0 : 2, /* reserved */
cc_wt : 1, /* 0 = WT-Dcache, 1 = WB-Dcache */
cc_sh : 2, /* 0 = separate I/D-cache, else shared I/D-cache */
cc_cst : 3, /* 0 = incoherent D-cache, 1=coherent D-cache */
unsigned long tod_usec;
};
+#ifdef __LP64__
+struct pdc_pat_cell_num {
+ unsigned long cell_num;
+ unsigned long cell_loc;
+};
+
+struct pdc_pat_cpu_num {
+ unsigned long cpu_num;
+ unsigned long cpu_loc;
+};
+
+struct pdc_pat_pd_addr_map_entry {
+ unsigned char entry_type; /* 1 = Memory Descriptor Entry Type */
+ unsigned char reserve1[5];
+ unsigned char memory_type;
+ unsigned char memory_usage;
+ unsigned long paddr;
+ unsigned int pages; /* Length in 4K pages */
+ unsigned int reserve2;
+ unsigned long cell_map;
+};
+
+/* FIXME: mod[508] should really be a union of the various mod components */
+struct pdc_pat_cell_mod_maddr_block { /* PDC_PAT_CELL_MODULE */
+ unsigned long cba; /* function 0 configuration space address */
+ unsigned long mod_info; /* module information */
+ unsigned long mod_location; /* physical location of the module */
+ struct hardware_path mod_path; /* hardware path */
+ unsigned long mod[508]; /* PAT cell module components */
+};
+
+typedef struct pdc_pat_cell_mod_maddr_block pdc_pat_cell_mod_maddr_block_t;
+#endif /* __LP64__ */
+
/* architected results from PDC_PIM/transfer hpmc on a PA1.1 machine */
struct pdc_hpmc_pim_11 { /* PDC_PIM */
unsigned long inptr, unsigned long outputr,
unsigned long glob_cfg);
+#ifdef __LP64__
+int pdc_pat_chassis_send_log(unsigned long status, unsigned long data);
+int pdc_pat_cell_get_number(struct pdc_pat_cell_num *cell_info);
+int pdc_pat_cell_module(unsigned long *actcnt, unsigned long ploc, unsigned long mod,
+ unsigned long view_type, void *mem_addr);
+int pdc_pat_cpu_get_number(struct pdc_pat_cpu_num *cpu_info, void *hpa);
+int pdc_pat_get_irt_size(unsigned long *num_entries, unsigned long cell_num);
+int pdc_pat_get_irt(void *r_addr, unsigned long cell_num);
+int pdc_pat_pd_get_addr_map(unsigned long *actual_len, void *mem_addr,
+ unsigned long count, unsigned long offset);
+
+/********************************************************************
+* PDC_PAT_CELL[Return Cell Module] memaddr[0] conf_base_addr
+* ----------------------------------------------------------
+* Bit 0 to 51 - conf_base_addr
+* Bit 52 to 62 - reserved
+* Bit 63 - endianness bit
+********************************************************************/
+#define PAT_GET_CBA(value) ((value) & 0xfffffffffffff000UL)
+
+/********************************************************************
+* PDC_PAT_CELL[Return Cell Module] memaddr[1] mod_info
+* ----------------------------------------------------
+* Bit 0 to 7 - entity type
+* 0 = central agent, 1 = processor,
+* 2 = memory controller, 3 = system bus adapter,
+* 4 = local bus adapter, 5 = processor bus converter,
+* 6 = crossbar fabric connect, 7 = fabric interconnect,
+* 8 to 254 reserved, 255 = unknown.
+* Bit 8 to 15 - DVI
+* Bit 16 to 23 - IOC functions
+* Bit 24 to 39 - reserved
+* Bit 40 to 63 - mod_pages
+* number of 4K pages a module occupies starting at conf_base_addr
+********************************************************************/
+#define PAT_GET_ENTITY(value) (((value) >> 56) & 0xffUL)
+#define PAT_GET_DVI(value) (((value) >> 48) & 0xffUL)
+#define PAT_GET_IOC(value) (((value) >> 40) & 0xffUL)
+#define PAT_GET_MOD_PAGES(value) ((value) & 0xffffffUL)
+
+#else /* !__LP64__ */
+/* No PAT support for 32-bit kernels...sorry */
+#define pdc_pat_get_irt_size(num_entries, cell_num) PDC_BAD_PROC
+#define pdc_pat_get_irt(r_addr, cell_num) PDC_BAD_PROC
+#endif /* !__LP64__ */
+
extern void pdc_init(void);
#endif /* __ASSEMBLY__ */
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright 2000 (c) Hewlett Packard (Paul Bame <bame()spam.parisc-linux.org>)
- * Copyright 2000,2004 (c) Grant Grundler <grundler()nahspam.parisc-linux.org>
+ * Copyright (c) Hewlett Packard (Paul Bame <bame@puffin.external.hp.com>)
+ * Copyright 2000 (c) Grant Grundler <grundler@puffin.external.hp.com>
*/
+/* PDC PAT CELL */
#define PDC_PAT_CELL 64L /* Interface for gaining and
* manipulatin g cell state within PD */
#define PDC_PAT_CELL_GET_NUMBER 0L /* Return Cell number */
#define PAT_GNIP 7 /* GNI Reserved Space */
+/* PDC PAT CHASSIS LOG */
-/* PDC PAT CHASSIS LOG -- Platform logging & forward progress functions */
-
-#define PDC_PAT_CHASSIS_LOG 65L
+#define PDC_PAT_CHASSIS_LOG 65L /* Platform logging & forward
+ ** progress functions */
#define PDC_PAT_CHASSIS_WRITE_LOG 0L /* Write Log Entry */
#define PDC_PAT_CHASSIS_READ_LOG 1L /* Read Log Entry */
+/* PDC PAT CPU */
-/* PDC PAT CPU -- CPU configuration within the protection domain */
-
-#define PDC_PAT_CPU 67L
+#define PDC_PAT_CPU 67L /* Interface to CPU configuration
+ * within the protection domain */
#define PDC_PAT_CPU_INFO 0L /* Return CPU config info */
#define PDC_PAT_CPU_DELETE 1L /* Delete CPU */
#define PDC_PAT_CPU_ADD 2L /* Add CPU */
#define PDC_PAT_CPU_PLUNGE_FABRIC 128L /* Plunge Fabric */
#define PDC_PAT_CPU_UPDATE_CACHE_CLEANSING 129L /* Manipulate Cache
* Cleansing Mode */
-/* PDC PAT EVENT -- Platform Events */
+/* PDC PAT EVENT */
-#define PDC_PAT_EVENT 68L
+#define PDC_PAT_EVENT 68L /* Interface to Platform Events */
#define PDC_PAT_EVENT_GET_CAPS 0L /* Get Capabilities */
#define PDC_PAT_EVENT_SET_MODE 1L /* Set Notification Mode */
#define PDC_PAT_EVENT_SCAN 2L /* Scan Event */
#define PDC_PAT_EVENT_HANDLE 3L /* Handle Event */
#define PDC_PAT_EVENT_GET_NB_CALL 4L /* Get Non-Blocking call Args */
-/* PDC PAT HPMC -- Cause processor to go into spin loop, and wait
- * for wake up from Monarch Processor.
- */
+/* PDC PAT HPMC */
-#define PDC_PAT_HPMC 70L
+#define PDC_PAT_HPMC 70L /* Cause processor to go into spin
+ ** loop, and wait for wake up from
+ ** Monarch Processor */
#define PDC_PAT_HPMC_RENDEZ_CPU 0L /* go into spin loop */
#define PDC_PAT_HPMC_SET_PARAMS 1L /* Allows OS to specify intr which PDC
- * will use to interrupt OS during
- * machine check rendezvous */
+ * will use to interrupt OS during machine
+ * check rendezvous */
/* parameters for PDC_PAT_HPMC_SET_PARAMS: */
#define HPMC_SET_PARAMS_INTR 1L /* Rendezvous Interrupt */
#define HPMC_SET_PARAMS_WAKE 2L /* Wake up processor */
+/* PDC PAT IO */
-/* PDC PAT IO -- On-line services for I/O modules */
-
-#define PDC_PAT_IO 71L
+#define PDC_PAT_IO 71L /* On-line services for I/O modules */
#define PDC_PAT_IO_GET_SLOT_STATUS 5L /* Get Slot Status Info*/
#define PDC_PAT_IO_GET_LOC_FROM_HARDWARE 6L /* Get Physical Location from */
/* Hardware Path */
#define PDC_PAT_IO_GET_PROC_VIEW 29L /* Get Processor view of IO address */
#define PDC_PAT_IO_PROG_SBA_DIR_RANGE 30L /* Program directed range */
+/* PDC PAT MEM */
-/* PDC PAT MEM -- Manage memory page deallocation */
-
-#define PDC_PAT_MEM 72L
+#define PDC_PAT_MEM 72L /* Manage memory page deallocation */
#define PDC_PAT_MEM_PD_INFO 0L /* Return PDT info for PD */
#define PDC_PAT_MEM_PD_CLEAR 1L /* Clear PDT for PD */
#define PDC_PAT_MEM_PD_READ 2L /* Read PDT entries for PD */
#define PDC_PAT_MEM_GET_TBL_SIZE 131L /* Get Memory Table Size */
#define PDC_PAT_MEM_GET_TBL 132L /* Get Memory Table */
+/* PDC PAT NVOLATILE */
-/* PDC PAT NVOLATILE -- Access Non-Volatile Memory */
-
-#define PDC_PAT_NVOLATILE 73L
-#define PDC_PAT_NVOLATILE_READ 0L /* Read Non-Volatile Memory */
-#define PDC_PAT_NVOLATILE_WRITE 1L /* Write Non-Volatile Memory */
-#define PDC_PAT_NVOLATILE_GET_SIZE 2L /* Return size of NVM */
-#define PDC_PAT_NVOLATILE_VERIFY 3L /* Verify contents of NVM */
-#define PDC_PAT_NVOLATILE_INIT 4L /* Initialize NVM */
-
-/* PDC PAT PD */
-#define PDC_PAT_PD 74L /* Protection Domain Info */
-#define PDC_PAT_PD_GET_ADDR_MAP 0L /* Get Address Map */
-
-/* PDC_PAT_PD_GET_ADDR_MAP entry types */
-#define PAT_MEMORY_DESCRIPTOR 1
-
-/* PDC_PAT_PD_GET_ADDR_MAP memory types */
-#define PAT_MEMTYPE_MEMORY 0
-#define PAT_MEMTYPE_FIRMWARE 4
-
-/* PDC_PAT_PD_GET_ADDR_MAP memory usage */
-#define PAT_MEMUSE_GENERAL 0
-#define PAT_MEMUSE_GI 128
-#define PAT_MEMUSE_GNI 129
-
+#define PDC_PAT_NVOLATILE 73L /* Access Non-Volatile Memory */
+#define PDC_PAT_NVOLATILE_READ 0L /* Read Non-Volatile Memory */
+#define PDC_PAT_NVOLATILE_WRITE 1L /* Write Non-Volatile Memory */
+#define PDC_PAT_NVOLATILE_GET_SIZE 2L /* Return size of NVM */
+#define PDC_PAT_NVOLATILE_VERIFY 3L /* Verify contents of NVM */
+#define PDC_PAT_NVOLATILE_INIT 4L /* Initialize NVM */
#ifndef __ASSEMBLY__
#include <linux/types.h>
-#ifdef CONFIG_PARISC64
-#define is_pdc_pat() (PDC_TYPE_PAT == pdc_type)
-extern int pdc_pat_get_irt_size(unsigned long *num_entries, unsigned long cell_num);
-extern int pdc_pat_get_irt(void *r_addr, unsigned long cell_num);
-#else /* ! CONFIG_PARISC64 */
-/* No PAT support for 32-bit kernels...sorry */
-#define is_pdc_pat() (0)
-#define pdc_pat_get_irt_size(num_entries, cell_numn) PDC_BAD_PROC
-#define pdc_pat_get_irt(r_addr, cell_num) PDC_BAD_PROC
-#endif /* ! CONFIG_PARISC64 */
-
-
-struct pdc_pat_cell_num {
- unsigned long cell_num;
- unsigned long cell_loc;
-};
-
-struct pdc_pat_cpu_num {
- unsigned long cpu_num;
- unsigned long cpu_loc;
-};
-
-struct pdc_pat_pd_addr_map_entry {
- unsigned char entry_type; /* 1 = Memory Descriptor Entry Type */
- unsigned char reserve1[5];
- unsigned char memory_type;
- unsigned char memory_usage;
- unsigned long paddr;
- unsigned int pages; /* Length in 4K pages */
- unsigned int reserve2;
- unsigned long cell_map;
-};
-
-/********************************************************************
-* PDC_PAT_CELL[Return Cell Module] memaddr[0] conf_base_addr
-* ----------------------------------------------------------
-* Bit 0 to 51 - conf_base_addr
-* Bit 52 to 62 - reserved
-* Bit 63 - endianess bit
-********************************************************************/
-#define PAT_GET_CBA(value) ((value) & 0xfffffffffffff000UL)
-
-/********************************************************************
-* PDC_PAT_CELL[Return Cell Module] memaddr[1] mod_info
-* ----------------------------------------------------
-* Bit 0 to 7 - entity type
-* 0 = central agent, 1 = processor,
-* 2 = memory controller, 3 = system bus adapter,
-* 4 = local bus adapter, 5 = processor bus converter,
-* 6 = crossbar fabric connect, 7 = fabric interconnect,
-* 8 to 254 reserved, 255 = unknown.
-* Bit 8 to 15 - DVI
-* Bit 16 to 23 - IOC functions
-* Bit 24 to 39 - reserved
-* Bit 40 to 63 - mod_pages
-* number of 4K pages a module occupies starting at conf_base_addr
-********************************************************************/
-#define PAT_GET_ENTITY(value) (((value) >> 56) & 0xffUL)
-#define PAT_GET_DVI(value) (((value) >> 48) & 0xffUL)
-#define PAT_GET_IOC(value) (((value) >> 40) & 0xffUL)
-#define PAT_GET_MOD_PAGES(value)(((value) & 0xffffffUL)
-
-
/*
** PDC_PAT_CELL_GET_INFO return block
*/
/* FIXME: mod[508] should really be a union of the various mod components */
struct pdc_pat_cell_mod_maddr_block { /* PDC_PAT_CELL_MODULE */
- unsigned long cba; /* func 0 cfg space address */
- unsigned long mod_info; /* module information */
- unsigned long mod_location; /* physical location of the module */
- struct hardware_path mod_path; /* module path (device path - layers) */
+ unsigned long cba; /* function 0 configuration space address */
+ unsigned long mod_info; /* module information */
+ unsigned long mod_location; /* physical location of the module */
+ unsigned long mod_path; /* module path (device path - layers) */
unsigned long mod[508]; /* PAT cell module components */
} __attribute__((aligned(8))) ;
typedef struct pdc_pat_cell_mod_maddr_block pdc_pat_cell_mod_maddr_block_t;
-extern int pdc_pat_chassis_send_log(unsigned long status, unsigned long data);
-extern int pdc_pat_cell_get_number(struct pdc_pat_cell_num *cell_info);
-extern int pdc_pat_cell_module(unsigned long *actcnt, unsigned long ploc, unsigned long mod, unsigned long view_type, void *mem_addr);
+extern int pdc_pat_cell_get_number(void *);
+extern int pdc_pat_cell_module(void *, unsigned long, unsigned long, unsigned long, void *);
extern int pdc_pat_cell_num_to_loc(void *, unsigned long);
-extern int pdc_pat_cpu_get_number(struct pdc_pat_cpu_num *cpu_info, void *hpa);
-
-extern int pdc_pat_pd_get_addr_map(unsigned long *actual_len, void *mem_addr, unsigned long count, unsigned long offset);
-
-
-extern int pdc_pat_io_pci_cfg_read(unsigned long pci_addr, int pci_size, u32 *val);
-extern int pdc_pat_io_pci_cfg_write(unsigned long pci_addr, int pci_size, u32 val);
-
-
/* Flag to indicate this is a PAT box...don't use this unless you
** really have to...it might go away some day.
*/
+#ifdef __LP64__
extern int pdc_pat; /* arch/parisc/kernel/inventory.c */
+#endif
/********************************************************************
* PDC_PAT_CELL[Return Cell Module] memaddr[0] conf_base_addr
* kernel for machines with under 4GB of memory) */
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
- pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL,
+ pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL|GFP_DMA,
PGD_ALLOC_ORDER);
pgd_t *actual_pgd = pgd;
#ifdef __LP64__
actual_pgd += PTRS_PER_PGD;
/* Populate first pmd with allocated memory. We mark it
- * with PxD_FLAG_ATTACHED as a signal to the system that this
+ * with _PAGE_GATEWAY as a signal to the system that this
* pmd entry may not be cleared. */
- __pgd_val_set(*actual_pgd, (PxD_FLAG_PRESENT |
- PxD_FLAG_VALID |
- PxD_FLAG_ATTACHED)
- + (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT));
+ pgd_val(*actual_pgd) = (_PAGE_TABLE | _PAGE_GATEWAY) +
+ (__u32)__pa((unsigned long)pgd);
/* The first pmd entry also is marked with _PAGE_GATEWAY as
* a signal that this pmd may not be freed */
- __pgd_val_set(*pgd, PxD_FLAG_ATTACHED);
+ pgd_val(*pgd) = _PAGE_GATEWAY;
#endif
}
return actual_pgd;
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
{
- __pgd_val_set(*pgd, (PxD_FLAG_PRESENT | PxD_FLAG_VALID) +
- (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
+ pgd_val(*pgd) = _PAGE_TABLE + (__u32)__pa((unsigned long)pmd);
}
+/* NOTE: pmd must be in ZONE_DMA (<4GB) so the pgd pointer can be
+ * housed in 32 bits */
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
- pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
+ pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT|GFP_DMA,
PMD_ORDER);
if (pmd)
memset(pmd, 0, PAGE_SIZE<<PMD_ORDER);
static inline void pmd_free(pmd_t *pmd)
{
#ifdef __LP64__
- if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
+ if(pmd_val(*pmd) & _PAGE_GATEWAY)
/* This is the permanent pmd attached to the pgd;
* cannot free it */
return;
#ifdef __LP64__
/* preserve the gateway marker if this is the beginning of
* the permanent pmd */
- if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
- __pmd_val_set(*pmd, (PxD_FLAG_PRESENT |
- PxD_FLAG_VALID |
- PxD_FLAG_ATTACHED)
- + (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT));
+ if(pmd_val(*pmd) & _PAGE_GATEWAY)
+ pmd_val(*pmd) = (_PAGE_TABLE | _PAGE_GATEWAY)
+ + (__u32)__pa((unsigned long)pte);
else
#endif
- __pmd_val_set(*pmd, (PxD_FLAG_PRESENT | PxD_FLAG_VALID)
- + (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT));
+ pmd_val(*pmd) = _PAGE_TABLE + (__u32)__pa((unsigned long)pte);
}
#define pmd_populate(mm, pmd, pte_page) \
pmd_populate_kernel(mm, pmd, page_address(pte_page))
+/* NOTE: pte must be in ZONE_DMA (<4GB) so that the pmd pointer
+ * can be housed in 32 bits */
static inline struct page *
pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
- struct page *page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
+ struct page *page = alloc_page(GFP_KERNEL|__GFP_REPEAT|GFP_DMA);
if (likely(page != NULL))
clear_page(page_address(page));
return page;
static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
{
- pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
+ pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|GFP_DMA);
if (likely(pte != NULL))
clear_page(pte);
return pte;
/* This is the size of the initially mapped kernel memory (i.e. currently
* 0 to 1<<23 == 8MB */
-#ifdef CONFIG_64BIT
-#define KERNEL_INITIAL_ORDER 24
-#else
#define KERNEL_INITIAL_ORDER 23
-#endif
#define KERNEL_INITIAL_SIZE (1 << KERNEL_INITIAL_ORDER)
-#ifdef CONFIG_64BIT
+#ifdef __LP64__
#define PT_NLEVELS 3
#define PGD_ORDER 1 /* Number of pages per pgd */
#define PMD_ORDER 1 /* Number of pages per pmd */
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_KERNEL (_PAGE_PRESENT | _PAGE_EXEC | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED)
-/* The pgd/pmd contains a ptr (in phys addr space); since all pgds/pmds
- * are page-aligned, we don't care about the PAGE_OFFSET bits, except
- * for a few meta-information bits, so we shift the address to be
- * able to effectively address 40-bits of physical address space. */
-#define _PxD_PRESENT_BIT 31
-#define _PxD_ATTACHED_BIT 30
-#define _PxD_VALID_BIT 29
-
-#define PxD_FLAG_PRESENT (1 << xlate_pabit(_PxD_PRESENT_BIT))
-#define PxD_FLAG_ATTACHED (1 << xlate_pabit(_PxD_ATTACHED_BIT))
-#define PxD_FLAG_VALID (1 << xlate_pabit(_PxD_VALID_BIT))
-#define PxD_FLAG_MASK (0xf)
-#define PxD_FLAG_SHIFT (4)
-#define PxD_VALUE_SHIFT (8)
-
#ifndef __ASSEMBLY__
#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define pte_present(x) (pte_val(x) & _PAGE_PRESENT)
#define pte_clear(xp) do { pte_val(*(xp)) = 0; } while (0)
-#define pmd_flag(x) (pmd_val(x) & PxD_FLAG_MASK)
-#define pmd_address(x) ((unsigned long)(pmd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
-#define pgd_flag(x) (pgd_val(x) & PxD_FLAG_MASK)
-#define pgd_address(x) ((unsigned long)(pgd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
-
-#ifdef CONFIG_64BIT
+#ifdef __LP64__
/* The first entry of the permanent pmd is not there if it contains
* the gateway marker */
-#define pmd_none(x) (!pmd_val(x) || pmd_flag(x) == PxD_FLAG_ATTACHED)
+#define pmd_none(x) (!pmd_val(x) || pmd_val(x) == _PAGE_GATEWAY)
+#define pmd_bad(x) ((pmd_val(x) & ~PAGE_MASK) != _PAGE_TABLE && (pmd_val(x) & ~PAGE_MASK) != (_PAGE_TABLE | _PAGE_GATEWAY))
#else
#define pmd_none(x) (!pmd_val(x))
+#define pmd_bad(x) ((pmd_val(x) & ~PAGE_MASK) != _PAGE_TABLE)
#endif
-#define pmd_bad(x) (!(pmd_flag(x) & PxD_FLAG_VALID))
-#define pmd_present(x) (pmd_flag(x) & PxD_FLAG_PRESENT)
+#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
static inline void pmd_clear(pmd_t *pmd) {
-#ifdef CONFIG_64BIT
- if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
+#ifdef __LP64__
+ if(pmd_val(*pmd) & _PAGE_GATEWAY)
/* This is the entry pointing to the permanent pmd
* attached to the pgd; cannot clear it */
- __pmd_val_set(*pmd, PxD_FLAG_ATTACHED);
+ pmd_val(*pmd) = _PAGE_GATEWAY;
else
#endif
- __pmd_val_set(*pmd, 0);
+ pmd_val(*pmd) = 0;
}
#if PT_NLEVELS == 3
-#define pgd_page(pgd) ((unsigned long) __va(pgd_address(pgd)))
+#define pgd_page(pgd) ((unsigned long) __va(pgd_val(pgd) & PAGE_MASK))
/* For 64 bit we have three level tables */
#define pgd_none(x) (!pgd_val(x))
-#define pgd_bad(x) (!(pgd_flag(x) & PxD_FLAG_VALID))
-#define pgd_present(x) (pgd_flag(x) & PxD_FLAG_PRESENT)
+#ifdef __LP64__
+#define pgd_bad(x) ((pgd_val(x) & ~PAGE_MASK) != _PAGE_TABLE && (pgd_val(x) & ~PAGE_MASK) != (_PAGE_TABLE | _PAGE_GATEWAY))
+#else
+#define pgd_bad(x) ((pgd_val(x) & ~PAGE_MASK) != _PAGE_TABLE)
+#endif
+#define pgd_present(x) (pgd_val(x) & _PAGE_PRESENT)
static inline void pgd_clear(pgd_t *pgd) {
-#ifdef CONFIG_64BIT
- if(pgd_flag(*pgd) & PxD_FLAG_ATTACHED)
+#ifdef __LP64__
+ if(pgd_val(*pgd) & _PAGE_GATEWAY)
/* This is the permanent pmd attached to the pgd; cannot
* free it */
return;
#endif
- __pgd_val_set(*pgd, 0);
+ pgd_val(*pgd) = 0;
}
#else
/*
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
-#define pte_page(pte) (pfn_to_page(pte_pfn(pte)))
+#ifdef CONFIG_DISCONTIGMEM
+#define pte_page(x) (phys_to_page(pte_val(x)))
+#else
+#define pte_page(x) (mem_map+(pte_val(x) >> PAGE_SHIFT))
+#endif
-#define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_address(pmd)))
+#define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
-#define __pmd_page(pmd) ((unsigned long) __va(pmd_address(pmd)))
+#define __pmd_page(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd) virt_to_page((void *)__pmd_page(pmd))
#define pgd_index(address) ((address) >> PGDIR_SHIFT)
#ifdef CONFIG_SMP
if (!pte_young(*ptep))
return 0;
- return test_and_clear_bit(xlate_pabit(_PAGE_ACCESSED_BIT), &pte_val(*ptep));
+ return test_and_clear_bit(xlate_pabit(_PAGE_ACCESSED_BIT), ptep);
#else
pte_t pte = *ptep;
if (!pte_young(pte))
#ifdef CONFIG_SMP
if (!pte_dirty(*ptep))
return 0;
- return test_and_clear_bit(xlate_pabit(_PAGE_DIRTY_BIT), &pte_val(*ptep));
+ return test_and_clear_bit(xlate_pabit(_PAGE_DIRTY_BIT), ptep);
#else
pte_t pte = *ptep;
if (!pte_dirty(pte))
#endif
}
+#ifdef CONFIG_SMP
extern spinlock_t pa_dbit_lock;
+#else
+static int pa_dbit_lock; /* dummy to keep the compilers happy */
+#endif
static inline pte_t ptep_get_and_clear(pte_t *ptep)
{
static inline void ptep_mkdirty(pte_t *ptep)
{
#ifdef CONFIG_SMP
- set_bit(xlate_pabit(_PAGE_DIRTY_BIT), &pte_val(*ptep));
+ set_bit(xlate_pabit(_PAGE_DIRTY_BIT), ptep);
#else
pte_t old_pte = *ptep;
set_pte(ptep, pte_mkdirty(old_pte));
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ 0, 0 }, \
{ INR_OPEN, INR_OPEN }, \
- { 32768, 32768 }, \
+ { PAGE_SIZE, PAGE_SIZE }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ MAX_SIGPENDING, MAX_SIGPENDING }, \
extern unsigned long cpu_present_mask;
#define smp_processor_id() (current_thread_info()->cpu)
+#define cpu_online(cpu) cpu_isset(cpu, cpu_online_map)
#endif /* CONFIG_SMP */
#define NO_PROC_ID 0xFF /* No processor magic marker */
#define ANY_PROC_ID 0xFF /* Any processor magic marker */
-static inline int __cpu_disable (void) {
- return 0;
-}
-static inline void __cpu_die (unsigned int cpu) {
- while(1)
- ;
-}
-extern int __cpu_up (unsigned int cpu);
#endif /* __ASM_SMP_H */
* the semaphore address has to be 16-byte aligned.
*/
-#ifndef CONFIG_DEBUG_SPINLOCK
-
-#define __SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } }
#undef SPIN_LOCK_UNLOCKED
-#define SPIN_LOCK_UNLOCKED (spinlock_t) __SPIN_LOCK_UNLOCKED
+#define SPIN_LOCK_UNLOCKED (spinlock_t) { { 1, 1, 1, 1 } }
#define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
return __ldcw(a) != 0;
}
-#define spin_lock_own(LOCK, LOCATION) ((void)0)
-
-#else /* !(CONFIG_DEBUG_SPINLOCK) */
-
-#define SPINLOCK_MAGIC 0x1D244B3C
-
-#define __SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 }, SPINLOCK_MAGIC, 10, __FILE__ , NULL, 0, -1, NULL, NULL }
-#undef SPIN_LOCK_UNLOCKED
-#define SPIN_LOCK_UNLOCKED (spinlock_t) __SPIN_LOCK_UNLOCKED
-
-#define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
-
-#define CHECK_LOCK(x) \
- do { \
- if (unlikely((x)->magic != SPINLOCK_MAGIC)) { \
- printk(KERN_ERR "%s:%d: spin_is_locked" \
- " on uninitialized spinlock %p.\n", \
- __FILE__, __LINE__, (x)); \
- } \
- } while(0)
-
-#define spin_is_locked(x) \
- ({ \
- CHECK_LOCK(x); \
- volatile unsigned int *a = __ldcw_align(x); \
- if (unlikely((*a == 0) && (x)->babble)) { \
- (x)->babble--; \
- printk("KERN_WARNING \
- %s:%d: spin_is_locked(%s/%p) already" \
- " locked by %s:%d in %s at %p(%d)\n", \
- __FILE__,__LINE__, (x)->module, (x), \
- (x)->bfile, (x)->bline, (x)->task->comm,\
- (x)->previous, (x)->oncpu); \
- } \
- *a == 0; \
- })
-
-#define spin_unlock_wait(x) \
- do { \
- CHECK_LOCK(x); \
- volatile unsigned int *a = __ldcw_align(x); \
- if (unlikely((*a == 0) && (x)->babble)) { \
- (x)->babble--; \
- printk("KERN_WARNING \
- %s:%d: spin_unlock_wait(%s/%p)" \
- " owned by %s:%d in %s at %p(%d)\n", \
- __FILE__,__LINE__, (x)->module, (x), \
- (x)->bfile, (x)->bline, (x)->task->comm,\
- (x)->previous, (x)->oncpu); \
- } \
- barrier(); \
- } while (*((volatile unsigned char *)(__ldcw_align(x))) == 0)
-
-extern void _dbg_spin_lock(spinlock_t *lock, const char *base_file, int line_no);
-extern void _dbg_spin_unlock(spinlock_t *lock, const char *, int);
-extern int _dbg_spin_trylock(spinlock_t * lock, const char *, int);
-
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
-
-#define _raw_spin_unlock(lock) _dbg_spin_unlock(lock, __FILE__, __LINE__)
-#define _raw_spin_lock(lock) _dbg_spin_lock(lock, __FILE__, __LINE__)
-#define _raw_spin_trylock(lock) _dbg_spin_trylock(lock, __FILE__, __LINE__)
-
-/* just in case we need it */
-#define spin_lock_own(LOCK, LOCATION) \
-do { \
- volatile unsigned int *a = __ldcw_align(LOCK); \
- if (!((*a == 0) && ((LOCK)->oncpu == smp_processor_id()))) \
- printk("KERN_WARNING \
- %s: called on %d from %p but lock %s on %d\n", \
- LOCATION, smp_processor_id(), \
- __builtin_return_address(0), \
- (*a == 0) ? "taken" : "freed", (LOCK)->on_cpu); \
-} while (0)
-
-#endif /* !(CONFIG_DEBUG_SPINLOCK) */
-
/*
* Read-write spinlocks, allowing multiple readers
* but only one writer.
volatile int counter;
} rwlock_t;
-#define RW_LOCK_UNLOCKED (rwlock_t) { __SPIN_LOCK_UNLOCKED, 0 }
+#define RW_LOCK_UNLOCKED (rwlock_t) { { { 1, 1, 1, 1 } }, 0 }
#define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while (0)
/* read_lock, read_unlock are pretty straightforward. Of course it somehow
* sucks we end up saving/restoring flags twice for read_lock_irqsave aso. */
-#ifdef CONFIG_DEBUG_RWLOCK
-extern void _dbg_read_lock(rwlock_t * rw, const char *bfile, int bline);
-#define _raw_read_lock(rw) _dbg_read_lock(rw, __FILE__, __LINE__)
-#else
static __inline__ void _raw_read_lock(rwlock_t *rw)
{
unsigned long flags;
_raw_spin_unlock(&rw->lock);
local_irq_restore(flags);
}
-#endif /* CONFIG_DEBUG_RWLOCK */
static __inline__ void _raw_read_unlock(rwlock_t *rw)
{
* writers) in interrupt handlers someone fucked up and we'd dead-lock
* sooner or later anyway. prumpf */
-#ifdef CONFIG_DEBUG_RWLOCK
-extern void _dbg_write_lock(rwlock_t * rw, const char *bfile, int bline);
-#define _raw_write_lock(rw) _dbg_write_lock(rw, __FILE__, __LINE__)
-#else
static __inline__ void _raw_write_lock(rwlock_t *rw)
{
retry:
/* got it. now leave without unlocking */
rw->counter = -1; /* remember we are locked */
}
-#endif /* CONFIG_DEBUG_RWLOCK */
/* write_unlock is absolutely trivial - we don't have to wait for anything */
typedef struct {
volatile unsigned int lock[4];
-#ifdef CONFIG_DEBUG_SPINLOCK
- unsigned long magic;
- volatile unsigned int babble;
- const char *module;
- char *bfile;
- int bline;
- int oncpu;
- void *previous;
- struct task_struct * task;
-#endif
} spinlock_t;
#define __lock_aligned __attribute__((__section__(".data.lock_aligned")))
struct thread_info {
struct task_struct *task; /* main task structure */
struct exec_domain *exec_domain;/* execution domain */
- unsigned long flags; /* thread_info flags (see TIF_*) */
- mm_segment_t addr_limit; /* user-level address space limit */
+ __u32 flags; /* thread_info flags (see TIF_*) */
__u32 cpu; /* current CPU */
- __s32 preempt_count; /* 0=premptable, <0=BUG; will also serve as bh-counter */
+ mm_segment_t addr_limit; /* user-level address space limit */
struct restart_block restart_block;
+ __s32 preempt_count; /* 0=premptable, <0=BUG; will also serve as bh-counter */
};
#define INIT_THREAD_INFO(tsk) \
return sys_close(fd);
}
-static inline void _exit(int exitcode)
+static inline int _exit(int exitcode)
{
- sys_exit(exitcode);
+ return sys_exit(exitcode);
}
static inline pid_t waitpid(pid_t pid, int *wait_stat, int options)
};
struct unwind_frame_info {
+ unsigned long sp;
+ unsigned long ip;
struct task_struct *t;
/* Eventually we would like to be able to get at any of the registers
available; but for now we only try to get the sp and ip for each
frame */
/* struct pt_regs regs; */
- unsigned long sp, ip, rp;
unsigned long prev_sp, prev_ip;
};
void * unwind_table_add(const char *name, unsigned long base_addr,
unsigned long gp,
- void *start, void *end);
+ const void *start, const void *end);
void unwind_frame_init(struct unwind_frame_info *info, struct task_struct *t,
- unsigned long sp, unsigned long ip, unsigned long rp);
+ struct pt_regs *regs);
void unwind_frame_init_from_blocked_task(struct unwind_frame_info *info, struct task_struct *t);
-void unwind_frame_init_running(struct unwind_frame_info *info, struct pt_regs *regs);
int unwind_once(struct unwind_frame_info *info);
int unwind_to_user(struct unwind_frame_info *info);
int *src_err, int *dst_err);
#define csum_partial_copy_from_user(src, dst, len, sum, errp) \
- csum_partial_copy_generic((src), (dst), (len), (sum), (errp), NULL)
+ csum_partial_copy_generic((src), (dst), (len), (sum), (errp), 0)
/* FIXME: this needs to be written to really do no check -- Cort */
#define csum_partial_copy_nocheck(src, dst, len, sum) \
- csum_partial_copy_generic((src), (dst), (len), (sum), NULL, NULL)
+ csum_partial_copy_generic((src), (dst), (len), (sum), 0, 0)
/*
* turns a 32-bit partial checksum (e.g. from csum_partial) into a
#define CPM_DATAONLY_SIZE ((uint)0x0700)
#define CPM_DP_NOSPACE ((uint)0x7fffffff)
-static inline long IS_DPERR(const uint offset)
-{
- return (uint)offset > (uint)-1000L;
-}
-
/* Export the base address of the communication processor registers
* and dual port ram.
*/
extern cpm8xx_t *cpmp; /* Pointer to comm processor */
-extern uint cpm_dpalloc(uint size, uint align);
-extern int cpm_dpfree(uint offset);
-extern uint cpm_dpalloc_fixed(uint offset, uint size, uint align);
-extern void cpm_dpdump(void);
-extern void *cpm_dpram_addr(uint offset);
-extern void cpm_setbrg(uint brg, uint rate);
-
+extern void *m8xx_cpm_dpalloc(int size);
+extern int m8xx_cpm_dpfree(void *addr);
+extern void *m8xx_cpm_dpalloc_fixed(void *addr, int size);
+extern void m8xx_cpm_dpdump(void);
+extern int m8xx_cpm_dpram_offset(void *addr);
+extern void *m8xx_cpm_dpram_addr(int offset);
uint m8xx_cpm_hostalloc(uint size);
+void m8xx_cpm_setbrg(uint brg, uint rate);
/* Buffer descriptors used by many of the CPM protocols.
*/
*/
#define NUM_CPM_HOST_PAGES 2
-static inline long IS_DPERR(const uint offset)
-{
- return (uint)offset > (uint)-1000L;
-}
/* Export the base address of the communication processor registers
* and dual port ram.
*/
extern cpm_cpm2_t *cpmp; /* Pointer to comm processor */
-extern uint cpm_dpalloc(uint size, uint align);
-extern int cpm_dpfree(uint offset);
-extern uint cpm_dpalloc_fixed(uint offset, uint size, uint align);
-extern void cpm_dpdump(void);
-extern void *cpm_dpram_addr(uint offset);
-extern void cpm_setbrg(uint brg, uint rate);
+extern void *cpm2_dpalloc(uint size, uint align);
+extern int cpm2_dpfree(void *addr);
+extern void *cpm2_dpalloc_fixed(void *addr, uint size, uint allign);
+extern void cpm2_dpdump(void);
+extern unsigned int cpm2_dpram_offset(void *addr);
+extern void *cpm2_dpram_addr(int offset);
+extern void cpm2_setbrg(uint brg, uint rate);
extern void cpm2_fastbrg(uint brg, uint rate, int div16);
/* Buffer descriptors used by many of the CPM protocols.
#define CPU_FTR_NO_DPM 0x00008000
#define CPU_FTR_HAS_HIGH_BATS 0x00010000
#define CPU_FTR_NEED_COHERENT 0x00020000
-#define CPU_FTR_NO_BTIC 0x00040000
#ifdef __ASSEMBLY__
#define O_LARGEFILE 0200000
#define O_DIRECT 0400000 /* direct disk access hint */
#define O_NOATIME 01000000
-#define O_ATOMICLOOKUP 01000000 /* tux hack */
#define F_DUPFD 0 /* dup */
#define F_GETFD 1 /* get close_on_exec */
BUG_ON(!pte_none(*(kmap_pte+idx)));
#endif
set_pte(kmap_pte+idx, mk_pte(page, kmap_prot));
- flush_tlb_page(NULL, vaddr);
+ flush_tlb_page(0, vaddr);
return (void*) vaddr;
}
* this pte without first remap it
*/
pte_clear(kmap_pte+idx);
- flush_tlb_page(NULL, vaddr);
+ flush_tlb_page(0, vaddr);
#endif
dec_preempt_count();
preempt_check_resched();
{
#ifndef CONFIG_APUS
if (address == 0)
- return NULL;
+ return 0;
return (void *)(address - PCI_DRAM_OFFSET + KERNELBASE);
#else
return (void*) mm_ptov (address);
return irq;
}
-#elif defined(CONFIG_CPM2) && defined(CONFIG_85xx)
-/* Now include the board configuration specific associations.
-*/
-#include <asm/mpc85xx.h>
-
-/* The MPC8560 openpic has 32 internal interrupts and 12 external
- * interrupts.
- *
- * We are "flattening" the interrupt vectors of the cascaded CPM
- * so that we can uniquely identify any interrupt source with a
- * single integer.
- */
-#define NR_CPM_INTS 64
-#define NR_EPIC_INTS 44
-#ifndef NR_8259_INTS
-#define NR_8259_INTS 0
-#endif
-#define NUM_8259_INTERRUPTS NR_8259_INTS
-
-#ifndef CPM_IRQ_OFFSET
-#define CPM_IRQ_OFFSET 0
-#endif
-
-#define NR_IRQS (NR_EPIC_INTS + NR_CPM_INTS + NR_8259_INTS)
-
-/* These values must be zero-based and map 1:1 with the EPIC configuration.
- * They are used throughout the 8560 I/O subsystem to generate
- * interrupt masks, flags, and other control patterns. This is why the
- * current kernel assumption of the 8259 as the base controller is such
- * a pain in the butt.
- */
-
-#define SIU_INT_ERROR ((uint)0x00+CPM_IRQ_OFFSET)
-#define SIU_INT_I2C ((uint)0x01+CPM_IRQ_OFFSET)
-#define SIU_INT_SPI ((uint)0x02+CPM_IRQ_OFFSET)
-#define SIU_INT_RISC ((uint)0x03+CPM_IRQ_OFFSET)
-#define SIU_INT_SMC1 ((uint)0x04+CPM_IRQ_OFFSET)
-#define SIU_INT_SMC2 ((uint)0x05+CPM_IRQ_OFFSET)
-#define SIU_INT_TIMER1 ((uint)0x0c+CPM_IRQ_OFFSET)
-#define SIU_INT_TIMER2 ((uint)0x0d+CPM_IRQ_OFFSET)
-#define SIU_INT_TIMER3 ((uint)0x0e+CPM_IRQ_OFFSET)
-#define SIU_INT_TIMER4 ((uint)0x0f+CPM_IRQ_OFFSET)
-#define SIU_INT_FCC1 ((uint)0x20+CPM_IRQ_OFFSET)
-#define SIU_INT_FCC2 ((uint)0x21+CPM_IRQ_OFFSET)
-#define SIU_INT_FCC3 ((uint)0x22+CPM_IRQ_OFFSET)
-#define SIU_INT_MCC1 ((uint)0x24+CPM_IRQ_OFFSET)
-#define SIU_INT_MCC2 ((uint)0x25+CPM_IRQ_OFFSET)
-#define SIU_INT_SCC1 ((uint)0x28+CPM_IRQ_OFFSET)
-#define SIU_INT_SCC2 ((uint)0x29+CPM_IRQ_OFFSET)
-#define SIU_INT_SCC3 ((uint)0x2a+CPM_IRQ_OFFSET)
-#define SIU_INT_SCC4 ((uint)0x2b+CPM_IRQ_OFFSET)
-#define SIU_INT_PC15 ((uint)0x30+CPM_IRQ_OFFSET)
-#define SIU_INT_PC14 ((uint)0x31+CPM_IRQ_OFFSET)
-#define SIU_INT_PC13 ((uint)0x32+CPM_IRQ_OFFSET)
-#define SIU_INT_PC12 ((uint)0x33+CPM_IRQ_OFFSET)
-#define SIU_INT_PC11 ((uint)0x34+CPM_IRQ_OFFSET)
-#define SIU_INT_PC10 ((uint)0x35+CPM_IRQ_OFFSET)
-#define SIU_INT_PC9 ((uint)0x36+CPM_IRQ_OFFSET)
-#define SIU_INT_PC8 ((uint)0x37+CPM_IRQ_OFFSET)
-#define SIU_INT_PC7 ((uint)0x38+CPM_IRQ_OFFSET)
-#define SIU_INT_PC6 ((uint)0x39+CPM_IRQ_OFFSET)
-#define SIU_INT_PC5 ((uint)0x3a+CPM_IRQ_OFFSET)
-#define SIU_INT_PC4 ((uint)0x3b+CPM_IRQ_OFFSET)
-#define SIU_INT_PC3 ((uint)0x3c+CPM_IRQ_OFFSET)
-#define SIU_INT_PC2 ((uint)0x3d+CPM_IRQ_OFFSET)
-#define SIU_INT_PC1 ((uint)0x3e+CPM_IRQ_OFFSET)
-#define SIU_INT_PC0 ((uint)0x3f+CPM_IRQ_OFFSET)
-
-static __inline__ int irq_canonicalize(int irq)
-{
- return irq;
-}
-
#else /* CONFIG_40x + CONFIG_8xx */
/*
* this is the # irq's for all ppc arch's (pmac/chrp/prep)
#define LAST_CONTEXT 255
#define FIRST_CONTEXT 1
-#elif defined(CONFIG_E500)
+#elif CONFIG_E500
#define NO_CONTEXT 256
#define LAST_CONTEXT 255
#define FIRST_CONTEXT 1
+++ /dev/null
-/*
- * include/asm-ppc/mpc52xx.h
- *
- * Prototypes, etc. for the Freescale MPC52xx embedded cpu chips
- * May need to be cleaned as the port goes on ...
- *
- *
- * Maintainer : Sylvain Munaut <tnt@246tNt.com>
- *
- * Originally written by Dale Farnsworth <dfarnsworth@mvista.com>
- * for the 2.4 kernel.
- *
- * Copyright (C) 2004 Sylvain Munaut <tnt@246tNt.com>
- * Copyright (C) 2003 MontaVista, Software, Inc.
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#ifndef __ASM_MPC52xx_H__
-#define __ASM_MPC52xx_H__
-
-#ifndef __ASSEMBLY__
-#include <asm/ppcboot.h>
-#include <asm/types.h>
-
-struct pt_regs;
-struct ocp_def;
-#endif /* __ASSEMBLY__ */
-
-
-/* ======================================================================== */
-/* Main registers/struct addresses */
-/* ======================================================================== */
-/* Theses are PHYSICAL addresses ! */
-/* TODO : There should be no static mapping, but it's not yet the case, so */
-/* we require a 1:1 mapping */
-
-#define MPC52xx_MBAR 0xf0000000 /* Phys address */
-#define MPC52xx_MBAR_SIZE 0x00010000
-#define MPC52xx_MBAR_VIRT 0xf0000000 /* Virt address */
-
-#define MPC52xx_MMAP_CTL (MPC52xx_MBAR + 0x0000)
-#define MPC52xx_CDM (MPC52xx_MBAR + 0x0200)
-#define MPC52xx_SFTRST (MPC52xx_MBAR + 0x0220)
-#define MPC52xx_SFTRST_BIT 0x01000000
-#define MPC52xx_INTR (MPC52xx_MBAR + 0x0500)
-#define MPC52xx_GPTx(x) (MPC52xx_MBAR + 0x0600 + ((x)<<4))
-#define MPC52xx_RTC (MPC52xx_MBAR + 0x0800)
-#define MPC52xx_MSCAN1 (MPC52xx_MBAR + 0x0900)
-#define MPC52xx_MSCAN2 (MPC52xx_MBAR + 0x0980)
-#define MPC52xx_GPIO (MPC52xx_MBAR + 0x0b00)
-#define MPC52xx_PCI (MPC52xx_MBAR + 0x0d00)
-#define MPC52xx_USB_OHCI (MPC52xx_MBAR + 0x1000)
-#define MPC52xx_SDMA (MPC52xx_MBAR + 0x1200)
-#define MPC52xx_XLB (MPC52xx_MBAR + 0x1f00)
-#define MPC52xx_PSCx(x) (MPC52xx_MBAR + 0x2000 + ((x)<<9))
-#define MPC52xx_PSC1 (MPC52xx_MBAR + 0x2000)
-#define MPC52xx_PSC2 (MPC52xx_MBAR + 0x2200)
-#define MPC52xx_PSC3 (MPC52xx_MBAR + 0x2400)
-#define MPC52xx_PSC4 (MPC52xx_MBAR + 0x2600)
-#define MPC52xx_PSC5 (MPC52xx_MBAR + 0x2800)
-#define MPC52xx_PSC6 (MPC52xx_MBAR + 0x2C00)
-#define MPC52xx_FEC (MPC52xx_MBAR + 0x3000)
-#define MPC52xx_ATA (MPC52xx_MBAR + 0x3a00)
-#define MPC52xx_I2C1 (MPC52xx_MBAR + 0x3d00)
-#define MPC52xx_I2C_MICR (MPC52xx_MBAR + 0x3d20)
-#define MPC52xx_I2C2 (MPC52xx_MBAR + 0x3d40)
-
-/* SRAM used for SDMA */
-#define MPC52xx_SRAM (MPC52xx_MBAR + 0x8000)
-#define MPC52xx_SRAM_SIZE (16*1024)
-#define MPC52xx_SDMA_MAX_TASKS 16
-
- /* Memory allocation block size */
-#define MPC52xx_SDRAM_UNIT 0x8000 /* 32K byte */
-
-
-/* ======================================================================== */
-/* IRQ mapping */
-/* ======================================================================== */
-/* Be sure to look at mpc52xx_pic.h if you wish for whatever reason to change
- * this
- */
-
-#define MPC52xx_CRIT_IRQ_NUM 4
-#define MPC52xx_MAIN_IRQ_NUM 17
-#define MPC52xx_SDMA_IRQ_NUM 17
-#define MPC52xx_PERP_IRQ_NUM 23
-
-#define MPC52xx_CRIT_IRQ_BASE 0
-#define MPC52xx_MAIN_IRQ_BASE (MPC52xx_CRIT_IRQ_BASE + MPC52xx_CRIT_IRQ_NUM)
-#define MPC52xx_SDMA_IRQ_BASE (MPC52xx_MAIN_IRQ_BASE + MPC52xx_MAIN_IRQ_NUM)
-#define MPC52xx_PERP_IRQ_BASE (MPC52xx_SDMA_IRQ_BASE + MPC52xx_SDMA_IRQ_NUM)
-
-#define MPC52xx_IRQ0 (MPC52xx_CRIT_IRQ_BASE + 0)
-#define MPC52xx_SLICE_TIMER_0_IRQ (MPC52xx_CRIT_IRQ_BASE + 1)
-#define MPC52xx_HI_INT_IRQ (MPC52xx_CRIT_IRQ_BASE + 2)
-#define MPC52xx_CCS_IRQ (MPC52xx_CRIT_IRQ_BASE + 3)
-
-#define MPC52xx_IRQ1 (MPC52xx_MAIN_IRQ_BASE + 1)
-#define MPC52xx_IRQ2 (MPC52xx_MAIN_IRQ_BASE + 2)
-#define MPC52xx_IRQ3 (MPC52xx_MAIN_IRQ_BASE + 3)
-
-#define MPC52xx_SDMA_IRQ (MPC52xx_PERP_IRQ_BASE + 0)
-#define MPC52xx_PSC1_IRQ (MPC52xx_PERP_IRQ_BASE + 1)
-#define MPC52xx_PSC2_IRQ (MPC52xx_PERP_IRQ_BASE + 2)
-#define MPC52xx_PSC3_IRQ (MPC52xx_PERP_IRQ_BASE + 3)
-#define MPC52xx_PSC6_IRQ (MPC52xx_PERP_IRQ_BASE + 4)
-#define MPC52xx_IRDA_IRQ (MPC52xx_PERP_IRQ_BASE + 4)
-#define MPC52xx_FEC_IRQ (MPC52xx_PERP_IRQ_BASE + 5)
-#define MPC52xx_USB_IRQ (MPC52xx_PERP_IRQ_BASE + 6)
-#define MPC52xx_ATA_IRQ (MPC52xx_PERP_IRQ_BASE + 7)
-#define MPC52xx_PCI_CNTRL_IRQ (MPC52xx_PERP_IRQ_BASE + 8)
-#define MPC52xx_PCI_SCIRX_IRQ (MPC52xx_PERP_IRQ_BASE + 9)
-#define MPC52xx_PCI_SCITX_IRQ (MPC52xx_PERP_IRQ_BASE + 10)
-#define MPC52xx_PSC4_IRQ (MPC52xx_PERP_IRQ_BASE + 11)
-#define MPC52xx_PSC5_IRQ (MPC52xx_PERP_IRQ_BASE + 12)
-#define MPC52xx_SPI_MODF_IRQ (MPC52xx_PERP_IRQ_BASE + 13)
-#define MPC52xx_SPI_SPIF_IRQ (MPC52xx_PERP_IRQ_BASE + 14)
-#define MPC52xx_I2C1_IRQ (MPC52xx_PERP_IRQ_BASE + 15)
-#define MPC52xx_I2C2_IRQ (MPC52xx_PERP_IRQ_BASE + 16)
-#define MPC52xx_CAN1_IRQ (MPC52xx_PERP_IRQ_BASE + 17)
-#define MPC52xx_CAN2_IRQ (MPC52xx_PERP_IRQ_BASE + 18)
-#define MPC52xx_IR_RX_IRQ (MPC52xx_PERP_IRQ_BASE + 19)
-#define MPC52xx_IR_TX_IRQ (MPC52xx_PERP_IRQ_BASE + 20)
-#define MPC52xx_XLB_ARB_IRQ (MPC52xx_PERP_IRQ_BASE + 21)
-
-
-
-/* ======================================================================== */
-/* Structures mapping of some unit register set */
-/* ======================================================================== */
-
-#ifndef __ASSEMBLY__
-
-/* Memory Mapping Control */
-struct mpc52xx_mmap_ctl {
- volatile u32 mbar; /* MMAP_CTRL + 0x00 */
-
- volatile u32 cs0_start; /* MMAP_CTRL + 0x04 */
- volatile u32 cs0_stop; /* MMAP_CTRL + 0x08 */
- volatile u32 cs1_start; /* MMAP_CTRL + 0x0c */
- volatile u32 cs1_stop; /* MMAP_CTRL + 0x10 */
- volatile u32 cs2_start; /* MMAP_CTRL + 0x14 */
- volatile u32 cs2_stop; /* MMAP_CTRL + 0x18 */
- volatile u32 cs3_start; /* MMAP_CTRL + 0x1c */
- volatile u32 cs3_stop; /* MMAP_CTRL + 0x20 */
- volatile u32 cs4_start; /* MMAP_CTRL + 0x24 */
- volatile u32 cs4_stop; /* MMAP_CTRL + 0x28 */
- volatile u32 cs5_start; /* MMAP_CTRL + 0x2c */
- volatile u32 cs5_stop; /* MMAP_CTRL + 0x30 */
-
- volatile u32 sdram0; /* MMAP_CTRL + 0x34 */
- volatile u32 sdram1; /* MMAP_CTRL + 0X38 */
-
- volatile u32 reserved[4]; /* MMAP_CTRL + 0x3c .. 0x48 */
-
- volatile u32 boot_start; /* MMAP_CTRL + 0x4c */
- volatile u32 boot_stop; /* MMAP_CTRL + 0x50 */
-
- volatile u32 ipbi_ws_ctrl; /* MMAP_CTRL + 0x54 */
-
- volatile u32 cs6_start; /* MMAP_CTRL + 0x58 */
- volatile u32 cs6_stop; /* MMAP_CTRL + 0x5c */
- volatile u32 cs7_start; /* MMAP_CTRL + 0x60 */
- volatile u32 cs7_stop; /* MMAP_CTRL + 0x60 */
-};
-
-/* Interrupt controller */
-struct mpc52xx_intr {
- volatile u32 per_mask; /* INTR + 0x00 */
- volatile u32 per_pri1; /* INTR + 0x04 */
- volatile u32 per_pri2; /* INTR + 0x08 */
- volatile u32 per_pri3; /* INTR + 0x0c */
- volatile u32 ctrl; /* INTR + 0x10 */
- volatile u32 main_mask; /* INTR + 0x14 */
- volatile u32 main_pri1; /* INTR + 0x18 */
- volatile u32 main_pri2; /* INTR + 0x1c */
- volatile u32 reserved1; /* INTR + 0x20 */
- volatile u32 enc_status; /* INTR + 0x24 */
- volatile u32 crit_status; /* INTR + 0x28 */
- volatile u32 main_status; /* INTR + 0x2c */
- volatile u32 per_status; /* INTR + 0x30 */
- volatile u32 reserved2; /* INTR + 0x34 */
- volatile u32 per_error; /* INTR + 0x38 */
-};
-
-/* SDMA */
-struct mpc52xx_sdma {
- volatile u32 taskBar; /* SDMA + 0x00 */
- volatile u32 currentPointer; /* SDMA + 0x04 */
- volatile u32 endPointer; /* SDMA + 0x08 */
- volatile u32 variablePointer;/* SDMA + 0x0c */
-
- volatile u8 IntVect1; /* SDMA + 0x10 */
- volatile u8 IntVect2; /* SDMA + 0x11 */
- volatile u16 PtdCntrl; /* SDMA + 0x12 */
-
- volatile u32 IntPend; /* SDMA + 0x14 */
- volatile u32 IntMask; /* SDMA + 0x18 */
-
- volatile u16 tcr[16]; /* SDMA + 0x1c .. 0x3a */
-
- volatile u8 ipr[31]; /* SDMA + 0x3c .. 5b */
-
- volatile u32 res1; /* SDMA + 0x5c */
- volatile u32 task_size0; /* SDMA + 0x60 */
- volatile u32 task_size1; /* SDMA + 0x64 */
- volatile u32 MDEDebug; /* SDMA + 0x68 */
- volatile u32 ADSDebug; /* SDMA + 0x6c */
- volatile u32 Value1; /* SDMA + 0x70 */
- volatile u32 Value2; /* SDMA + 0x74 */
- volatile u32 Control; /* SDMA + 0x78 */
- volatile u32 Status; /* SDMA + 0x7c */
-};
-
-/* GPT */
-struct mpc52xx_gpt {
- volatile u32 mode; /* GPTx + 0x00 */
- volatile u32 count; /* GPTx + 0x04 */
- volatile u32 pwm; /* GPTx + 0x08 */
- volatile u32 status; /* GPTx + 0X0c */
-};
-
-/* RTC */
-struct mpc52xx_rtc {
- volatile u32 time_set; /* RTC + 0x00 */
- volatile u32 date_set; /* RTC + 0x04 */
- volatile u32 stopwatch; /* RTC + 0x08 */
- volatile u32 int_enable; /* RTC + 0x0c */
- volatile u32 time; /* RTC + 0x10 */
- volatile u32 date; /* RTC + 0x14 */
- volatile u32 stopwatch_intr; /* RTC + 0x18 */
- volatile u32 bus_error; /* RTC + 0x1c */
- volatile u32 dividers; /* RTC + 0x20 */
-};
-
-/* GPIO */
-struct mpc52xx_gpio {
- volatile u32 port_config; /* GPIO + 0x00 */
- volatile u32 simple_gpioe; /* GPIO + 0x04 */
- volatile u32 simple_ode; /* GPIO + 0x08 */
- volatile u32 simple_ddr; /* GPIO + 0x0c */
- volatile u32 simple_dvo; /* GPIO + 0x10 */
- volatile u32 simple_ival; /* GPIO + 0x14 */
- volatile u8 outo_gpioe; /* GPIO + 0x18 */
- volatile u8 reserved1[3]; /* GPIO + 0x19 */
- volatile u8 outo_dvo; /* GPIO + 0x1c */
- volatile u8 reserved2[3]; /* GPIO + 0x1d */
- volatile u8 sint_gpioe; /* GPIO + 0x20 */
- volatile u8 reserved3[3]; /* GPIO + 0x21 */
- volatile u8 sint_ode; /* GPIO + 0x24 */
- volatile u8 reserved4[3]; /* GPIO + 0x25 */
- volatile u8 sint_ddr; /* GPIO + 0x28 */
- volatile u8 reserved5[3]; /* GPIO + 0x29 */
- volatile u8 sint_dvo; /* GPIO + 0x2c */
- volatile u8 reserved6[3]; /* GPIO + 0x2d */
- volatile u8 sint_inten; /* GPIO + 0x30 */
- volatile u8 reserved7[3]; /* GPIO + 0x31 */
- volatile u16 sint_itype; /* GPIO + 0x34 */
- volatile u16 reserved8; /* GPIO + 0x36 */
- volatile u8 gpio_control; /* GPIO + 0x38 */
- volatile u8 reserved9[3]; /* GPIO + 0x39 */
- volatile u8 sint_istat; /* GPIO + 0x3c */
- volatile u8 sint_ival; /* GPIO + 0x3d */
- volatile u8 bus_errs; /* GPIO + 0x3e */
- volatile u8 reserved10; /* GPIO + 0x3f */
-};
-
-#define MPC52xx_GPIO_PSC_CONFIG_UART_WITHOUT_CD 4
-#define MPC52xx_GPIO_PSC_CONFIG_UART_WITH_CD 5
-#define MPC52xx_GPIO_PCI_DIS (1<<15)
-
-/* XLB Bus control */
-struct mpc52xx_xlb {
- volatile u8 reserved[0x40];
- volatile u32 config; /* XLB + 0x40 */
- volatile u32 version; /* XLB + 0x44 */
- volatile u32 status; /* XLB + 0x48 */
- volatile u32 int_enable; /* XLB + 0x4c */
- volatile u32 addr_capture; /* XLB + 0x50 */
- volatile u32 bus_sig_capture; /* XLB + 0x54 */
- volatile u32 addr_timeout; /* XLB + 0x58 */
- volatile u32 data_timeout; /* XLB + 0x5c */
- volatile u32 bus_act_timeout; /* XLB + 0x60 */
- volatile u32 master_pri_enable; /* XLB + 0x64 */
- volatile u32 master_priority; /* XLB + 0x68 */
- volatile u32 base_address; /* XLB + 0x6c */
- volatile u32 snoop_window; /* XLB + 0x70 */
-};
-
-
-/* Clock Distribution control */
-struct mpc52xx_cdm {
- volatile u32 jtag_id; /* MBAR_CDM + 0x00 reg0 read only */
- volatile u32 rstcfg; /* MBAR_CDM + 0x04 reg1 read only */
- volatile u32 breadcrumb; /* MBAR_CDM + 0x08 reg2 */
-
- volatile u8 mem_clk_sel; /* MBAR_CDM + 0x0c reg3 byte0 */
- volatile u8 xlb_clk_sel; /* MBAR_CDM + 0x0d reg3 byte1 read only */
- volatile u8 ipb_clk_sel; /* MBAR_CDM + 0x0e reg3 byte2 */
- volatile u8 pci_clk_sel; /* MBAR_CDM + 0x0f reg3 byte3 */
-
- volatile u8 ext_48mhz_en; /* MBAR_CDM + 0x10 reg4 byte0 */
- volatile u8 fd_enable; /* MBAR_CDM + 0x11 reg4 byte1 */
- volatile u16 fd_counters; /* MBAR_CDM + 0x12 reg4 byte2,3 */
-
- volatile u32 clk_enables; /* MBAR_CDM + 0x14 reg5 */
-
- volatile u8 osc_disable; /* MBAR_CDM + 0x18 reg6 byte0 */
- volatile u8 reserved0[3]; /* MBAR_CDM + 0x19 reg6 byte1,2,3 */
-
- volatile u8 ccs_sleep_enable;/* MBAR_CDM + 0x1c reg7 byte0 */
- volatile u8 osc_sleep_enable;/* MBAR_CDM + 0x1d reg7 byte1 */
- volatile u8 reserved1; /* MBAR_CDM + 0x1e reg7 byte2 */
- volatile u8 ccs_qreq_test; /* MBAR_CDM + 0x1f reg7 byte3 */
-
- volatile u8 soft_reset; /* MBAR_CDM + 0x20 u8 byte0 */
- volatile u8 no_ckstp; /* MBAR_CDM + 0x21 u8 byte0 */
- volatile u8 reserved2[2]; /* MBAR_CDM + 0x22 u8 byte1,2,3 */
-
- volatile u8 pll_lock; /* MBAR_CDM + 0x24 reg9 byte0 */
- volatile u8 pll_looselock; /* MBAR_CDM + 0x25 reg9 byte1 */
- volatile u8 pll_sm_lockwin; /* MBAR_CDM + 0x26 reg9 byte2 */
- volatile u8 reserved3; /* MBAR_CDM + 0x27 reg9 byte3 */
-
- volatile u16 reserved4; /* MBAR_CDM + 0x28 reg10 byte0,1 */
- volatile u16 mclken_div_psc1;/* MBAR_CDM + 0x2a reg10 byte2,3 */
-
- volatile u16 reserved5; /* MBAR_CDM + 0x2c reg11 byte0,1 */
- volatile u16 mclken_div_psc2;/* MBAR_CDM + 0x2e reg11 byte2,3 */
-
- volatile u16 reserved6; /* MBAR_CDM + 0x30 reg12 byte0,1 */
- volatile u16 mclken_div_psc3;/* MBAR_CDM + 0x32 reg12 byte2,3 */
-
- volatile u16 reserved7; /* MBAR_CDM + 0x34 reg13 byte0,1 */
- volatile u16 mclken_div_psc6;/* MBAR_CDM + 0x36 reg13 byte2,3 */
-};
-
-#endif /* __ASSEMBLY__ */
-
-
-/* ========================================================================= */
-/* Prototypes for MPC52xx syslib */
-/* ========================================================================= */
-
-#ifndef __ASSEMBLY__
-
-extern void mpc52xx_init_irq(void);
-extern int mpc52xx_get_irq(struct pt_regs *regs);
-
-extern unsigned long mpc52xx_find_end_of_memory(void);
-extern void mpc52xx_set_bat(void);
-extern void mpc52xx_map_io(void);
-extern void mpc52xx_restart(char *cmd);
-extern void mpc52xx_halt(void);
-extern void mpc52xx_power_off(void);
-extern void mpc52xx_progress(char *s, unsigned short hex);
-extern void mpc52xx_calibrate_decr(void);
-extern void mpc52xx_add_board_devices(struct ocp_def board_ocp[]);
-
-#endif /* __ASSEMBLY__ */
-
-
-/* ========================================================================= */
-/* Platform configuration */
-/* ========================================================================= */
-
-/* The U-Boot platform information struct */
-extern bd_t __res;
-
-/* Platform options */
-#if defined(CONFIG_LITE5200)
-#include <platforms/lite5200.h>
-#endif
-
-
-#endif /* __ASM_MPC52xx_H__ */
+++ /dev/null
-/*
- * include/asm-ppc/mpc52xx_psc.h
- *
- * Definitions of consts/structs to drive the Freescale MPC52xx OnChip
- * PSCs. Theses are shared between multiple drivers since a PSC can be
- * UART, AC97, IR, I2S, ... So this header is in asm-ppc.
- *
- *
- * Maintainer : Sylvain Munaut <tnt@246tNt.com>
- *
- * Based/Extracted from some header of the 2.4 originally written by
- * Dale Farnsworth <dfarnsworth@mvista.com>
- *
- * Copyright (C) 2004 Sylvain Munaut <tnt@246tNt.com>
- * Copyright (C) 2003 MontaVista, Software, Inc.
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#ifndef __MPC52xx_PSC_H__
-#define __MPC52xx_PSC_H__
-
-#include <asm/types.h>
-
-/* Max number of PSCs */
-#define MPC52xx_PSC_MAXNUM 6
-
-/* Programmable Serial Controller (PSC) status register bits */
-#define MPC52xx_PSC_SR_CDE 0x0080
-#define MPC52xx_PSC_SR_RXRDY 0x0100
-#define MPC52xx_PSC_SR_RXFULL 0x0200
-#define MPC52xx_PSC_SR_TXRDY 0x0400
-#define MPC52xx_PSC_SR_TXEMP 0x0800
-#define MPC52xx_PSC_SR_OE 0x1000
-#define MPC52xx_PSC_SR_PE 0x2000
-#define MPC52xx_PSC_SR_FE 0x4000
-#define MPC52xx_PSC_SR_RB 0x8000
-
-/* PSC Command values */
-#define MPC52xx_PSC_RX_ENABLE 0x0001
-#define MPC52xx_PSC_RX_DISABLE 0x0002
-#define MPC52xx_PSC_TX_ENABLE 0x0004
-#define MPC52xx_PSC_TX_DISABLE 0x0008
-#define MPC52xx_PSC_SEL_MODE_REG_1 0x0010
-#define MPC52xx_PSC_RST_RX 0x0020
-#define MPC52xx_PSC_RST_TX 0x0030
-#define MPC52xx_PSC_RST_ERR_STAT 0x0040
-#define MPC52xx_PSC_RST_BRK_CHG_INT 0x0050
-#define MPC52xx_PSC_START_BRK 0x0060
-#define MPC52xx_PSC_STOP_BRK 0x0070
-
-/* PSC TxRx FIFO status bits */
-#define MPC52xx_PSC_RXTX_FIFO_ERR 0x0040
-#define MPC52xx_PSC_RXTX_FIFO_UF 0x0020
-#define MPC52xx_PSC_RXTX_FIFO_OF 0x0010
-#define MPC52xx_PSC_RXTX_FIFO_FR 0x0008
-#define MPC52xx_PSC_RXTX_FIFO_FULL 0x0004
-#define MPC52xx_PSC_RXTX_FIFO_ALARM 0x0002
-#define MPC52xx_PSC_RXTX_FIFO_EMPTY 0x0001
-
-/* PSC interrupt mask bits */
-#define MPC52xx_PSC_IMR_TXRDY 0x0100
-#define MPC52xx_PSC_IMR_RXRDY 0x0200
-#define MPC52xx_PSC_IMR_DB 0x0400
-#define MPC52xx_PSC_IMR_IPC 0x8000
-
-/* PSC input port change bit */
-#define MPC52xx_PSC_CTS 0x01
-#define MPC52xx_PSC_DCD 0x02
-#define MPC52xx_PSC_D_CTS 0x10
-#define MPC52xx_PSC_D_DCD 0x20
-
-/* PSC mode fields */
-#define MPC52xx_PSC_MODE_5_BITS 0x00
-#define MPC52xx_PSC_MODE_6_BITS 0x01
-#define MPC52xx_PSC_MODE_7_BITS 0x02
-#define MPC52xx_PSC_MODE_8_BITS 0x03
-#define MPC52xx_PSC_MODE_BITS_MASK 0x03
-#define MPC52xx_PSC_MODE_PAREVEN 0x00
-#define MPC52xx_PSC_MODE_PARODD 0x04
-#define MPC52xx_PSC_MODE_PARFORCE 0x08
-#define MPC52xx_PSC_MODE_PARNONE 0x10
-#define MPC52xx_PSC_MODE_ERR 0x20
-#define MPC52xx_PSC_MODE_FFULL 0x40
-#define MPC52xx_PSC_MODE_RXRTS 0x80
-
-#define MPC52xx_PSC_MODE_ONE_STOP_5_BITS 0x00
-#define MPC52xx_PSC_MODE_ONE_STOP 0x07
-#define MPC52xx_PSC_MODE_TWO_STOP 0x0f
-
-#define MPC52xx_PSC_RFNUM_MASK 0x01ff
-
-
-/* Structure of the hardware registers */
-struct mpc52xx_psc {
- volatile u8 mode; /* PSC + 0x00 */
- volatile u8 reserved0[3];
- union { /* PSC + 0x04 */
- volatile u16 status;
- volatile u16 clock_select;
- } sr_csr;
-#define mpc52xx_psc_status sr_csr.status
-#define mpc52xx_psc_clock_select sr_csr.clock_select
- volatile u16 reserved1;
- volatile u8 command; /* PSC + 0x08 */
-volatile u8 reserved2[3];
- union { /* PSC + 0x0c */
- volatile u8 buffer_8;
- volatile u16 buffer_16;
- volatile u32 buffer_32;
- } buffer;
-#define mpc52xx_psc_buffer_8 buffer.buffer_8
-#define mpc52xx_psc_buffer_16 buffer.buffer_16
-#define mpc52xx_psc_buffer_32 buffer.buffer_32
- union { /* PSC + 0x10 */
- volatile u8 ipcr;
- volatile u8 acr;
- } ipcr_acr;
-#define mpc52xx_psc_ipcr ipcr_acr.ipcr
-#define mpc52xx_psc_acr ipcr_acr.acr
- volatile u8 reserved3[3];
- union { /* PSC + 0x14 */
- volatile u16 isr;
- volatile u16 imr;
- } isr_imr;
-#define mpc52xx_psc_isr isr_imr.isr
-#define mpc52xx_psc_imr isr_imr.imr
- volatile u16 reserved4;
- volatile u8 ctur; /* PSC + 0x18 */
- volatile u8 reserved5[3];
- volatile u8 ctlr; /* PSC + 0x1c */
- volatile u8 reserved6[3];
- volatile u16 ccr; /* PSC + 0x20 */
- volatile u8 reserved7[14];
- volatile u8 ivr; /* PSC + 0x30 */
- volatile u8 reserved8[3];
- volatile u8 ip; /* PSC + 0x34 */
- volatile u8 reserved9[3];
- volatile u8 op1; /* PSC + 0x38 */
- volatile u8 reserved10[3];
- volatile u8 op0; /* PSC + 0x3c */
- volatile u8 reserved11[3];
- volatile u32 sicr; /* PSC + 0x40 */
- volatile u8 ircr1; /* PSC + 0x44 */
- volatile u8 reserved13[3];
- volatile u8 ircr2; /* PSC + 0x44 */
- volatile u8 reserved14[3];
- volatile u8 irsdr; /* PSC + 0x4c */
- volatile u8 reserved15[3];
- volatile u8 irmdr; /* PSC + 0x50 */
- volatile u8 reserved16[3];
- volatile u8 irfdr; /* PSC + 0x54 */
- volatile u8 reserved17[3];
- volatile u16 rfnum; /* PSC + 0x58 */
- volatile u16 reserved18;
- volatile u16 tfnum; /* PSC + 0x5c */
- volatile u16 reserved19;
- volatile u32 rfdata; /* PSC + 0x60 */
- volatile u16 rfstat; /* PSC + 0x64 */
- volatile u16 reserved20;
- volatile u8 rfcntl; /* PSC + 0x68 */
- volatile u8 reserved21[5];
- volatile u16 rfalarm; /* PSC + 0x6e */
- volatile u16 reserved22;
- volatile u16 rfrptr; /* PSC + 0x72 */
- volatile u16 reserved23;
- volatile u16 rfwptr; /* PSC + 0x76 */
- volatile u16 reserved24;
- volatile u16 rflrfptr; /* PSC + 0x7a */
- volatile u16 reserved25;
- volatile u16 rflwfptr; /* PSC + 0x7e */
- volatile u32 tfdata; /* PSC + 0x80 */
- volatile u16 tfstat; /* PSC + 0x84 */
- volatile u16 reserved26;
- volatile u8 tfcntl; /* PSC + 0x88 */
- volatile u8 reserved27[5];
- volatile u16 tfalarm; /* PSC + 0x8e */
- volatile u16 reserved28;
- volatile u16 tfrptr; /* PSC + 0x92 */
- volatile u16 reserved29;
- volatile u16 tfwptr; /* PSC + 0x96 */
- volatile u16 reserved30;
- volatile u16 tflrfptr; /* PSC + 0x9a */
- volatile u16 reserved31;
- volatile u16 tflwfptr; /* PSC + 0x9e */
-};
-
-
-#endif /* __MPC52xx_PSC_H__ */
#include <platforms/sbs8260.h>
#endif
-#ifdef CONFIG_RPX8260
-#include <platforms/rpx8260.h>
+#ifdef CONFIG_RPX6
+#include <platforms/rpxsuper.h>
#endif
#ifdef CONFIG_WILLOW
#ifdef CONFIG_MPC8540_ADS
#include <platforms/85xx/mpc8540_ads.h>
#endif
-#ifdef CONFIG_MPC8555_CDS
-#include <platforms/85xx/mpc8555_cds.h>
-#endif
-#ifdef CONFIG_MPC8560_ADS
-#include <platforms/85xx/mpc8560_ads.h>
-#endif
#ifdef CONFIG_SBC8560
#include <platforms/85xx/sbc8560.h>
#endif
#define _IO_BASE isa_io_base
#define _ISA_MEM_BASE isa_mem_base
-#ifdef CONFIG_PCI
#define PCI_DRAM_OFFSET pci_dram_offset
-#else
-#define PCI_DRAM_OFFSET 0
-#endif
/*
* The "residual" board information structure the boot loader passes
#define OCP_FUNC_16550 0x0031
#define OCP_FUNC_IIC 0x0032
#define OCP_FUNC_USB 0x0033
-#define OCP_FUNC_PSC_UART 0x0034
/* Memory devices 0x0090 - 0x009F */
#define OCP_FUNC_MAL 0x0090
extern int openpic_get_irq(struct pt_regs *regs);
extern void openpic_reset_processor_phys(u_int cpumask);
extern void openpic_setup_ISU(int isu_num, unsigned long addr);
-extern void openpic_cause_IPI(u_int ipi, cpumask_t cpumask);
+extern void openpic_cause_IPI(u_int ipi, u_int cpumask);
extern void smp_openpic_message_pass(int target, int msg, unsigned long data,
int wait);
extern void openpic_set_k2_cascade(int irq);
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-#define devmem_is_allowed(x) 1
-
#endif /* __KERNEL__ */
#endif /* _PPC_PAGE_H */
--- /dev/null
+/*
+ * Author: Pete Popov <ppopov@mvista.com>
+ *
+ * 2000 (c) MontaVista, Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ *
+ * Data structures specific to the IBM PowerPC 405 on-chip DMA controller
+ * and API.
+ */
+
+#ifdef __KERNEL__
+#ifndef __ASMPPC_405_DMA_H
+#define __ASMPPC_405_DMA_H
+
+#include <linux/types.h>
+
+/* #define DEBUG_405DMA */
+
+#define TRUE 1
+#define FALSE 0
+
+#define SGL_LIST_SIZE 4096
+/* #define PCI_ALLOC_IS_NONCONSISTENT */
+
+#define MAX_405GP_DMA_CHANNELS 4
+
+/* The maximum address that we can perform a DMA transfer to on this platform */
+/* Doesn't really apply... */
+#define MAX_DMA_ADDRESS 0xFFFFFFFF
+
+extern unsigned long ISA_DMA_THRESHOLD;
+
+#define dma_outb outb
+#define dma_inb inb
+
+
+/*
+ * Function return status codes
+ * These values are used to indicate whether or not the function
+ * call was successful, or a bad/invalid parameter was passed.
+ */
+#define DMA_STATUS_GOOD 0
+#define DMA_STATUS_BAD_CHANNEL 1
+#define DMA_STATUS_BAD_HANDLE 2
+#define DMA_STATUS_BAD_MODE 3
+#define DMA_STATUS_NULL_POINTER 4
+#define DMA_STATUS_OUT_OF_MEMORY 5
+#define DMA_STATUS_SGL_LIST_EMPTY 6
+#define DMA_STATUS_GENERAL_ERROR 7
+
+
+/*
+ * These indicate status as returned from the DMA Status Register.
+ */
+#define DMA_STATUS_NO_ERROR 0
+#define DMA_STATUS_CS 1 /* Count Status */
+#define DMA_STATUS_TS 2 /* Transfer Status */
+#define DMA_STATUS_DMA_ERROR 3 /* DMA Error Occurred */
+#define DMA_STATUS_DMA_BUSY 4 /* The channel is busy */
+
+
+/*
+ * Transfer Modes
+ * These modes are defined in a way that makes it possible to
+ * simply "or" in the value in the control register.
+ */
+#define DMA_MODE_READ DMA_TD /* Peripheral to Memory */
+#define DMA_MODE_WRITE 0 /* Memory to Peripheral */
+#define DMA_MODE_MM (SET_DMA_TM(TM_S_MM)) /* memory to memory */
+
+ /* Device-paced memory to memory, */
+ /* device is at source address */
+#define DMA_MODE_MM_DEVATSRC (DMA_TD | SET_DMA_TM(TM_D_MM))
+
+ /* Device-paced memory to memory, */
+ /* device is at destination address */
+#define DMA_MODE_MM_DEVATDST (SET_DMA_TM(TM_D_MM))
+
+
+/*
+ * DMA Polarity Configuration Register
+ */
+#define DMAReq0_ActiveLow (1<<31)
+#define DMAAck0_ActiveLow (1<<30)
+#define EOT0_ActiveLow (1<<29) /* End of Transfer */
+
+#define DMAReq1_ActiveLow (1<<28)
+#define DMAAck1_ActiveLow (1<<27)
+#define EOT1_ActiveLow (1<<26)
+
+#define DMAReq2_ActiveLow (1<<25)
+#define DMAAck2_ActiveLow (1<<24)
+#define EOT2_ActiveLow (1<<23)
+
+#define DMAReq3_ActiveLow (1<<22)
+#define DMAAck3_ActiveLow (1<<21)
+#define EOT3_ActiveLow (1<<20)
+
+/*
+ * DMA Sleep Mode Register
+ */
+#define SLEEP_MODE_ENABLE (1<<21)
+
+
+/*
+ * DMA Status Register
+ */
+#define DMA_CS0 (1<<31) /* Terminal Count has been reached */
+#define DMA_CS1 (1<<30)
+#define DMA_CS2 (1<<29)
+#define DMA_CS3 (1<<28)
+
+#define DMA_TS0 (1<<27) /* End of Transfer has been requested */
+#define DMA_TS1 (1<<26)
+#define DMA_TS2 (1<<25)
+#define DMA_TS3 (1<<24)
+
+#define DMA_CH0_ERR (1<<23) /* DMA Chanel 0 Error */
+#define DMA_CH1_ERR (1<<22)
+#define DMA_CH2_ERR (1<<21)
+#define DMA_CH3_ERR (1<<20)
+
+#define DMA_IN_DMA_REQ0 (1<<19) /* Internal DMA Request is pending */
+#define DMA_IN_DMA_REQ1 (1<<18)
+#define DMA_IN_DMA_REQ2 (1<<17)
+#define DMA_IN_DMA_REQ3 (1<<16)
+
+#define DMA_EXT_DMA_REQ0 (1<<15) /* External DMA Request is pending */
+#define DMA_EXT_DMA_REQ1 (1<<14)
+#define DMA_EXT_DMA_REQ2 (1<<13)
+#define DMA_EXT_DMA_REQ3 (1<<12)
+
+#define DMA_CH0_BUSY (1<<11) /* DMA Channel 0 Busy */
+#define DMA_CH1_BUSY (1<<10)
+#define DMA_CH2_BUSY (1<<9)
+#define DMA_CH3_BUSY (1<<8)
+
+#define DMA_SG0 (1<<7) /* DMA Channel 0 Scatter/Gather in progress */
+#define DMA_SG1 (1<<6)
+#define DMA_SG2 (1<<5)
+#define DMA_SG3 (1<<4)
+
+
+
+/*
+ * DMA Channel Control Registers
+ */
+#define DMA_CH_ENABLE (1<<31) /* DMA Channel Enable */
+#define SET_DMA_CH_ENABLE(x) (((x)&0x1)<<31)
+#define GET_DMA_CH_ENABLE(x) (((x)&DMA_CH_ENABLE)>>31)
+
+#define DMA_CIE_ENABLE (1<<30) /* DMA Channel Interrupt Enable */
+#define SET_DMA_CIE_ENABLE(x) (((x)&0x1)<<30)
+#define GET_DMA_CIE_ENABLE(x) (((x)&DMA_CIE_ENABLE)>>30)
+
+#define DMA_TD (1<<29)
+#define SET_DMA_TD(x) (((x)&0x1)<<29)
+#define GET_DMA_TD(x) (((x)&DMA_TD)>>29)
+
+#define DMA_PL (1<<28) /* Peripheral Location */
+#define SET_DMA_PL(x) (((x)&0x1)<<28)
+#define GET_DMA_PL(x) (((x)&DMA_PL)>>28)
+
+#define EXTERNAL_PERIPHERAL 0
+#define INTERNAL_PERIPHERAL 1
+
+
+#define SET_DMA_PW(x) (((x)&0x3)<<26) /* Peripheral Width */
+#define DMA_PW_MASK SET_DMA_PW(3)
+#define PW_8 0
+#define PW_16 1
+#define PW_32 2
+#define PW_64 3
+#define GET_DMA_PW(x) (((x)&DMA_PW_MASK)>>26)
+
+#define DMA_DAI (1<<25) /* Destination Address Increment */
+#define SET_DMA_DAI(x) (((x)&0x1)<<25)
+
+#define DMA_SAI (1<<24) /* Source Address Increment */
+#define SET_DMA_SAI(x) (((x)&0x1)<<24)
+
+#define DMA_BEN (1<<23) /* Buffer Enable */
+#define SET_DMA_BEN(x) (((x)&0x1)<<23)
+
+#define SET_DMA_TM(x) (((x)&0x3)<<21) /* Transfer Mode */
+#define DMA_TM_MASK SET_DMA_TM(3)
+#define TM_PERIPHERAL 0 /* Peripheral */
+#define TM_RESERVED 1 /* Reserved */
+#define TM_S_MM 2 /* Memory to Memory */
+#define TM_D_MM 3 /* Device Paced Memory to Memory */
+#define GET_DMA_TM(x) (((x)&DMA_TM_MASK)>>21)
+
+#define SET_DMA_PSC(x) (((x)&0x3)<<19) /* Peripheral Setup Cycles */
+#define DMA_PSC_MASK SET_DMA_PSC(3)
+#define GET_DMA_PSC(x) (((x)&DMA_PSC_MASK)>>19)
+
+#define SET_DMA_PWC(x) (((x)&0x3F)<<13) /* Peripheral Wait Cycles */
+#define DMA_PWC_MASK SET_DMA_PWC(0x3F)
+#define GET_DMA_PWC(x) (((x)&DMA_PWC_MASK)>>13)
+
+#define SET_DMA_PHC(x) (((x)&0x7)<<10) /* Peripheral Hold Cycles */
+#define DMA_PHC_MASK SET_DMA_PHC(0x7)
+#define GET_DMA_PHC(x) (((x)&DMA_PHC_MASK)>>10)
+
+#define DMA_ETD_OUTPUT (1<<9) /* EOT pin is a TC output */
+#define SET_DMA_ETD(x) (((x)&0x1)<<9)
+
+#define DMA_TCE_ENABLE (1<<8)
+#define SET_DMA_TCE(x) (((x)&0x1)<<8)
+
+#define SET_DMA_PRIORITY(x) (((x)&0x3)<<6) /* DMA Channel Priority */
+#define DMA_PRIORITY_MASK SET_DMA_PRIORITY(3)
+#define PRIORITY_LOW 0
+#define PRIORITY_MID_LOW 1
+#define PRIORITY_MID_HIGH 2
+#define PRIORITY_HIGH 3
+#define GET_DMA_PRIORITY(x) (((x)&DMA_PRIORITY_MASK)>>6)
+
+#define SET_DMA_PREFETCH(x) (((x)&0x3)<<4) /* Memory Read Prefetch */
+#define DMA_PREFETCH_MASK SET_DMA_PREFETCH(3)
+#define PREFETCH_1 0 /* Prefetch 1 Double Word */
+#define PREFETCH_2 1
+#define PREFETCH_4 2
+#define GET_DMA_PREFETCH(x) (((x)&DMA_PREFETCH_MASK)>>4)
+
+#define DMA_PCE (1<<3) /* Parity Check Enable */
+#define SET_DMA_PCE(x) (((x)&0x1)<<3)
+#define GET_DMA_PCE(x) (((x)&DMA_PCE)>>3)
+
+#define DMA_DEC (1<<2) /* Address Decrement */
+#define SET_DMA_DEC(x) (((x)&0x1)<<2)
+#define GET_DMA_DEC(x) (((x)&DMA_DEC)>>2)
+
+/*
+ * DMA SG Command Register
+ */
+#define SSG0_ENABLE (1<<31) /* Start Scatter Gather */
+#define SSG1_ENABLE (1<<30)
+#define SSG2_ENABLE (1<<29)
+#define SSG3_ENABLE (1<<28)
+#define SSG0_MASK_ENABLE (1<<15) /* Enable writing to SSG0 bit */
+#define SSG1_MASK_ENABLE (1<<14)
+#define SSG2_MASK_ENABLE (1<<13)
+#define SSG3_MASK_ENABLE (1<<12)
+
+
+/*
+ * DMA Scatter/Gather Descriptor Bit fields
+ */
+#define SG_LINK (1<<31) /* Link */
+#define SG_TCI_ENABLE (1<<29) /* Enable Terminal Count Interrupt */
+#define SG_ETI_ENABLE (1<<28) /* Enable End of Transfer Interrupt */
+#define SG_ERI_ENABLE (1<<27) /* Enable Error Interrupt */
+#define SG_COUNT_MASK 0xFFFF /* Count Field */
+
+
+
+
+typedef uint32_t sgl_handle_t;
+
+typedef struct {
+
+ /*
+ * Valid polarity settings:
+ * DMAReq0_ActiveLow
+ * DMAAck0_ActiveLow
+ * EOT0_ActiveLow
+ *
+ * DMAReq1_ActiveLow
+ * DMAAck1_ActiveLow
+ * EOT1_ActiveLow
+ *
+ * DMAReq2_ActiveLow
+ * DMAAck2_ActiveLow
+ * EOT2_ActiveLow
+ *
+ * DMAReq3_ActiveLow
+ * DMAAck3_ActiveLow
+ * EOT3_ActiveLow
+ */
+ unsigned int polarity;
+
+ char buffer_enable; /* Boolean: buffer enable */
+ char tce_enable; /* Boolean: terminal count enable */
+ char etd_output; /* Boolean: eot pin is a tc output */
+ char pce; /* Boolean: parity check enable */
+
+ /*
+ * Peripheral location:
+ * INTERNAL_PERIPHERAL (UART0 on the 405GP)
+ * EXTERNAL_PERIPHERAL
+ */
+ char pl; /* internal/external peripheral */
+
+ /*
+ * Valid pwidth settings:
+ * PW_8
+ * PW_16
+ * PW_32
+ * PW_64
+ */
+ unsigned int pwidth;
+
+ char dai; /* Boolean: dst address increment */
+ char sai; /* Boolean: src address increment */
+
+ /*
+ * Valid psc settings: 0-3
+ */
+ unsigned int psc; /* Peripheral Setup Cycles */
+
+ /*
+ * Valid pwc settings:
+ * 0-63
+ */
+ unsigned int pwc; /* Peripheral Wait Cycles */
+
+ /*
+ * Valid phc settings:
+ * 0-7
+ */
+ unsigned int phc; /* Peripheral Hold Cycles */
+
+ /*
+ * Valid cp (channel priority) settings:
+ * PRIORITY_LOW
+ * PRIORITY_MID_LOW
+ * PRIORITY_MID_HIGH
+ * PRIORITY_HIGH
+ */
+ unsigned int cp; /* channel priority */
+
+ /*
+ * Valid pf (memory read prefetch) settings:
+ *
+ * PREFETCH_1
+ * PREFETCH_2
+ * PREFETCH_4
+ */
+ unsigned int pf; /* memory read prefetch */
+
+ /*
+ * Boolean: channel interrupt enable
+ * NOTE: for sgl transfers, only the last descriptor will be setup to
+ * interrupt.
+ */
+ char int_enable;
+
+ char shift; /* easy access to byte_count shift, based on */
+ /* the width of the channel */
+
+ uint32_t control; /* channel control word */
+
+
+ /* These variabled are used ONLY in single dma transfers */
+ unsigned int mode; /* transfer mode */
+ dma_addr_t addr;
+
+} ppc_dma_ch_t;
+
+
+typedef struct {
+ uint32_t control;
+ uint32_t src_addr;
+ uint32_t dst_addr;
+ uint32_t control_count;
+ uint32_t next;
+} ppc_sgl_t;
+
+
+
+typedef struct {
+ unsigned int dmanr;
+ uint32_t control; /* channel ctrl word; loaded from each descrptr */
+ uint32_t sgl_control; /* LK, TCI, ETI, and ERI bits in sgl descriptor */
+ dma_addr_t dma_addr; /* dma (physical) address of this list */
+ ppc_sgl_t *phead;
+ ppc_sgl_t *ptail;
+
+} sgl_list_info_t;
+
+
+typedef struct {
+ unsigned int *src_addr;
+ unsigned int *dst_addr;
+ dma_addr_t dma_src_addr;
+ dma_addr_t dma_dst_addr;
+} pci_alloc_desc_t;
+
+
+extern ppc_dma_ch_t dma_channels[];
+
+/*
+ *
+ * DMA API inline functions
+ * These functions are implemented here as inline functions for
+ * performance reasons.
+ *
+ */
+
+static __inline__ int get_405gp_dma_status(void)
+{
+ return (mfdcr(DCRN_DMASR));
+}
+
+
+static __inline__ int enable_405gp_dma(unsigned int dmanr)
+{
+ unsigned int control;
+ ppc_dma_ch_t *p_dma_ch = &dma_channels[dmanr];
+
+#ifdef DEBUG_405DMA
+ if (dmanr >= MAX_405GP_DMA_CHANNELS) {
+ printk("enable_dma: bad channel: %d\n", dmanr);
+ return DMA_STATUS_BAD_CHANNEL;
+ }
+#endif
+
+
+ switch (dmanr) {
+ case 0:
+ if (p_dma_ch->mode == DMA_MODE_READ) {
+ /* peripheral to memory */
+ mtdcr(DCRN_DMASA0, NULL);
+ mtdcr(DCRN_DMADA0, p_dma_ch->addr);
+ }
+ else if (p_dma_ch->mode == DMA_MODE_WRITE) {
+ /* memory to peripheral */
+ mtdcr(DCRN_DMASA0, p_dma_ch->addr);
+ mtdcr(DCRN_DMADA0, NULL);
+ }
+ /* for other xfer modes, the addresses are already set */
+ control = mfdcr(DCRN_DMACR0);
+ control &= ~(DMA_TM_MASK | DMA_TD); /* clear all mode bits */
+ control |= (p_dma_ch->mode | DMA_CH_ENABLE);
+ mtdcr(DCRN_DMACR0, control);
+ break;
+ case 1:
+ if (p_dma_ch->mode == DMA_MODE_READ) {
+ mtdcr(DCRN_DMASA1, NULL);
+ mtdcr(DCRN_DMADA1, p_dma_ch->addr);
+ } else if (p_dma_ch->mode == DMA_MODE_WRITE) {
+ mtdcr(DCRN_DMASA1, p_dma_ch->addr);
+ mtdcr(DCRN_DMADA1, NULL);
+ }
+ control = mfdcr(DCRN_DMACR1);
+ control &= ~(DMA_TM_MASK | DMA_TD);
+ control |= (p_dma_ch->mode | DMA_CH_ENABLE);
+ mtdcr(DCRN_DMACR1, control);
+ break;
+ case 2:
+ if (p_dma_ch->mode == DMA_MODE_READ) {
+ mtdcr(DCRN_DMASA2, NULL);
+ mtdcr(DCRN_DMADA2, p_dma_ch->addr);
+ } else if (p_dma_ch->mode == DMA_MODE_WRITE) {
+ mtdcr(DCRN_DMASA2, p_dma_ch->addr);
+ mtdcr(DCRN_DMADA2, NULL);
+ }
+ control = mfdcr(DCRN_DMACR2);
+ control &= ~(DMA_TM_MASK | DMA_TD);
+ control |= (p_dma_ch->mode | DMA_CH_ENABLE);
+ mtdcr(DCRN_DMACR2, control);
+ break;
+ case 3:
+ if (p_dma_ch->mode == DMA_MODE_READ) {
+ mtdcr(DCRN_DMASA3, NULL);
+ mtdcr(DCRN_DMADA3, p_dma_ch->addr);
+ } else if (p_dma_ch->mode == DMA_MODE_WRITE) {
+ mtdcr(DCRN_DMASA3, p_dma_ch->addr);
+ mtdcr(DCRN_DMADA3, NULL);
+ }
+ control = mfdcr(DCRN_DMACR3);
+ control &= ~(DMA_TM_MASK | DMA_TD);
+ control |= (p_dma_ch->mode | DMA_CH_ENABLE);
+ mtdcr(DCRN_DMACR3, control);
+ break;
+ default:
+ return DMA_STATUS_BAD_CHANNEL;
+ }
+ return DMA_STATUS_GOOD;
+}
+
+
+
+static __inline__ void disable_405gp_dma(unsigned int dmanr)
+{
+ unsigned int control;
+
+ switch (dmanr) {
+ case 0:
+ control = mfdcr(DCRN_DMACR0);
+ control &= ~DMA_CH_ENABLE;
+ mtdcr(DCRN_DMACR0, control);
+ break;
+ case 1:
+ control = mfdcr(DCRN_DMACR1);
+ control &= ~DMA_CH_ENABLE;
+ mtdcr(DCRN_DMACR1, control);
+ break;
+ case 2:
+ control = mfdcr(DCRN_DMACR2);
+ control &= ~DMA_CH_ENABLE;
+ mtdcr(DCRN_DMACR2, control);
+ break;
+ case 3:
+ control = mfdcr(DCRN_DMACR3);
+ control &= ~DMA_CH_ENABLE;
+ mtdcr(DCRN_DMACR3, control);
+ break;
+ default:
+#ifdef DEBUG_405DMA
+ printk("disable_dma: bad channel: %d\n", dmanr);
+#endif
+ }
+}
+
+
+
+/*
+ * Sets the dma mode for single DMA transfers only.
+ * For scatter/gather transfers, the mode is passed to the
+ * alloc_dma_handle() function as one of the parameters.
+ *
+ * The mode is simply saved and used later. This allows
+ * the driver to call set_dma_mode() and set_dma_addr() in
+ * any order.
+ *
+ * Valid mode values are:
+ *
+ * DMA_MODE_READ peripheral to memory
+ * DMA_MODE_WRITE memory to peripheral
+ * DMA_MODE_MM memory to memory
+ * DMA_MODE_MM_DEVATSRC device-paced memory to memory, device at src
+ * DMA_MODE_MM_DEVATDST device-paced memory to memory, device at dst
+ */
+static __inline__ int set_405gp_dma_mode(unsigned int dmanr, unsigned int mode)
+{
+ ppc_dma_ch_t *p_dma_ch = &dma_channels[dmanr];
+
+#ifdef DEBUG_405DMA
+ switch (mode) {
+ case DMA_MODE_READ:
+ case DMA_MODE_WRITE:
+ case DMA_MODE_MM:
+ case DMA_MODE_MM_DEVATSRC:
+ case DMA_MODE_MM_DEVATDST:
+ break;
+ default:
+ printk("set_dma_mode: bad mode 0x%x\n", mode);
+ return DMA_STATUS_BAD_MODE;
+ }
+ if (dmanr >= MAX_405GP_DMA_CHANNELS) {
+ printk("set_dma_mode: bad channel 0x%x\n", dmanr);
+ return DMA_STATUS_BAD_CHANNEL;
+ }
+#endif
+
+ p_dma_ch->mode = mode;
+ return DMA_STATUS_GOOD;
+}
+
+
+
+/*
+ * Sets the DMA Count register. Note that 'count' is in bytes.
+ * However, the DMA Count register counts the number of "transfers",
+ * where each transfer is equal to the bus width. Thus, count
+ * MUST be a multiple of the bus width.
+ */
+static __inline__ void
+set_405gp_dma_count(unsigned int dmanr, unsigned int count)
+{
+ ppc_dma_ch_t *p_dma_ch = &dma_channels[dmanr];
+
+#ifdef DEBUG_405DMA
+ {
+ int error = 0;
+ switch(p_dma_ch->pwidth) {
+ case PW_8:
+ break;
+ case PW_16:
+ if (count & 0x1)
+ error = 1;
+ break;
+ case PW_32:
+ if (count & 0x3)
+ error = 1;
+ break;
+ case PW_64:
+ if (count & 0x7)
+ error = 1;
+ break;
+ default:
+ printk("set_dma_count: invalid bus width: 0x%x\n",
+ p_dma_ch->pwidth);
+ return;
+ }
+ if (error)
+ printk("Warning: set_dma_count count 0x%x bus width %d\n",
+ count, p_dma_ch->pwidth);
+ }
+#endif
+
+ count = count >> p_dma_ch->shift;
+ switch (dmanr) {
+ case 0:
+ mtdcr(DCRN_DMACT0, count);
+ break;
+ case 1:
+ mtdcr(DCRN_DMACT1, count);
+ break;
+ case 2:
+ mtdcr(DCRN_DMACT2, count);
+ break;
+ case 3:
+ mtdcr(DCRN_DMACT3, count);
+ break;
+ default:
+#ifdef DEBUG_405DMA
+ printk("set_dma_count: bad channel: %d\n", dmanr);
+#endif
+ }
+}
+
+
+
+/*
+ * Returns the number of bytes left to be transfered.
+ * After a DMA transfer, this should return zero.
+ * Reading this while a DMA transfer is still in progress will return
+ * unpredictable results.
+ */
+static __inline__ int get_405gp_dma_residue(unsigned int dmanr)
+{
+ unsigned int count;
+ ppc_dma_ch_t *p_dma_ch = &dma_channels[dmanr];
+
+ switch (dmanr) {
+ case 0:
+ count = mfdcr(DCRN_DMACT0);
+ break;
+ case 1:
+ count = mfdcr(DCRN_DMACT1);
+ break;
+ case 2:
+ count = mfdcr(DCRN_DMACT2);
+ break;
+ case 3:
+ count = mfdcr(DCRN_DMACT3);
+ break;
+ default:
+#ifdef DEBUG_405DMA
+ printk("get_dma_residue: bad channel: %d\n", dmanr);
+#endif
+ return 0;
+ }
+
+ return (count << p_dma_ch->shift);
+}
+
+
+
+/*
+ * Sets the DMA address for a memory to peripheral or peripheral
+ * to memory transfer. The address is just saved in the channel
+ * structure for now and used later in enable_dma().
+ */
+static __inline__ void set_405gp_dma_addr(unsigned int dmanr, dma_addr_t addr)
+{
+ ppc_dma_ch_t *p_dma_ch = &dma_channels[dmanr];
+#ifdef DEBUG_405DMA
+ {
+ int error = 0;
+ switch(p_dma_ch->pwidth) {
+ case PW_8:
+ break;
+ case PW_16:
+ if ((unsigned)addr & 0x1)
+ error = 1;
+ break;
+ case PW_32:
+ if ((unsigned)addr & 0x3)
+ error = 1;
+ break;
+ case PW_64:
+ if ((unsigned)addr & 0x7)
+ error = 1;
+ break;
+ default:
+ printk("set_dma_addr: invalid bus width: 0x%x\n",
+ p_dma_ch->pwidth);
+ return;
+ }
+ if (error)
+ printk("Warning: set_dma_addr addr 0x%x bus width %d\n",
+ addr, p_dma_ch->pwidth);
+ }
+#endif
+
+ /* save dma address and program it later after we know the xfer mode */
+ p_dma_ch->addr = addr;
+}
+
+
+
+
+/*
+ * Sets both DMA addresses for a memory to memory transfer.
+ * For memory to peripheral or peripheral to memory transfers
+ * the function set_dma_addr() should be used instead.
+ */
+static __inline__ void
+set_405gp_dma_addr2(unsigned int dmanr, dma_addr_t src_dma_addr,
+ dma_addr_t dst_dma_addr)
+{
+#ifdef DEBUG_405DMA
+ {
+ ppc_dma_ch_t *p_dma_ch = &dma_channels[dmanr];
+ int error = 0;
+ switch(p_dma_ch->pwidth) {
+ case PW_8:
+ break;
+ case PW_16:
+ if (((unsigned)src_dma_addr & 0x1) ||
+ ((unsigned)dst_dma_addr & 0x1)
+ )
+ error = 1;
+ break;
+ case PW_32:
+ if (((unsigned)src_dma_addr & 0x3) ||
+ ((unsigned)dst_dma_addr & 0x3)
+ )
+ error = 1;
+ break;
+ case PW_64:
+ if (((unsigned)src_dma_addr & 0x7) ||
+ ((unsigned)dst_dma_addr & 0x7)
+ )
+ error = 1;
+ break;
+ default:
+ printk("set_dma_addr2: invalid bus width: 0x%x\n",
+ p_dma_ch->pwidth);
+ return;
+ }
+ if (error)
+ printk("Warning: set_dma_addr2 src 0x%x dst 0x%x bus width %d\n",
+ src_dma_addr, dst_dma_addr, p_dma_ch->pwidth);
+ }
+#endif
+
+ switch (dmanr) {
+ case 0:
+ mtdcr(DCRN_DMASA0, src_dma_addr);
+ mtdcr(DCRN_DMADA0, dst_dma_addr);
+ break;
+ case 1:
+ mtdcr(DCRN_DMASA1, src_dma_addr);
+ mtdcr(DCRN_DMADA1, dst_dma_addr);
+ break;
+ case 2:
+ mtdcr(DCRN_DMASA2, src_dma_addr);
+ mtdcr(DCRN_DMADA2, dst_dma_addr);
+ break;
+ case 3:
+ mtdcr(DCRN_DMASA3, src_dma_addr);
+ mtdcr(DCRN_DMADA3, dst_dma_addr);
+ break;
+ default:
+#ifdef DEBUG_405DMA
+ printk("set_dma_addr2: bad channel: %d\n", dmanr);
+#endif
+ }
+}
+
+
+
+/*
+ * Enables the channel interrupt.
+ *
+ * If performing a scatter/gatter transfer, this function
+ * MUST be called before calling alloc_dma_handle() and building
+ * the sgl list. Otherwise, interrupts will not be enabled, if
+ * they were previously disabled.
+ */
+static __inline__ int
+enable_405gp_dma_interrupt(unsigned int dmanr)
+{
+ unsigned int control;
+ ppc_dma_ch_t *p_dma_ch = &dma_channels[dmanr];
+
+ p_dma_ch->int_enable = TRUE;
+ switch (dmanr) {
+ case 0:
+ control = mfdcr(DCRN_DMACR0);
+ control|= DMA_CIE_ENABLE; /* Channel Interrupt Enable */
+ mtdcr(DCRN_DMACR0, control);
+ break;
+ case 1:
+ control = mfdcr(DCRN_DMACR1);
+ control|= DMA_CIE_ENABLE;
+ mtdcr(DCRN_DMACR1, control);
+ break;
+ case 2:
+ control = mfdcr(DCRN_DMACR2);
+ control|= DMA_CIE_ENABLE;
+ mtdcr(DCRN_DMACR2, control);
+ break;
+ case 3:
+ control = mfdcr(DCRN_DMACR3);
+ control|= DMA_CIE_ENABLE;
+ mtdcr(DCRN_DMACR3, control);
+ break;
+ default:
+#ifdef DEBUG_405DMA
+ printk("enable_dma_interrupt: bad channel: %d\n", dmanr);
+#endif
+ return DMA_STATUS_BAD_CHANNEL;
+ }
+ return DMA_STATUS_GOOD;
+}
+
+
+
+/*
+ * Disables the channel interrupt.
+ *
+ * If performing a scatter/gatter transfer, this function
+ * MUST be called before calling alloc_dma_handle() and building
+ * the sgl list. Otherwise, interrupts will not be disabled, if
+ * they were previously enabled.
+ */
+static __inline__ int
+disable_405gp_dma_interrupt(unsigned int dmanr)
+{
+ unsigned int control;
+ ppc_dma_ch_t *p_dma_ch = &dma_channels[dmanr];
+
+ p_dma_ch->int_enable = TRUE;
+ switch (dmanr) {
+ case 0:
+ control = mfdcr(DCRN_DMACR0);
+ control &= ~DMA_CIE_ENABLE; /* Channel Interrupt Enable */
+ mtdcr(DCRN_DMACR0, control);
+ break;
+ case 1:
+ control = mfdcr(DCRN_DMACR1);
+ control &= ~DMA_CIE_ENABLE;
+ mtdcr(DCRN_DMACR1, control);
+ break;
+ case 2:
+ control = mfdcr(DCRN_DMACR2);
+ control &= ~DMA_CIE_ENABLE;
+ mtdcr(DCRN_DMACR2, control);
+ break;
+ case 3:
+ control = mfdcr(DCRN_DMACR3);
+ control &= ~DMA_CIE_ENABLE;
+ mtdcr(DCRN_DMACR3, control);
+ break;
+ default:
+#ifdef DEBUG_405DMA
+ printk("enable_dma_interrupt: bad channel: %d\n", dmanr);
+#endif
+ return DMA_STATUS_BAD_CHANNEL;
+ }
+ return DMA_STATUS_GOOD;
+}
+
+
+#ifdef DCRNCAP_DMA_SG
+
+/*
+ * Add a new sgl descriptor to the end of a scatter/gather list
+ * which was created by alloc_dma_handle().
+ *
+ * For a memory to memory transfer, both dma addresses must be
+ * valid. For a peripheral to memory transfer, one of the addresses
+ * must be set to NULL, depending on the direction of the transfer:
+ * memory to peripheral: set dst_addr to NULL,
+ * peripheral to memory: set src_addr to NULL.
+ */
+static __inline__ int
+add_405gp_dma_sgl(sgl_handle_t handle, dma_addr_t src_addr, dma_addr_t dst_addr,
+ unsigned int count)
+{
+ sgl_list_info_t *psgl = (sgl_list_info_t *)handle;
+ ppc_dma_ch_t *p_dma_ch;
+
+ if (!handle) {
+#ifdef DEBUG_405DMA
+ printk("add_dma_sgl: null handle\n");
+#endif
+ return DMA_STATUS_BAD_HANDLE;
+ }
+
+#ifdef DEBUG_405DMA
+ if (psgl->dmanr >= MAX_405GP_DMA_CHANNELS) {
+ printk("add_dma_sgl error: psgl->dmanr == %d\n", psgl->dmanr);
+ return DMA_STATUS_BAD_CHANNEL;
+ }
+#endif
+
+ p_dma_ch = &dma_channels[psgl->dmanr];
+
+#ifdef DEBUG_405DMA
+ {
+ int error = 0;
+ unsigned int aligned = (unsigned)src_addr | (unsigned)dst_addr | count;
+ switch(p_dma_ch->pwidth) {
+ case PW_8:
+ break;
+ case PW_16:
+ if (aligned & 0x1)
+ error = 1;
+ break;
+ case PW_32:
+ if (aligned & 0x3)
+ error = 1;
+ break;
+ case PW_64:
+ if (aligned & 0x7)
+ error = 1;
+ break;
+ default:
+ printk("add_dma_sgl: invalid bus width: 0x%x\n",
+ p_dma_ch->pwidth);
+ return DMA_STATUS_GENERAL_ERROR;
+ }
+ if (error)
+ printk("Alignment warning: add_dma_sgl src 0x%x dst 0x%x count 0x%x bus width var %d\n",
+ src_addr, dst_addr, count, p_dma_ch->pwidth);
+
+ }
+#endif
+
+ if ((unsigned)(psgl->ptail + 1) >= ((unsigned)psgl + SGL_LIST_SIZE)) {
+#ifdef DEBUG_405DMA
+ printk("sgl handle out of memory \n");
+#endif
+ return DMA_STATUS_OUT_OF_MEMORY;
+ }
+
+
+ if (!psgl->ptail) {
+ psgl->phead = (ppc_sgl_t *)
+ ((unsigned)psgl + sizeof(sgl_list_info_t));
+ psgl->ptail = psgl->phead;
+ } else {
+ psgl->ptail->next = virt_to_bus(psgl->ptail + 1);
+ psgl->ptail++;
+ }
+
+ psgl->ptail->control = psgl->control;
+ psgl->ptail->src_addr = src_addr;
+ psgl->ptail->dst_addr = dst_addr;
+ psgl->ptail->control_count = (count >> p_dma_ch->shift) |
+ psgl->sgl_control;
+ psgl->ptail->next = (uint32_t)NULL;
+
+ return DMA_STATUS_GOOD;
+}
+
+
+
+/*
+ * Enable (start) the DMA described by the sgl handle.
+ */
+static __inline__ void enable_405gp_dma_sgl(sgl_handle_t handle)
+{
+ sgl_list_info_t *psgl = (sgl_list_info_t *)handle;
+ ppc_dma_ch_t *p_dma_ch;
+ uint32_t sg_command;
+
+#ifdef DEBUG_405DMA
+ if (!handle) {
+ printk("enable_dma_sgl: null handle\n");
+ return;
+ } else if (psgl->dmanr > (MAX_405GP_DMA_CHANNELS - 1)) {
+ printk("enable_dma_sgl: bad channel in handle %d\n",
+ psgl->dmanr);
+ return;
+ } else if (!psgl->phead) {
+ printk("enable_dma_sgl: sg list empty\n");
+ return;
+ }
+#endif
+
+ p_dma_ch = &dma_channels[psgl->dmanr];
+ psgl->ptail->control_count &= ~SG_LINK; /* make this the last dscrptr */
+ sg_command = mfdcr(DCRN_ASGC);
+
+ switch(psgl->dmanr) {
+ case 0:
+ mtdcr(DCRN_ASG0, virt_to_bus(psgl->phead));
+ sg_command |= SSG0_ENABLE;
+ break;
+ case 1:
+ mtdcr(DCRN_ASG1, virt_to_bus(psgl->phead));
+ sg_command |= SSG1_ENABLE;
+ break;
+ case 2:
+ mtdcr(DCRN_ASG2, virt_to_bus(psgl->phead));
+ sg_command |= SSG2_ENABLE;
+ break;
+ case 3:
+ mtdcr(DCRN_ASG3, virt_to_bus(psgl->phead));
+ sg_command |= SSG3_ENABLE;
+ break;
+ default:
+#ifdef DEBUG_405DMA
+ printk("enable_dma_sgl: bad channel: %d\n", psgl->dmanr);
+#endif
+ }
+
+#if 0 /* debug */
+ printk("\n\nenable_dma_sgl at dma_addr 0x%x\n",
+ virt_to_bus(psgl->phead));
+ {
+ ppc_sgl_t *pnext, *sgl_addr;
+
+ pnext = psgl->phead;
+ while (pnext) {
+ printk("dma descriptor at 0x%x, dma addr 0x%x\n",
+ (unsigned)pnext, (unsigned)virt_to_bus(pnext));
+ printk("control 0x%x src 0x%x dst 0x%x c_count 0x%x, next 0x%x\n",
+ (unsigned)pnext->control, (unsigned)pnext->src_addr,
+ (unsigned)pnext->dst_addr,
+ (unsigned)pnext->control_count, (unsigned)pnext->next);
+
+ (unsigned)pnext = bus_to_virt(pnext->next);
+ }
+ printk("sg_command 0x%x\n", sg_command);
+ }
+#endif
+
+#ifdef PCI_ALLOC_IS_NONCONSISTENT
+ /*
+ * This is temporary only, until pci_alloc_consistent() really does
+ * return "consistent" memory.
+ */
+ flush_dcache_range((unsigned)handle, (unsigned)handle + SGL_LIST_SIZE);
+#endif
+
+ mtdcr(DCRN_ASGC, sg_command); /* start transfer */
+}
+
+
+
+/*
+ * Halt an active scatter/gather DMA operation.
+ */
+static __inline__ void disable_405gp_dma_sgl(sgl_handle_t handle)
+{
+ sgl_list_info_t *psgl = (sgl_list_info_t *)handle;
+ uint32_t sg_command;
+
+#ifdef DEBUG_405DMA
+ if (!handle) {
+ printk("enable_dma_sgl: null handle\n");
+ return;
+ } else if (psgl->dmanr > (MAX_405GP_DMA_CHANNELS - 1)) {
+ printk("enable_dma_sgl: bad channel in handle %d\n",
+ psgl->dmanr);
+ return;
+ }
+#endif
+ sg_command = mfdcr(DCRN_ASGC);
+ switch(psgl->dmanr) {
+ case 0:
+ sg_command &= ~SSG0_ENABLE;
+ break;
+ case 1:
+ sg_command &= ~SSG1_ENABLE;
+ break;
+ case 2:
+ sg_command &= ~SSG2_ENABLE;
+ break;
+ case 3:
+ sg_command &= ~SSG3_ENABLE;
+ break;
+ default:
+#ifdef DEBUG_405DMA
+ printk("enable_dma_sgl: bad channel: %d\n", psgl->dmanr);
+#endif
+ }
+
+ mtdcr(DCRN_ASGC, sg_command); /* stop transfer */
+}
+
+
+
+/*
+ * Returns number of bytes left to be transferred from the entire sgl list.
+ * *src_addr and *dst_addr get set to the source/destination address of
+ * the sgl descriptor where the DMA stopped.
+ *
+ * An sgl transfer must NOT be active when this function is called.
+ */
+static __inline__ int
+get_405gp_dma_sgl_residue(sgl_handle_t handle, dma_addr_t *src_addr,
+ dma_addr_t *dst_addr)
+{
+ sgl_list_info_t *psgl = (sgl_list_info_t *)handle;
+ ppc_dma_ch_t *p_dma_ch;
+ ppc_sgl_t *pnext, *sgl_addr;
+ uint32_t count_left;
+
+#ifdef DEBUG_405DMA
+ if (!handle) {
+ printk("get_dma_sgl_residue: null handle\n");
+ return DMA_STATUS_BAD_HANDLE;
+ } else if (psgl->dmanr > (MAX_405GP_DMA_CHANNELS - 1)) {
+ printk("get_dma_sgl_residue: bad channel in handle %d\n",
+ psgl->dmanr);
+ return DMA_STATUS_BAD_CHANNEL;
+ }
+#endif
+
+ switch(psgl->dmanr) {
+ case 0:
+ sgl_addr = (ppc_sgl_t *)bus_to_virt(mfdcr(DCRN_ASG0));
+ count_left = mfdcr(DCRN_DMACT0);
+ break;
+ case 1:
+ sgl_addr = (ppc_sgl_t *)bus_to_virt(mfdcr(DCRN_ASG1));
+ count_left = mfdcr(DCRN_DMACT1);
+ break;
+ case 2:
+ sgl_addr = (ppc_sgl_t *)bus_to_virt(mfdcr(DCRN_ASG2));
+ count_left = mfdcr(DCRN_DMACT2);
+ break;
+ case 3:
+ sgl_addr = (ppc_sgl_t *)bus_to_virt(mfdcr(DCRN_ASG3));
+ count_left = mfdcr(DCRN_DMACT3);
+ break;
+ default:
+#ifdef DEBUG_405DMA
+ printk("get_dma_sgl_residue: bad channel: %d\n", psgl->dmanr);
+#endif
+ goto error;
+ }
+
+ if (!sgl_addr) {
+#ifdef DEBUG_405DMA
+ printk("get_dma_sgl_residue: sgl addr register is null\n");
+#endif
+ goto error;
+ }
+
+ pnext = psgl->phead;
+ while (pnext &&
+ ((unsigned)pnext < ((unsigned)psgl + SGL_LIST_SIZE) &&
+ (pnext != sgl_addr))
+ ) {
+ pnext = pnext++;
+ }
+
+ if (pnext == sgl_addr) { /* found the sgl descriptor */
+
+ *src_addr = pnext->src_addr;
+ *dst_addr = pnext->dst_addr;
+
+ /*
+ * Now search the remaining descriptors and add their count.
+ * We already have the remaining count from this descriptor in
+ * count_left.
+ */
+ pnext++;
+
+ while ((pnext != psgl->ptail) &&
+ ((unsigned)pnext < ((unsigned)psgl + SGL_LIST_SIZE))
+ ) {
+ count_left += pnext->control_count & SG_COUNT_MASK;
+ }
+
+ if (pnext != psgl->ptail) { /* should never happen */
+#ifdef DEBUG_405DMA
+ printk("get_dma_sgl_residue error (1) psgl->ptail 0x%x handle 0x%x\n",
+ (unsigned int)psgl->ptail,
+ (unsigned int)handle);
+#endif
+ goto error;
+ }
+
+ /* success */
+ p_dma_ch = &dma_channels[psgl->dmanr];
+ return (count_left << p_dma_ch->shift); /* count in bytes */
+
+ } else {
+ /* this shouldn't happen */
+#ifdef DEBUG_405DMA
+ printk("get_dma_sgl_residue, unable to match current address 0x%x, handle 0x%x\n",
+ (unsigned int)sgl_addr, (unsigned int)handle);
+
+#endif
+ }
+
+
+error:
+ *src_addr = (dma_addr_t)NULL;
+ *dst_addr = (dma_addr_t)NULL;
+ return 0;
+}
+
+
+
+
+/*
+ * Returns the address(es) of the buffer(s) contained in the head element of
+ * the scatter/gather list. The element is removed from the scatter/gather
+ * list and the next element becomes the head.
+ *
+ * This function should only be called when the DMA is not active.
+ */
+static __inline__ int
+delete_405gp_dma_sgl_element(sgl_handle_t handle, dma_addr_t *src_dma_addr,
+ dma_addr_t *dst_dma_addr)
+{
+ sgl_list_info_t *psgl = (sgl_list_info_t *)handle;
+
+#ifdef DEBUG_405DMA
+ if (!handle) {
+ printk("delete_sgl_element: null handle\n");
+ return DMA_STATUS_BAD_HANDLE;
+ } else if (psgl->dmanr > (MAX_405GP_DMA_CHANNELS - 1)) {
+ printk("delete_sgl_element: bad channel in handle %d\n",
+ psgl->dmanr);
+ return DMA_STATUS_BAD_CHANNEL;
+ }
+#endif
+
+ if (!psgl->phead) {
+#ifdef DEBUG_405DMA
+ printk("delete_sgl_element: sgl list empty\n");
+#endif
+ *src_dma_addr = (dma_addr_t)NULL;
+ *dst_dma_addr = (dma_addr_t)NULL;
+ return DMA_STATUS_SGL_LIST_EMPTY;
+ }
+
+ *src_dma_addr = (dma_addr_t)psgl->phead->src_addr;
+ *dst_dma_addr = (dma_addr_t)psgl->phead->dst_addr;
+
+ if (psgl->phead == psgl->ptail) {
+ /* last descriptor on the list */
+ psgl->phead = NULL;
+ psgl->ptail = NULL;
+ } else {
+ psgl->phead++;
+ }
+
+ return DMA_STATUS_GOOD;
+}
+
+#endif /* DCRNCAP_DMA_SG */
+
+/*
+ * The rest of the DMA API, in ppc405_dma.c
+ */
+extern int hw_init_dma_channel(unsigned int, ppc_dma_ch_t *);
+extern int get_channel_config(unsigned int, ppc_dma_ch_t *);
+extern int set_channel_priority(unsigned int, unsigned int);
+extern unsigned int get_peripheral_width(unsigned int);
+extern int alloc_dma_handle(sgl_handle_t *, unsigned int, unsigned int);
+extern void free_dma_handle(sgl_handle_t);
+
+#endif
+#endif /* __KERNEL__ */
+++ /dev/null
-/*
- * include/asm-ppc/ppc4xx_dma.h
- *
- * IBM PPC4xx DMA engine library
- *
- * Copyright 2000-2004 MontaVista Software Inc.
- *
- * Cleaned up a bit more, Matt Porter <mporter@kernel.crashing.org>
- *
- * Original code by Armin Kuster <akuster@mvista.com>
- * and Pete Popov <ppopov@mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#ifdef __KERNEL__
-#ifndef __ASMPPC_PPC4xx_DMA_H
-#define __ASMPPC_PPC4xx_DMA_H
-
-#include <linux/config.h>
-#include <linux/types.h>
-#include <asm/mmu.h>
-#include <asm/ibm4xx.h>
-
-#undef DEBUG_4xxDMA
-
-#define MAX_PPC4xx_DMA_CHANNELS 4
-
-/* in arch/ppc/kernel/setup.c -- Cort */
-extern unsigned long DMA_MODE_WRITE, DMA_MODE_READ;
-
-/*
- * Function return status codes
- * These values are used to indicate whether or not the function
- * call was successful, or a bad/invalid parameter was passed.
- */
-#define DMA_STATUS_GOOD 0
-#define DMA_STATUS_BAD_CHANNEL 1
-#define DMA_STATUS_BAD_HANDLE 2
-#define DMA_STATUS_BAD_MODE 3
-#define DMA_STATUS_NULL_POINTER 4
-#define DMA_STATUS_OUT_OF_MEMORY 5
-#define DMA_STATUS_SGL_LIST_EMPTY 6
-#define DMA_STATUS_GENERAL_ERROR 7
-#define DMA_STATUS_CHANNEL_NOTFREE 8
-
-#define DMA_CHANNEL_BUSY 0x80000000
-
-/*
- * These indicate status as returned from the DMA Status Register.
- */
-#define DMA_STATUS_NO_ERROR 0
-#define DMA_STATUS_CS 1 /* Count Status */
-#define DMA_STATUS_TS 2 /* Transfer Status */
-#define DMA_STATUS_DMA_ERROR 3 /* DMA Error Occurred */
-#define DMA_STATUS_DMA_BUSY 4 /* The channel is busy */
-
-
-/*
- * DMA Channel Control Registers
- */
-
-#ifdef CONFIG_44x
-#define PPC4xx_DMA_64BIT
-#define DMA_CR_OFFSET 1
-#else
-#define DMA_CR_OFFSET 0
-#endif
-
-#define DMA_CE_ENABLE (1<<31) /* DMA Channel Enable */
-#define SET_DMA_CE_ENABLE(x) (((x)&0x1)<<31)
-#define GET_DMA_CE_ENABLE(x) (((x)&DMA_CE_ENABLE)>>31)
-
-#define DMA_CIE_ENABLE (1<<30) /* DMA Channel Interrupt Enable */
-#define SET_DMA_CIE_ENABLE(x) (((x)&0x1)<<30)
-#define GET_DMA_CIE_ENABLE(x) (((x)&DMA_CIE_ENABLE)>>30)
-
-#define DMA_TD (1<<29)
-#define SET_DMA_TD(x) (((x)&0x1)<<29)
-#define GET_DMA_TD(x) (((x)&DMA_TD)>>29)
-
-#define DMA_PL (1<<28) /* Peripheral Location */
-#define SET_DMA_PL(x) (((x)&0x1)<<28)
-#define GET_DMA_PL(x) (((x)&DMA_PL)>>28)
-
-#define EXTERNAL_PERIPHERAL 0
-#define INTERNAL_PERIPHERAL 1
-
-#define SET_DMA_PW(x) (((x)&0x3)<<(26-DMA_CR_OFFSET)) /* Peripheral Width */
-#define DMA_PW_MASK SET_DMA_PW(3)
-#define PW_8 0
-#define PW_16 1
-#define PW_32 2
-#define PW_64 3
-/* FIXME: Add PW_128 support for 440GP DMA block */
-#define GET_DMA_PW(x) (((x)&DMA_PW_MASK)>>(26-DMA_CR_OFFSET))
-
-#define DMA_DAI (1<<(25-DMA_CR_OFFSET)) /* Destination Address Increment */
-#define SET_DMA_DAI(x) (((x)&0x1)<<(25-DMA_CR_OFFSET))
-
-#define DMA_SAI (1<<(24-DMA_CR_OFFSET)) /* Source Address Increment */
-#define SET_DMA_SAI(x) (((x)&0x1)<<(24-DMA_CR_OFFSET))
-
-#define DMA_BEN (1<<(23-DMA_CR_OFFSET)) /* Buffer Enable */
-#define SET_DMA_BEN(x) (((x)&0x1)<<(23-DMA_CR_OFFSET))
-
-#define SET_DMA_TM(x) (((x)&0x3)<<(21-DMA_CR_OFFSET)) /* Transfer Mode */
-#define DMA_TM_MASK SET_DMA_TM(3)
-#define TM_PERIPHERAL 0 /* Peripheral */
-#define TM_RESERVED 1 /* Reserved */
-#define TM_S_MM 2 /* Memory to Memory */
-#define TM_D_MM 3 /* Device Paced Memory to Memory */
-#define GET_DMA_TM(x) (((x)&DMA_TM_MASK)>>(21-DMA_CR_OFFSET))
-
-#define SET_DMA_PSC(x) (((x)&0x3)<<(19-DMA_CR_OFFSET)) /* Peripheral Setup Cycles */
-#define DMA_PSC_MASK SET_DMA_PSC(3)
-#define GET_DMA_PSC(x) (((x)&DMA_PSC_MASK)>>(19-DMA_CR_OFFSET))
-
-#define SET_DMA_PWC(x) (((x)&0x3F)<<(13-DMA_CR_OFFSET)) /* Peripheral Wait Cycles */
-#define DMA_PWC_MASK SET_DMA_PWC(0x3F)
-#define GET_DMA_PWC(x) (((x)&DMA_PWC_MASK)>>(13-DMA_CR_OFFSET))
-
-#define SET_DMA_PHC(x) (((x)&0x7)<<(10-DMA_CR_OFFSET)) /* Peripheral Hold Cycles */
-#define DMA_PHC_MASK SET_DMA_PHC(0x7)
-#define GET_DMA_PHC(x) (((x)&DMA_PHC_MASK)>>(10-DMA_CR_OFFSET))
-
-#define DMA_ETD_OUTPUT (1<<(9-DMA_CR_OFFSET)) /* EOT pin is a TC output */
-#define SET_DMA_ETD(x) (((x)&0x1)<<(9-DMA_CR_OFFSET))
-
-#define DMA_TCE_ENABLE (1<<(8-DMA_CR_OFFSET))
-#define SET_DMA_TCE(x) (((x)&0x1)<<(8-DMA_CR_OFFSET))
-
-#define DMA_DEC (1<<(2) /* Address Decrement */
-#define SET_DMA_DEC(x) (((x)&0x1)<<2)
-#define GET_DMA_DEC(x) (((x)&DMA_DEC)>>2)
-
-/*
- * Transfer Modes
- * These modes are defined in a way that makes it possible to
- * simply "or" in the value in the control register.
- */
-
-#define DMA_MODE_MM (SET_DMA_TM(TM_S_MM)) /* memory to memory */
-
- /* Device-paced memory to memory, */
- /* device is at source address */
-#define DMA_MODE_MM_DEVATSRC (DMA_TD | SET_DMA_TM(TM_D_MM))
-
- /* Device-paced memory to memory, */
- /* device is at destination address */
-#define DMA_MODE_MM_DEVATDST (SET_DMA_TM(TM_D_MM))
-
-/* 405gp/440gp */
-#define SET_DMA_PREFETCH(x) (((x)&0x3)<<(4-DMA_CR_OFFSET)) /* Memory Read Prefetch */
-#define DMA_PREFETCH_MASK SET_DMA_PREFETCH(3)
-#define PREFETCH_1 0 /* Prefetch 1 Double Word */
-#define PREFETCH_2 1
-#define PREFETCH_4 2
-#define GET_DMA_PREFETCH(x) (((x)&DMA_PREFETCH_MASK)>>(4-DMA_CR_OFFSET))
-
-#define DMA_PCE (1<<(3-DMA_CR_OFFSET)) /* Parity Check Enable */
-#define SET_DMA_PCE(x) (((x)&0x1)<<(3-DMA_CR_OFFSET))
-#define GET_DMA_PCE(x) (((x)&DMA_PCE)>>(3-DMA_CR_OFFSET))
-
-/* stb3x */
-
-#define DMA_ECE_ENABLE (1<<5)
-#define SET_DMA_ECE(x) (((x)&0x1)<<5)
-#define GET_DMA_ECE(x) (((x)&DMA_ECE_ENABLE)>>5)
-
-#define DMA_TCD_DISABLE (1<<4)
-#define SET_DMA_TCD(x) (((x)&0x1)<<4)
-#define GET_DMA_TCD(x) (((x)&DMA_TCD_DISABLE)>>4)
-
-typedef uint32_t sgl_handle_t;
-
-#ifdef CONFIG_PPC4xx_EDMA
-
-#define SGL_LIST_SIZE 4096
-#define DMA_PPC4xx_SIZE SGL_LIST_SIZE
-
-#define SET_DMA_PRIORITY(x) (((x)&0x3)<<(6-DMA_CR_OFFSET)) /* DMA Channel Priority */
-#define DMA_PRIORITY_MASK SET_DMA_PRIORITY(3)
-#define PRIORITY_LOW 0
-#define PRIORITY_MID_LOW 1
-#define PRIORITY_MID_HIGH 2
-#define PRIORITY_HIGH 3
-#define GET_DMA_PRIORITY(x) (((x)&DMA_PRIORITY_MASK)>>(6-DMA_CR_OFFSET))
-
-/*
- * DMA Polarity Configuration Register
- */
-#define DMAReq_ActiveLow(chan) (1<<(31-(chan*3)))
-#define DMAAck_ActiveLow(chan) (1<<(30-(chan*3)))
-#define EOT_ActiveLow(chan) (1<<(29-(chan*3))) /* End of Transfer */
-
-/*
- * DMA Sleep Mode Register
- */
-#define SLEEP_MODE_ENABLE (1<<21)
-
-/*
- * DMA Status Register
- */
-#define DMA_CS0 (1<<31) /* Terminal Count has been reached */
-#define DMA_CS1 (1<<30)
-#define DMA_CS2 (1<<29)
-#define DMA_CS3 (1<<28)
-
-#define DMA_TS0 (1<<27) /* End of Transfer has been requested */
-#define DMA_TS1 (1<<26)
-#define DMA_TS2 (1<<25)
-#define DMA_TS3 (1<<24)
-
-#define DMA_CH0_ERR (1<<23) /* DMA Chanel 0 Error */
-#define DMA_CH1_ERR (1<<22)
-#define DMA_CH2_ERR (1<<21)
-#define DMA_CH3_ERR (1<<20)
-
-#define DMA_IN_DMA_REQ0 (1<<19) /* Internal DMA Request is pending */
-#define DMA_IN_DMA_REQ1 (1<<18)
-#define DMA_IN_DMA_REQ2 (1<<17)
-#define DMA_IN_DMA_REQ3 (1<<16)
-
-#define DMA_EXT_DMA_REQ0 (1<<15) /* External DMA Request is pending */
-#define DMA_EXT_DMA_REQ1 (1<<14)
-#define DMA_EXT_DMA_REQ2 (1<<13)
-#define DMA_EXT_DMA_REQ3 (1<<12)
-
-#define DMA_CH0_BUSY (1<<11) /* DMA Channel 0 Busy */
-#define DMA_CH1_BUSY (1<<10)
-#define DMA_CH2_BUSY (1<<9)
-#define DMA_CH3_BUSY (1<<8)
-
-#define DMA_SG0 (1<<7) /* DMA Channel 0 Scatter/Gather in progress */
-#define DMA_SG1 (1<<6)
-#define DMA_SG2 (1<<5)
-#define DMA_SG3 (1<<4)
-
-/*
- * DMA SG Command Register
- */
-#define SSG_ENABLE(chan) (1<<(31-chan)) /* Start Scatter Gather */
-#define SSG_MASK_ENABLE(chan) (1<<(15-chan)) /* Enable writing to SSG0 bit */
-
-/*
- * DMA Scatter/Gather Descriptor Bit fields
- */
-#define SG_LINK (1<<31) /* Link */
-#define SG_TCI_ENABLE (1<<29) /* Enable Terminal Count Interrupt */
-#define SG_ETI_ENABLE (1<<28) /* Enable End of Transfer Interrupt */
-#define SG_ERI_ENABLE (1<<27) /* Enable Error Interrupt */
-#define SG_COUNT_MASK 0xFFFF /* Count Field */
-
-#define SET_DMA_CONTROL \
- (SET_DMA_CIE_ENABLE(p_init->int_enable) | /* interrupt enable */ \
- SET_DMA_BEN(p_init->buffer_enable) | /* buffer enable */\
- SET_DMA_ETD(p_init->etd_output) | /* end of transfer pin */ \
- SET_DMA_TCE(p_init->tce_enable) | /* terminal count enable */ \
- SET_DMA_PL(p_init->pl) | /* peripheral location */ \
- SET_DMA_DAI(p_init->dai) | /* dest addr increment */ \
- SET_DMA_SAI(p_init->sai) | /* src addr increment */ \
- SET_DMA_PRIORITY(p_init->cp) | /* channel priority */ \
- SET_DMA_PW(p_init->pwidth) | /* peripheral/bus width */ \
- SET_DMA_PSC(p_init->psc) | /* peripheral setup cycles */ \
- SET_DMA_PWC(p_init->pwc) | /* peripheral wait cycles */ \
- SET_DMA_PHC(p_init->phc) | /* peripheral hold cycles */ \
- SET_DMA_PREFETCH(p_init->pf) /* read prefetch */)
-
-#define GET_DMA_POLARITY(chan) (DMAReq_ActiveLow(chan) | DMAAck_ActiveLow(chan) | EOT_ActiveLow(chan))
-
-#elif defined(CONFIG_STBXXX_DMA) /* stb03xxx */
-
-#define DMA_PPC4xx_SIZE 4096
-
-/*
- * DMA Status Register
- */
-
-#define SET_DMA_PRIORITY(x) (((x)&0x00800001)) /* DMA Channel Priority */
-#define DMA_PRIORITY_MASK 0x00800001
-#define PRIORITY_LOW 0x00000000
-#define PRIORITY_MID_LOW 0x00000001
-#define PRIORITY_MID_HIGH 0x00800000
-#define PRIORITY_HIGH 0x00800001
-#define GET_DMA_PRIORITY(x) (((((x)&DMA_PRIORITY_MASK) &0x00800000) >> 22 ) | (((x)&DMA_PRIORITY_MASK) &0x00000001))
-
-#define DMA_CS0 (1<<31) /* Terminal Count has been reached */
-#define DMA_CS1 (1<<30)
-#define DMA_CS2 (1<<29)
-#define DMA_CS3 (1<<28)
-
-#define DMA_TS0 (1<<27) /* End of Transfer has been requested */
-#define DMA_TS1 (1<<26)
-#define DMA_TS2 (1<<25)
-#define DMA_TS3 (1<<24)
-
-#define DMA_CH0_ERR (1<<23) /* DMA Chanel 0 Error */
-#define DMA_CH1_ERR (1<<22)
-#define DMA_CH2_ERR (1<<21)
-#define DMA_CH3_ERR (1<<20)
-
-#define DMA_CT0 (1<<19) /* Chained transfere */
-
-#define DMA_IN_DMA_REQ0 (1<<18) /* Internal DMA Request is pending */
-#define DMA_IN_DMA_REQ1 (1<<17)
-#define DMA_IN_DMA_REQ2 (1<<16)
-#define DMA_IN_DMA_REQ3 (1<<15)
-
-#define DMA_EXT_DMA_REQ0 (1<<14) /* External DMA Request is pending */
-#define DMA_EXT_DMA_REQ1 (1<<13)
-#define DMA_EXT_DMA_REQ2 (1<<12)
-#define DMA_EXT_DMA_REQ3 (1<<11)
-
-#define DMA_CH0_BUSY (1<<10) /* DMA Channel 0 Busy */
-#define DMA_CH1_BUSY (1<<9)
-#define DMA_CH2_BUSY (1<<8)
-#define DMA_CH3_BUSY (1<<7)
-
-#define DMA_CT1 (1<<6) /* Chained transfere */
-#define DMA_CT2 (1<<5)
-#define DMA_CT3 (1<<4)
-
-#define DMA_CH_ENABLE (1<<7)
-#define SET_DMA_CH(x) (((x)&0x1)<<7)
-#define GET_DMA_CH(x) (((x)&DMA_CH_ENABLE)>>7)
-
-/* STBx25xxx dma unique */
-/* enable device port on a dma channel
- * example ext 0 on dma 1
- */
-
-#define SSP0_RECV 15
-#define SSP0_XMIT 14
-#define EXT_DMA_0 12
-#define SC1_XMIT 11
-#define SC1_RECV 10
-#define EXT_DMA_2 9
-#define EXT_DMA_3 8
-#define SERIAL2_XMIT 7
-#define SERIAL2_RECV 6
-#define SC0_XMIT 5
-#define SC0_RECV 4
-#define SERIAL1_XMIT 3
-#define SERIAL1_RECV 2
-#define SERIAL0_XMIT 1
-#define SERIAL0_RECV 0
-
-#define DMA_CHAN_0 1
-#define DMA_CHAN_1 2
-#define DMA_CHAN_2 3
-#define DMA_CHAN_3 4
-
-/* end STBx25xx */
-
-/*
- * Bit 30 must be one for Redwoods, otherwise transfers may receive errors.
- */
-#define DMA_CR_MB0 0x2
-
-#define SET_DMA_CONTROL \
- (SET_DMA_CIE_ENABLE(p_init->int_enable) | /* interrupt enable */ \
- SET_DMA_ETD(p_init->etd_output) | /* end of transfer pin */ \
- SET_DMA_TCE(p_init->tce_enable) | /* terminal count enable */ \
- SET_DMA_PL(p_init->pl) | /* peripheral location */ \
- SET_DMA_DAI(p_init->dai) | /* dest addr increment */ \
- SET_DMA_SAI(p_init->sai) | /* src addr increment */ \
- SET_DMA_PRIORITY(p_init->cp) | /* channel priority */ \
- SET_DMA_PW(p_init->pwidth) | /* peripheral/bus width */ \
- SET_DMA_PSC(p_init->psc) | /* peripheral setup cycles */ \
- SET_DMA_PWC(p_init->pwc) | /* peripheral wait cycles */ \
- SET_DMA_PHC(p_init->phc) | /* peripheral hold cycles */ \
- SET_DMA_TCD(p_init->tcd_disable) | /* TC chain mode disable */ \
- SET_DMA_ECE(p_init->ece_enable) | /* ECE chanin mode enable */ \
- SET_DMA_CH(p_init->ch_enable) | /* Chain enable */ \
- DMA_CR_MB0 /* must be one */)
-
-#define GET_DMA_POLARITY(chan) chan
-
-#endif
-
-typedef struct {
- unsigned short in_use; /* set when channel is being used, clr when
- * available.
- */
- /*
- * Valid polarity settings:
- * DMAReq_ActiveLow(n)
- * DMAAck_ActiveLow(n)
- * EOT_ActiveLow(n)
- *
- * n is 0 to max dma chans
- */
- unsigned int polarity;
-
- char buffer_enable; /* Boolean: buffer enable */
- char tce_enable; /* Boolean: terminal count enable */
- char etd_output; /* Boolean: eot pin is a tc output */
- char pce; /* Boolean: parity check enable */
-
- /*
- * Peripheral location:
- * INTERNAL_PERIPHERAL (UART0 on the 405GP)
- * EXTERNAL_PERIPHERAL
- */
- char pl; /* internal/external peripheral */
-
- /*
- * Valid pwidth settings:
- * PW_8
- * PW_16
- * PW_32
- * PW_64
- */
- unsigned int pwidth;
-
- char dai; /* Boolean: dst address increment */
- char sai; /* Boolean: src address increment */
-
- /*
- * Valid psc settings: 0-3
- */
- unsigned int psc; /* Peripheral Setup Cycles */
-
- /*
- * Valid pwc settings:
- * 0-63
- */
- unsigned int pwc; /* Peripheral Wait Cycles */
-
- /*
- * Valid phc settings:
- * 0-7
- */
- unsigned int phc; /* Peripheral Hold Cycles */
-
- /*
- * Valid cp (channel priority) settings:
- * PRIORITY_LOW
- * PRIORITY_MID_LOW
- * PRIORITY_MID_HIGH
- * PRIORITY_HIGH
- */
- unsigned int cp; /* channel priority */
-
- /*
- * Valid pf (memory read prefetch) settings:
- *
- * PREFETCH_1
- * PREFETCH_2
- * PREFETCH_4
- */
- unsigned int pf; /* memory read prefetch */
-
- /*
- * Boolean: channel interrupt enable
- * NOTE: for sgl transfers, only the last descriptor will be setup to
- * interrupt.
- */
- char int_enable;
-
- char shift; /* easy access to byte_count shift, based on */
- /* the width of the channel */
-
- uint32_t control; /* channel control word */
-
- /* These variabled are used ONLY in single dma transfers */
- unsigned int mode; /* transfer mode */
- phys_addr_t addr;
- char ce; /* channel enable */
-#ifdef CONFIG_STB03xxx
- char ch_enable;
- char tcd_disable;
- char ece_enable;
- char td; /* transfer direction */
-#endif
-
-} ppc_dma_ch_t;
-
-/*
- * PPC44x DMA implementations have a slightly different
- * descriptor layout. Probably moved about due to the
- * change to 64-bit addresses and link pointer. I don't
- * know why they didn't just leave control_count after
- * the dst_addr.
- */
-#ifdef PPC4xx_DMA_64BIT
-typedef struct {
- uint32_t control;
- uint32_t control_count;
- phys_addr_t src_addr;
- phys_addr_t dst_addr;
- phys_addr_t next;
-} ppc_sgl_t;
-#else
-typedef struct {
- uint32_t control;
- phys_addr_t src_addr;
- phys_addr_t dst_addr;
- uint32_t control_count;
- uint32_t next;
-} ppc_sgl_t;
-#endif
-
-typedef struct {
- unsigned int dmanr;
- uint32_t control; /* channel ctrl word; loaded from each descrptr */
- uint32_t sgl_control; /* LK, TCI, ETI, and ERI bits in sgl descriptor */
- dma_addr_t dma_addr; /* dma (physical) address of this list */
- ppc_sgl_t *phead;
- dma_addr_t phead_dma;
- ppc_sgl_t *ptail;
- dma_addr_t ptail_dma;
-} sgl_list_info_t;
-
-typedef struct {
- phys_addr_t *src_addr;
- phys_addr_t *dst_addr;
- phys_addr_t dma_src_addr;
- phys_addr_t dma_dst_addr;
-} pci_alloc_desc_t;
-
-extern ppc_dma_ch_t dma_channels[];
-
-/*
- * The DMA API are in ppc4xx_dma.c and ppc4xx_sgdma.c
- */
-extern int ppc4xx_init_dma_channel(unsigned int, ppc_dma_ch_t *);
-extern int ppc4xx_get_channel_config(unsigned int, ppc_dma_ch_t *);
-extern int ppc4xx_set_channel_priority(unsigned int, unsigned int);
-extern unsigned int ppc4xx_get_peripheral_width(unsigned int);
-extern void ppc4xx_set_sg_addr(int, phys_addr_t);
-extern int ppc4xx_add_dma_sgl(sgl_handle_t, phys_addr_t, phys_addr_t, unsigned int);
-extern void ppc4xx_enable_dma_sgl(sgl_handle_t);
-extern void ppc4xx_disable_dma_sgl(sgl_handle_t);
-extern int ppc4xx_get_dma_sgl_residue(sgl_handle_t, phys_addr_t *, phys_addr_t *);
-extern int ppc4xx_delete_dma_sgl_element(sgl_handle_t, phys_addr_t *, phys_addr_t *);
-extern int ppc4xx_alloc_dma_handle(sgl_handle_t *, unsigned int, unsigned int);
-extern void ppc4xx_free_dma_handle(sgl_handle_t);
-extern int ppc4xx_get_dma_status(void);
-extern void ppc4xx_set_src_addr(int dmanr, phys_addr_t src_addr);
-extern void ppc4xx_set_dst_addr(int dmanr, phys_addr_t dst_addr);
-extern void ppc4xx_enable_dma(unsigned int dmanr);
-extern void ppc4xx_disable_dma(unsigned int dmanr);
-extern void ppc4xx_set_dma_count(unsigned int dmanr, unsigned int count);
-extern int ppc4xx_get_dma_residue(unsigned int dmanr);
-extern void ppc4xx_set_dma_addr2(unsigned int dmanr, phys_addr_t src_dma_addr,
- phys_addr_t dst_dma_addr);
-extern int ppc4xx_enable_dma_interrupt(unsigned int dmanr);
-extern int ppc4xx_disable_dma_interrupt(unsigned int dmanr);
-extern int ppc4xx_clr_dma_status(unsigned int dmanr);
-extern int ppc4xx_map_dma_port(unsigned int dmanr, unsigned int ocp_dma,short dma_chan);
-extern int ppc4xx_disable_dma_port(unsigned int dmanr, unsigned int ocp_dma,short dma_chan);
-extern int ppc4xx_set_dma_mode(unsigned int dmanr, unsigned int mode);
-
-/* These are in kernel/dma.c: */
-
-/* reserve a DMA channel */
-extern int request_dma(unsigned int dmanr, const char *device_id);
-/* release it again */
-extern void free_dma(unsigned int dmanr);
-#endif
-#endif /* __KERNEL__ */
unsigned long bi_sramsize; /* size of SRAM memory */
#if defined(CONFIG_8xx) || defined(CONFIG_CPM2) || defined(CONFIG_85xx)
unsigned long bi_immr_base; /* base of IMMR register */
-#endif
-#if defined(CONFIG_PPC_MPC52xx)
- unsigned long bi_mbar_base; /* base of internal registers */
#endif
unsigned long bi_bootflags; /* boot / reboot flag (for LynxOS) */
unsigned long bi_ip_addr; /* IP Address */
unsigned long bi_brgfreq; /* BRG_CLK Freq, in MHz */
unsigned long bi_sccfreq; /* SCC_CLK Freq, in MHz */
unsigned long bi_vco; /* VCO Out from PLL, in MHz */
-#endif
-#if defined(CONFIG_PPC_MPC52xx)
- unsigned long bi_ipbfreq; /* IPB Bus Freq, in MHz */
- unsigned long bi_pcifreq; /* PCI Bus Freq, in MHz */
#endif
unsigned long bi_baudrate; /* Console Baudrate */
#if defined(CONFIG_405GP)
#define DBAT6U SPRN_DBAT6U /* Data BAT 6 Upper Register */
#define DBAT7L SPRN_DBAT7L /* Data BAT 7 Lower Register */
#define DBAT7U SPRN_DBAT7U /* Data BAT 7 Upper Register */
-//#define DEC SPRN_DEC /* Decrement Register */
+#define DEC SPRN_DEC /* Decrement Register */
#define DMISS SPRN_DMISS /* Data TLB Miss Register */
#define DSISR SPRN_DSISR /* Data Storage Interrupt Status Register */
#define EAR SPRN_EAR /* External Address Register */
#define IMMR SPRN_IMMR /* PPC 860/821 Internal Memory Map Register */
#define L2CR SPRN_L2CR /* Classic PPC L2 cache control register */
#define L3CR SPRN_L3CR /* PPC 745x L3 cache control register */
-//#define LR SPRN_LR
+#define LR SPRN_LR
#define PVR SPRN_PVR /* Processor Version */
-//#define RPA SPRN_RPA /* Required Physical Address Register */
+#define RPA SPRN_RPA /* Required Physical Address Register */
#define SDR1 SPRN_SDR1 /* MMU hash base register */
#define SPR0 SPRN_SPRG0 /* Supervisor Private Registers */
#define SPR1 SPRN_SPRG1
#define SVR_8555E 0x80790000
#define SVR_8560 0x80700000
-#if 0
/* Segment Registers */
#define SR0 0
#define SR1 1
#define SR13 13
#define SR14 14
#define SR15 15
-#endif
/* Macros for setting and retrieving special purpose registers */
#ifndef __ASSEMBLY__
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ 0, 0 }, \
{ INR_OPEN, INR_OPEN }, \
- { 32768, 32768 }, \
+ { PAGE_SIZE, PAGE_SIZE }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ MAX_SIGPENDING, MAX_SIGPENDING }, \
#define SIG_SETMASK 2 /* for setting the signal mask */
/* Type of a signal handler. */
-typedef void __signalfn_t(int);
-typedef __signalfn_t __user *__sighandler_t;
-
-typedef void __restorefn_t(void);
-typedef __restorefn_t __user *__sigrestore_t;
+typedef void (*__sighandler_t)(int);
#define SIG_DFL ((__sighandler_t)0) /* default signal handling */
#define SIG_IGN ((__sighandler_t)1) /* ignore signal */
__sighandler_t sa_handler;
old_sigset_t sa_mask;
unsigned long sa_flags;
- __sigrestore_t sa_restorer;
+ void (*sa_restorer)(void);
};
struct sigaction {
__sighandler_t sa_handler;
unsigned long sa_flags;
- __sigrestore_t sa_restorer;
+ void (*sa_restorer)(void);
sigset_t sa_mask; /* mask last for extensibility */
};
};
typedef struct sigaltstack {
- void __user *ss_sp;
+ void *ss_sp;
int ss_flags;
size_t ss_size;
} stack_t;
((addr) <= current->thread.fs.seg \
&& ((size) == 0 || (size) - 1 <= current->thread.fs.seg - (addr)))
-#define access_ok(type, addr, size) \
- (__chk_user_ptr(addr),__access_ok((unsigned long)(addr),(size)))
+#define access_ok(type, addr, size) __access_ok((unsigned long)(addr),(size))
extern inline int verify_area(int type, const void __user * addr, unsigned long size)
{
#define __put_user_nocheck(x,ptr,size) \
({ \
long __pu_err; \
- __chk_user_ptr(ptr); \
__put_user_size((x),(ptr),(size),__pu_err); \
__pu_err; \
})
#define __put_user_check(x,ptr,size) \
({ \
long __pu_err = -EFAULT; \
- __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
+ __typeof__(*(ptr)) *__pu_addr = (ptr); \
if (access_ok(VERIFY_WRITE,__pu_addr,size)) \
__put_user_size((x),__pu_addr,(size),__pu_err); \
__pu_err; \
#define __get_user_nocheck(x, ptr, size) \
({ \
long __gu_err, __gu_val; \
- __chk_user_ptr(ptr); \
__get_user_size(__gu_val, (ptr), (size), __gu_err); \
(x) = (__typeof__(*(ptr)))__gu_val; \
__gu_err; \
({ \
long __gu_err; \
long long __gu_val; \
- __chk_user_ptr(ptr); \
__get_user_size64(__gu_val, (ptr), (size), __gu_err); \
(x) = (__typeof__(*(ptr)))__gu_val; \
__gu_err; \
#define __get_user_check(x, ptr, size) \
({ \
long __gu_err = -EFAULT, __gu_val = 0; \
- const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
+ const __typeof__(*(ptr)) *__gu_addr = (ptr); \
if (access_ok(VERIFY_READ, __gu_addr, (size))) \
__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
(x) = (__typeof__(*(ptr)))__gu_val; \
({ \
long __gu_err = -EFAULT; \
long long __gu_val = 0; \
- const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
+ const __typeof__(*(ptr)) *__gu_addr = (ptr); \
if (access_ok(VERIFY_READ, __gu_addr, (size))) \
__get_user_size64(__gu_val, __gu_addr, (size), __gu_err); \
(x) = (__typeof__(*(ptr)))__gu_val; \
struct ucontext {
unsigned long uc_flags;
- struct ucontext __user *uc_link;
+ struct ucontext *uc_link;
stack_t uc_stack;
int uc_pad[7];
- struct mcontext __user *uc_regs;/* points to uc_mcontext field */
+ struct mcontext *uc_regs; /* points to uc_mcontext field */
sigset_t uc_sigmask;
/* glibc has 1024-bit signal masks, ours are 64-bit */
int uc_maskext[30];
#define __NR_mq_notify 266
#define __NR_mq_getsetattr 267
#define __NR_kexec_load 268
-#define __NR_ioprio_set 269
-#define __NR_ioprio_get 270
-#define __NR_syscalls 271
+#define __NR_syscalls 269
#define __NR(n) #n
out_be64(vaddr, val);
}
-#define EEH_CHECK_ALIGN(v,a) \
- ((((unsigned long)(v)) & ((a) - 1)) == 0)
-
static inline void eeh_memset_io(void *addr, int c, unsigned long n) {
void *vaddr = (void *)IO_TOKEN_TO_ADDR(addr);
- u32 lc = c;
- lc |= lc << 8;
- lc |= lc << 16;
-
- while(n && !EEH_CHECK_ALIGN(vaddr, 4)) {
- *((volatile u8 *)vaddr) = c;
- vaddr = (void *)((unsigned long)vaddr + 1);
- n--;
- }
- while(n >= 4) {
- *((volatile u32 *)vaddr) = lc;
- vaddr = (void *)((unsigned long)vaddr + 4);
- n -= 4;
- }
- while(n) {
- *((volatile u8 *)vaddr) = c;
- vaddr = (void *)((unsigned long)vaddr + 1);
- n--;
- }
- __asm__ __volatile__ ("sync" : : : "memory");
+ memset(vaddr, c, n);
}
static inline void eeh_memcpy_fromio(void *dest, void *src, unsigned long n) {
void *vsrc = (void *)IO_TOKEN_TO_ADDR(src);
- void *vsrcsave = vsrc, *destsave = dest, *srcsave = src;
- unsigned long nsave = n;
-
- while(n && (!EEH_CHECK_ALIGN(vsrc, 4) || !EEH_CHECK_ALIGN(dest, 4))) {
- *((u8 *)dest) = *((volatile u8 *)vsrc);
- __asm__ __volatile__ ("eieio" : : : "memory");
- vsrc = (void *)((unsigned long)vsrc + 1);
- dest = (void *)((unsigned long)dest + 1);
- n--;
- }
- while(n > 4) {
- *((u32 *)dest) = *((volatile u32 *)vsrc);
- __asm__ __volatile__ ("eieio" : : : "memory");
- vsrc = (void *)((unsigned long)vsrc + 4);
- dest = (void *)((unsigned long)dest + 4);
- n -= 4;
- }
- while(n) {
- *((u8 *)dest) = *((volatile u8 *)vsrc);
- __asm__ __volatile__ ("eieio" : : : "memory");
- vsrc = (void *)((unsigned long)vsrc + 1);
- dest = (void *)((unsigned long)dest + 1);
- n--;
- }
- __asm__ __volatile__ ("sync" : : : "memory");
-
+ memcpy(dest, vsrc, n);
/* Look for ffff's here at dest[n]. Assume that at least 4 bytes
* were copied. Check all four bytes.
*/
- if ((nsave >= 4) &&
- (EEH_POSSIBLE_ERROR(srcsave, vsrcsave, (*((u32 *) destsave+nsave-4)),
- u32))) {
- eeh_check_failure(srcsave, (*((u32 *) destsave+nsave-4)));
+ if ((n >= 4) &&
+ (EEH_POSSIBLE_ERROR(src, vsrc, (*((u32 *) dest+n-4)), u32))) {
+ eeh_check_failure(src, (*((u32 *) dest+n-4)));
}
}
static inline void eeh_memcpy_toio(void *dest, void *src, unsigned long n) {
void *vdest = (void *)IO_TOKEN_TO_ADDR(dest);
-
- while(n && (!EEH_CHECK_ALIGN(vdest, 4) || !EEH_CHECK_ALIGN(src, 4))) {
- *((volatile u8 *)vdest) = *((u8 *)src);
- src = (void *)((unsigned long)src + 1);
- vdest = (void *)((unsigned long)vdest + 1);
- n--;
- }
- while(n > 4) {
- *((volatile u32 *)vdest) = *((volatile u32 *)src);
- src = (void *)((unsigned long)src + 4);
- vdest = (void *)((unsigned long)vdest + 4);
- n-=4;
- }
- while(n) {
- *((volatile u8 *)vdest) = *((u8 *)src);
- src = (void *)((unsigned long)src + 1);
- vdest = (void *)((unsigned long)vdest + 1);
- n--;
- }
- __asm__ __volatile__ ("sync" : : : "memory");
+ memcpy(vdest, src, n);
}
-#undef EEH_CHECK_ALIGN
-
#define MAX_ISA_PORT 0x10000
extern unsigned long io_page_mask;
#define _IO_IS_VALID(port) ((port) >= MAX_ISA_PORT || (1 << (port>>PAGE_SHIFT)) & io_page_mask)
#define O_LARGEFILE 0200000
#define O_DIRECT 0400000 /* direct disk access hint */
#define O_NOATIME 01000000
-#define O_ATOMICLOOKUP 02000000 /* do atomic file lookup */
#define F_DUPFD 0 /* dup */
#define F_GETFD 1 /* get close_on_exec */
+++ /dev/null
-/*
- * hvcserver.h
- * Copyright (C) 2004 Ryan S Arnold, IBM Corporation
- *
- * PPC64 virtual I/O console server support.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef _PPC64_HVCSERVER_H
-#define _PPC64_HVCSERVER_H
-
-#include <linux/list.h>
-
-/* Converged Location Code length */
-#define HVCS_CLC_LENGTH 79
-
-struct hvcs_partner_info {
- struct list_head node;
- unsigned int unit_address;
- unsigned int partition_ID;
- char location_code[HVCS_CLC_LENGTH + 1]; /* CLC + 1 null-term char */
-};
-
-extern int hvcs_free_partner_info(struct list_head *head);
-extern int hvcs_get_partner_info(unsigned int unit_address,
- struct list_head *head, unsigned long *pi_buff);
-extern int hvcs_register_connection(unsigned int unit_address,
- unsigned int p_partition_ID, unsigned int p_unit_address);
-extern int hvcs_free_connection(unsigned int unit_address);
-
-#endif /* _PPC64_HVCSERVER_H */
* 2 of the License, or (at your option) any later version.
*/
-#include <linux/config.h>
#include <linux/threads.h>
+#include <asm/atomic.h>
/*
* Maximum number of interrupt sources that we can handle.
return virt_irq_to_real_map[virt_irq];
}
-extern unsigned int real_irq_to_virt_slowpath(unsigned int real_irq);
-
/*
* Because many systems have two overlapping names spaces for
* interrupts (ISA and XICS for example), and the ISA interrupts
mm_context_t ctx = { .id = REGION_ID(ea), KERNEL_LOW_HPAGES}; \
ctx; })
+/*
+ * Hardware Segment Lookaside Buffer Entry
+ * This structure has been padded out to two 64b doublewords (actual SLBE's are
+ * 94 bits). This padding facilites use by the segment management
+ * instructions.
+ */
typedef struct {
unsigned long esid: 36; /* Effective segment ID */
unsigned long resv0:20; /* Reserved */
} dw1;
} STE;
+typedef struct {
+ unsigned long esid: 36; /* Effective segment ID */
+ unsigned long v: 1; /* Entry valid (v=1) or invalid */
+ unsigned long null1:15; /* padding to a 64b boundary */
+ unsigned long index:12; /* Index to select SLB entry. Used by slbmte */
+} slb_dword0;
+
+typedef struct {
+ unsigned long vsid: 52; /* Virtual segment ID */
+ unsigned long ks: 1; /* Supervisor (privileged) state storage key */
+ unsigned long kp: 1; /* Problem state storage key */
+ unsigned long n: 1; /* No-execute if n=1 */
+ unsigned long l: 1; /* Virt pages are large (l=1) or 4KB (l=0) */
+ unsigned long c: 1; /* Class */
+ unsigned long resv0: 7; /* Padding to a 64b boundary */
+} slb_dword1;
+
+typedef struct {
+ union {
+ unsigned long dword0;
+ slb_dword0 dw0;
+ } dw0;
+
+ union {
+ unsigned long dword1;
+ slb_dword1 dw1;
+ } dw1;
+} SLBE;
+
/* Hardware Page Table Entry */
#define HPTES_PER_GROUP 8
#define STAB0_PHYS_ADDR (STAB0_PAGE<<PAGE_SHIFT)
#define STAB0_VIRT_ADDR (KERNELBASE+STAB0_PHYS_ADDR)
-#define SLB_NUM_BOLTED 2
-#define SLB_CACHE_ENTRIES 8
-
-/* Bits in the SLB ESID word */
-#define SLB_ESID_V 0x0000000008000000 /* entry is valid */
-
-/* Bits in the SLB VSID word */
-#define SLB_VSID_SHIFT 12
-#define SLB_VSID_KS 0x0000000000000800
-#define SLB_VSID_KP 0x0000000000000400
-#define SLB_VSID_N 0x0000000000000200 /* no-execute */
-#define SLB_VSID_L 0x0000000000000100 /* largepage (4M) */
-#define SLB_VSID_C 0x0000000000000080 /* class */
-
-#define SLB_VSID_KERNEL (SLB_VSID_KP|SLB_VSID_C)
-#define SLB_VSID_USER (SLB_VSID_KP|SLB_VSID_KS)
-
-#define VSID_RANDOMIZER ASM_CONST(42470972311)
-#define VSID_MASK 0xfffffffffUL
-/* Because we never access addresses below KERNELBASE as kernel
- * addresses, this VSID is never used for anything real, and will
- * never have pages hashed into it */
-#define BAD_VSID ASM_CONST(0)
-
/* Block size masks */
#define BL_128K 0x000
#define BL_256K 0x001
}
extern void flush_stab(struct task_struct *tsk, struct mm_struct *mm);
-extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
+extern void flush_slb(struct task_struct *tsk, struct mm_struct *mm);
/*
* switch_mm is the entry point called from the architecture independent
return;
if (cur_cpu_spec->cpu_features & CPU_FTR_SLB)
- switch_slb(tsk, next);
+ flush_slb(tsk, next);
else
flush_stab(tsk, next);
}
local_irq_restore(flags);
}
+#define VSID_RANDOMIZER 42470972311UL
+#define VSID_MASK 0xfffffffffUL
+
+
/* This is only valid for kernel (including vmalloc, imalloc and bolted) EA's
*/
static inline unsigned long
u64 exmc[8]; /* used for machine checks */
u64 exslb[8]; /* used for SLB/segment table misses
* on the linear mapping */
- u64 slb_r3; /* spot to save R3 on SLB miss */
- mm_context_t context;
- u16 slb_cache[SLB_CACHE_ENTRIES];
- u16 slb_cache_ptr;
+ u64 exdsi[8]; /* used for linear mapping hash table misses */
/*
* then miscellaneous read-write fields
*/
struct task_struct *__current; /* Pointer to current */
u64 kstack; /* Saved Kernel stack addr */
- u64 stab_rr; /* stab/slb round-robin counter */
+ u64 stab_next_rr; /* stab/slb round-robin counter */
u64 next_jiffy_update_tb; /* TB value for next jiffy update */
u64 saved_r1; /* r1 save for RTAS calls */
u64 saved_msr; /* MSR saved here by enter_rtas */
u32 lpevent_count; /* lpevents processed */
u8 proc_enabled; /* irq soft-enable flag */
- /* not yet used */
- u64 exdsi[8]; /* used for linear mapping hash table misses */
-
/*
* iSeries structues which the hypervisor knows about - Not
* sure if these particularly need to be cacheline aligned.
#define SID_SHIFT 28
#define SID_MASK 0xfffffffffUL
-#define ESID_MASK 0xfffffffff0000000UL
#define GET_ESID(x) (((x) >> SID_SHIFT) & SID_MASK)
#ifdef CONFIG_HUGETLB_PAGE
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
/* For 64-bit processes the hugepage range is 1T-1.5T */
-#define TASK_HPAGE_BASE ASM_CONST(0x0000010000000000)
-#define TASK_HPAGE_END ASM_CONST(0x0000018000000000)
+#define TASK_HPAGE_BASE (0x0000010000000000UL)
+#define TASK_HPAGE_END (0x0000018000000000UL)
#define LOW_ESID_MASK(addr, len) (((1U << (GET_ESID(addr+len-1)+1)) \
- (1U << GET_ESID(addr))) & 0xffff)
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-#define devmem_is_allowed(x) 1
-
#endif /* __KERNEL__ */
#endif /* _PPC64_PAGE_H */
* the PCI memory space in the CPU bus space
*/
unsigned long pci_mem_offset;
+ unsigned long pci_io_offset;
struct pci_ops *ops;
volatile unsigned int *cfg_addr;
#define PVR SPRN_PVR /* Processor Version */
#define PIR SPRN_PIR /* Processor ID */
#define PURR SPRN_PURR /* Processor Utilization of Resource Register */
-//#define RPA SPRN_RPA /* Required Physical Address Register */
+#define RPA SPRN_RPA /* Required Physical Address Register */
#define SDR1 SPRN_SDR1 /* MMU hash base register */
#define SPR0 SPRN_SPRG0 /* Supervisor Private Registers */
#define SPR1 SPRN_SPRG1
#endif /* ASSEMBLY */
-#define HAVE_ARCH_PICK_MMAP_LAYOUT
-
/*
* Number of entries in the SLB. If this ever changes we should handle
* it with a use a cpu feature fixup.
extern void print_properties(struct device_node *node);
extern int prom_n_addr_cells(struct device_node* np);
extern int prom_n_size_cells(struct device_node* np);
-extern int prom_n_intr_cells(struct device_node* np);
extern void prom_get_irq_senses(unsigned char *senses, int off, int max);
extern void prom_add_property(struct device_node* np, struct property* prop);
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ 0, 0 }, \
{ INR_OPEN, INR_OPEN }, \
- { 32768, 32768 }, \
+ { PAGE_SIZE, PAGE_SIZE }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ MAX_SIGPENDING, MAX_SIGPENDING }, \
#define __get_user_check(x,ptr,size) \
({ \
long __gu_err = -EFAULT, __gu_val = 0; \
- const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
+ const __typeof__(*(ptr)) *__gu_addr = (ptr); \
if (access_ok(VERIFY_READ,__gu_addr,size)) \
__get_user_size(__gu_val,__gu_addr,(size),__gu_err,-EFAULT);\
(x) = (__typeof__(*(ptr)))__gu_val; \
void xics_setup_cpu(void);
void xics_cause_IPI(int cpu);
-/* first argument is ignored for now*/
-void pSeriesLP_cppr_info(int n_cpu, u8 value);
-
struct xics_ipi_struct {
volatile unsigned long value;
} ____cacheline_aligned;
#define O_DIRECTORY 0200000 /* must be a directory */
#define O_NOFOLLOW 0400000 /* don't follow links */
#define O_NOATIME 01000000
-#define O_ATOMICLOOKUP 02000000 /* do atomic file lookup (tux) */
#define F_DUPFD 0 /* dup */
#define F_GETFD 1 /* get close_on_exec */
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-#define devmem_is_allowed(x) 1
-
#endif /* __KERNEL__ */
#endif /* _S390_PAGE_H */
#define MM_VM_SIZE(mm) DEFAULT_TASK_SIZE
-#define HAVE_ARCH_PICK_MMAP_LAYOUT
-
typedef struct {
__u32 ar4;
} mm_segment_t;
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ 0, 0 }, \
{ INR_OPEN, INR_OPEN }, \
- { 32768, 32768 }, \
+ { PAGE_SIZE, PAGE_SIZE }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ MAX_SIGPENDING, MAX_SIGPENDING }, \
#endif
-#define devmem_is_allowed(x) 1
-
#endif /* __KERNEL__ */
#endif /* __ASM_SH_PAGE_H */
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ 0, 0 }, \
{ INR_OPEN, INR_OPEN }, \
- { 32768, 32768 }, \
+ { PAGE_SIZE, PAGE_SIZE }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ MAX_SIGPENDING, MAX_SIGPENDING }, \
#endif
-#define devmem_is_allowed(x) 1
-
#endif /* __KERNEL__ */
#endif /* __ASM_SH64_PAGE_H */
#define O_DIRECTORY 0x10000 /* must be a directory */
#define O_NOFOLLOW 0x20000 /* don't follow links */
#define O_LARGEFILE 0x40000
-#define O_ATOMICLOOKUP 0x80000 /* do atomic file lookup */
#define O_DIRECT 0x100000 /* direct disk access hint */
#define O_NOATIME 0x200000
#ifndef _SPARC_OPENPROMIO_H
#define _SPARC_OPENPROMIO_H
-#include <linux/compiler.h>
#include <linux/ioctl.h>
#include <linux/types.h>
{
int op_nodeid; /* PROM Node ID (value-result) */
int op_namelen; /* Length of op_name. */
- char __user *op_name; /* Pointer to the property name. */
+ char *op_name; /* Pointer to the property name. */
int op_buflen; /* Length of op_buf (value-result) */
- char __user *op_buf; /* Pointer to buffer. */
+ char *op_buf; /* Pointer to buffer. */
};
#define OPIOCGET _IOWR('O', 1, struct opiocdesc)
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-#define devmem_is_allowed(x) 1
-
#endif /* __KERNEL__ */
#endif /* _SPARC_PAGE_H */
extern void pci_unmap_page(struct pci_dev *hwdev,
dma_addr_t dma_address, size_t size, int direction);
+/* map_page and map_single cannot fail */
+static inline int pci_dma_mapping_error(dma_addr_t dma_addr)
+{
+ return 0;
+}
+
/* Map a set of buffers described by scatterlist in streaming
* mode for DMA. This is the scather-gather version of the
* above pci_map_single interface. Here the scatter gather list
{
}
-#define PCI_DMA_ERROR_CODE (~(dma_addr_t)0x0)
-
-static inline int pci_dma_mapping_error(dma_addr_t dma_addr)
-{
- return (dma_addr == PCI_DMA_ERROR_CODE);
-}
-
#endif /* __KERNEL__ */
/* generic pci stuff */
{ 0, RLIM_INFINITY}, \
{RLIM_INFINITY, RLIM_INFINITY}, \
{INR_OPEN, INR_OPEN}, {0, 0}, \
- {32768, 32768}, \
+ {PAGE_SIZE, PAGE_SIZE}, \
{RLIM_INFINITY, RLIM_INFINITY}, \
{RLIM_INFINITY, RLIM_INFINITY}, \
{MAX_SIGPENDING, MAX_SIGPENDING}, \
/* These are the same regardless of whether this is an SMP kernel or not. */
#define flush_cache_mm(__mm) \
do { if ((__mm) == current->mm) flushw_user(); } while(0)
-#define flush_cache_range(vma, start, end) \
- flush_cache_mm((vma)->vm_mm)
+extern void flush_cache_range(struct vm_area_struct *, unsigned long, unsigned long);
#define flush_cache_page(vma, page) \
flush_cache_mm((vma)->vm_mm)
#ifndef __LINUX_FBIO_H
#define __LINUX_FBIO_H
-#include <linux/compiler.h>
-
/* Constants used for fbio SunOS compatibility */
/* (C) 1996 Miguel de Icaza */
struct fbcmap {
int index; /* first element (0 origin) */
int count;
- unsigned char __user *red;
- unsigned char __user *green;
- unsigned char __user *blue;
+ unsigned char *red;
+ unsigned char *green;
+ unsigned char *blue;
};
#ifdef __KERNEL__
#define O_DIRECTORY 0x10000 /* must be a directory */
#define O_NOFOLLOW 0x20000 /* don't follow links */
#define O_LARGEFILE 0x40000
-#define O_ATOMICLOOKUP 0x80000 /* do atomic file lookup */
#define O_DIRECT 0x100000 /* direct disk access hint */
#define O_NOATIME 0x200000
volatile int doing_pdma = 0;
/* This is software state */
-char *pdma_base = NULL;
+char *pdma_base = 0;
unsigned long pdma_areasize;
/* Common routines to all controller types on the Sparc. */
doing_pdma = 0;
if (pdma_base) {
mmu_unlockarea(pdma_base, pdma_areasize);
- pdma_base = NULL;
+ pdma_base = 0;
}
}
} else {
#ifdef CONFIG_PCI
struct linux_ebus *ebus;
- struct linux_ebus_device *edev = NULL;
+ struct linux_ebus_device *edev = 0;
unsigned long config = 0;
unsigned long auxio_reg;
spin_unlock(&mm->page_table_lock);
}
+extern void __flush_tlb_mm(unsigned long, unsigned long);
+
#define deactivate_mm(tsk,mm) do { } while (0)
/* Activate a new MM instance for the current task. */
#ifndef _SPARC64_OPENPROMIO_H
#define _SPARC64_OPENPROMIO_H
-#include <linux/compiler.h>
#include <linux/ioctl.h>
#include <linux/types.h>
{
int op_nodeid; /* PROM Node ID (value-result) */
int op_namelen; /* Length of op_name. */
- char __user *op_name; /* Pointer to the property name. */
+ char *op_name; /* Pointer to the property name. */
int op_buflen; /* Length of op_buf (value-result) */
- char __user *op_buf; /* Pointer to buffer. */
+ char *op_buf; /* Pointer to buffer. */
};
#define OPIOCGET _IOWR('O', 1, struct opiocdesc)
#define clear_page(X) _clear_page((void *)(X))
struct page;
extern void clear_user_page(void *addr, unsigned long vaddr, struct page *page);
-#define copy_page(X,Y) memcpy((void *)(X), (void *)(Y), PAGE_SIZE)
+#define copy_page(X,Y) __memcpy((void *)(X), (void *)(Y), PAGE_SIZE)
extern void copy_user_page(void *to, void *from, unsigned long vaddr, struct page *topage);
/* GROSS, defining this makes gcc pass these types as aggregates,
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-#define devmem_is_allowed(x) 1
-
#endif /* !(__KERNEL__) */
#endif /* !(_SPARC64_PAGE_H) */
#define pmd_populate(MM,PMD,PTE_PAGE) \
pmd_populate_kernel(MM,PMD,page_address(PTE_PAGE))
-extern pte_t *__pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address);
-
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
-{
- pte_t *pte = __pte_alloc_one_kernel(mm, address);
- if (pte) {
- struct page *page = virt_to_page(pte);
- page->mapping = (void *) mm;
- page->index = address & PMD_MASK;
- }
- return pte;
-}
+extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address);
static inline struct page *
pte_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- pte_t *pte = __pte_alloc_one_kernel(mm, addr);
- if (pte) {
- struct page *page = virt_to_page(pte);
- page->mapping = (void *) mm;
- page->index = addr & PMD_MASK;
- return page;
- }
+ pte_t *pte = pte_alloc_one_kernel(mm, addr);
+ if (pte)
+ return virt_to_page(pte);
return NULL;
}
free_page((unsigned long)pte);
}
-static inline void pte_free_kernel(pte_t *pte)
-{
- virt_to_page(pte)->mapping = NULL;
- free_pte_fast(pte);
-}
-
-static inline void pte_free(struct page *ptepage)
-{
- ptepage->mapping = NULL;
- free_pte_fast(page_address(ptepage));
-}
-
+#define pte_free_kernel(pte) free_pte_fast(pte)
+#define pte_free(pte) free_pte_fast(page_address(pte))
#define pmd_free(pmd) free_pmd_fast(pmd)
#define pgd_free(pgd) free_pgd_fast(pgd)
#define pgd_alloc(mm) get_pgd_fast()
#include <linux/sched.h>
+/* Certain architectures need to do special things when pte's
+ * within a page table are directly modified. Thus, the following
+ * hook is made available.
+ */
+#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
+
/* Entries per page directory level. */
#define PTRS_PER_PTE (1UL << (PAGE_SHIFT-3))
* is different so we can optimize correctly for 32-bit tasks.
*/
#define REAL_PTRS_PER_PMD (1UL << PMD_BITS)
-
-/* This is gross, but unless we do this gcc retests the
- * thread flag every interation in pmd traversal loops.
- */
-extern unsigned long __ptrs_per_pmd(void) __attribute_const__;
-#define PTRS_PER_PMD __ptrs_per_pmd()
+#define PTRS_PER_PMD ((const int)(test_thread_flag(TIF_32BIT) ? \
+ (1UL << (32 - (PAGE_SHIFT-3) - PAGE_SHIFT)) : \
+ (REAL_PTRS_PER_PMD)))
/*
* We cannot use the top address range because VPTE table lives there. This
((unsigned long) __va((((unsigned long)pgd_val(pgd))<<11UL)))
#define pte_none(pte) (!pte_val(pte))
#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
+#define pte_clear(pte) (pte_val(*(pte)) = 0UL)
#define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_bad(pmd) (0)
#define pmd_present(pmd) (pmd_val(pmd) != 0U)
* Undefined behaviour if not..
*/
#define pte_read(pte) (pte_val(pte) & _PAGE_READ)
-#define pte_exec(pte) (pte_val(pte) & _PAGE_EXEC)
+#define pte_exec(pte) pte_read(pte)
#define pte_write(pte) (pte_val(pte) & _PAGE_WRITE)
#define pte_dirty(pte) (pte_val(pte) & _PAGE_MODIFIED)
#define pte_young(pte) (pte_val(pte) & _PAGE_ACCESSED)
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
-/* Actual page table PTE updates. */
-extern void tlb_batch_add(pte_t *ptep, pte_t orig);
-
-static inline void set_pte(pte_t *ptep, pte_t pte)
-{
- pte_t orig = *ptep;
-
- *ptep = pte;
- if (pte_present(orig))
- tlb_batch_add(ptep, orig);
-}
-
-#define pte_clear(ptep) set_pte((ptep), __pte(0UL))
-
extern pgd_t swapper_pg_dir[1];
/* These do nothing with the way I have things setup. */
{ 0, RLIM_INFINITY}, \
{RLIM_INFINITY, RLIM_INFINITY}, \
{INR_OPEN, INR_OPEN}, {0, 0}, \
- {32768, 32768 }, \
+ {PAGE_SIZE, PAGE_SIZE }, \
{RLIM_INFINITY, RLIM_INFINITY}, \
{RLIM_INFINITY, RLIM_INFINITY}, \
{MAX_SIGPENDING, MAX_SIGPENDING}, \
#ifdef __KERNEL__
-#include <linux/config.h>
#include <linux/compat.h>
-#ifdef CONFIG_COMPAT
-
typedef union sigval32 {
int sival_int;
u32 sival_ptr;
} _sigpoll;
} _sifields;
} siginfo_t32;
-#endif /* CONFIG_COMPAT */
#endif /* __KERNEL__ */
#ifdef __KERNEL__
-#ifdef CONFIG_COMPAT
-
typedef struct sigevent32 {
sigval_t32 sigev_value;
int sigev_signo;
extern int copy_siginfo_to_user32(siginfo_t32 __user *to, siginfo_t *from);
-#endif /* CONFIG_COMPAT */
-
#endif /* __KERNEL__ */
#endif
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
-#include <linux/config.h>
#include <linux/personality.h>
#include <linux/types.h>
#include <linux/compat.h>
};
#ifdef __KERNEL__
-
-#ifdef CONFIG_COMPAT
struct __new_sigaction32 {
unsigned sa_handler;
unsigned int sa_flags;
unsigned sa_restorer; /* not used by Linux/SPARC yet */
compat_sigset_t sa_mask;
};
-#endif
struct k_sigaction {
struct __new_sigaction sa;
};
#ifdef __KERNEL__
-
-#ifdef CONFIG_COMPAT
struct __old_sigaction32 {
unsigned sa_handler;
compat_old_sigset_t sa_mask;
};
#endif
-#endif
-
typedef struct sigaltstack {
void __user *ss_sp;
int ss_flags;
} stack_t;
#ifdef __KERNEL__
-
-#ifdef CONFIG_COMPAT
typedef struct sigaltstack32 {
u32 ss_sp;
int ss_flags;
compat_size_t ss_size;
} stack_t32;
-#endif
struct signal_deliver_cookie {
int restart_syscall;
do { membar("#LoadLoad"); \
} while(*((volatile unsigned char *)lock))
-/* arch/sparc64/lib/spinlock.S */
-extern void _raw_spin_lock(spinlock_t *lock);
+static __inline__ void _raw_spin_lock(spinlock_t *lock)
+{
+ __asm__ __volatile__(
+"1: ldstub [%0], %%g7\n"
+" brnz,pn %%g7, 2f\n"
+" membar #StoreLoad | #StoreStore\n"
+" .subsection 2\n"
+"2: ldub [%0], %%g7\n"
+" brnz,pt %%g7, 2b\n"
+" membar #LoadLoad\n"
+" b,a,pt %%xcc, 1b\n"
+" .previous\n"
+ : /* no outputs */
+ : "r" (lock)
+ : "g7", "memory");
+}
static __inline__ int _raw_spin_trylock(spinlock_t *lock)
{
#include <asm/asi.h>
extern void __memmove(void *,const void *,__kernel_size_t);
+extern __kernel_size_t __memcpy(void *,const void *,__kernel_size_t);
extern void *__memset(void *,int,__kernel_size_t);
+extern void *__builtin_memcpy(void *,const void *,__kernel_size_t);
extern void *__builtin_memset(void *,int,__kernel_size_t);
#ifndef EXPORT_SYMTAB_STROPS
#define __HAVE_ARCH_MEMCPY
-extern void * memcpy(void *,const void *,__kernel_size_t);
+static inline void *__constant_memcpy(void *to, const void *from, __kernel_size_t n)
+{
+ if(n) {
+ if(n <= 32) {
+ __builtin_memcpy(to, from, n);
+ } else {
+ __memcpy(to, from, n);
+ }
+ }
+ return to;
+}
+
+static inline void *__nonconstant_memcpy(void *to, const void *from, __kernel_size_t n)
+{
+ __memcpy(to, from, n);
+ return to;
+}
+
+#undef memcpy
+#define memcpy(t, f, n) \
+(__builtin_constant_p(n) ? \
+ __constant_memcpy((t),(f),(n)) : \
+ __nonconstant_memcpy((t),(f),(n)))
#define __HAVE_ARCH_MEMSET
current_thread_info()->kernel_cntd0 += (unsigned int)(__tmp);\
current_thread_info()->kernel_cntd1 += ((__tmp) >> 32); \
} \
- flush_tlb_pending(); \
save_and_clear_fpu(); \
/* If you are tempted to conditionalize the following */ \
/* so that ASI is only written if it changes, think again. */ \
struct pt_regs *kregs;
struct exec_domain *exec_domain;
int preempt_count;
- int __pad;
unsigned long *utraps;
#define TI_FPREGS 0x00000500
/* We embed this in the uppermost byte of thread_info->flags */
-#define FAULT_CODE_WRITE 0x01 /* Write access, implies D-TLB */
-#define FAULT_CODE_DTLB 0x02 /* Miss happened in D-TLB */
-#define FAULT_CODE_ITLB 0x04 /* Miss happened in I-TLB */
-#define FAULT_CODE_WINFIXUP 0x08 /* Miss happened during spill/fill */
-#define FAULT_CODE_BLKCOMMIT 0x10 /* Use blk-commit ASI in copy_page */
+#define FAULT_CODE_WRITE 0x01 /* Write access, implies D-TLB */
+#define FAULT_CODE_DTLB 0x02 /* Miss happened in D-TLB */
+#define FAULT_CODE_ITLB 0x04 /* Miss happened in I-TLB */
+#define FAULT_CODE_WINFIXUP 0x08 /* Miss happened during spill/fill */
#if PAGE_SHIFT == 13
#define THREAD_SIZE (2*PAGE_SIZE)
#define TIF_NEWSIGNALS 6 /* wants new-style signals */
#define TIF_32BIT 7 /* 32-bit binary */
#define TIF_NEWCHILD 8 /* just-spawned child process */
-/* TIF_* value 9 is available */
+
+/* XXX Make this one FAULT_CODE_BLKCOMMIT XXX */
+#define TIF_BLKCOMMIT 9 /* use ASI_BLK_COMMIT_* in copy_user_page */
+
#define TIF_POLLING_NRFLAG 10
#define TIF_SYSCALL_SUCCESS 11
/* NOTE: Thread flags >= 12 should be ones we have no interest
#define _TIF_NEWSIGNALS (1<<TIF_NEWSIGNALS)
#define _TIF_32BIT (1<<TIF_32BIT)
#define _TIF_NEWCHILD (1<<TIF_NEWCHILD)
+#define _TIF_BLKCOMMIT (1<<TIF_BLKCOMMIT)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
#define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING)
#define _TIF_SYSCALL_SUCCESS (1<<TIF_SYSCALL_SUCCESS)
#ifndef _SPARC64_TLB_H
#define _SPARC64_TLB_H
-#include <linux/config.h>
-#include <linux/swap.h>
-#include <asm/pgalloc.h>
-#include <asm/tlbflush.h>
-#include <asm/mmu_context.h>
+#define tlb_flush(tlb) \
+do { if ((tlb)->fullmm) \
+ flush_tlb_mm((tlb)->mm);\
+} while (0)
-#define TLB_BATCH_NR 192
+#define tlb_start_vma(tlb, vma) \
+do { if (!(tlb)->fullmm) \
+ flush_cache_range(vma, vma->vm_start, vma->vm_end); \
+} while (0)
-/*
- * For UP we don't need to worry about TLB flush
- * and page free order so much..
- */
-#ifdef CONFIG_SMP
- #define FREE_PTE_NR 506
- #define tlb_fast_mode(bp) ((bp)->pages_nr == ~0U)
-#else
- #define FREE_PTE_NR 1
- #define tlb_fast_mode(bp) 1
-#endif
+#define tlb_end_vma(tlb, vma) \
+do { if (!(tlb)->fullmm) \
+ flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
+} while (0)
-struct mmu_gather {
- struct mm_struct *mm;
- unsigned int pages_nr;
- unsigned int need_flush;
- unsigned int tlb_frozen;
- unsigned int tlb_nr;
- unsigned long freed;
- unsigned long vaddrs[TLB_BATCH_NR];
- struct page *pages[FREE_PTE_NR];
-};
+#define __tlb_remove_tlb_entry(tlb, ptep, address) \
+ do { } while (0)
-DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
+#include <asm-generic/tlb.h>
-#ifdef CONFIG_SMP
-extern void smp_flush_tlb_pending(struct mm_struct *,
- unsigned long, unsigned long *);
-#endif
-
-extern void __flush_tlb_pending(unsigned long, unsigned long, unsigned long *);
-extern void flush_tlb_pending(void);
-
-static inline struct mmu_gather *tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
-{
- struct mmu_gather *mp = &per_cpu(mmu_gathers, smp_processor_id());
-
- BUG_ON(mp->tlb_nr);
-
- mp->mm = mm;
- mp->pages_nr = num_online_cpus() > 1 ? 0U : ~0U;
- mp->tlb_frozen = full_mm_flush;
- mp->freed = 0;
-
- return mp;
-}
-
-
-static inline void tlb_flush_mmu(struct mmu_gather *mp)
-{
- if (mp->need_flush) {
- mp->need_flush = 0;
- if (!tlb_fast_mode(mp)) {
- free_pages_and_swap_cache(mp->pages, mp->pages_nr);
- mp->pages_nr = 0;
- }
- }
-
-}
-
-#ifdef CONFIG_SMP
-extern void smp_flush_tlb_mm(struct mm_struct *mm);
-#define do_flush_tlb_mm(mm) smp_flush_tlb_mm(mm)
-#else
-#define do_flush_tlb_mm(mm) __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT)
-#endif
-
-static inline void tlb_finish_mmu(struct mmu_gather *mp, unsigned long start, unsigned long end)
-{
- unsigned long freed = mp->freed;
- struct mm_struct *mm = mp->mm;
- unsigned long rss = mm->rss;
-
- if (rss < freed)
- freed = rss;
- mm->rss = rss - freed;
-
- tlb_flush_mmu(mp);
-
- if (mp->tlb_frozen) {
- unsigned long context = mm->context;
-
- if (CTX_VALID(context))
- do_flush_tlb_mm(mm);
- mp->tlb_frozen = 0;
- } else
- flush_tlb_pending();
-
- /* keep the page table cache within bounds */
- check_pgt_cache();
-}
-
-static inline unsigned int tlb_is_full_mm(struct mmu_gather *mp)
-{
- return mp->tlb_frozen;
-}
-
-static inline void tlb_remove_page(struct mmu_gather *mp, struct page *page)
-{
- mp->need_flush = 1;
- if (tlb_fast_mode(mp)) {
- free_page_and_swap_cache(page);
- return;
- }
- mp->pages[mp->pages_nr++] = page;
- if (mp->pages_nr >= FREE_PTE_NR)
- tlb_flush_mmu(mp);
-}
-
-#define tlb_remove_tlb_entry(mp,ptep,addr) do { } while (0)
-#define pte_free_tlb(mp,ptepage) pte_free(ptepage)
-#define pmd_free_tlb(mp,pmdp) pmd_free(pmdp)
-
-#define tlb_migrate_finish(mm) do { } while (0)
-#define tlb_start_vma(tlb, vma) do { } while (0)
-#define tlb_end_vma(tlb, vma) do { } while (0)
+#define __pmd_free_tlb(tlb, pmd) pmd_free(pmd)
+#define __pte_free_tlb(tlb, pte) pte_free(pte)
#endif /* _SPARC64_TLB_H */
/* TLB flush operations. */
-extern void flush_tlb_pending(void);
-
-#define flush_tlb_range(vma,start,end) \
- do { (void)(start); flush_tlb_pending(); } while (0)
-#define flush_tlb_page(vma,addr) flush_tlb_pending()
-#define flush_tlb_mm(mm) flush_tlb_pending()
-
extern void __flush_tlb_all(void);
+extern void __flush_tlb_mm(unsigned long context, unsigned long r);
+extern void __flush_tlb_range(unsigned long context, unsigned long start,
+ unsigned long r, unsigned long end,
+ unsigned long pgsz, unsigned long size);
extern void __flush_tlb_page(unsigned long context, unsigned long page, unsigned long r);
extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end);
#define flush_tlb_kernel_range(start,end) \
__flush_tlb_kernel_range(start,end)
+#define flush_tlb_mm(__mm) \
+do { if (CTX_VALID((__mm)->context)) \
+ __flush_tlb_mm(CTX_HWBITS((__mm)->context), SECONDARY_CONTEXT); \
+} while (0)
+
+#define flush_tlb_range(__vma, start, end) \
+do { if (CTX_VALID((__vma)->vm_mm->context)) { \
+ unsigned long __start = (start)&PAGE_MASK; \
+ unsigned long __end = PAGE_ALIGN(end); \
+ __flush_tlb_range(CTX_HWBITS((__vma)->vm_mm->context), __start, \
+ SECONDARY_CONTEXT, __end, PAGE_SIZE, \
+ (__end - __start)); \
+ } \
+} while (0)
+
+#define flush_tlb_vpte_range(__mm, start, end) \
+do { if (CTX_VALID((__mm)->context)) { \
+ unsigned long __start = (start)&PAGE_MASK; \
+ unsigned long __end = PAGE_ALIGN(end); \
+ __flush_tlb_range(CTX_HWBITS((__mm)->context), __start, \
+ SECONDARY_CONTEXT, __end, PAGE_SIZE, \
+ (__end - __start)); \
+ } \
+} while (0)
+
+#define flush_tlb_page(vma, page) \
+do { struct mm_struct *__mm = (vma)->vm_mm; \
+ if (CTX_VALID(__mm->context)) \
+ __flush_tlb_page(CTX_HWBITS(__mm->context), (page)&PAGE_MASK, \
+ SECONDARY_CONTEXT); \
+} while (0)
+
+#define flush_tlb_vpte_page(mm, addr) \
+do { struct mm_struct *__mm = (mm); \
+ if (CTX_VALID(__mm->context)) \
+ __flush_tlb_page(CTX_HWBITS(__mm->context), (addr)&PAGE_MASK, \
+ SECONDARY_CONTEXT); \
+} while (0)
+
#else /* CONFIG_SMP */
extern void smp_flush_tlb_all(void);
+extern void smp_flush_tlb_mm(struct mm_struct *mm);
+extern void smp_flush_tlb_range(struct mm_struct *mm, unsigned long start,
+ unsigned long end);
extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
+extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long page);
#define flush_tlb_all() smp_flush_tlb_all()
+#define flush_tlb_mm(mm) smp_flush_tlb_mm(mm)
+#define flush_tlb_range(vma, start, end) \
+ smp_flush_tlb_range((vma)->vm_mm, start, end)
+#define flush_tlb_vpte_range(mm, start, end) \
+ smp_flush_tlb_range(mm, start, end)
#define flush_tlb_kernel_range(start, end) \
smp_flush_tlb_kernel_range(start, end)
+#define flush_tlb_page(vma, page) \
+ smp_flush_tlb_page((vma)->vm_mm, page)
+#define flush_tlb_vpte_page(mm, page) \
+ smp_flush_tlb_page((mm), page)
#endif /* ! CONFIG_SMP */
-extern void flush_tlb_pgtables(struct mm_struct *, unsigned long, unsigned long);
+static __inline__ void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start,
+ unsigned long end)
+{
+ /* Note the signed type. */
+ long s = start, e = end, vpte_base;
+ /* Nobody should call us with start below VM hole and end above.
+ See if it is really true. */
+ BUG_ON(s > e);
+#if 0
+ /* Currently free_pgtables guarantees this. */
+ s &= PMD_MASK;
+ e = (e + PMD_SIZE - 1) & PMD_MASK;
+#endif
+ vpte_base = (tlb_type == spitfire ?
+ VPTE_BASE_SPITFIRE :
+ VPTE_BASE_CHEETAH);
+
+ flush_tlb_vpte_range(mm,
+ vpte_base + (s >> (PAGE_SHIFT - 3)),
+ vpte_base + (e >> (PAGE_SHIFT - 3)));
+}
#endif /* _SPARC64_TLBFLUSH_H */
#else
#define SUNOS_SYSCALL_TRAP TRAP(sunos_syscall)
#endif
-#ifdef CONFIG_COMPAT
#define LINUX_32BIT_SYSCALL_TRAP SYSCALL_TRAP(linux_sparc_syscall32, sys_call_table32)
-#else
-#define LINUX_32BIT_SYSCALL_TRAP BTRAP(0x110)
-#endif
#define LINUX_64BIT_SYSCALL_TRAP SYSCALL_TRAP(linux_sparc_syscall, sys_call_table64)
#define GETCC_TRAP TRAP(getcc)
#define SETCC_TRAP TRAP(setcc)
} \
} while (0)
+extern int foo;
+
#endif
#endif
#define SIGIO_WRITE_IRQ 11
#define TELNETD_IRQ 12
#define XTERM_IRQ 13
-#define HUMFS_IRQ 14
-
-#define LAST_IRQ HUMFS_IRQ
+
+#define LAST_IRQ XTERM_IRQ
#define NR_IRQS (LAST_IRQ + 1)
#endif
unsigned cpu = smp_processor_id();
if(prev != next){
- cpu_clear(cpu, prev->cpu_vm_mask);
- cpu_set(cpu, next->cpu_vm_mask);
+ clear_bit(cpu, &prev->cpu_vm_mask);
+ set_bit(cpu, &next->cpu_vm_mask);
if(next != &init_mm)
CHOOSE_MODE((void) 0,
switch_mm_skas(next->context.skas.mm_fd));
extern struct page *arch_validate(struct page *page, int mask, int order);
#define HAVE_ARCH_VALIDATE
-#define devmem_is_allowed(x) 1
extern void arch_free_page(struct page *page, int order);
#define HAVE_ARCH_FREE_PAGE
/*
- * Copyright (C) 2000 - 2004 Jeff Dike (jdike@addtoit.com)
+ * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
* Licensed under the GPL
*/
struct thread_struct {
int forking;
+ unsigned long kernel_stack;
int nsyscalls;
struct pt_regs regs;
unsigned long cr2;
#define INIT_THREAD \
{ \
.forking = 0, \
+ .kernel_stack = 0, \
.nsyscalls = 0, \
.regs = EMPTY_REGS, \
.cr2 = 0, \
return result + generic_ffs_for_find_next_bit(tmp);
}
-/*
- * find_first_bit - find the first set bit in a memory region
- */
-#define find_first_bit(addr, size) \
- find_next_bit((addr), (size), 0)
-
#define ffs(x) generic_ffs (x)
#define fls(x) generic_fls (x)
#define __va(x) ((void *)__phys_to_virt ((unsigned long)(x)))
-#define devmem_is_allowed(x) 1
-
#endif /* KERNEL */
#endif /* __V850_PAGE_H__ */
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ 0, 0 }, \
{ INR_OPEN, INR_OPEN }, \
- { 32768, 32768 }, \
+ { PAGE_SIZE, PAGE_SIZE }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ MAX_SIGPENDING, MAX_SIGPENDING }, \
typedef u32 compat_sigset_word;
#define COMPAT_OFF_T_MAX 0x7fffffff
-#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL
+#define COMPAT_LOFF_T_MAX 0x7fffffffffffffff
struct compat_ipc64_perm {
compat_key_t key;
#define O_DIRECTORY 0200000 /* must be a directory */
#define O_NOFOLLOW 0400000 /* don't follow links */
#define O_NOATIME 01000000
-#define O_ATOMICLOOKUP 02000000 /* TUX */
#define F_DUPFD 0 /* dup */
#define F_GETFD 1 /* get close_on_exec */
#ifdef CONFIG_X86_IO_APIC
-#ifdef CONFIG_PCI_MSI
+#ifdef CONFIG_PCI_USE_VECTOR
static inline int use_pci_vector(void) {return 1;}
static inline void disable_edge_ioapic_vector(unsigned int vector) { }
static inline void mask_and_ack_level_ioapic_vector(unsigned int vector) { }
#define FIRST_SYSTEM_VECTOR 0xef /* duplicated in hw_irq.h */
-#ifdef CONFIG_PCI_MSI
+#ifdef CONFIG_PCI_USE_VECTOR
#define NR_IRQS FIRST_SYSTEM_VECTOR
#define NR_IRQ_VECTORS NR_IRQS
#else
};
extern unsigned char mp_bus_id_to_type [MAX_MP_BUSSES];
extern int mp_bus_id_to_pci_bus [MAX_MP_BUSSES];
-extern cpumask_t pci_bus_to_cpumask [256];
+extern cpumask_t mp_bus_to_cpumask [MAX_MP_BUSSES];
extern unsigned int boot_cpu_physical_apicid;
extern int smp_found_config;
struct task_struct;
struct vm_area_struct *get_gate_vma(struct task_struct *tsk);
int in_gate_area(struct task_struct *task, unsigned long addr);
-extern int devmem_is_allowed(unsigned long pagenr);
#endif
#endif /* __KERNEL__ */
#define MCA_bus__is_a_macro
+/*
+ * User space process size: 512GB - 1GB (default).
+ */
+#define TASK_SIZE (0x0000007fc0000000UL)
/* This decides where the kernel will search for a free chunk of vm
* space during mmap's.
#define TASK_UNMAPPED_BASE \
(test_thread_flag(TIF_IA32) ? TASK_UNMAPPED_32 : TASK_UNMAPPED_64)
-
-/*
- * User space process size: 512GB - 1GB (default).
- */
-#define TASK_SIZE_64 (0x0000007fc0000000UL)
-
-#define TASK_SIZE (test_thread_flag(TIF_IA32) ? IA32_PAGE_OFFSET : TASK_SIZE_64)
-
/*
* Size of io_bitmap, covering ports 0 to 0x3ff.
*/
#define ARCH_HAS_SCHED_WAKE_IDLE
#endif
-#define HAVE_ARCH_PICK_MMAP_LAYOUT
-
#endif /* __ASM_X86_64_PROCESSOR_H */
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ 0, 0 }, \
{ INR_OPEN, INR_OPEN }, \
- { 32768, 32768 }, \
+ { PAGE_SIZE , PAGE_SIZE }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ MAX_SIGPENDING, MAX_SIGPENDING }, \
static inline cpumask_t pcibus_to_cpumask(int bus)
{
- cpumask_t res;
- cpus_and(res, pci_bus_to_cpumask[bus], cpu_online_map);
- return res;
+ cpumask_t tmp;
+ cpus_and(tmp, mp_bus_to_cpumask[bus], cpu_online_map);
+ return tmp;
}
#define NODE_BALANCE_RATE 30 /* CHECKME */
#define __put_user_check(x,ptr,size) \
({ \
int __pu_err = -EFAULT; \
- __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
+ __typeof__(*(ptr)) *__pu_addr = (ptr); \
if (likely(access_ok(VERIFY_WRITE,__pu_addr,size))) \
__put_user_size((x),__pu_addr,(size),__pu_err); \
__pu_err; \
#define __NR_utimes 235
__SYSCALL(__NR_utimes, sys_utimes)
#define __NR_vserver 236
-__SYSCALL(__NR_vserver, sys_vserver)
+__SYSCALL(__NR_vserver, sys_ni_syscall)
#define __NR_mbind 237
__SYSCALL(__NR_mbind, sys_mbind)
#define __NR_set_mempolicy 238
__SYSCALL(__NR_mq_getsetattr, sys_mq_getsetattr)
#define __NR_kexec_load 246
__SYSCALL(__NR_kexec_load, sys_ni_syscall)
-#define __NR_ioprio_set 247
-__SYSCALL(__NR_ioprio_set, sys_ioprio_set);
-#define __NR_ioprio_get 248
-__SYSCALL(__NR_ioprio_get, sys_ioprio_get);
-#define __NR_syscall_max __NR_ioprio_get
+#define __NR_syscall_max __NR_kexec_load
#ifndef __NO_STUBS
/* user-visible error numbers are in the range -1 - -4095 */
#include <linux/types.h>
#include <asm/param.h>
-#include <asm/byteorder.h>
/*
* comp_t is a 16-bit "floating" point number with a 3-bit base 8
#define ACOMPAT 0x04 /* ... used compatibility mode (VAX only not used) */
#define ACORE 0x08 /* ... dumped core */
#define AXSIG 0x10 /* ... was killed by a signal */
-
-#ifdef __BIG_ENDIAN
-#define ACCT_BYTEORDER 0x80 /* accounting file is big endian */
-#else
-#define ACCT_BYTEORDER 0x00 /* accounting file is little endian */
-#endif
+#define ABYTESEX 0x80 /* always set, allows to detect byteorder */
#ifdef __KERNEL__
#define ata_id_has_lba48(dev) ((dev)->id[83] & (1 << 10))
#define ata_id_has_wcache(dev) ((dev)->id[82] & (1 << 5))
#define ata_id_has_pm(dev) ((dev)->id[82] & (1 << 3))
-#define ata_id_has_lba(dev) ((dev)->id[49] & (1 << 9))
-#define ata_id_has_dma(dev) ((dev)->id[49] & (1 << 8))
+#define ata_id_has_lba(dev) ((dev)->id[49] & (1 << 8))
+#define ata_id_has_dma(dev) ((dev)->id[49] & (1 << 9))
#define ata_id_removeable(dev) ((dev)->id[0] & (1 << 7))
#define ata_id_u32(dev,n) \
(((u32) (dev)->id[(n) + 1] << 16) | ((u32) (dev)->id[(n)]))
struct atm_iobuf {
int length;
- void __user *buffer;
+ void *buffer;
};
/* for ATM_GETCIRANGE / ATM_SETCIRANGE */
#define ATM_CI_MAX -1 /* use maximum range of VPI/VCI */
struct atm_cirange {
- signed char vpi_bits; /* 1..8, ATM_CI_MAX (-1) for maximum */
- signed char vci_bits; /* 1..16, ATM_CI_MAX (-1) for maximum */
+ char vpi_bits; /* 1..8, ATM_CI_MAX (-1) for maximum */
+ char vci_bits; /* 1..16, ATM_CI_MAX (-1) for maximum */
};
/* for ATM_SETSC; actually taken from the ATM_VF number space */
*
* ATM Lan Emulation Daemon vs. driver interface
*
- * mkiiskila@yahoo.com
+ * carnil@cs.tut.fi
*
*/
#define BIO_SEG_VALID 3 /* nr_hw_seg valid */
#define BIO_CLONED 4 /* doesn't own data */
#define BIO_BOUNCED 5 /* bio is a bounce bio */
-#define BIO_USER_MAPPED 6 /* contains user pages */
#define bio_flagged(bio, flag) ((bio)->bi_flags & (1 << (flag)))
/*
extern int bio_get_nr_vecs(struct block_device *);
extern struct bio *bio_map_user(struct request_queue *, struct block_device *,
unsigned long, unsigned int, int);
-extern void bio_unmap_user(struct bio *);
+extern void bio_unmap_user(struct bio *, int);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);
-extern struct bio *bio_copy_user(struct request_queue *, unsigned long, unsigned int, int);
-extern int bio_uncopy_user(struct bio *);
#ifdef CONFIG_HIGHMEM
/*
extern void blk_recount_segments(request_queue_t *, struct bio *);
extern int blk_phys_contig_segment(request_queue_t *q, struct bio *, struct bio *);
extern int blk_hw_contig_segment(request_queue_t *q, struct bio *, struct bio *);
-extern int scsi_cmd_ioctl(struct file *, struct gendisk *, unsigned int, void __user *);
+extern int scsi_cmd_ioctl(struct gendisk *, unsigned int, void __user *);
extern void blk_start_queue(request_queue_t *q);
extern void blk_stop_queue(request_queue_t *q);
extern void __blk_stop_queue(request_queue_t *q);
extern void blk_run_queue(request_queue_t *);
extern void blk_queue_activity_fn(request_queue_t *, activity_fn *, void *);
extern struct request *blk_rq_map_user(request_queue_t *, int, void __user *, unsigned int);
-extern int blk_rq_unmap_user(struct request *, struct bio *, unsigned int);
+extern int blk_rq_unmap_user(struct request *, void __user *, struct bio *, unsigned int);
extern int blk_execute_rq(request_queue_t *, struct gendisk *, struct request *);
static inline request_queue_t *bdev_get_queue(struct block_device *bdev)
int generic_cont_expand(struct inode *inode, loff_t size) ;
int block_commit_write(struct page *page, unsigned from, unsigned to);
int block_sync_page(struct page *);
-void flush_inode_pages (struct inode * inode);
sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
int generic_commit_write(struct file *, struct page *, unsigned, unsigned);
int block_truncate_page(struct address_space *, loff_t, get_block_t *);
extern int cdrom_open(struct cdrom_device_info *cdi, struct inode *ip,
struct file *fp);
extern int cdrom_release(struct cdrom_device_info *cdi, struct file *fp);
-extern int cdrom_ioctl(struct file *file, struct cdrom_device_info *cdi,
- struct inode *ip, unsigned int cmd, unsigned long arg);
+extern int cdrom_ioctl(struct cdrom_device_info *cdi, struct inode *ip,
+ unsigned int cmd, unsigned long arg);
extern int cdrom_media_changed(struct cdrom_device_info *);
extern int register_cdrom(struct cdrom_device_info *cdi);
+++ /dev/null
-/* linux/drivers/block/ckrm_io.c : Block I/O Resource Controller for CKRM
- *
- * Copyright (C) Shailabh Nagar, IBM Corp. 2004
- *
- *
- * Provides best-effort block I/O bandwidth control for CKRM
- * This file provides the CKRM API. The underlying scheduler is a
- * modified Complete-Fair Queueing (CFQ) iosched.
- *
- * Latest version, more details at http://ckrm.sf.net
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- */
-
-/* Changes
- *
- * 29 July 2004
- * Third complete rewrite for CKRM's current API
- *
- */
-
-
-#ifndef _LINUX_CKRM_IO_H
-#define _LINUX_CKRM_IO_H
-
-typedef void *(*icls_tsk_t) (struct task_struct *tsk);
-typedef int (*icls_ioprio_t) (struct task_struct *tsk);
-
-#ifdef CONFIG_CKRM_RES_BLKIO
-
-extern void *cki_tsk_icls (struct task_struct *tsk);
-extern int cki_tsk_ioprio (struct task_struct *tsk);
-
-#endif /* CONFIG_CKRM_RES_BLKIO */
-
-#endif
*
* Latest version, more details at http://ckrm.sf.net
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2.1 of the GNU Lesser General Public License
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
*
*/
CKRM_EVENT_EXEC,
CKRM_EVENT_UID,
CKRM_EVENT_GID,
- CKRM_EVENT_XID,
CKRM_EVENT_LOGIN,
CKRM_EVENT_USERADD,
CKRM_EVENT_USERDEL,
CKRM_DEF_CB_ARG(EXEC, exec, const char *);
CKRM_DEF_CB(UID, uid);
CKRM_DEF_CB(GID, gid);
-CKRM_DEF_CB_ARG(XID, xid, struct task_struct *);
CKRM_DEF_CB(APPTAG, apptag);
CKRM_DEF_CB(LOGIN, login);
CKRM_DEF_CB_ARG(USERADD, useradd, struct user_struct *);
*
* Latest version, more details at http://ckrm.sf.net
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2.1 of the GNU Lesser General Public License
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
*
*/
#ifdef CONFIG_CKRM
-#include <linux/ckrm.h> // getting the event names
+#include "ckrm.h" // getting the event names
/* Action parameters identifying the cause of a task<->class notify callback
* these can perculate up to user daemon consuming records send by the
#include <linux/list.h>
-#define CLASSQUEUE_SIZE 1024 // acb: changed from 128
-//#define CLASSQUEUE_SIZE 128
+#define CLASSQUEUE_SIZE 128
#define CQ_BITMAP_SIZE ((((CLASSQUEUE_SIZE+1+7)/8)+sizeof(long)-1)/sizeof(long))
/**
cq_node_t *classqueue_get_head(struct classqueue_struct *cq);
/*update the base priority of the classqueue*/
-void classqueue_update_base(struct classqueue_struct *cq);
+void classqueue_update_base(struct classqueue_struct *cq, int new_base);
/**
* class_compare_prio: compare the priority of this two nodes
+++ /dev/null
-/* include/linux/ckrm_mem.h : memory control for CKRM
- *
- * Copyright (C) Jiantao Kong, IBM Corp. 2003
- * (C) Shailabh Nagar, IBM Corp. 2003
- * (C) Chandra Seetharaman, IBM Corp. 2004
- *
- *
- * Memory control functions of the CKRM kernel API
- *
- * Latest version, more details at http://ckrm.sf.net
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- */
-
-/* Changes
- *
- * 28 Aug 2003
- * Created.
- */
-
-#ifndef _LINUX_CKRM_MEM_H
-#define _LINUX_CKRM_MEM_H
-
-#ifdef CONFIG_CKRM_RES_MEM
-
-#include <linux/list.h>
-#include <linux/ckrm_rc.h>
-
-typedef struct ckrm_mem_res {
- unsigned long reclaim_flags;
- unsigned long flags;
- struct ckrm_core_class *core; // the core i am part of...
- struct ckrm_core_class *parent; // parent of the core i am part of....
- struct ckrm_shares shares;
- struct list_head mcls_list; // list of all 1-level classes
- struct list_head shrink_list; // list of classes need to be shrunk
- atomic_t nr_users; // # of references to this class/data structure
- atomic_t pg_total; // # of pages used by this class
- int pg_guar; // # of pages this class is guaranteed
- int pg_limit; // max # of pages this class can get
- int pg_borrowed; // # of pages this class borrowed from its parent
- int pg_lent; // # of pages this class lent to its children
- int pg_unused; // # of pages left to this class (after giving the
- // guarantees to children. need to borrow from parent if
- // more than this is needed.
- int nr_active[MAX_NR_ZONES];
- int nr_inactive[MAX_NR_ZONES];
- int tmp_cnt;
- int shrink_count;
- unsigned long last_shrink;
- int over_limit_failures;
- int hier; // hiearchy, root = 0
-} ckrm_mem_res_t;
-
-extern atomic_t ckrm_mem_real_count;
-extern unsigned int ckrm_tot_lru_pages;
-extern struct list_head ckrm_shrink_list;
-extern spinlock_t ckrm_mem_lock;
-extern struct ckrm_res_ctlr mem_rcbs;
-
-#define page_class(page) ((ckrm_mem_res_t*)((page)->memclass))
-
-// used to fill reclaim_flags, used only when memory is low in the system
-#define CLS_CLEAR (0) // class under its guarantee
-#define CLS_OVER_GUAR (1 << 0) // class is over its guarantee
-#define CLS_PARENT_OVER (1 << 1) // parent is over 110% mark over limit
-#define CLS_OVER_25 (1 << 2) // class over 25% mark bet guar(0) & limit(100)
-#define CLS_OVER_50 (1 << 3) // class over 50% mark bet guar(0) & limit(100)
-#define CLS_OVER_75 (1 << 4) // class over 75% mark bet guar(0) & limit(100)
-#define CLS_OVER_100 (1 << 5) // class over its limit
-#define CLS_OVER_110 (1 << 6) // class over 110% mark over limit
-#define CLS_FLAGS_ALL ( CLS_OVER_GUAR | CLS_PARENT_OVER | CLS_OVER_25 | \
- CLS_OVER_50 | CLS_OVER_75 | CLS_OVER_100 | CLS_OVER_110 )
-#define CLS_SHRINK_BIT (31) // used to both lock and set the bit
-#define CLS_SHRINK (1 << CLS_SHRINK_BIT) // shrink the given class
-
-// used in flags. set when a class is more than 90% of its maxlimit
-#define MEM_AT_LIMIT 1
-
-extern void ckrm_set_aggressive(ckrm_mem_res_t *);
-extern unsigned int ckrm_setup_reclamation(void);
-extern void ckrm_teardown_reclamation(void);
-extern void ckrm_get_reclaim_bits(unsigned int *, unsigned int *);
-extern void ckrm_init_mm_to_task(struct mm_struct *, struct task_struct *);
-extern void ckrm_mem_evaluate_mm(struct mm_struct *);
-extern void ckrm_at_limit(ckrm_mem_res_t *);
-extern int ckrm_memclass_valid(ckrm_mem_res_t *);
-#define ckrm_get_reclaim_flags(cls) ((cls)->reclaim_flags)
-
-#else
-
-#define ckrm_init_mm_to_current(a) do {} while (0)
-#define ckrm_mem_evaluate_mm(a) do {} while (0)
-#define ckrm_get_reclaim_flags(a) (0)
-#define ckrm_setup_reclamation() (0)
-#define ckrm_teardown_reclamation() do {} while (0)
-#define ckrm_get_reclaim_bits(a, b) do { *(a) = 0; *(b)= 0; } while (0)
-#define ckrm_init_mm_to_task(a,b) do {} while (0)
-
-#endif // CONFIG_CKRM_RES_MEM
-
-#endif //_LINUX_CKRM_MEM_H
-
+++ /dev/null
-/* include/linux/ckrm_mem_inline.h : memory control for CKRM
- *
- * Copyright (C) Jiantao Kong, IBM Corp. 2003
- * (C) Shailabh Nagar, IBM Corp. 2003
- * (C) Chandra Seetharaman, IBM Corp. 2004
- *
- *
- * Memory control functions of the CKRM kernel API
- *
- * Latest version, more details at http://ckrm.sf.net
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- */
-
-/* Changes
- *
- * 28 Aug 2003
- * Created.
- */
-
-
-#ifndef _LINUX_CKRM_MEM_INLINE_H_
-#define _LINUX_CKRM_MEM_INLINE_H_
-
-#include <linux/rmap.h>
-#include <linux/mmzone.h>
-#include <linux/ckrm_mem.h>
-
-
-#ifdef CONFIG_CKRM_RES_MEM
-
-#define GET_MEM_CLASS(tsk) \
- ckrm_get_res_class(tsk->taskclass, mem_rcbs.resid, ckrm_mem_res_t)
-
-#define ckrm_set_shrink(cls) \
- set_bit(CLS_SHRINK_BIT, (unsigned long *)&(cls)->reclaim_flags)
-#define ckrm_test_set_shrink(cls) \
- test_and_set_bit(CLS_SHRINK_BIT, (unsigned long *)&(cls)->reclaim_flags)
-#define ckrm_clear_shrink(cls) \
- clear_bit(CLS_SHRINK_BIT, (unsigned long *)&(cls)->reclaim_flags)
-
-#define ckrm_shrink_list_empty() list_empty(&ckrm_shrink_list)
-
-/*
- * Currently, the class of an address is assigned to the class with max
- * available guarantee. Simply replace this function for other policies.
- */
-static inline int
-ckrm_mem_share_compare(ckrm_mem_res_t *a, ckrm_mem_res_t *b)
-{
- if (a == NULL)
- return -(b != NULL) ;
- if (b == NULL)
- return 0;
- if (a->pg_guar == CKRM_SHARE_DONTCARE)
- return 1;
- if (b->pg_guar == CKRM_SHARE_DONTCARE)
- return -1;
- return (a->pg_unused - b->pg_unused);
-}
-
-static inline void
-mem_class_get(ckrm_mem_res_t *cls)
-{
- if (cls)
- atomic_inc(&((cls)->nr_users));
-}
-
-static inline void
-mem_class_put(ckrm_mem_res_t *cls)
-{
- const char *name;
-
- if (cls && atomic_dec_and_test(&(cls->nr_users)) ) {
- if (cls->core == NULL) {
- name = "unknown";
- } else {
- name = cls->core->name;
- }
- printk(KERN_DEBUG "freeing memclass %p of <core:%s>\n", cls, name);
-
- // BUG_ON(ckrm_memclass_valid(cls));
- // kfree(cls);
- }
-}
-
-static inline void
-incr_use_count(ckrm_mem_res_t *cls, int borrow)
-{
- atomic_inc(&cls->pg_total);
-
- if (borrow)
- cls->pg_lent++;
- if ((cls->pg_guar == CKRM_SHARE_DONTCARE) ||
- (atomic_read(&cls->pg_total) > cls->pg_unused)) {
- ckrm_mem_res_t *parcls = ckrm_get_res_class(cls->parent,
- mem_rcbs.resid, ckrm_mem_res_t);
- if (parcls) {
- incr_use_count(parcls, 1);
- cls->pg_borrowed++;
- }
- } else {
- atomic_inc(&ckrm_mem_real_count);
- }
- if ((cls->pg_limit != CKRM_SHARE_DONTCARE) &&
- (atomic_read(&cls->pg_total) >= cls->pg_limit) &&
- ((cls->flags & MEM_AT_LIMIT) != MEM_AT_LIMIT)) {
- ckrm_at_limit(cls);
- }
- return;
-}
-
-static inline void
-decr_use_count(ckrm_mem_res_t *cls, int borrowed)
-{
- atomic_dec(&cls->pg_total);
- if (borrowed)
- cls->pg_lent--;
- if (cls->pg_borrowed > 0) {
- ckrm_mem_res_t *parcls = ckrm_get_res_class(cls->parent,
- mem_rcbs.resid, ckrm_mem_res_t);
- if (parcls) {
- decr_use_count(parcls, 1);
- cls->pg_borrowed--;
- return;
- }
- }
- atomic_dec(&ckrm_mem_real_count);
-}
-
-static inline void
-ckrm_set_page_class(struct page *page, ckrm_mem_res_t *cls)
-{
- if (mem_rcbs.resid != -1 && cls != NULL) {
- if (unlikely(page->memclass)) {
- mem_class_put(page->memclass);
- }
- page->memclass = cls;
- mem_class_get(cls);
- } else {
- page->memclass = NULL;
- }
-}
-
-static inline void
-ckrm_set_pages_class(struct page *pages, int numpages, ckrm_mem_res_t *cls)
-{
- int i;
- for (i = 0; i < numpages; pages++, i++) {
- ckrm_set_page_class(pages, cls);
- }
-}
-
-static inline void
-ckrm_clear_page_class(struct page *page)
-{
- if (page->memclass != NULL) {
- mem_class_put(page->memclass);
- page->memclass = NULL;
- }
-}
-
-static inline void
-ckrm_clear_pages_class(struct page *pages, int numpages)
-{
- int i;
- for (i = 0; i < numpages; pages++, i++) {
- ckrm_clear_page_class(pages);
- }
-}
-
-static inline void
-ckrm_change_page_class(struct page *page, ckrm_mem_res_t *newcls)
-{
- ckrm_mem_res_t *oldcls = page_class(page);
-
- if (!newcls || oldcls == newcls)
- return;
-
- ckrm_clear_page_class(page);
- ckrm_set_page_class(page, newcls);
- if (test_bit(PG_ckrm_account, &page->flags)) {
- decr_use_count(oldcls, 0);
- incr_use_count(newcls, 0);
- if (PageActive(page)) {
- oldcls->nr_active[page_zonenum(page)]--;
- newcls->nr_active[page_zonenum(page)]++;
- } else {
- oldcls->nr_inactive[page_zonenum(page)]--;
- newcls->nr_inactive[page_zonenum(page)]++;
- }
- }
-}
-
-static inline void
-ckrm_change_pages_class(struct page *pages, int numpages,
- ckrm_mem_res_t *cls)
-{
- int i;
- for (i = 0; i < numpages; pages++, i++) {
- ckrm_change_page_class(pages, cls);
- }
-}
-
-static inline void
-ckrm_mem_inc_active(struct page *page)
-{
- ckrm_mem_res_t *cls = page_class(page), *curcls;
- if (unlikely(!cls)) {
- return;
- }
- BUG_ON(test_bit(PG_ckrm_account, &page->flags));
- if (unlikely(cls != (curcls = GET_MEM_CLASS(current)))) {
- cls = curcls;
- ckrm_change_page_class(page, cls);
- }
- cls->nr_active[page_zonenum(page)]++;
- incr_use_count(cls, 0);
- set_bit(PG_ckrm_account, &page->flags);
-}
-
-static inline void
-ckrm_mem_dec_active(struct page *page)
-{
- ckrm_mem_res_t *cls = page_class(page);
- if (unlikely(!cls)) {
- return;
- }
- BUG_ON(!test_bit(PG_ckrm_account, &page->flags));
- cls->nr_active[page_zonenum(page)]--;
- decr_use_count(cls, 0);
- clear_bit(PG_ckrm_account, &page->flags);
-}
-
-static inline void
-ckrm_mem_inc_inactive(struct page *page)
-{
- ckrm_mem_res_t *cls = page_class(page), *curcls;
- if (unlikely(!cls)) {
- return;
- }
- BUG_ON(test_bit(PG_ckrm_account, &page->flags));
- if (unlikely(cls != (curcls = GET_MEM_CLASS(current)))) {
- cls = curcls;
- ckrm_change_page_class(page, cls);
- }
- cls->nr_inactive[page_zonenum(page)]++;
- incr_use_count(cls, 0);
- set_bit(PG_ckrm_account, &page->flags);
-}
-
-static inline void
-ckrm_mem_dec_inactive(struct page *page)
-{
- ckrm_mem_res_t *cls = page_class(page);
- if (unlikely(!cls)) {
- return;
- }
- BUG_ON(!test_bit(PG_ckrm_account, &page->flags));
- cls->nr_inactive[page_zonenum(page)]--;
- decr_use_count(cls, 0);
- clear_bit(PG_ckrm_account, &page->flags);
-}
-
-static inline int
-ckrm_kick_page(struct page *page, unsigned int bits)
-{
- if (page_class(page) == NULL) {
- return bits;
- } else {
- return (page_class(page)->reclaim_flags & bits);
- }
-}
-
-static inline int
-ckrm_class_limit_ok(ckrm_mem_res_t *cls)
-{
- if ((mem_rcbs.resid == -1) || !cls) {
- return 1;
- }
- if (cls->pg_limit == CKRM_SHARE_DONTCARE) {
- ckrm_mem_res_t *parcls = ckrm_get_res_class(cls->parent,
- mem_rcbs.resid, ckrm_mem_res_t);
- return (!parcls ?: ckrm_class_limit_ok(parcls));
- } else {
- return (atomic_read(&cls->pg_total) <= (11 * cls->pg_limit) / 10);
- }
-}
-
-#else // !CONFIG_CKRM_RES_MEM
-
-#define ckrm_set_page_class(a,b) do{}while(0)
-#define ckrm_set_pages_class(a,b,c) do{}while(0)
-#define ckrm_clear_page_class(a) do{}while(0)
-#define ckrm_clear_pages_class(a,b) do{}while(0)
-#define ckrm_change_page_class(a,b) do{}while(0)
-#define ckrm_change_pages_class(a,b,c) do{}while(0)
-#define ckrm_mem_inc_active(a) do{}while(0)
-#define ckrm_mem_dec_active(a) do{}while(0)
-#define ckrm_mem_inc_inactive(a) do{}while(0)
-#define ckrm_mem_dec_inactive(a) do{}while(0)
-#define ckrm_shrink_list_empty() (1)
-#define ckrm_kick_page(a,b) (0)
-#define ckrm_class_limit_ok(a) (1)
-
-#endif // CONFIG_CKRM_RES_MEM
-
-#endif // _LINUX_CKRM_MEM_INLINE_H_
int num_classes;
/* state about my ce interaction */
- atomic_t ce_regd; // if CE registered
+ int ce_regd; // if CE registered
int ce_cb_active; // if Callbacks active
atomic_t ce_nr_users; // number of active transient calls
struct ckrm_eng_callback ce_callbacks; // callback engine
* OTHER
******************************************************************************/
-#define ckrm_get_res_class(rescls, resid, type) \
- ((type*) (((resid != -1) && ((rescls) != NULL) \
- && ((rescls) != (void *)-1)) ? \
- ((struct ckrm_core_class *)(rescls))->res_class[resid] : NULL))
-
+#define ckrm_get_res_class(rescls,resid,type) ((type*)((rescls)->res_class[resid]))
extern int ckrm_register_res_ctlr(struct ckrm_classtype *, ckrm_res_ctlr_t *);
extern int ckrm_unregister_res_ctlr(ckrm_res_ctlr_t *);
#ifndef _CKRM_SCHED_H
#define _CKRM_SCHED_H
+#define CC_BUG_ON_DO(cond,action) do { if (cond) action; BUG_ON(cond); } while(0)
+#define CC_BUG_ON(cond) BUG_ON(cond)
+
#include <linux/sched.h>
#include <linux/ckrm_rc.h>
#include <linux/ckrm_classqueue.h>
-#define BITMAP_SIZE ((((MAX_PRIO+1+7)/8)+sizeof(long)-1)/sizeof(long))
+//update every second
+#define CVT_UPDATE_TICK (1*HZ/1 ?: 1)
+#define CLASS_BONUS_RATE 22 // shift from ns to increase class bonus
+#define PRIORITY_BONUS_RATE 0 // ?? Hubertus
+#define BITMAP_SIZE ((((MAX_PRIO+1+7)/8)+sizeof(long)-1)/sizeof(long))
struct prio_array {
- unsigned int nr_active;
+ int nr_active;
unsigned long bitmap[BITMAP_SIZE];
struct list_head queue[MAX_PRIO];
};
-#ifdef CONFIG_CKRM_CPU_SCHEDULE
-#define rq_active(p,rq) (get_task_lrq(p)->active)
-#define rq_expired(p,rq) (get_task_lrq(p)->expired)
-int __init init_ckrm_sched_res(void);
-#else
-#define rq_active(p,rq) (rq->active)
-#define rq_expired(p,rq) (rq->expired)
-static inline void init_ckrm_sched_res(void) {}
-static inline int ckrm_cpu_monitor_init(void) {return 0;}
-#endif //CONFIG_CKRM_CPU_SCHEDULE
-
-#ifdef CONFIG_CKRM_CPU_SCHEDULE
-struct ckrm_runqueue {
+struct ckrm_local_runqueue {
cq_node_t classqueue_linkobj; /*links in classqueue */
struct ckrm_cpu_class *cpu_class; // class it belongs to
struct classqueue_struct *classqueue; // classqueue it belongs tow
+ CVT_t uncounted_cvt;
unsigned long long uncounted_ns;
prio_array_t *active, *expired, arrays[2];
* updated on enqueue, dequeue
*/
int top_priority;
- CVT_t local_cvt;
-
- unsigned long lrq_load;
- int local_weight;
-
-
- /*
- * unused CPU time accumulated while thoe class
- * is inactive goes to savings
- *
- * initialized to be 0
- * a class can't accumulate more than SAVING_THRESHOLD of savings
- */
- unsigned long long savings;
-
+ CVT_t local_cvt; // snapshot of local_cvt, update on every loadbalance
unsigned long magic; //for debugging
};
-typedef struct ckrm_runqueue ckrm_lrq_t;
+/**
+ * @last_sleep: the last time it sleeps, last_sleep = 0 when not sleeping
+ */
+struct ckrm_cpu_class_local_stat {
+ unsigned long long run;
+ unsigned long long total;
+ unsigned long long last_sleep;
+ unsigned long cpu_demand; /*estimated cpu demand */
+};
/**
* ckrm_cpu_class_stat - cpu usage statistics maintained for each class
unsigned long long total_ns; /*how much nano-secs it has consumed */
- struct ckrm_cpu_demand_stat local_stats[NR_CPUS];
-
- /*
- *
- */
- unsigned long max_demand; /* the maximun a class can consume */
- int egrt,megrt; /*effective guarantee*/
- int ehl,mehl; /*effective hard limit, my effective hard limit*/
+ struct ckrm_cpu_class_local_stat local_stats[NR_CPUS];
+ unsigned long cpu_demand;
+ /*temp stat used by cpu monitor */
+ int effective_guarantee;
+ int effective_limit;
+ int glut; //true or false
/*
- * eshare: for both default class and its children
- * meshare: just for the default class
+ * effective_share: for both default class and its children
+ * self_effective_share: just for the default class
*/
- int eshare;
- int meshare;
+ int effective_share;
+ int self_effective_share;
};
-#define CKRM_CPU_CLASS_MAGIC 0x7af2abe3
-
-#define USAGE_SAMPLE_FREQ HZ //sample every 1 seconds
-#define NS_PER_SAMPLE (USAGE_SAMPLE_FREQ*(NSEC_PER_SEC/HZ))
-#define USAGE_WINDOW_SIZE 60 //keep the last 60 sample
-
-struct ckrm_usage {
- unsigned long samples[USAGE_WINDOW_SIZE]; //record usages
- unsigned long sample_pointer; //pointer for the sliding window
- unsigned long long last_ns; //ns for last sample
- long long last_sample_jiffies; //in number of jiffies
-};
+typedef struct ckrm_cpu_class_stat ckrm_stat_t;
/*
* manages the class status
struct ckrm_core_class *parent;
struct ckrm_shares shares;
spinlock_t cnt_lock; // always grab parent's lock first and then child's
+ CVT_t global_cvt; // total cummulative virtual time
struct ckrm_cpu_class_stat stat;
struct list_head links; // for linking up in cpu classes
- ckrm_lrq_t local_queues[NR_CPUS]; // runqueues
- struct ckrm_usage usage;
- unsigned long magic; //for debugging
+ struct ckrm_local_runqueue local_queues[NR_CPUS]; // runqueues
};
-#define cpu_class_weight(cls) (cls->stat.meshare)
-#define local_class_weight(lrq) (lrq->local_weight)
-
-static inline int valid_cpu_class(struct ckrm_cpu_class * cls)
-{
- return (cls && cls->magic == CKRM_CPU_CLASS_MAGIC);
-}
-
-struct classqueue_struct *get_cpu_classqueue(int cpu);
-struct ckrm_cpu_class * get_default_cpu_class(void);
-
-
-static inline void ckrm_usage_init(struct ckrm_usage* usage)
-{
- int i;
-
- for (i=0; i < USAGE_WINDOW_SIZE; i++)
- usage->samples[i] = 0;
- usage->sample_pointer = 0;
- usage->last_ns = 0;
- usage->last_sample_jiffies = 0;
-}
-
-/*
- * this function can be called at any frequency
- * it's self-contained
- */
-static inline void ckrm_sample_usage(struct ckrm_cpu_class* clsptr)
-{
- struct ckrm_usage* usage = &clsptr->usage;
- unsigned long long cur_sample;
- int duration = jiffies - usage->last_sample_jiffies;
-
- //jiffies wasn't start from 0
- //so it need to be properly handled
- if (unlikely(!usage->last_sample_jiffies))
- usage->last_sample_jiffies = jiffies;
-
- //called too frequenctly
- if (duration < USAGE_SAMPLE_FREQ)
- return;
-
- usage->last_sample_jiffies = jiffies;
-
- cur_sample = clsptr->stat.total_ns - usage->last_ns;
- usage->last_ns = clsptr->stat.total_ns;
+#if CONFIG_CKRM_CPU_SCHEDULE
+#define rq_active(p,rq) (get_task_class_queue(p)->active)
+#define rq_expired(p,rq) (get_task_class_queue(p)->expired)
+#else
+#define rq_active(p,rq) (rq->active)
+#define rq_expired(p,rq) (rq->expired)
+#endif
- //scale it based on the sample duration
- cur_sample *= ((USAGE_SAMPLE_FREQ<< 15)/duration);
- cur_sample >>= 15;
- usage->samples[usage->sample_pointer] = cur_sample;
- // printk("sample = %llu jiffies=%lu \n",cur_sample, jiffies);
+//#define cpu_class_weight(cls) (cls->shares.my_guarantee)
+#define cpu_class_weight(cls) (cls->stat.self_effective_share)
- usage->sample_pointer ++;
- if (usage->sample_pointer >= USAGE_WINDOW_SIZE)
- usage->sample_pointer = 0;
-}
+#define bpt_queue(cpu) (& (cpu_rq(cpu)->classqueue) )
+CVT_t get_min_cvt(int cpu);
-//duration is specified in number of jiffies
-//return the usage in percentage
-static inline int get_ckrm_usage(struct ckrm_cpu_class* clsptr, int duration)
-{
- int nr_samples = duration/USAGE_SAMPLE_FREQ?:1;
- struct ckrm_usage* usage = &clsptr->usage;
- unsigned long long total = 0;
- int i, idx;
-
- if (nr_samples > USAGE_WINDOW_SIZE)
- nr_samples = USAGE_WINDOW_SIZE;
-
- idx = usage->sample_pointer;
- for (i = 0; i< nr_samples; i++) {
- if (! idx)
- idx = USAGE_WINDOW_SIZE;
- idx --;
- total += usage->samples[idx];
- }
- total *= 100;
- do_div(total,nr_samples);
- do_div(total,NS_PER_SAMPLE);
- do_div(total,cpus_weight(cpu_online_map));
- return total;
-}
+struct classqueue_struct *get_cpu_classqueue(int cpu);
+extern struct ckrm_cpu_class default_cpu_class_obj;
+#define default_cpu_class (&default_cpu_class_obj)
-#define lrq_nr_running(lrq) \
- (lrq->active->nr_active + lrq->expired->nr_active)
+#define local_queue_nr_running(local_queue) \
+ (local_queue->active->nr_active + local_queue->expired->nr_active)
-static inline ckrm_lrq_t *
-get_ckrm_lrq(struct ckrm_cpu_class*cls, int cpu)
+static inline struct ckrm_local_runqueue *
+get_ckrm_local_runqueue(struct ckrm_cpu_class*cls, int cpu)
{
return &(cls->local_queues[cpu]);
}
-static inline ckrm_lrq_t *get_task_lrq(struct task_struct *p)
+static inline struct ckrm_local_runqueue *get_task_class_queue(struct task_struct *p)
{
return &(p->cpu_class->local_queues[task_cpu(p)]);
}
#define task_list_entry(list) list_entry(list,struct task_struct,run_list)
-#define class_list_entry(list) list_entry(list,struct ckrm_runqueue,classqueue_linkobj)
+#define class_list_entry(list) list_entry(list,struct ckrm_local_runqueue,classqueue_linkobj)
/* some additional interfaces exported from sched.c */
struct runqueue;
+void dequeue_task(struct task_struct *p, prio_array_t * array);
+void enqueue_task(struct task_struct *p, prio_array_t * array);
+struct runqueue *task_rq_lock(task_t * p, unsigned long *flags);
+void task_rq_unlock(struct runqueue *rq, unsigned long *flags);
+extern spinlock_t cvt_lock;
extern rwlock_t class_list_lock;
extern struct list_head active_cpu_classes;
-unsigned int task_timeslice(task_t *p);
-void _ckrm_cpu_change_class(task_t *task, struct ckrm_cpu_class *newcls);
+/*functions exported by ckrm_cpu_class.c*/
+int __init init_ckrm_sched_res(void);
void init_cpu_classes(void);
-void init_cpu_class(struct ckrm_cpu_class *cls,ckrm_shares_t* shares);
-void ckrm_cpu_change_class(void *task, void *old, void *new);
-
+/*functions exported by ckrm_cpu_monitor.c*/
+void ckrm_cpu_monitor(void);
+void ckrm_cpu_stat_init(struct ckrm_cpu_class_stat *stat);
#define CPU_DEMAND_ENQUEUE 0
#define CPU_DEMAND_DEQUEUE 1
#define CPU_DEMAND_DESCHEDULE 2
-#define CPU_DEMAND_INIT 3
-
-/*functions exported by ckrm_cpu_monitor.c*/
-void ckrm_cpu_monitor(int check_min);
-int ckrm_cpu_monitor_init(void);
-void ckrm_cpu_stat_init(struct ckrm_cpu_class_stat *stat);
-void cpu_demand_event(struct ckrm_cpu_demand_stat* local_stat, int event, unsigned long long len);
-void adjust_local_weight(void);
-
-#define get_task_lrq_stat(p) (&(p)->cpu_class->stat.local_stats[task_cpu(p)])
-#define get_cls_local_stat(cls,cpu) (&(cls)->stat.local_stats[cpu])
-#define get_rq_local_stat(lrq,cpu) (get_cls_local_stat((lrq)->cpu_class,cpu))
-
-/********************************************************************
- * Parameters that determine how quickly CVT's progress and how
- * priority can impact a LRQ's runqueue position. See also
- * get_effective_prio(). These parameters need to adjusted
- * in accordance to the following example and understanding.
- *
- * CLASS_QUANTIZER:
- *
- * A class with 50% share, can execute 500 ms / per sec ~ 2^29 ns.
- * It's share will be set to 512 = 2^9. The globl CLASSQUEUE_SIZE is set to 2^7.
- * With CLASS_QUANTIZER=16, the local_cvt of this class will increase
- * by 2^29/2^9 = 2^20 = 1024K.
- * Setting CLASS_QUANTIZER to 16, 2^(20-16) = 16 slots / per second.
- * Do the same math, a class with any share value, will cover 16 slots / per second.
- * So 2^8 total slots is good track for 8 seconds of system execution
- *
- * PRIORITY_QUANTIZER:
- *
- * How much can top priorities of class impact slot bonus.
- * There are 40 nice priorities, range from -20 to 19, with default nice = 0
- * "2" will allow upto 5 slots improvement
- * when certain task within the class has a nice value of -20
- * in the RQ thus for 50% class it can perform ~300 msec starvation.
- *
- *******************************************************************/
-
-#define CLASS_QUANTIZER 16 //shift from ns to increase class bonus
-#define PRIORITY_QUANTIZER 2 //controls how much a high prio task can borrow
-
-#define CKRM_SHARE_ACCURACY 13
-#define NSEC_PER_MS 1000000
-#define NSEC_PER_JIFFIES (NSEC_PER_SEC/HZ)
-
-
-#define MAX_SAVINGS_ABSOLUTE (10LLU*NSEC_PER_SEC) // 10 seconds
-
-#define CVT_UPDATE_TICK ((HZ/2)?:1)
-
-// ABSOLUTE_CKRM_TUNING determines whether classes can make up
-// lost time in absolute time or in relative values
-
-#define ABSOLUTE_CKRM_TUNING // preferred due to more predictable behavior
-
-#ifdef ABSOLUTE_CKRM_TUNING
-
-#define MAX_SAVINGS MAX_SAVINGS_ABSOLUTE
-//an absolute bonus of 200ms for classes when reactivated
-#define INTERACTIVE_BONUS(lrq) ((200*NSEC_PER_MS)/local_class_weight(lrq))
-#define SAVINGS_LEAK_SPEED (CVT_UPDATE_TICK/10*NSEC_PER_JIFFIES)
-
-#define scale_cvt(val,lrq) ((val)*local_class_weight(lrq))
-#define unscale_cvt(val,lrq) (do_div(val,local_class_weight(lrq)))
-
-#else
-
-#define MAX_SAVINGS (MAX_SAVINGS_ABSOLUTE >> CKRM_SHARE_ACCURACY)
-/*
- * to improve system responsiveness
- * an inactive class is put a little bit ahead of the current class when it wakes up
- * the amount is set in normalized term to simplify the calculation
- * for class with 100% share, it can be 2s ahead
- * while for class with 10% share, it can be 200ms ahead
- */
-#define INTERACTIVE_BONUS(lrq) (2*NSEC_PER_MS)
-
-/*
- * normalized savings can't be more than MAX_NORMALIZED_SAVINGS
- * based on the current configuration
- * this means that a class with share 100% will accumulate 10s at most
- * while a class with 1% of the share can only accumulate 100ms
- */
-
-//a class with share 100% can get 100ms every 500ms
-//while a class with share 10% can only get 10ms every 500ms
-#define SAVINGS_LEAK_SPEED ((CVT_UPDATE_TICK/5*NSEC_PER_JIFFIES) >> CKRM_SHARE_ACCURACY)
-
-#define scale_cvt(val,lrq) (val)
-#define unscale_cvt(val,lrq) (val)
-
-#endif
+void cpu_demand_event(struct ckrm_cpu_class_local_stat* local_stat, int event, unsigned long long len);
+#define get_task_local_stat(p) (&(p)->cpu_class->stat.local_stats[task_cpu(p)])
+#define get_rq_local_stat(lrq,cpu) (&(lrq)->cpu_class->stat.local_stats[cpu])
/**
* get_effective_prio: return the effective priority of a class local queue
* currently, prio increases by 1 if either: top_priority increase by one
* or, local_cvt increases by 4ms
*/
-static inline int get_effective_prio(ckrm_lrq_t * lrq)
+static inline int get_effective_prio(struct ckrm_local_runqueue * lcq)
{
int prio;
- prio = lrq->local_cvt >> CLASS_QUANTIZER; // cumulative usage
-#ifndef URGENCY_SUPPORT
-#warning "ACB removing urgency calculation from get_effective_prio"
-#else
- prio += lrq->top_priority >> PRIORITY_QUANTIZER; // queue urgency
-#endif
+ // cumulative usage
+ prio = lcq->local_cvt >> CLASS_BONUS_RATE;
+ // queue urgency
+ prio += lcq->top_priority >> PRIORITY_BONUS_RATE;
return prio;
}
-CVT_t get_local_cur_cvt(int cpu);
-
/**
* update_class_priority:
*
* -- rq_get_next_task (queue switch)
* -- update_local_cvt
* -- schedule
+ * -- update_global_cvt
*/
-static inline void update_class_priority(ckrm_lrq_t *local_rq)
+static inline void update_class_priority(struct ckrm_local_runqueue *local_rq)
{
int effective_prio = get_effective_prio(local_rq);
classqueue_update_prio(local_rq->classqueue,
* set the new top priority and reposition the queue
* called when: task enqueue/dequeue and queue switch
*/
-static inline void set_top_priority(ckrm_lrq_t *lrq,
+static inline void set_top_priority(struct ckrm_local_runqueue *class_queue,
int new_priority)
{
- lrq->top_priority = new_priority;
- update_class_priority(lrq);
-}
-
-/*
- * task_load: how much load this task counts
- */
-static inline unsigned long task_load(struct task_struct* p)
-{
- return (task_timeslice(p) * p->demand_stat.cpu_demand);
-}
-
-/*
- * runqueue load is the local_weight of all the classes on this cpu
- * must be called with class_list_lock held
- */
-static inline unsigned long ckrm_cpu_load(int cpu)
-{
- struct ckrm_cpu_class *clsptr;
- ckrm_lrq_t* lrq;
- struct ckrm_cpu_demand_stat* l_stat;
- int total_load = 0;
- int load;
-
- list_for_each_entry(clsptr,&active_cpu_classes,links) {
- lrq = get_ckrm_lrq(clsptr,cpu);
- l_stat = get_cls_local_stat(clsptr,cpu);
- load = lrq->local_weight;
- if (l_stat->cpu_demand < load)
- load = l_stat->cpu_demand;
- total_load += load;
- }
- return total_load;
+ class_queue->top_priority = new_priority;
+ update_class_priority(class_queue);
}
static inline void class_enqueue_task(struct task_struct *p,
prio_array_t * array)
{
- ckrm_lrq_t *lrq;
+ struct ckrm_local_runqueue *queue;
int effective_prio;
- lrq = get_task_lrq(p);
-
- cpu_demand_event(&p->demand_stat,CPU_DEMAND_ENQUEUE,0);
- lrq->lrq_load += task_load(p);
+ queue = get_task_class_queue(p);
- if ((p->prio < lrq->top_priority) && (array == lrq->active))
- set_top_priority(lrq, p->prio);
-
- if (! cls_in_classqueue(&lrq->classqueue_linkobj)) {
- cpu_demand_event(get_task_lrq_stat(p),CPU_DEMAND_ENQUEUE,0);
- effective_prio = get_effective_prio(lrq);
- classqueue_enqueue(lrq->classqueue, &lrq->classqueue_linkobj, effective_prio);
+ if (! cls_in_classqueue(&queue->classqueue_linkobj)) {
+ cpu_demand_event(get_task_local_stat(p),CPU_DEMAND_ENQUEUE,0);
+ /*make sure the cvt of this class is up to date*/
+ queue->local_cvt = get_min_cvt(task_cpu(p));
+ effective_prio = get_effective_prio(queue);
+ classqueue_enqueue(queue->classqueue, &queue->classqueue_linkobj, effective_prio);
}
+
+ if ((p->prio < queue->top_priority) && (array == queue->active))
+ set_top_priority(queue, p->prio);
}
static inline void class_dequeue_task(struct task_struct *p,
prio_array_t * array)
{
- ckrm_lrq_t *lrq = get_task_lrq(p);
- unsigned long load = task_load(p);
+ struct ckrm_local_runqueue *queue = get_task_class_queue(p);
- BUG_ON(lrq->lrq_load < load);
- lrq->lrq_load -= load;
-
- cpu_demand_event(&p->demand_stat,CPU_DEMAND_DEQUEUE,0);
-
- if ((array == lrq->active) && (p->prio == lrq->top_priority)
+ if ((array == queue->active) && (p->prio == queue->top_priority)
&& list_empty(&(array->queue[p->prio])))
- set_top_priority(lrq,
+ set_top_priority(queue,
find_next_bit(array->bitmap, MAX_PRIO,
p->prio));
}
*/
static inline void update_local_cvt(struct task_struct *p, unsigned long nsec)
{
- ckrm_lrq_t * lrq = get_task_lrq(p);
-
- unsigned long cvt_inc = nsec / local_class_weight(lrq);
-
- lrq->local_cvt += cvt_inc;
- lrq->uncounted_ns += nsec;
+ struct ckrm_local_runqueue *class_queue = get_task_class_queue(p);
+ struct ckrm_cpu_class *cls = class_queue->cpu_class;
- update_class_priority(lrq);
-}
+ unsigned long cvt_inc = nsec / cpu_class_weight(cls);
-static inline int class_preempts_curr(struct task_struct * p, struct task_struct* curr)
-{
- struct cq_node_struct* node1 = &(get_task_lrq(p)->classqueue_linkobj);
- struct cq_node_struct* node2 = &(get_task_lrq(curr)->classqueue_linkobj);
+ class_queue->local_cvt += cvt_inc;
+ class_queue->uncounted_cvt += cvt_inc;
- return (class_compare_prio(node1,node2) < 0);
+ class_queue->uncounted_ns += nsec;
+ update_class_priority(class_queue);
}
/*
- * return a random value with range [0, (val-1)]
+ * called during loadbalancing
+ * to charge the class with locally accumulated cvt
*/
-static inline int get_ckrm_rand(unsigned long val)
-{
- int rand;
- static int last_rand[NR_CPUS];
- int cpu = smp_processor_id();
-
- rand = last_rand[cpu];
- rand ++;
- if (rand >= val)
- rand = 0;
-
- last_rand[cpu] = rand;
- return rand;
-}
-
-void update_class_cputime(int this_cpu);
+void update_global_cvts(int this_cpu);
-/**********************************************/
-/* PID_LOAD_BALANCING */
-/**********************************************/
-struct ckrm_load_struct {
- unsigned long load_p; /*propotional*/
- unsigned long load_i; /*integral */
- long load_d; /*derivative */
-};
-
-typedef struct ckrm_load_struct ckrm_load_t;
-
-static inline void ckrm_load_init(ckrm_load_t* ckrm_load) {
- ckrm_load->load_p = 0;
- ckrm_load->load_i = 0;
- ckrm_load->load_d = 0;
-}
-
-void ckrm_load_sample(ckrm_load_t* ckrm_load,int cpu);
-long pid_get_pressure(ckrm_load_t* ckrm_load, int local_group);
-#define rq_ckrm_load(rq) (&((rq)->ckrm_load))
-
-static inline void ckrm_sched_tick(unsigned long j,int this_cpu,struct ckrm_load_struct* ckrm_load)
+/**
+ *
+ */
+static inline int class_preempts_curr(struct task_struct * p, struct task_struct* curr)
{
- read_lock(&class_list_lock);
-
-#ifdef CONFIG_SMP
- ckrm_load_sample(ckrm_load,this_cpu);
-#endif
-
- if (! (j % CVT_UPDATE_TICK)) {
- // printk("ckrm_sched j=%lu\n",j);
- classqueue_update_base(get_cpu_classqueue(this_cpu));
- update_class_cputime(this_cpu);
- }
+ struct cq_node_struct* node1 = &(get_task_class_queue(p)->classqueue_linkobj);
+ struct cq_node_struct* node2 = &(get_task_class_queue(curr)->classqueue_linkobj);
- read_unlock(&class_list_lock);
+ return (class_compare_prio(node1,node2) < 0);
}
-
-#endif //CONFIG_CKRM_CPU_SCHEDULE
-
#endif
* data structure for /proc/sys/... files
*/
int do_reset_coda_vfs_stats( ctl_table * table, int write, struct file * filp,
- void __user * buffer, size_t * lenp, loff_t * ppos );
+ void __user * buffer, size_t * lenp );
int do_reset_coda_cache_inv_stats( ctl_table * table, int write,
struct file * filp, void __user * buffer,
- size_t * lenp, loff_t * ppos );
+ size_t * lenp );
/* these functions are called to form the content of /proc/fs/coda/... files */
int coda_vfs_stats_get_info( char * buffer, char ** start, off_t offset,
COMPATIBLE_IOCTL(DVD_AUTH)
/* Big L */
ULONG_IOCTL(LOOP_SET_FD)
-ULONG_IOCTL(LOOP_CHANGE_FD)
COMPATIBLE_IOCTL(LOOP_CLR_FD)
COMPATIBLE_IOCTL(LOOP_GET_STATUS64)
COMPATIBLE_IOCTL(LOOP_SET_STATUS64)
struct vc_data;
struct console_font_op;
-struct console_font;
struct module;
/*
void (*con_bmove)(struct vc_data *, int, int, int, int, int, int);
int (*con_switch)(struct vc_data *);
int (*con_blank)(struct vc_data *, int, int);
- int (*con_font_set)(struct vc_data *, struct console_font *, unsigned);
- int (*con_font_get)(struct vc_data *, struct console_font *);
- int (*con_font_default)(struct vc_data *, struct console_font *, char *);
- int (*con_font_copy)(struct vc_data *, int);
+ int (*con_font_op)(struct vc_data *, struct console_font_op *);
int (*con_resize)(struct vc_data *, unsigned int, unsigned int);
int (*con_set_palette)(struct vc_data *, unsigned char *);
int (*con_scrolldelta)(struct vc_data *, int);
unsigned long vc_pos; /* Cursor address */
/* fonts */
unsigned short vc_hi_font_mask; /* [#] Attribute set for upper 256 chars of font or 0 if not supported */
- struct console_font vc_font; /* Current VC font set */
+ struct console_font_op vc_font; /* Current VC font set */
unsigned short vc_video_erase_char; /* Background erase character */
/* VT terminal data */
unsigned int vc_state; /* Escape sequence parser state */
[0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL \
} })
-#define CPU_MASK_CPU0 \
-((cpumask_t) { { \
- [0] = 1UL \
-} })
-
#define cpus_addr(src) ((src).bits)
#define cpumask_scnprintf(buf, len, src) \
+++ /dev/null
-/*
- * crbce.h
- *
- * Copyright (C) Hubertus Franke, IBM Corp. 2003
- *
- * This files contains the type definition of the record
- * created by the CRBCE CKRM classification engine
- *
- *
- * Latest version, more details at http://ckrm.sf.net
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2.1 of the GNU Lesser General Public License
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- *
- *
- */
-
-
-/*
- * Changes
- *
- * 2003-11-11 Created by H.Franke
- * 2003-12-01 Sanitized for Delivery by H.Franke
- *
- */
-
-#ifndef CRBCE_RECORDS_H
-#define CRBCE_RECORDS_H
-
-#ifdef __KERNEL__
-#include <linux/autoconf.h>
-#else
-#define CONFIG_CKRM
-#define CONFIG_CRBCE
-#define CONFIG_DELAY_ACCT
-#endif
-
-#include <linux/types.h>
-#include <linux/ckrm.h>
-#include <linux/ckrm_ce.h>
-
-#define CRBCE_UKCC_NAME "crbce_ukcc"
-#define CRBCE_UKCC_PATH "/mnt/relayfs"
-
-#define CRBCE_UKCC_PATH_NAME CRBCE_UKCC_PATH"/"CRBCE_UKCC_NAME
-
-#define CRBCE_MAX_CLASS_NAME_LEN 256
-
-/****************************************************************
- *
- * CRBCE EVENT SET is and extension to the standard CKRM_EVENTS
- *
- ****************************************************************/
-enum {
-
- /* we use the standard CKRM_EVENT_<..>
- * to identify reclassification cause actions
- * and extend by additional ones we need
- */
-
- /* up event flow */
-
- CRBCE_REC_EXIT = CKRM_NUM_EVENTS,
- CRBCE_REC_DATA_DELIMITER,
- CRBCE_REC_SAMPLE,
- CRBCE_REC_TASKINFO,
- CRBCE_REC_SYS_INFO,
- CRBCE_REC_CLASS_INFO,
- CRBCE_REC_KERNEL_CMD_DONE,
- CRBCE_REC_UKCC_FULL,
-
- /* down command issueance */
- CRBCE_REC_KERNEL_CMD,
-
- CRBCE_NUM_EVENTS
-};
-
-struct task_sample_info {
- uint32_t cpu_running;
- uint32_t cpu_waiting;
- uint32_t io_delayed;
- uint32_t memio_delayed;
-};
-
-/*********************************************
- * KERNEL -> USER records *
- *********************************************/
-
-/* we have records with either a time stamp or not */
-struct crbce_hdr {
- int type;
- pid_t pid;
-};
-
-struct crbce_hdr_ts {
- int type;
- pid_t pid;
- uint32_t jiffies;
- uint64_t cls;
-};
-
-/* individual records */
-
-struct crbce_rec_fork {
- struct crbce_hdr_ts hdr;
- pid_t ppid;
-};
-
-struct crbce_rec_data_delim {
- struct crbce_hdr_ts hdr;
- int is_stop; /* 0 start, 1 stop */
-};
-
-struct crbce_rec_task_data {
- struct crbce_hdr_ts hdr;
- struct task_sample_info sample;
- struct task_delay_info delay;
-};
-
-struct crbce_ukcc_full {
- struct crbce_hdr_ts hdr;
-};
-
-struct crbce_class_info {
- struct crbce_hdr_ts hdr;
- int action;
- int namelen;
- char name[CRBCE_MAX_CLASS_NAME_LEN];
-};
-
-/*********************************************
- * USER -> KERNEL records *
- *********************************************/
-
-enum crbce_kernel_cmd {
- CRBCE_CMD_START,
- CRBCE_CMD_STOP,
- CRBCE_CMD_SET_TIMER,
- CRBCE_CMD_SEND_DATA,
-};
-
-struct crbce_command {
- int type; /* we need this for the K->U reflection */
- int cmd;
- uint32_t len; /* added in the kernel for reflection */
-};
-
-#define set_cmd_hdr(rec,tok) \
-((rec).hdr.type=CRBCE_REC_KERNEL_CMD,(rec).hdr.cmd=(tok))
-
-struct crbce_cmd_done {
- struct crbce_command hdr;
- int rc;
-};
-
-struct crbce_cmd {
- struct crbce_command hdr;
-};
-
-struct crbce_cmd_send_data {
- struct crbce_command hdr;
- int delta_mode;
-};
-
-struct crbce_cmd_settimer {
- struct crbce_command hdr;
- uint32_t interval; /* in msec .. 0 means stop */
-};
-
-#endif
struct super_block *d_sb; /* The root of the dentry tree */
int d_mounted;
void *d_fsdata; /* fs-specific data */
- void * d_extra_attributes; /* TUX-specific data */
struct rcu_head d_rcu;
struct dcookie_struct *d_cookie; /* cookie, if any */
struct hlist_node d_hash; /* lookup hash list */
extern void shrink_dcache_parent(struct dentry *);
extern void shrink_dcache_anon(struct hlist_head *);
extern int d_invalidate(struct dentry *);
-extern void flush_dentry_attributes(void);
/* only used at mount-time */
extern struct dentry * d_alloc_root(struct inode *);
/* validate "insecure" dentry pointer */
extern int d_validate(struct dentry *, struct dentry *);
-char * __d_path( struct dentry *dentry, struct vfsmount *vfsmnt,
- struct dentry *root, struct vfsmount *rootmnt,
- char *buffer, int buflen);
-
extern char * d_path(struct dentry *, struct vfsmount *, char *, int);
-
+
/* Allocation counts.. */
/**
#endif
-#define DEVPTS_SUPER_MAGIC 0x1cd1
-
#endif /* _LINUX_DEVPTS_FS_H */
+++ /dev/null
-/*
- * Kernel header file for Linux crash dumps.
- *
- * Created by: Matt Robinson (yakker@sgi.com)
- * Copyright 1999 - 2002 Silicon Graphics, Inc. All rights reserved.
- *
- * vmdump.h to dump.h by: Matt D. Robinson (yakker@sourceforge.net)
- * Copyright 2001 - 2002 Matt D. Robinson. All rights reserved.
- * Copyright (C) 2002 Free Software Foundation, Inc. All rights reserved.
- *
- * Most of this is the same old stuff from vmdump.h, except now we're
- * actually a stand-alone driver plugged into the block layer interface,
- * with the exception that we now allow for compression modes externally
- * loaded (e.g., someone can come up with their own).
- *
- * This code is released under version 2 of the GNU GPL.
- */
-
-/* This header file includes all structure definitions for crash dumps. */
-#ifndef _DUMP_H
-#define _DUMP_H
-
-#if defined(CONFIG_CRASH_DUMP) || defined (CONFIG_CRASH_DUMP_MODULE)
-
-#include <linux/list.h>
-#include <linux/notifier.h>
-#include <linux/dumpdev.h>
-#include <asm/ioctl.h>
-
-/*
- * Predefine default DUMP_PAGE constants, asm header may override.
- *
- * On ia64 discontinuous memory systems it's possible for the memory
- * banks to stop at 2**12 page alignments, the smallest possible page
- * size. But the system page size, PAGE_SIZE, is in fact larger.
- */
-#define DUMP_PAGE_SHIFT PAGE_SHIFT
-#define DUMP_PAGE_MASK PAGE_MASK
-#define DUMP_PAGE_ALIGN(addr) PAGE_ALIGN(addr)
-#define DUMP_HEADER_OFFSET PAGE_SIZE
-
-#define OLDMINORBITS 8
-#define OLDMINORMASK ((1U << OLDMINORBITS) -1)
-
-/* keep DUMP_PAGE_SIZE constant to 4K = 1<<12
- * it may be different from PAGE_SIZE then.
- */
-#define DUMP_PAGE_SIZE 4096
-
-/*
- * Predefined default memcpy() to use when copying memory to the dump buffer.
- *
- * On ia64 there is a heads up function that can be called to let the prom
- * machine check monitor know that the current activity is risky and it should
- * ignore the fault (nofault). In this case the ia64 header will redefine this
- * macro to __dump_memcpy() and use it's arch specific version.
- */
-#define DUMP_memcpy memcpy
-
-/* necessary header files */
-#include <asm/dump.h> /* for architecture-specific header */
-
-/*
- * Size of the buffer that's used to hold:
- *
- * 1. the dump header (padded to fill the complete buffer)
- * 2. the possibly compressed page headers and data
- */
-#define DUMP_BUFFER_SIZE (64 * 1024) /* size of dump buffer */
-#define DUMP_HEADER_SIZE DUMP_BUFFER_SIZE
-
-/* standard header definitions */
-#define DUMP_MAGIC_NUMBER 0xa8190173618f23edULL /* dump magic number */
-#define DUMP_MAGIC_LIVE 0xa8190173618f23cdULL /* live magic number */
-#define DUMP_VERSION_NUMBER 0x8 /* dump version number */
-#define DUMP_PANIC_LEN 0x100 /* dump panic string length */
-
-/* dump levels - type specific stuff added later -- add as necessary */
-#define DUMP_LEVEL_NONE 0x0 /* no dumping at all -- just bail */
-#define DUMP_LEVEL_HEADER 0x1 /* kernel dump header only */
-#define DUMP_LEVEL_KERN 0x2 /* dump header and kernel pages */
-#define DUMP_LEVEL_USED 0x4 /* dump header, kernel/user pages */
-#define DUMP_LEVEL_ALL_RAM 0x8 /* dump header, all RAM pages */
-#define DUMP_LEVEL_ALL 0x10 /* dump all memory RAM and firmware */
-
-
-/* dump compression options -- add as necessary */
-#define DUMP_COMPRESS_NONE 0x0 /* don't compress this dump */
-#define DUMP_COMPRESS_RLE 0x1 /* use RLE compression */
-#define DUMP_COMPRESS_GZIP 0x2 /* use GZIP compression */
-
-/* dump flags - any dump-type specific flags -- add as necessary */
-#define DUMP_FLAGS_NONE 0x0 /* no flags are set for this dump */
-#define DUMP_FLAGS_SOFTBOOT 0x2 /* 2 stage soft-boot based dump */
-#define DUMP_FLAGS_NONDISRUPT 0X1 /* non-disruptive dumping */
-
-#define DUMP_FLAGS_TARGETMASK 0xf0000000 /* handle special case targets */
-#define DUMP_FLAGS_DISKDUMP 0x80000000 /* dump to local disk */
-#define DUMP_FLAGS_NETDUMP 0x40000000 /* dump over the network */
-
-/* dump header flags -- add as necessary */
-#define DUMP_DH_FLAGS_NONE 0x0 /* no flags set (error condition!) */
-#define DUMP_DH_RAW 0x1 /* raw page (no compression) */
-#define DUMP_DH_COMPRESSED 0x2 /* page is compressed */
-#define DUMP_DH_END 0x4 /* end marker on a full dump */
-#define DUMP_DH_TRUNCATED 0x8 /* dump is incomplete */
-#define DUMP_DH_TEST_PATTERN 0x10 /* dump page is a test pattern */
-#define DUMP_DH_NOT_USED 0x20 /* 1st bit not used in flags */
-
-/* names for various dump parameters in /proc/kernel */
-#define DUMP_ROOT_NAME "sys/dump"
-#define DUMP_DEVICE_NAME "device"
-#define DUMP_COMPRESS_NAME "compress"
-#define DUMP_LEVEL_NAME "level"
-#define DUMP_FLAGS_NAME "flags"
-#define DUMP_ADDR_NAME "addr"
-
-#define DUMP_SYSRQ_KEY 'd' /* key to use for MAGIC_SYSRQ key */
-
-/* CTL_DUMP names: */
-enum
-{
- CTL_DUMP_DEVICE=1,
- CTL_DUMP_COMPRESS=3,
- CTL_DUMP_LEVEL=3,
- CTL_DUMP_FLAGS=4,
- CTL_DUMP_ADDR=5,
- CTL_DUMP_TEST=6,
-};
-
-
-/* page size for gzip compression -- buffered slightly beyond hardware PAGE_SIZE used by DUMP */
-#define DUMP_DPC_PAGE_SIZE (DUMP_PAGE_SIZE + 512)
-
-/* dump ioctl() control options */
-#define DIOSDUMPDEV _IOW('p', 0xA0, unsigned int) /* set the dump device */
-#define DIOGDUMPDEV _IOR('p', 0xA1, unsigned int) /* get the dump device */
-#define DIOSDUMPLEVEL _IOW('p', 0xA2, unsigned int) /* set the dump level */
-#define DIOGDUMPLEVEL _IOR('p', 0xA3, unsigned int) /* get the dump level */
-#define DIOSDUMPFLAGS _IOW('p', 0xA4, unsigned int) /* set the dump flag parameters */
-#define DIOGDUMPFLAGS _IOR('p', 0xA5, unsigned int) /* get the dump flag parameters */
-#define DIOSDUMPCOMPRESS _IOW('p', 0xA6, unsigned int) /* set the dump compress level */
-#define DIOGDUMPCOMPRESS _IOR('p', 0xA7, unsigned int) /* get the dump compress level */
-
-/* these ioctls are used only by netdump module */
-#define DIOSTARGETIP _IOW('p', 0xA8, unsigned int) /* set the target m/c's ip */
-#define DIOGTARGETIP _IOR('p', 0xA9, unsigned int) /* get the target m/c's ip */
-#define DIOSTARGETPORT _IOW('p', 0xAA, unsigned int) /* set the target m/c's port */
-#define DIOGTARGETPORT _IOR('p', 0xAB, unsigned int) /* get the target m/c's port */
-#define DIOSSOURCEPORT _IOW('p', 0xAC, unsigned int) /* set the source m/c's port */
-#define DIOGSOURCEPORT _IOR('p', 0xAD, unsigned int) /* get the source m/c's port */
-#define DIOSETHADDR _IOW('p', 0xAE, unsigned int) /* set ethernet address */
-#define DIOGETHADDR _IOR('p', 0xAF, unsigned int) /* get ethernet address */
-#define DIOGDUMPOKAY _IOR('p', 0xB0, unsigned int) /* check if dump is configured */
-#define DIOSDUMPTAKE _IOW('p', 0xB1, unsigned int) /* Take a manual dump */
-
-/*
- * Structure: __dump_header
- * Function: This is the header dumped at the top of every valid crash
- * dump.
- */
-struct __dump_header {
- /* the dump magic number -- unique to verify dump is valid */
- u64 dh_magic_number;
-
- /* the version number of this dump */
- u32 dh_version;
-
- /* the size of this header (in case we can't read it) */
- u32 dh_header_size;
-
- /* the level of this dump (just a header?) */
- u32 dh_dump_level;
-
- /*
- * We assume dump_page_size to be 4K in every case.
- * Store here the configurable system page size (4K, 8K, 16K, etc.)
- */
- u32 dh_page_size;
-
- /* the size of all physical memory */
- u64 dh_memory_size;
-
- /* the start of physical memory */
- u64 dh_memory_start;
-
- /* the end of physical memory */
- u64 dh_memory_end;
-
- /* the number of hardware/physical pages in this dump specifically */
- u32 dh_num_dump_pages;
-
- /* the panic string, if available */
- char dh_panic_string[DUMP_PANIC_LEN];
-
- /* timeval depends on architecture, two long values */
- struct {
- u64 tv_sec;
- u64 tv_usec;
- } dh_time; /* the time of the system crash */
-
- /* the NEW utsname (uname) information -- in character form */
- /* we do this so we don't have to include utsname.h */
- /* plus it helps us be more architecture independent */
- /* now maybe one day soon they'll make the [65] a #define! */
- char dh_utsname_sysname[65];
- char dh_utsname_nodename[65];
- char dh_utsname_release[65];
- char dh_utsname_version[65];
- char dh_utsname_machine[65];
- char dh_utsname_domainname[65];
-
- /* the address of current task (OLD = void *, NEW = u64) */
- u64 dh_current_task;
-
- /* what type of compression we're using in this dump (if any) */
- u32 dh_dump_compress;
-
- /* any additional flags */
- u32 dh_dump_flags;
-
- /* any additional flags */
- u32 dh_dump_device;
-} __attribute__((packed));
-
-/*
- * Structure: __dump_page
- * Function: To act as the header associated to each physical page of
- * memory saved in the system crash dump. This allows for
- * easy reassembly of each crash dump page. The address bits
- * are split to make things easier for 64-bit/32-bit system
- * conversions.
- *
- * dp_byte_offset and dp_page_index are landmarks that are helpful when
- * looking at a hex dump of /dev/vmdump,
- */
-struct __dump_page {
- /* the address of this dump page */
- u64 dp_address;
-
- /* the size of this dump page */
- u32 dp_size;
-
- /* flags (currently DUMP_COMPRESSED, DUMP_RAW or DUMP_END) */
- u32 dp_flags;
-} __attribute__((packed));
-
-/*
- * Structure: __lkcdinfo
- * Function: This structure contains information needed for the lkcdutils
- * package (particularly lcrash) to determine what information is
- * associated to this kernel, specifically.
- */
-struct __lkcdinfo {
- int arch;
- int ptrsz;
- int byte_order;
- int linux_release;
- int page_shift;
- int page_size;
- u64 page_mask;
- u64 page_offset;
- int stack_offset;
-};
-
-#ifdef __KERNEL__
-
-/*
- * Structure: __dump_compress
- * Function: This is what an individual compression mechanism can use
- * to plug in their own compression techniques. It's always
- * best to build these as individual modules so that people
- * can put in whatever they want.
- */
-struct __dump_compress {
- /* the list_head structure for list storage */
- struct list_head list;
-
- /* the type of compression to use (DUMP_COMPRESS_XXX) */
- int compress_type;
- const char *compress_name;
-
- /* the compression function to call */
- u16 (*compress_func)(const u8 *, u16, u8 *, u16);
-};
-
-/* functions for dump compression registration */
-extern void dump_register_compression(struct __dump_compress *);
-extern void dump_unregister_compression(int);
-
-/*
- * Structure dump_mbank[]:
- *
- * For CONFIG_DISCONTIGMEM systems this array specifies the
- * memory banks/chunks that need to be dumped after a panic.
- *
- * For classic systems it specifies a single set of pages from
- * 0 to max_mapnr.
- */
-struct __dump_mbank {
- u64 start;
- u64 end;
- int type;
- int pad1;
- long pad2;
-};
-
-#define DUMP_MBANK_TYPE_CONVENTIONAL_MEMORY 1
-#define DUMP_MBANK_TYPE_OTHER 2
-
-#define MAXCHUNKS 256
-extern int dump_mbanks;
-extern struct __dump_mbank dump_mbank[MAXCHUNKS];
-
-/* notification event codes */
-#define DUMP_BEGIN 0x0001 /* dump beginning */
-#define DUMP_END 0x0002 /* dump ending */
-
-/* Scheduler soft spin control.
- *
- * 0 - no dump in progress
- * 1 - cpu0 is dumping, ...
- */
-extern unsigned long dump_oncpu;
-extern void dump_execute(const char *, const struct pt_regs *);
-
-/*
- * Notifier list for kernel code which wants to be called
- * at kernel dump.
- */
-extern struct notifier_block *dump_notifier_list;
-static inline int register_dump_notifier(struct notifier_block *nb)
-{
- return notifier_chain_register(&dump_notifier_list, nb);
-}
-static inline int unregister_dump_notifier(struct notifier_block * nb)
-{
- return notifier_chain_unregister(&dump_notifier_list, nb);
-}
-
-extern void (*dump_function_ptr)(const char *, const struct pt_regs *);
-static inline void dump(char * str, struct pt_regs * regs)
-{
- if (dump_function_ptr)
- dump_function_ptr(str, regs);
-}
-
-/*
- * Common Arch Specific Functions should be declared here.
- * This allows the C compiler to detect discrepancies.
- */
-extern void __dump_open(void);
-extern void __dump_cleanup(void);
-extern void __dump_init(u64);
-extern void __dump_save_regs(struct pt_regs *, const struct pt_regs *);
-extern int __dump_configure_header(const struct pt_regs *);
-extern int __dump_irq_enable(void);
-extern void __dump_irq_restore(void);
-extern int __dump_page_valid(unsigned long index);
-#ifdef CONFIG_SMP
-extern void __dump_save_other_cpus(void);
-#else
-#define __dump_save_other_cpus()
-#endif
-
-extern int manual_handle_crashdump(void);
-
-/* to track all used (compound + zero order) pages */
-#define PageInuse(p) (PageCompound(p) || page_count(p))
-
-#endif /* __KERNEL__ */
-
-#else /* !CONFIG_CRASH_DUMP */
-
-/* If not configured then make code disappear! */
-#define register_dump_watchdog(x) do { } while(0)
-#define unregister_dump_watchdog(x) do { } while(0)
-#define register_dump_notifier(x) do { } while(0)
-#define unregister_dump_notifier(x) do { } while(0)
-#define dump_in_progress() 0
-#define dump(x, y) do { } while(0)
-
-#endif /* !CONFIG_CRASH_DUMP */
-
-#endif /* _DUMP_H */
+++ /dev/null
-/*
- * linux/drivers/net/netconsole.h
- *
- * Copyright (C) 2001 Ingo Molnar <mingo@redhat.com>
- *
- * This file contains the implementation of an IRQ-safe, crash-safe
- * kernel console implementation that outputs kernel messages to the
- * network.
- *
- * Modification history:
- *
- * 2001-09-17 started by Ingo Molnar.
- */
-
-/****************************************************************
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- ****************************************************************/
-
-#define NETCONSOLE_VERSION 0x03
-
-enum netdump_commands {
- COMM_NONE = 0,
- COMM_SEND_MEM = 1,
- COMM_EXIT = 2,
- COMM_REBOOT = 3,
- COMM_HELLO = 4,
- COMM_GET_NR_PAGES = 5,
- COMM_GET_PAGE_SIZE = 6,
- COMM_START_NETDUMP_ACK = 7,
- COMM_GET_REGS = 8,
- COMM_GET_MAGIC = 9,
- COMM_START_WRITE_NETDUMP_ACK = 10,
-};
-
-typedef struct netdump_req_s {
- u64 magic;
- u32 nr;
- u32 command;
- u32 from;
- u32 to;
-} req_t;
-
-enum netdump_replies {
- REPLY_NONE = 0,
- REPLY_ERROR = 1,
- REPLY_LOG = 2,
- REPLY_MEM = 3,
- REPLY_RESERVED = 4,
- REPLY_HELLO = 5,
- REPLY_NR_PAGES = 6,
- REPLY_PAGE_SIZE = 7,
- REPLY_START_NETDUMP = 8,
- REPLY_END_NETDUMP = 9,
- REPLY_REGS = 10,
- REPLY_MAGIC = 11,
- REPLY_START_WRITE_NETDUMP = 12,
-};
-
-typedef struct netdump_reply_s {
- u32 nr;
- u32 code;
- u32 info;
-} reply_t;
-
-#define HEADER_LEN (1 + sizeof(reply_t))
-
-
+++ /dev/null
-/*
- * Generic dump device interfaces for flexible system dump
- * (Enables variation of dump target types e.g disk, network, memory)
- *
- * These interfaces have evolved based on discussions on lkcd-devel.
- * Eventually the intent is to support primary and secondary or
- * alternate targets registered at the same time, with scope for
- * situation based failover or multiple dump devices used for parallel
- * dump i/o.
- *
- * Started: Oct 2002 - Suparna Bhattacharya (suparna@in.ibm.com)
- *
- * Copyright (C) 2001 - 2002 Matt D. Robinson. All rights reserved.
- * Copyright (C) 2002 International Business Machines Corp.
- *
- * This code is released under version 2 of the GNU GPL.
- */
-
-#ifndef _LINUX_DUMPDEV_H
-#define _LINUX_DUMPDEV_H
-
-#include <linux/kernel.h>
-#include <linux/wait.h>
-#include <linux/bio.h>
-
-/* Determined by the dump target (device) type */
-
-struct dump_dev;
-
-struct dump_dev_ops {
- int (*open)(struct dump_dev *, unsigned long); /* configure */
- int (*release)(struct dump_dev *); /* unconfigure */
- int (*silence)(struct dump_dev *); /* when dump starts */
- int (*resume)(struct dump_dev *); /* when dump is over */
- int (*seek)(struct dump_dev *, loff_t);
- /* trigger a write (async in nature typically) */
- int (*write)(struct dump_dev *, void *, unsigned long);
- /* not usually used during dump, but option available */
- int (*read)(struct dump_dev *, void *, unsigned long);
- /* use to poll for completion */
- int (*ready)(struct dump_dev *, void *);
- int (*ioctl)(struct dump_dev *, unsigned int, unsigned long);
-};
-
-struct dump_dev {
- char type_name[32]; /* block, net-poll etc */
- unsigned long device_id; /* interpreted differently for various types */
- struct dump_dev_ops *ops;
- struct list_head list;
- loff_t curr_offset;
-};
-
-/*
- * dump_dev type variations:
- */
-
-/* block */
-struct dump_blockdev {
- struct dump_dev ddev;
- dev_t dev_id;
- struct block_device *bdev;
- struct bio *bio;
- loff_t start_offset;
- loff_t limit;
- int err;
-};
-
-static inline struct dump_blockdev *DUMP_BDEV(struct dump_dev *dev)
-{
- return container_of(dev, struct dump_blockdev, ddev);
-}
-
-
-/* mem - for internal use by soft-boot based dumper */
-struct dump_memdev {
- struct dump_dev ddev;
- unsigned long indirect_map_root;
- unsigned long nr_free;
- struct page *curr_page;
- unsigned long *curr_map;
- unsigned long curr_map_offset;
- unsigned long last_offset;
- unsigned long last_used_offset;
- unsigned long last_bs_offset;
-};
-
-static inline struct dump_memdev *DUMP_MDEV(struct dump_dev *dev)
-{
- return container_of(dev, struct dump_memdev, ddev);
-}
-
-/* Todo/future - meant for raw dedicated interfaces e.g. mini-ide driver */
-struct dump_rdev {
- struct dump_dev ddev;
- char name[32];
- int (*reset)(struct dump_rdev *, unsigned int,
- unsigned long);
- /* ... to do ... */
-};
-
-/* just to get the size right when saving config across a soft-reboot */
-struct dump_anydev {
- union {
- struct dump_blockdev bddev;
- /* .. add other types here .. */
- };
-};
-
-
-
-/* Dump device / target operation wrappers */
-/* These assume that dump_dev is initiatized to dump_config.dumper->dev */
-
-extern struct dump_dev *dump_dev;
-
-static inline int dump_dev_open(unsigned long arg)
-{
- return dump_dev->ops->open(dump_dev, arg);
-}
-
-static inline int dump_dev_release(void)
-{
- return dump_dev->ops->release(dump_dev);
-}
-
-static inline int dump_dev_silence(void)
-{
- return dump_dev->ops->silence(dump_dev);
-}
-
-static inline int dump_dev_resume(void)
-{
- return dump_dev->ops->resume(dump_dev);
-}
-
-static inline int dump_dev_seek(loff_t offset)
-{
- return dump_dev->ops->seek(dump_dev, offset);
-}
-
-static inline int dump_dev_write(void *buf, unsigned long len)
-{
- return dump_dev->ops->write(dump_dev, buf, len);
-}
-
-static inline int dump_dev_ready(void *buf)
-{
- return dump_dev->ops->ready(dump_dev, buf);
-}
-
-static inline int dump_dev_ioctl(unsigned int cmd, unsigned long arg)
-{
- if (!dump_dev || !dump_dev->ops->ioctl)
- return -EINVAL;
- return dump_dev->ops->ioctl(dump_dev, cmd, arg);
-}
-
-extern int dump_register_device(struct dump_dev *);
-extern void dump_unregister_device(struct dump_dev *);
-
-#endif /* _LINUX_DUMPDEV_H */
#ifndef _DVBOSD_H_
#define _DVBOSD_H_
-#include <linux/compiler.h>
-
typedef enum {
// All functions return -2 on "not open"
OSD_Close=1, // ()
#ifndef _DVBVIDEO_H_
#define _DVBVIDEO_H_
-#include <linux/compiler.h>
-
#ifdef __KERNEL__
#include <linux/types.h>
#else
typedef struct request *(elevator_request_list_fn) (request_queue_t *, struct request *);
typedef void (elevator_completed_req_fn) (request_queue_t *, struct request *);
typedef int (elevator_may_queue_fn) (request_queue_t *, int);
-typedef void (elevator_set_congested_fn) (request_queue_t *);
typedef int (elevator_set_req_fn) (request_queue_t *, struct request *, int);
typedef void (elevator_put_req_fn) (request_queue_t *, struct request *);
elevator_put_req_fn *elevator_put_req_fn;
elevator_may_queue_fn *elevator_may_queue_fn;
- elevator_set_congested_fn *elevator_set_congested_fn;
elevator_init_fn *elevator_init_fn;
elevator_exit_fn *elevator_exit_fn;
extern int elv_register_queue(request_queue_t *q);
extern void elv_unregister_queue(request_queue_t *q);
extern int elv_may_queue(request_queue_t *, int);
-extern void elv_set_congested(request_queue_t *);
extern void elv_completed_request(request_queue_t *, struct request *);
extern int elv_set_request(request_queue_t *, struct request *, int);
extern void elv_put_request(request_queue_t *, struct request *);
#define ELEVATOR_INSERT_BACK 2
#define ELEVATOR_INSERT_SORT 3
-#define RQ_ELV_DATA(rq) (rq)->elevator_private
-
#endif
#include <linux/types.h>
#include <asm/elf.h>
-#ifndef elf_read_implies_exec
- /* Executables for which elf_read_implies_exec() returns TRUE will
- have the READ_IMPLIES_EXEC personality flag set automatically.
- Override in asm/elf.h as needed. */
-# define elf_read_implies_exec(ex, have_pt_gnu_stack) 0
-#endif
-
/* 32-bit ELF base types. */
typedef __u32 Elf32_Addr;
typedef __u16 Elf32_Half;
#define EJUKEBOX 528 /* Request initiated, but will not complete before timeout */
#define EIOCBQUEUED 529 /* iocb queued, will get completion event */
-/* Defined for TUX async IO */
-#define EWOULDBLOCKIO 530 /* Would block due to block-IO */
-
#endif
#endif
#define EXT2_IUNLINK_FL 0x08000000 /* Immutable unlink */
#define EXT2_RESERVED_FL 0x80000000 /* reserved for ext2 lib */
-#ifdef CONFIG_VSERVER_LEGACY
-#define EXT2_FL_USER_VISIBLE 0x0C03DFFF /* User visible flags */
-#define EXT2_FL_USER_MODIFIABLE 0x0C0380FF /* User modifiable flags */
-#else
#define EXT2_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */
#define EXT2_FL_USER_MODIFIABLE 0x000380FF /* User modifiable flags */
-#endif
/*
* ioctl commands
#define EXT3_IUNLINK_FL 0x08000000 /* Immutable unlink */
#define EXT3_RESERVED_FL 0x80000000 /* reserved for ext3 lib */
-#ifdef CONFIG_VSERVER_LEGACY
-#define EXT3_FL_USER_VISIBLE 0x0C03DFFF /* User visible flags */
-#define EXT3_FL_USER_MODIFIABLE 0x0C0380FF /* User modifiable flags */
-#else
#define EXT3_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */
#define EXT3_FL_USER_MODIFIABLE 0x000380FF /* User modifiable flags */
-#endif
/*
* Inode dynamic state flags
#ifdef CONFIG_JBD_DEBUG
#define EXT3_IOC_WAIT_FOR_READONLY _IOR('f', 99, long)
#endif
+#define EXT3_IOC_GETRSVSZ _IOR('r', 1, long)
+#define EXT3_IOC_SETRSVSZ _IOW('r', 2, long)
#ifdef CONFIG_VSERVER_LEGACY
#define EXT3_IOC_SETXID FIOC_SETXIDJ
#endif
/*
* Mount flags
*/
-#define EXT3_MOUNT_CHECK 0x0001 /* Do mount-time checks */
-#define EXT3_MOUNT_OLDALLOC 0x0002 /* Don't use the new Orlov allocator */
-#define EXT3_MOUNT_GRPID 0x0004 /* Create files with directory's group */
-#define EXT3_MOUNT_DEBUG 0x0008 /* Some debugging messages */
-#define EXT3_MOUNT_ERRORS_CONT 0x0010 /* Continue on errors */
-#define EXT3_MOUNT_ERRORS_RO 0x0020 /* Remount fs ro on errors */
-#define EXT3_MOUNT_ERRORS_PANIC 0x0040 /* Panic on errors */
-#define EXT3_MOUNT_MINIX_DF 0x0080 /* Mimics the Minix statfs */
-#define EXT3_MOUNT_NOLOAD 0x0100 /* Don't use existing journal*/
-#define EXT3_MOUNT_ABORT 0x0200 /* Fatal error detected */
-#define EXT3_MOUNT_DATA_FLAGS 0x0C00 /* Mode for data writes: */
- #define EXT3_MOUNT_JOURNAL_DATA 0x0400 /* Write data to journal */
- #define EXT3_MOUNT_ORDERED_DATA 0x0800 /* Flush data before commit */
- #define EXT3_MOUNT_WRITEBACK_DATA 0x0C00 /* No data ordering */
-#define EXT3_MOUNT_UPDATE_JOURNAL 0x1000 /* Update the journal format */
-#define EXT3_MOUNT_NO_UID32 0x2000 /* Disable 32-bit UIDs */
-#define EXT3_MOUNT_XATTR_USER 0x4000 /* Extended user attributes */
-#define EXT3_MOUNT_POSIX_ACL 0x8000 /* POSIX Access Control Lists */
+#define EXT3_MOUNT_CHECK 0x00001 /* Do mount-time checks */
+#define EXT3_MOUNT_OLDALLOC 0x00002 /* Don't use the new Orlov allocator */
+#define EXT3_MOUNT_GRPID 0x00004 /* Create files with directory's group */
+#define EXT3_MOUNT_DEBUG 0x00008 /* Some debugging messages */
+#define EXT3_MOUNT_ERRORS_CONT 0x00010 /* Continue on errors */
+#define EXT3_MOUNT_ERRORS_RO 0x00020 /* Remount fs ro on errors */
+#define EXT3_MOUNT_ERRORS_PANIC 0x00040 /* Panic on errors */
+#define EXT3_MOUNT_MINIX_DF 0x00080 /* Mimics the Minix statfs */
+#define EXT3_MOUNT_NOLOAD 0x00100 /* Don't use existing journal*/
+#define EXT3_MOUNT_ABORT 0x00200 /* Fatal error detected */
+#define EXT3_MOUNT_DATA_FLAGS 0x00C00 /* Mode for data writes: */
+#define EXT3_MOUNT_JOURNAL_DATA 0x00400 /* Write data to journal */
+#define EXT3_MOUNT_ORDERED_DATA 0x00800 /* Flush data before commit */
+#define EXT3_MOUNT_WRITEBACK_DATA 0x00C00 /* No data ordering */
+#define EXT3_MOUNT_UPDATE_JOURNAL 0x01000 /* Update the journal format */
+#define EXT3_MOUNT_NO_UID32 0x02000 /* Disable 32-bit UIDs */
+#define EXT3_MOUNT_XATTR_USER 0x04000 /* Extended user attributes */
+#define EXT3_MOUNT_POSIX_ACL 0x08000 /* POSIX Access Control Lists */
+#define EXT3_MOUNT_RESERVATION 0x10000 /* Preallocation */
#define EXT3_MOUNT_TAG_XID 0x20000 /* Enable Context Tags */
/* Compatibility, for having both ext2_fs.h and ext3_fs.h included at once */
#define FBIOGETCMAP 0x4604
#define FBIOPUTCMAP 0x4605
#define FBIOPAN_DISPLAY 0x4606
-#ifdef __KERNEL__
-#define FBIO_CURSOR _IOWR('F', 0x08, struct fb_cursor_user)
-#else
#define FBIO_CURSOR _IOWR('F', 0x08, struct fb_cursor)
-#endif
/* 0x4607-0x460B are defined below */
/* #define FBIOGET_MONITORSPEC 0x460C */
/* #define FBIOPUT_MONITORSPEC 0x460D */
struct device;
struct file;
-struct fb_cmap_user {
- __u32 start; /* First entry */
- __u32 len; /* Number of entries */
- __u16 __user *red; /* Red values */
- __u16 __user *green;
- __u16 __user *blue;
- __u16 __user *transp; /* transparency, can be NULL */
-};
-
-struct fb_image_user {
- __u32 dx; /* Where to place image */
- __u32 dy;
- __u32 width; /* Size of image */
- __u32 height;
- __u32 fg_color; /* Only used when a mono bitmap */
- __u32 bg_color;
- __u8 depth; /* Depth of the image */
- const char __user *data; /* Pointer to image data */
- struct fb_cmap_user cmap; /* color map info */
-};
-
-struct fb_cursor_user {
- __u16 set; /* what to set */
- __u16 enable; /* cursor on/off */
- __u16 rop; /* bitop operation */
- const char __user *mask; /* cursor mask bits */
- struct fbcurpos hot; /* cursor hot spot */
- struct fb_image_user image; /* Cursor image */
-};
-
/*
* Register/unregister for framebuffer events
*/
/* drivers/video/fbcmap.c */
extern int fb_alloc_cmap(struct fb_cmap *cmap, int len, int transp);
extern void fb_dealloc_cmap(struct fb_cmap *cmap);
-extern int fb_copy_cmap(struct fb_cmap *from, struct fb_cmap *to);
-extern int fb_cmap_to_user(struct fb_cmap *from, struct fb_cmap_user *to);
-extern int fb_set_cmap(struct fb_cmap *cmap, struct fb_info *fb_info);
-extern int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *fb_info);
+extern int fb_copy_cmap(struct fb_cmap *from, struct fb_cmap *to, int fsfromto);
+extern int fb_set_cmap(struct fb_cmap *cmap, int kspc, struct fb_info *fb_info);
extern struct fb_cmap *fb_default_cmap(int len);
extern void fb_invert_cmaps(void);
struct files_struct *get_files_struct(struct task_struct *);
void FASTCALL(put_files_struct(struct files_struct *fs));
-extern int dupfd(struct file *file, unsigned int start);
-
#endif /* __LINUX_FILE_H */
#include <linux/cache.h>
#include <linux/prio_tree.h>
#include <linux/kobject.h>
-#include <linux/mount.h>
#include <asm/atomic.h>
struct iovec;
/* Fixed constants first: */
#undef NR_OPEN
#define NR_OPEN (1024*1024) /* Absolute upper limit on fd num */
-#define INR_OPEN 4096 /* Initial setting for nfile rlimits */
+#define INR_OPEN 1024 /* Initial setting for nfile rlimits */
#define BLOCK_SIZE_BITS 10
#define BLOCK_SIZE (1<<BLOCK_SIZE_BITS)
#define FMODE_READ 1
#define FMODE_WRITE 2
-/* Internal kernel extensions */
-#define FMODE_LSEEK 4
-#define FMODE_PREAD 8
-#define FMODE_PWRITE FMODE_PREAD /* These go hand in hand */
-
#define RW_MASK 1
#define RWA_MASK 2
#define READ 0
*/
#define __IS_FLG(inode,flg) ((inode)->i_sb->s_flags & (flg))
-#define IS_RDONLY(inode) __IS_FLG(inode, MS_RDONLY)
+#define IS_RDONLY(inode) ((inode)->i_sb->s_flags & MS_RDONLY)
#define IS_SYNC(inode) (__IS_FLG(inode, MS_SYNCHRONOUS) || \
((inode)->i_flags & S_SYNC))
#define IS_DIRSYNC(inode) (__IS_FLG(inode, MS_SYNCHRONOUS|MS_DIRSYNC) || \
#define ATTR_ATTR_FLAG 1024
#define ATTR_KILL_SUID 2048
#define ATTR_KILL_SGID 4096
-#define ATTR_XID 8192
/*
* This is the Inode Attributes structure, used for notify_change(). It
umode_t ia_mode;
uid_t ia_uid;
gid_t ia_gid;
- xid_t ia_xid;
loff_t ia_size;
struct timespec ia_atime;
struct timespec ia_mtime;
struct block_device * bd_contains;
unsigned bd_block_size;
struct hd_struct * bd_part;
- /* number of times partitions within this device have been opened. */
unsigned bd_part_count;
int bd_invalidated;
struct gendisk * bd_disk;
#include <linux/fcntl.h>
+extern long generic_file_fcntl(int fd, unsigned int cmd,
+ unsigned long arg, struct file *filp);
+
extern int fcntl_getlk(struct file *, struct flock __user *);
extern int fcntl_setlk(struct file *, unsigned int, struct flock __user *);
ssize_t (*sendfile) (struct file *, loff_t *, size_t, read_actor_t, void *);
ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int);
unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
- int (*check_flags)(int);
- int (*dir_notify)(struct file *filp, unsigned long arg);
+ long (*fcntl)(int fd, unsigned int cmd,
+ unsigned long arg, struct file *filp);
};
struct inode_operations {
static inline void touch_atime(struct vfsmount *mnt, struct dentry *dentry)
{
- struct inode *inode = dentry->d_inode;
-
- if (MNT_IS_NOATIME(mnt))
- return;
- if (S_ISDIR(inode->i_mode) && MNT_IS_NODIRATIME(mnt))
- return;
- if (IS_RDONLY(inode) || MNT_IS_RDONLY(mnt))
- return;
-
- update_atime(inode);
+ /* per-mountpoint checks will go here */
+ update_atime(dentry->d_inode);
}
static inline void file_accessed(struct file *file)
extern ssize_t generic_file_sendfile(struct file *, loff_t *, size_t, read_actor_t, void *);
extern void do_generic_mapping_read(struct address_space *mapping,
struct file_ra_state *, struct file *,
- loff_t *, read_descriptor_t *, read_actor_t, int);
+ loff_t *, read_descriptor_t *, read_actor_t);
extern void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping);
extern ssize_t generic_file_direct_IO(int rw, struct kiocb *iocb,
extern loff_t generic_file_llseek(struct file *file, loff_t offset, int origin);
extern loff_t remote_llseek(struct file *file, loff_t offset, int origin);
extern int generic_file_open(struct inode * inode, struct file * filp);
-extern int nonseekable_open(struct inode * inode, struct file * filp);
static inline void do_generic_file_read(struct file * filp, loff_t *ppos,
read_descriptor_t * desc,
- read_actor_t actor, int nonblock)
+ read_actor_t actor)
{
do_generic_mapping_read(filp->f_mapping,
&filp->f_ra,
filp,
ppos,
desc,
- actor,
- nonblock);
+ actor);
}
ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
extern int simple_pin_fs(char *name, struct vfsmount **mount, int *count);
extern void simple_release_fs(struct vfsmount **mount, int *count);
-extern ssize_t simple_read_from_buffer(void __user *, size_t, loff_t *, const void *, size_t);
-
extern int inode_change_ok(struct inode *, struct iattr *);
extern int __must_check inode_setattr(struct inode *, struct iattr *);
-extern void inode_update_time(struct inode *inode, struct vfsmount *mnt, int ctime_too);
+extern void inode_update_time(struct inode *inode, int ctime_too);
static inline ino_t parent_ino(struct dentry *dentry)
{
{ }
#endif /* CONFIG_SECURITY */
-/* io priorities */
-
-#define IOPRIO_NR 21
-
-#define IOPRIO_IDLE 0
-#define IOPRIO_NORM 10
-#define IOPRIO_RT 20
-
-asmlinkage int sys_ioprio_set(int ioprio);
-asmlinkage int sys_ioprio_get(void);
-
-
#endif /* __KERNEL__ */
#endif /* _LINUX_FS_H */
void gs_set_termios (struct tty_struct * tty,
struct termios * old_termios);
int gs_init_port(struct gs_port *port);
-int gs_setserial(struct gs_port *port, struct serial_struct __user *sp);
-int gs_getserial(struct gs_port *port, struct serial_struct __user *sp);
+int gs_setserial(struct gs_port *port, struct serial_struct *sp);
+int gs_getserial(struct gs_port *port, struct serial_struct *sp);
void gs_got_break(struct gs_port *port);
extern int gs_debug;
struct gendisk {
int major; /* major number of driver */
int first_minor;
- int minors; /* maximum number of minors, =1 for
- * disks that can't be partitioned. */
+ int minors;
char disk_name[32]; /* name of major driver */
struct hd_struct **part; /* [indexed by minor] */
struct block_device_operations *fops;
static inline void arch_free_page(struct page *page, int order) { }
#endif
-extern struct page *
+extern struct page *
FASTCALL(__alloc_pages(unsigned int, unsigned int, struct zonelist *));
-static inline struct page * alloc_pages_node(int nid, unsigned int gfp_mask,
- unsigned int order)
+
+static inline struct page *alloc_pages_node(int nid, unsigned int gfp_mask,
+ unsigned int order)
{
if (unlikely(order >= MAX_ORDER))
return NULL;
- return __alloc_pages(gfp_mask, order,
+ return __alloc_pages(gfp_mask, order,
NODE_DATA(nid)->node_zonelists + (gfp_mask & GFP_ZONEMASK));
}
return vma->vm_flags & VM_HUGETLB;
}
-int hugetlb_sysctl_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *);
+int hugetlb_sysctl_handler(struct ctl_table *, int, struct file *, void __user *, size_t *);
int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *, struct page **, struct vm_area_struct **, unsigned long *, int *, int);
void zap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned long);
#define ICMPV6_MGM_REPORT 131
#define ICMPV6_MGM_REDUCTION 132
-#define ICMPV6_NI_QUERY 139
-#define ICMPV6_NI_REPLY 140
+/* definitions for MLDv2 */
-#define ICMPV6_MLD2_REPORT 143
+#define MLD2_MODE_IS_INCLUDE 1
+#define MLD2_MODE_IS_EXCLUDE 2
+#define MLD2_CHANGE_TO_INCLUDE 3
+#define MLD2_CHANGE_TO_EXCLUDE 4
+#define MLD2_ALLOW_NEW_SOURCES 5
+#define MLD2_BLOCK_OLD_SOURCES 6
-#define ICMPV6_DHAAD_REQUEST 144
-#define ICMPV6_DHAAD_REPLY 145
-#define ICMPV6_MOBILE_PREFIX_SOL 146
-#define ICMPV6_MOBILE_PREFIX_ADV 147
+#define ICMPV6_MLD2_REPORT 143
+#define MLD2_ALL_MCR_INIT { { { 0xff,0x02,0,0,0,0,0,0,0,0,0,0,0,0,0,0x16 } } }
/*
* Codes for Destination Unreachable
__u32 data[8];
};
-/*
- * Definitions for MLDv2
- */
-#define MLD2_MODE_IS_INCLUDE 1
-#define MLD2_MODE_IS_EXCLUDE 2
-#define MLD2_CHANGE_TO_INCLUDE 3
-#define MLD2_CHANGE_TO_EXCLUDE 4
-#define MLD2_ALLOW_NEW_SOURCES 5
-#define MLD2_BLOCK_OLD_SOURCES 6
-
-#define MLD2_ALL_MCR_INIT { { { 0xff,0x02,0,0,0,0,0,0,0,0,0,0,0,0,0,0x16 } } }
-
#ifdef __KERNEL__
#include <linux/netdevice.h>
#define DRIVER(drive) ((drive)->driver)
-extern int generic_ide_ioctl(struct file *, struct block_device *, unsigned, unsigned long);
+extern int generic_ide_ioctl(struct block_device *, unsigned, unsigned long);
/*
* ide_hwifs[] is the master data structure used to keep track
struct packet_type;
struct vlan_collection;
struct vlan_dev_info;
-struct hlist_node;
#include <linux/proc_fs.h> /* for proc_dir_entry */
#include <linux/netdevice.h>
struct vlan_group {
int real_dev_ifindex; /* The ifindex of the ethernet(like) device the vlan is attached to. */
- struct hlist_node hlist; /* linked list */
struct net_device *vlan_devices[VLAN_GROUP_ARRAY_LEN];
- struct rcu_head rcu;
+
+ struct vlan_group *next; /* the next in the list */
};
struct vlan_priority_tci_mapping {
.switch_lock = SPIN_LOCK_UNLOCKED, \
.journal_info = NULL, \
.xid = 0, \
- .vx_info = NULL, \
.nid = 0, \
+ .vx_info = NULL, \
.nx_info = NULL, \
- .ioprio = IOPRIO_NORM, \
}
* out).
*/
struct ipmi_msg
-{
- unsigned char netfn;
- unsigned char cmd;
- unsigned short data_len;
- unsigned char __user *data;
-};
-
-struct kernel_ipmi_msg
{
unsigned char netfn;
unsigned char cmd;
ipmi_user_t user;
struct ipmi_addr addr;
long msgid;
- struct kernel_ipmi_msg msg;
+ struct ipmi_msg msg;
/* The user_msg_data is the data supplied when a message was
sent, if this is a response to a sent message. If this is
int ipmi_request(ipmi_user_t user,
struct ipmi_addr *addr,
long msgid,
- struct kernel_ipmi_msg *msg,
+ struct ipmi_msg *msg,
void *user_msg_data,
int priority);
int ipmi_request_settime(ipmi_user_t user,
struct ipmi_addr *addr,
long msgid,
- struct kernel_ipmi_msg *msg,
+ struct ipmi_msg *msg,
void *user_msg_data,
int priority,
int max_retries,
int ipmi_request_with_source(ipmi_user_t user,
struct ipmi_addr *addr,
long msgid,
- struct kernel_ipmi_msg *msg,
+ struct ipmi_msg *msg,
void *user_msg_data,
int priority,
unsigned char source_address,
int ipmi_request_supply_msgs(ipmi_user_t user,
struct ipmi_addr *addr,
long msgid,
- struct kernel_ipmi_msg *msg,
+ struct ipmi_msg *msg,
void *user_msg_data,
void *supplied_smi,
struct ipmi_recv_msg *supplied_recv,
* For licensing information, see the file 'LICENCE' in the
* jffs2 directory.
*
- * $Id: jffs2.h,v 1.33 2004/05/25 11:31:55 havasi Exp $
+ * $Id: jffs2.h,v 1.31 2003/10/04 08:33:05 dwmw2 Exp $
*
*/
#define JFFS2_COMPR_COPY 0x04
#define JFFS2_COMPR_DYNRUBIN 0x05
#define JFFS2_COMPR_ZLIB 0x06
-#define JFFS2_COMPR_LZO 0x07
-#define JFFS2_COMPR_LZARI 0x08
/* Compatibility flags. */
#define JFFS2_COMPAT_MASK 0xc000 /* What do to if an unknown nodetype is found */
#define JFFS2_NODE_ACCURATE 0x2000
uint16_t v16;
} __attribute__((packed)) jint16_t;
+#define JFFS2_NATIVE_ENDIAN
+
+/* Note we handle mode bits conversion from JFFS2 (i.e. Linux) to/from
+ whatever OS we're actually running on here too. */
+
+#if defined(JFFS2_NATIVE_ENDIAN)
+#define cpu_to_je16(x) ((jint16_t){x})
+#define cpu_to_je32(x) ((jint32_t){x})
+#define cpu_to_jemode(x) ((jmode_t){os_to_jffs2_mode(x)})
+
+#define je16_to_cpu(x) ((x).v16)
+#define je32_to_cpu(x) ((x).v32)
+#define jemode_to_cpu(x) (jffs2_to_os_mode((x).m))
+#elif defined(JFFS2_BIG_ENDIAN)
+#define cpu_to_je16(x) ((jint16_t){cpu_to_be16(x)})
+#define cpu_to_je32(x) ((jint32_t){cpu_to_be32(x)})
+#define cpu_to_jemode(x) ((jmode_t){cpu_to_be32(os_to_jffs2_mode(x))})
+
+#define je16_to_cpu(x) (be16_to_cpu(x.v16))
+#define je32_to_cpu(x) (be32_to_cpu(x.v32))
+#define jemode_to_cpu(x) (be32_to_cpu(jffs2_to_os_mode((x).m)))
+#elif defined(JFFS2_LITTLE_ENDIAN)
+#define cpu_to_je16(x) ((jint16_t){cpu_to_le16(x)})
+#define cpu_to_je32(x) ((jint32_t){cpu_to_le32(x)})
+#define cpu_to_jemode(x) ((jmode_t){cpu_to_le32(os_to_jffs2_mode(x))})
+
+#define je16_to_cpu(x) (le16_to_cpu(x.v16))
+#define je32_to_cpu(x) (le32_to_cpu(x.v32))
+#define jemode_to_cpu(x) (le32_to_cpu(jffs2_to_os_mode((x).m)))
+#else
+#error wibble
+#endif
+
struct jffs2_unknown_node
{
/* All start like this */
-/* $Id: jffs2_fs_i.h,v 1.16 2003/01/09 14:03:21 dwmw2 Exp $ */
+/* $Id: jffs2_fs_i.h,v 1.15 2002/11/12 09:42:49 dwmw2 Exp $ */
#ifndef _JFFS2_FS_I
#define _JFFS2_FS_I
uint16_t flags;
uint8_t usercompr;
-#if !defined (__ECOS)
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,2)
struct inode vfs_inode;
#endif
-#endif
};
#endif /* _JFFS2_FS_I */
struct console_font_op {
unsigned int op; /* operation code KD_FONT_OP_* */
unsigned int flags; /* KD_FONT_FLAG_* */
- unsigned int width, height; /* font size */
- unsigned int charcount;
- unsigned char __user *data; /* font data with height fixed to 32 */
-};
-
-struct console_font {
unsigned int width, height; /* font size */
unsigned int charcount;
unsigned char *data; /* font data with height fixed to 32 */
/* defines only for the constants which don't work well as enums */
#define ATA_TAG_POISON 0xfafbfcfdU
-#undef PORT_UNKNOWN
enum {
/* various global constants */
LIBATA_MAX_PRD = ATA_MAX_PRD / 2,
#define hlist_entry(ptr, type, member) container_of(ptr,type,member)
+/* Cannot easily do prefetch unfortunately */
#define hlist_for_each(pos, head) \
for (pos = (head)->first; pos && ({ prefetch(pos->next); 1; }); \
pos = pos->next)
#define hlist_for_each_safe(pos, n, head) \
- for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
+ for (pos = (head)->first; n = pos ? pos->next : 0, pos; \
pos = n)
/**
pos && ({ n = pos->next; 1; }) && \
({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
pos = n)
-
-/**
- * hlist_for_each_entry_rcu - iterate over rcu list of given type
- * @pos: the type * to use as a loop counter.
- * @pos: the &struct hlist_node to use as a loop counter.
- * @head: the head for your list.
- * @member: the name of the hlist_node within the struct.
- *
- * This list-traversal primitive may safely run concurrently with
- * the _rcu list-mutation primitives such as hlist_add_rcu()
- * as long as the traversal is guarded by rcu_read_lock().
- */
-#define hlist_for_each_entry_rcu(tpos, pos, head, member) \
- for (pos = (head)->first; \
- pos && ({ prefetch(pos->next); 1;}) && \
- ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
- pos = pos->next, ({ smp_read_barrier_depends(); 0; }) )
-
#else
#warning "don't include kernel headers in userspace"
#endif /* __KERNEL__ */
#define MICROCODE_MINOR 184
#define MWAVE_MINOR 219 /* ACP/Mwave Modem */
#define MPT_MINOR 220
-#define CRASH_DUMP_MINOR 230 /* LKCD */
#define MISC_DYNAMIC_MINOR 255
#define TUN_MINOR 200
extern unsigned long vmalloc_earlyreserve;
extern int page_cluster;
-extern int sysctl_legacy_va_layout;
-
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
void *virtual; /* Kernel virtual address (NULL if
not kmapped, ie. highmem) */
#endif /* WANT_PAGE_VIRTUAL */
-#ifdef CONFIG_CKRM_RES_MEM
- void *memclass;
-#endif // CONFIG_CKRM_RES_MEM
};
/*
struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
unsigned long addr);
struct file *shmem_file_setup(char * name, loff_t size, unsigned long flags);
-int shmem_lock(struct file *file, int lock, struct user_struct *user);
+int shmem_lock(struct file * file, int lock, struct user_struct *);
int shmem_zero_setup(struct vm_area_struct *);
static inline int can_do_mlock(void)
return 1;
return 0;
}
-extern int user_shm_lock(size_t, struct user_struct *);
-extern void user_shm_unlock(size_t, struct user_struct *);
+
/*
* Parameter block passed down to zap_pte_range in exceptional cases.
unsigned long addr, unsigned long len, pgoff_t pgoff);
extern void exit_mmap(struct mm_struct *);
-extern unsigned long get_unmapped_area_prot(struct file *, unsigned long, unsigned long, unsigned long, unsigned long, int);
-
-
-static inline unsigned long get_unmapped_area(struct file * file, unsigned long addr,
- unsigned long len, unsigned long pgoff, unsigned long flags)
-{
- return get_unmapped_area_prot(file, addr, len, pgoff, flags, 0);
-}
+extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
extern unsigned long do_mmap_pgoff(struct mm_struct *mm, struct file *file,
unsigned long addr, unsigned long len,
extern struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr);
+extern unsigned int nr_used_zone_pages(void);
+
extern struct page * vmalloc_to_page(void *addr);
extern struct page * follow_page(struct mm_struct *mm, unsigned long address,
int write);
-#include <linux/ckrm_mem_inline.h>
static inline void
add_page_to_active_list(struct zone *zone, struct page *page)
{
list_add(&page->lru, &zone->active_list);
zone->nr_active++;
- ckrm_mem_inc_active(page);
}
static inline void
{
list_add(&page->lru, &zone->inactive_list);
zone->nr_inactive++;
- ckrm_mem_inc_inactive(page);
}
static inline void
{
list_del(&page->lru);
zone->nr_active--;
- ckrm_mem_dec_active(page);
}
static inline void
{
list_del(&page->lru);
zone->nr_inactive--;
- ckrm_mem_dec_inactive(page);
}
static inline void
if (PageActive(page)) {
ClearPageActive(page);
zone->nr_active--;
- ckrm_mem_dec_active(page);
} else {
zone->nr_inactive--;
- ckrm_mem_dec_inactive(page);
}
}
#define for_each_zone(zone) \
for (zone = pgdat_list->node_zones; zone; zone = next_zone(zone))
-static inline int is_highmem_idx(int idx)
-{
- return (idx == ZONE_HIGHMEM);
-}
-
-static inline int is_normal_idx(int idx)
-{
- return (idx == ZONE_NORMAL);
-}
/**
* is_highmem - helper function to quickly check if a struct zone is a
* highmem zone or not. This is an attempt to keep references
*/
static inline int is_highmem(struct zone *zone)
{
- return (is_highmem_idx(zone - zone->zone_pgdat->node_zones));
+ return (zone - zone->zone_pgdat->node_zones == ZONE_HIGHMEM);
}
static inline int is_normal(struct zone *zone)
{
- return (is_normal_idx(zone - zone->zone_pgdat->node_zones));
+ return (zone - zone->zone_pgdat->node_zones == ZONE_NORMAL);
}
/* These two functions are used to setup the per zone pages min values */
struct ctl_table;
struct file;
int min_free_kbytes_sysctl_handler(struct ctl_table *, int, struct file *,
- void __user *, size_t *, loff_t *);
+ void __user *, size_t *);
int lower_zone_protection_sysctl_handler(struct ctl_table *, int, struct file *,
- void __user *, size_t *, loff_t *);
+ void __user *, size_t *);
#include <linux/topology.h>
/* Returns the number of the current Node. */
#define MNT_NOSUID 1
#define MNT_NODEV 2
#define MNT_NOEXEC 4
-#define MNT_RDONLY 8
-#define MNT_NOATIME 16
-#define MNT_NODIRATIME 32
struct vfsmount
{
struct namespace *mnt_namespace; /* containing namespace */
};
-#define MNT_IS_RDONLY(m) ((m) && ((m)->mnt_flags & MNT_RDONLY))
-#define MNT_IS_NOATIME(m) ((m) && ((m)->mnt_flags & MNT_NOATIME))
-#define MNT_IS_NODIRATIME(m) ((m) && ((m)->mnt_flags & MNT_NODIRATIME))
-
static inline struct vfsmount *mntget(struct vfsmount *mnt)
{
if (mnt)
/* Common Flash Interface structures
* See http://support.intel.com/design/flash/technote/index.htm
- * $Id: cfi.h,v 1.45 2004/07/20 02:44:27 dwmw2 Exp $
+ * $Id: cfi.h,v 1.35 2003/05/28 15:37:32 dwmw2 Exp $
*/
#ifndef __MTD_CFI_H__
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/mtd/flashchip.h>
-#include <linux/mtd/map.h>
#include <linux/mtd/cfi_endian.h>
+/*
+ * You can optimize the code size and performance by defining only
+ * the geometry(ies) available on your hardware.
+ * CFIDEV_INTERLEAVE_n, where represents the interleave (number of chips to fill the bus width)
+ * CFIDEV_BUSWIDTH_n, where n is the bus width in bytes (1, 2, 4 or 8 bytes)
+ *
+ * By default, all (known) geometries are supported.
+ */
+
+#ifndef CONFIG_MTD_CFI_GEOMETRY
+
+/* The default case - support all but 64-bit, which has
+ a performance penalty */
+
+#define CFIDEV_INTERLEAVE_1 (1)
+#define CFIDEV_INTERLEAVE_2 (2)
+#define CFIDEV_INTERLEAVE_4 (4)
+
+#define CFIDEV_BUSWIDTH_1 (1)
+#define CFIDEV_BUSWIDTH_2 (2)
+#define CFIDEV_BUSWIDTH_4 (4)
+
+typedef __u32 cfi_word;
+
+#else
+
+/* Explicitly configured buswidth/interleave support */
+
#ifdef CONFIG_MTD_CFI_I1
-#define cfi_interleave(cfi) 1
-#define cfi_interleave_is_1(cfi) (cfi_interleave(cfi) == 1)
+#define CFIDEV_INTERLEAVE_1 (1)
+#endif
+#ifdef CONFIG_MTD_CFI_I2
+#define CFIDEV_INTERLEAVE_2 (2)
+#endif
+#ifdef CONFIG_MTD_CFI_I4
+#define CFIDEV_INTERLEAVE_4 (4)
+#endif
+#ifdef CONFIG_MTD_CFI_I8
+#define CFIDEV_INTERLEAVE_8 (8)
+#endif
+
+#ifdef CONFIG_MTD_CFI_B1
+#define CFIDEV_BUSWIDTH_1 (1)
+#endif
+#ifdef CONFIG_MTD_CFI_B2
+#define CFIDEV_BUSWIDTH_2 (2)
+#endif
+#ifdef CONFIG_MTD_CFI_B4
+#define CFIDEV_BUSWIDTH_4 (4)
+#endif
+#ifdef CONFIG_MTD_CFI_B8
+#define CFIDEV_BUSWIDTH_8 (8)
+#endif
+
+/* pick the largest necessary */
+#ifdef CONFIG_MTD_CFI_B8
+typedef __u64 cfi_word;
+
+/* This only works if asm/io.h is included first */
+#ifndef __raw_readll
+#define __raw_readll(addr) (*(volatile __u64 *)(addr))
+#endif
+#ifndef __raw_writell
+#define __raw_writell(v, addr) (*(volatile __u64 *)(addr) = (v))
+#endif
+#define CFI_WORD_64
+#else /* CONFIG_MTD_CFI_B8 */
+/* All others can use 32-bits. It's probably more efficient than
+ the smaller types anyway */
+typedef __u32 cfi_word;
+#endif /* CONFIG_MTD_CFI_B8 */
+
+#endif
+
+/*
+ * The following macros are used to select the code to execute:
+ * cfi_buswidth_is_*()
+ * cfi_interleave_is_*()
+ * [where * is either 1, 2, 4, or 8]
+ * Those macros should be used with 'if' statements. If only one of few
+ * geometry arrangements are selected, they expand to constants thus allowing
+ * the compiler (most of them being 0) to optimize away all the unneeded code,
+ * while still validating the syntax (which is not possible with embedded
+ * #if ... #endif constructs).
+ * The exception to this is the 64-bit versions, which need an extension
+ * to the cfi_word type, and cause compiler warnings about shifts being
+ * out of range.
+ */
+
+#ifdef CFIDEV_INTERLEAVE_1
+# ifdef CFIDEV_INTERLEAVE
+# undef CFIDEV_INTERLEAVE
+# define CFIDEV_INTERLEAVE (cfi->interleave)
+# else
+# define CFIDEV_INTERLEAVE CFIDEV_INTERLEAVE_1
+# endif
+# define cfi_interleave_is_1() (CFIDEV_INTERLEAVE == CFIDEV_INTERLEAVE_1)
#else
-#define cfi_interleave_is_1(cfi) (0)
+# define cfi_interleave_is_1() (0)
#endif
-#ifdef CONFIG_MTD_CFI_I2
-# ifdef cfi_interleave
-# undef cfi_interleave
-# define cfi_interleave(cfi) ((cfi)->interleave)
+#ifdef CFIDEV_INTERLEAVE_2
+# ifdef CFIDEV_INTERLEAVE
+# undef CFIDEV_INTERLEAVE
+# define CFIDEV_INTERLEAVE (cfi->interleave)
# else
-# define cfi_interleave(cfi) 2
+# define CFIDEV_INTERLEAVE CFIDEV_INTERLEAVE_2
# endif
-#define cfi_interleave_is_2(cfi) (cfi_interleave(cfi) == 2)
+# define cfi_interleave_is_2() (CFIDEV_INTERLEAVE == CFIDEV_INTERLEAVE_2)
#else
-#define cfi_interleave_is_2(cfi) (0)
+# define cfi_interleave_is_2() (0)
#endif
-#ifdef CONFIG_MTD_CFI_I4
-# ifdef cfi_interleave
-# undef cfi_interleave
-# define cfi_interleave(cfi) ((cfi)->interleave)
+#ifdef CFIDEV_INTERLEAVE_4
+# ifdef CFIDEV_INTERLEAVE
+# undef CFIDEV_INTERLEAVE
+# define CFIDEV_INTERLEAVE (cfi->interleave)
# else
-# define cfi_interleave(cfi) 4
+# define CFIDEV_INTERLEAVE CFIDEV_INTERLEAVE_4
# endif
-#define cfi_interleave_is_4(cfi) (cfi_interleave(cfi) == 4)
+# define cfi_interleave_is_4() (CFIDEV_INTERLEAVE == CFIDEV_INTERLEAVE_4)
#else
-#define cfi_interleave_is_4(cfi) (0)
+# define cfi_interleave_is_4() (0)
#endif
-#ifdef CONFIG_MTD_CFI_I8
-# ifdef cfi_interleave
-# undef cfi_interleave
-# define cfi_interleave(cfi) ((cfi)->interleave)
+#ifdef CFIDEV_INTERLEAVE_8
+# ifdef CFIDEV_INTERLEAVE
+# undef CFIDEV_INTERLEAVE
+# define CFIDEV_INTERLEAVE (cfi->interleave)
# else
-# define cfi_interleave(cfi) 8
+# define CFIDEV_INTERLEAVE CFIDEV_INTERLEAVE_8
# endif
-#define cfi_interleave_is_8(cfi) (cfi_interleave(cfi) == 8)
+# define cfi_interleave_is_8() (CFIDEV_INTERLEAVE == CFIDEV_INTERLEAVE_8)
#else
-#define cfi_interleave_is_8(cfi) (0)
+# define cfi_interleave_is_8() (0)
#endif
-static inline int cfi_interleave_supported(int i)
-{
- switch (i) {
-#ifdef CONFIG_MTD_CFI_I1
- case 1:
+#ifndef CFIDEV_INTERLEAVE
+#error You must define at least one interleave to support!
#endif
-#ifdef CONFIG_MTD_CFI_I2
- case 2:
+
+#ifdef CFIDEV_BUSWIDTH_1
+# ifdef CFIDEV_BUSWIDTH
+# undef CFIDEV_BUSWIDTH
+# define CFIDEV_BUSWIDTH (map->buswidth)
+# else
+# define CFIDEV_BUSWIDTH CFIDEV_BUSWIDTH_1
+# endif
+# define cfi_buswidth_is_1() (CFIDEV_BUSWIDTH == CFIDEV_BUSWIDTH_1)
+#else
+# define cfi_buswidth_is_1() (0)
#endif
-#ifdef CONFIG_MTD_CFI_I4
- case 4:
+
+#ifdef CFIDEV_BUSWIDTH_2
+# ifdef CFIDEV_BUSWIDTH
+# undef CFIDEV_BUSWIDTH
+# define CFIDEV_BUSWIDTH (map->buswidth)
+# else
+# define CFIDEV_BUSWIDTH CFIDEV_BUSWIDTH_2
+# endif
+# define cfi_buswidth_is_2() (CFIDEV_BUSWIDTH == CFIDEV_BUSWIDTH_2)
+#else
+# define cfi_buswidth_is_2() (0)
#endif
-#ifdef CONFIG_MTD_CFI_I8
- case 8:
+
+#ifdef CFIDEV_BUSWIDTH_4
+# ifdef CFIDEV_BUSWIDTH
+# undef CFIDEV_BUSWIDTH
+# define CFIDEV_BUSWIDTH (map->buswidth)
+# else
+# define CFIDEV_BUSWIDTH CFIDEV_BUSWIDTH_4
+# endif
+# define cfi_buswidth_is_4() (CFIDEV_BUSWIDTH == CFIDEV_BUSWIDTH_4)
+#else
+# define cfi_buswidth_is_4() (0)
#endif
- return 1;
- default:
- return 0;
- }
-}
+#ifdef CFIDEV_BUSWIDTH_8
+# ifdef CFIDEV_BUSWIDTH
+# undef CFIDEV_BUSWIDTH
+# define CFIDEV_BUSWIDTH (map->buswidth)
+# else
+# define CFIDEV_BUSWIDTH CFIDEV_BUSWIDTH_8
+# endif
+# define cfi_buswidth_is_8() (CFIDEV_BUSWIDTH == CFIDEV_BUSWIDTH_8)
+#else
+# define cfi_buswidth_is_8() (0)
+#endif
+#ifndef CFIDEV_BUSWIDTH
+#error You must define at least one bus width to support!
+#endif
/* NB: these values must represents the number of bytes needed to meet the
* device type (x8, x16, x32). Eg. a 32 bit device is 4 x 8 bytes.
/* Basic Query Structure */
struct cfi_ident {
- uint8_t qry[3];
- uint16_t P_ID;
- uint16_t P_ADR;
- uint16_t A_ID;
- uint16_t A_ADR;
- uint8_t VccMin;
- uint8_t VccMax;
- uint8_t VppMin;
- uint8_t VppMax;
- uint8_t WordWriteTimeoutTyp;
- uint8_t BufWriteTimeoutTyp;
- uint8_t BlockEraseTimeoutTyp;
- uint8_t ChipEraseTimeoutTyp;
- uint8_t WordWriteTimeoutMax;
- uint8_t BufWriteTimeoutMax;
- uint8_t BlockEraseTimeoutMax;
- uint8_t ChipEraseTimeoutMax;
- uint8_t DevSize;
- uint16_t InterfaceDesc;
- uint16_t MaxBufWriteSize;
- uint8_t NumEraseRegions;
- uint32_t EraseRegionInfo[0]; /* Not host ordered */
+ __u8 qry[3];
+ __u16 P_ID;
+ __u16 P_ADR;
+ __u16 A_ID;
+ __u16 A_ADR;
+ __u8 VccMin;
+ __u8 VccMax;
+ __u8 VppMin;
+ __u8 VppMax;
+ __u8 WordWriteTimeoutTyp;
+ __u8 BufWriteTimeoutTyp;
+ __u8 BlockEraseTimeoutTyp;
+ __u8 ChipEraseTimeoutTyp;
+ __u8 WordWriteTimeoutMax;
+ __u8 BufWriteTimeoutMax;
+ __u8 BlockEraseTimeoutMax;
+ __u8 ChipEraseTimeoutMax;
+ __u8 DevSize;
+ __u16 InterfaceDesc;
+ __u16 MaxBufWriteSize;
+ __u8 NumEraseRegions;
+ __u32 EraseRegionInfo[0]; /* Not host ordered */
} __attribute__((packed));
/* Extended Query Structure for both PRI and ALT */
struct cfi_extquery {
- uint8_t pri[3];
- uint8_t MajorVersion;
- uint8_t MinorVersion;
+ __u8 pri[3];
+ __u8 MajorVersion;
+ __u8 MinorVersion;
} __attribute__((packed));
/* Vendor-Specific PRI for Intel/Sharp Extended Command Set (0x0001) */
struct cfi_pri_intelext {
- uint8_t pri[3];
- uint8_t MajorVersion;
- uint8_t MinorVersion;
- uint32_t FeatureSupport; /* if bit 31 is set then an additional uint32_t feature
- block follows - FIXME - not currently supported */
- uint8_t SuspendCmdSupport;
- uint16_t BlkStatusRegMask;
- uint8_t VccOptimal;
- uint8_t VppOptimal;
- uint8_t NumProtectionFields;
- uint16_t ProtRegAddr;
- uint8_t FactProtRegSize;
- uint8_t UserProtRegSize;
-} __attribute__((packed));
-
-/* Vendor-Specific PRI for AMD/Fujitsu Extended Command Set (0x0002) */
-
-struct cfi_pri_amdstd {
- uint8_t pri[3];
- uint8_t MajorVersion;
- uint8_t MinorVersion;
- uint8_t SiliconRevision; /* bits 1-0: Address Sensitive Unlock */
- uint8_t EraseSuspend;
- uint8_t BlkProt;
- uint8_t TmpBlkUnprotect;
- uint8_t BlkProtUnprot;
- uint8_t SimultaneousOps;
- uint8_t BurstMode;
- uint8_t PageMode;
- uint8_t VppMin;
- uint8_t VppMax;
- uint8_t TopBottom;
+ __u8 pri[3];
+ __u8 MajorVersion;
+ __u8 MinorVersion;
+ __u32 FeatureSupport;
+ __u8 SuspendCmdSupport;
+ __u16 BlkStatusRegMask;
+ __u8 VccOptimal;
+ __u8 VppOptimal;
+ __u8 NumProtectionFields;
+ __u16 ProtRegAddr;
+ __u8 FactProtRegSize;
+ __u8 UserProtRegSize;
} __attribute__((packed));
struct cfi_pri_query {
- uint8_t NumFields;
- uint32_t ProtField[1]; /* Not host ordered */
+ __u8 NumFields;
+ __u32 ProtField[1]; /* Not host ordered */
} __attribute__((packed));
struct cfi_bri_query {
- uint8_t PageModeReadCap;
- uint8_t NumFields;
- uint32_t ConfField[1]; /* Not host ordered */
+ __u8 PageModeReadCap;
+ __u8 NumFields;
+ __u32 ConfField[1]; /* Not host ordered */
} __attribute__((packed));
#define P_ID_NONE 0
#define P_ID_AMD_STD 2
#define P_ID_INTEL_STD 3
#define P_ID_AMD_EXT 4
-#define P_ID_ST_ADV 32
#define P_ID_MITSUBISHI_STD 256
#define P_ID_MITSUBISHI_EXT 257
-#define P_ID_SST_PAGE 258
#define P_ID_RESERVED 65535
#define CFI_MODE_JEDEC 0
struct cfi_private {
- uint16_t cmdset;
+ __u16 cmdset;
void *cmdset_priv;
int interleave;
int device_type;
int cfi_mode; /* Are we a JEDEC device pretending to be CFI? */
int addr_unlock1;
int addr_unlock2;
+ int fast_prog;
struct mtd_info *(*cmdset_setup)(struct map_info *);
struct cfi_ident *cfiq; /* For now only one. We insist that all devs
must be of the same type. */
struct flchip chips[0]; /* per-chip data structure for each chip */
};
+#define MAX_CFI_CHIPS 8 /* Entirely arbitrary to avoid realloc() */
+
/*
* Returns the command address according to the given geometry.
*/
-static inline uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs, int interleave, int type)
+static inline __u32 cfi_build_cmd_addr(__u32 cmd_ofs, int interleave, int type)
{
return (cmd_ofs * type) * interleave;
}
/*
- * Transforms the CFI command for the given geometry (bus width & interleave).
- * It looks too long to be inline, but in the common case it should almost all
- * get optimised away.
+ * Transforms the CFI command for the given geometry (bus width & interleave.
*/
-static inline map_word cfi_build_cmd(u_char cmd, struct map_info *map, struct cfi_private *cfi)
+static inline cfi_word cfi_build_cmd(u_char cmd, struct map_info *map, struct cfi_private *cfi)
{
- map_word val = { {0} };
- int wordwidth, words_per_bus, chip_mode, chips_per_word;
- unsigned long onecmd;
- int i;
-
- /* We do it this way to give the compiler a fighting chance
- of optimising away all the crap for 'bankwidth' larger than
- an unsigned long, in the common case where that support is
- disabled */
- if (map_bankwidth_is_large(map)) {
- wordwidth = sizeof(unsigned long);
- words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1
- } else {
- wordwidth = map_bankwidth(map);
- words_per_bus = 1;
- }
-
- chip_mode = map_bankwidth(map) / cfi_interleave(cfi);
- chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);
-
- /* First, determine what the bit-pattern should be for a single
- device, according to chip mode and endianness... */
- switch (chip_mode) {
- default: BUG();
- case 1:
- onecmd = cmd;
- break;
- case 2:
- onecmd = cpu_to_cfi16(cmd);
- break;
- case 4:
- onecmd = cpu_to_cfi32(cmd);
- break;
+ cfi_word val = 0;
+
+ if (cfi_buswidth_is_1()) {
+ /* 1 x8 device */
+ val = cmd;
+ } else if (cfi_buswidth_is_2()) {
+ if (cfi_interleave_is_1()) {
+ /* 1 x16 device in x16 mode */
+ val = cpu_to_cfi16(cmd);
+ } else if (cfi_interleave_is_2()) {
+ /* 2 (x8, x16 or x32) devices in x8 mode */
+ val = cpu_to_cfi16((cmd << 8) | cmd);
+ }
+ } else if (cfi_buswidth_is_4()) {
+ if (cfi_interleave_is_1()) {
+ /* 1 x32 device in x32 mode */
+ val = cpu_to_cfi32(cmd);
+ } else if (cfi_interleave_is_2()) {
+ /* 2 x16 device in x16 mode */
+ val = cpu_to_cfi32((cmd << 16) | cmd);
+ } else if (cfi_interleave_is_4()) {
+ /* 4 (x8, x16 or x32) devices in x8 mode */
+ val = (cmd << 16) | cmd;
+ val = cpu_to_cfi32((val << 8) | val);
+ }
+#ifdef CFI_WORD_64
+ } else if (cfi_buswidth_is_8()) {
+ if (cfi_interleave_is_1()) {
+ /* 1 x64 device in x64 mode */
+ val = cpu_to_cfi64(cmd);
+ } else if (cfi_interleave_is_2()) {
+ /* 2 x32 device in x32 mode */
+ val = cmd;
+ val = cpu_to_cfi64((val << 32) | val);
+ } else if (cfi_interleave_is_4()) {
+ /* 4 (x16, x32 or x64) devices in x16 mode */
+ val = (cmd << 16) | cmd;
+ val = cpu_to_cfi64((val << 32) | val);
+ } else if (cfi_interleave_is_8()) {
+ /* 8 (x8, x16 or x32) devices in x8 mode */
+ val = (cmd << 8) | cmd;
+ val = (val << 16) | val;
+ val = (val << 32) | val;
+ val = cpu_to_cfi64(val);
+ }
+#endif /* CFI_WORD_64 */
}
+ return val;
+}
+#define CMD(x) cfi_build_cmd((x), map, cfi)
- /* Now replicate it across the size of an unsigned long, or
- just to the bus width as appropriate */
- switch (chips_per_word) {
- default: BUG();
-#if BITS_PER_LONG >= 64
- case 8:
- onecmd |= (onecmd << (chip_mode * 32));
-#endif
- case 4:
- onecmd |= (onecmd << (chip_mode * 16));
- case 2:
- onecmd |= (onecmd << (chip_mode * 8));
- case 1:
- ;
- }
+/*
+ * Read a value according to the bus width.
+ */
- /* And finally, for the multi-word case, replicate it
- in all words in the structure */
- for (i=0; i < words_per_bus; i++) {
- val.x[i] = onecmd;
+static inline cfi_word cfi_read(struct map_info *map, __u32 addr)
+{
+ if (cfi_buswidth_is_1()) {
+ return map_read8(map, addr);
+ } else if (cfi_buswidth_is_2()) {
+ return map_read16(map, addr);
+ } else if (cfi_buswidth_is_4()) {
+ return map_read32(map, addr);
+ } else if (cfi_buswidth_is_8()) {
+ return map_read64(map, addr);
+ } else {
+ return 0;
}
+}
- return val;
+/*
+ * Write a value according to the bus width.
+ */
+
+static inline void cfi_write(struct map_info *map, cfi_word val, __u32 addr)
+{
+ if (cfi_buswidth_is_1()) {
+ map_write8(map, val, addr);
+ } else if (cfi_buswidth_is_2()) {
+ map_write16(map, val, addr);
+ } else if (cfi_buswidth_is_4()) {
+ map_write32(map, val, addr);
+ } else if (cfi_buswidth_is_8()) {
+ map_write64(map, val, addr);
+ }
}
-#define CMD(x) cfi_build_cmd((x), map, cfi)
/*
* Sends a CFI command to a bank of flash for the given geometry.
* If prev_val is non-null, it will be set to the value at the command address,
* before the command was written.
*/
-static inline uint32_t cfi_send_gen_cmd(u_char cmd, uint32_t cmd_addr, uint32_t base,
+static inline __u32 cfi_send_gen_cmd(u_char cmd, __u32 cmd_addr, __u32 base,
struct map_info *map, struct cfi_private *cfi,
- int type, map_word *prev_val)
+ int type, cfi_word *prev_val)
{
- map_word val;
- uint32_t addr = base + cfi_build_cmd_addr(cmd_addr, cfi_interleave(cfi), type);
+ cfi_word val;
+ __u32 addr = base + cfi_build_cmd_addr(cmd_addr, CFIDEV_INTERLEAVE, type);
val = cfi_build_cmd(cmd, map, cfi);
if (prev_val)
- *prev_val = map_read(map, addr);
+ *prev_val = cfi_read(map, addr);
- map_write(map, val, addr);
+ cfi_write(map, val, addr);
return addr - base;
}
-static inline uint8_t cfi_read_query(struct map_info *map, uint32_t addr)
+static inline __u8 cfi_read_query(struct map_info *map, __u32 addr)
{
- map_word val = map_read(map, addr);
-
- if (map_bankwidth_is_1(map)) {
- return val.x[0];
- } else if (map_bankwidth_is_2(map)) {
- return cfi16_to_cpu(val.x[0]);
+ if (cfi_buswidth_is_1()) {
+ return map_read8(map, addr);
+ } else if (cfi_buswidth_is_2()) {
+ return cfi16_to_cpu(map_read16(map, addr));
+ } else if (cfi_buswidth_is_4()) {
+ return cfi32_to_cpu(map_read32(map, addr));
+ } else if (cfi_buswidth_is_8()) {
+ return cfi64_to_cpu(map_read64(map, addr));
} else {
- /* No point in a 64-bit byteswap since that would just be
- swapping the responses from different chips, and we are
- only interested in one chip (a representative sample) */
- return cfi32_to_cpu(val.x[0]);
+ return 0;
}
}
static inline void cfi_udelay(int us)
{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
unsigned long t = us * HZ / 1000000;
if (t) {
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(t);
return;
}
+#endif
udelay(us);
cond_resched();
}
spin_unlock_bh(mutex);
}
-struct cfi_extquery *cfi_read_pri(struct map_info *map, uint16_t adr, uint16_t size,
- const char* name);
-
-struct cfi_fixup {
- uint16_t mfr;
- uint16_t id;
- void (*fixup)(struct map_info *map, void* param);
- void* param;
-};
-
-#define CFI_MFR_ANY 0xffff
-#define CFI_ID_ANY 0xffff
-
-void cfi_fixup(struct map_info *map, struct cfi_fixup* fixups);
-
#endif /* __MTD_CFI_H__ */
-/*
- * Linux driver for Disk-On-Chip devices
- *
- * Copyright (C) 1999 Machine Vision Holdings, Inc.
- * Copyright (C) 2001-2003 David Woodhouse <dwmw2@infradead.org>
- * Copyright (C) 2002-2003 Greg Ungerer <gerg@snapgear.com>
- * Copyright (C) 2002-2003 SnapGear Inc
- *
- * $Id: doc2000.h,v 1.22 2003/11/05 10:51:36 dwmw2 Exp $
- *
- * Released under GPL
- */
+
+/* Linux driver for Disk-On-Chip 2000 */
+/* (c) 1999 Machine Vision Holdings, Inc. */
+/* Author: David Woodhouse <dwmw2@mvhi.com> */
+/* $Id: doc2000.h,v 1.17 2003/06/12 01:20:46 gerg Exp $ */
#ifndef __MTD_DOC2000_H__
#define __MTD_DOC2000_H__
#include <linux/mtd/mtd.h>
-#include <asm/semaphore.h>
#define DoC_Sig1 0
#define DoC_Sig2 1
* Others use readb/writeb
*/
#if defined(__arm__)
-#define ReadDOC_(adr, reg) ((unsigned char)(*(volatile __u32 *)(((unsigned long)adr)+((reg)<<2))))
-#define WriteDOC_(d, adr, reg) do{ *(volatile __u32 *)(((unsigned long)adr)+((reg)<<2)) = (__u32)d; wmb();} while(0)
+#define ReadDOC_(adr, reg) ((unsigned char)(*(__u32 *)(((unsigned long)adr)+((reg)<<2))))
+#define WriteDOC_(d, adr, reg) do{ *(__u32 *)(((unsigned long)adr)+((reg)<<2)) = (__u32)d; wmb();} while(0)
#define DOC_IOREMAP_LEN 0x8000
#elif defined(__ppc__)
-#define ReadDOC_(adr, reg) ((unsigned char)(*(volatile __u16 *)(((unsigned long)adr)+((reg)<<1))))
-#define WriteDOC_(d, adr, reg) do{ *(volatile __u16 *)(((unsigned long)adr)+((reg)<<1)) = (__u16)d; wmb();} while(0)
+#define ReadDOC_(adr, reg) ((unsigned char)(*(__u16 *)(((unsigned long)adr)+((reg)<<1))))
+#define WriteDOC_(d, adr, reg) do{ *(__u16 *)(((unsigned long)adr)+((reg)<<1)) = (__u16)d; wmb();} while(0)
#define DOC_IOREMAP_LEN 0x4000
#else
#define ReadDOC_(adr, reg) readb(((unsigned long)adr) + (reg))
#define DOC_MODE_MDWREN 0x04
#define DOC_ChipID_Doc2k 0x20
-#define DOC_ChipID_Doc2kTSOP 0x21 /* internal number for MTD */
#define DOC_ChipID_DocMil 0x30
#define DOC_ChipID_DocMilPlus32 0x40
#define DOC_ChipID_DocMilPlus16 0x41
#define MAX_FLOORS 4
#define MAX_CHIPS 4
-#define MAX_FLOORS_MIL 1
+#define MAX_FLOORS_MIL 4
#define MAX_CHIPS_MIL 1
-#define MAX_FLOORS_MPLUS 2
+#define MAX_FLOORS_MPLUS 1
#define MAX_CHIPS_MPLUS 1
#define ADDR_COLUMN 1
unsigned long physadr;
unsigned long virtadr;
unsigned long totlen;
- unsigned char ChipID; /* Type of DiskOnChip */
+ char ChipID; /* Type of DiskOnChip */
int ioreg;
unsigned long mfr; /* Flash IDs - only one type of flash per device */
*
* (C) 2000 Red Hat. GPLd.
*
- * $Id: flashchip.h,v 1.14 2004/06/15 16:44:59 nico Exp $
+ * $Id: flashchip.h,v 1.9 2003/04/30 11:15:22 dwmw2 Exp $
*
*/
/* NOTE: confusingly, this can be used to refer to more than one chip at a time,
- if they're interleaved. This can even refer to individual partitions on
- the same physical chip when present. */
+ if they're interleaved. */
struct flchip {
unsigned long start; /* Offset within the map */
int write_suspended:1;
int erase_suspended:1;
- unsigned long in_progress_block_addr;
spinlock_t *mutex;
spinlock_t _spinlock; /* We do it like this because sometimes they'll be shared. */
int word_write_time;
int buffer_write_time;
int erase_time;
-
- void *priv;
};
-/* This is used to handle contention on write/erase operations
- between partitions of the same physical chip. */
-struct flchip_shared {
- spinlock_t lock;
- struct flchip *writing;
- struct flchip *erasing;
-};
#endif /* __MTD_FLASHCHIP_H__ */
/*
- * $Id: ftl.h,v 1.6 2003/01/24 13:20:04 dwmw2 Exp $
+ * $Id: ftl.h,v 1.5 2001/06/02 20:35:51 dwmw2 Exp $
*
* Derived from (and probably identical to):
* ftl.h 1.7 1999/10/25 20:23:17
/*
* (C) 2001, 2001 Red Hat, Inc.
* GPL'd
- * $Id: gen_probe.h,v 1.2 2003/11/08 00:51:21 dsaxena Exp $
+ * $Id: gen_probe.h,v 1.1 2001/09/02 18:50:13 dwmw2 Exp $
*/
#ifndef __LINUX_MTD_GEN_PROBE_H__
#include <linux/mtd/flashchip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/cfi.h>
-#include <asm/bitops.h>
struct chip_probe {
char *name;
int (*probe_chip)(struct map_info *map, __u32 base,
- unsigned long *chip_map, struct cfi_private *cfi);
+ struct flchip *chips, struct cfi_private *cfi);
+
};
struct mtd_info *mtd_do_chip_probe(struct map_info *map, struct chip_probe *cp);
*
* (C) Copyright 2002, Greg Ungerer (gerg@snapgear.com)
*
- * $Id: inftl.h,v 1.6 2004/06/30 14:49:00 dbrown Exp $
+ * $Id: inftl.h,v 1.3 2003/05/23 11:35:34 dwmw2 Exp $
*/
#ifndef __MTD_INFTL_H__
#define __MTD_INFTL_H__
-#ifndef __KERNEL__
-#error This is a kernel header. Perhaps include nftl-user.h instead?
-#endif
-
#include <linux/mtd/blktrans.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nftl.h>
-#include <mtd/inftl-user.h>
+#define OSAK_VERSION 0x5120
+#define PERCENTUSED 98
+
+#define SECTORSIZE 512
#ifndef INFTL_MAJOR
-#define INFTL_MAJOR 94
+#define INFTL_MAJOR 93 /* FIXME */
#endif
#define INFTL_PARTN_BITS 4
+/* Block Control Information */
+
+struct inftl_bci {
+ __u8 ECCsig[6];
+ __u8 Status;
+ __u8 Status1;
+} __attribute__((packed));
+
+struct inftl_unithead1 {
+ __u16 virtualUnitNo;
+ __u16 prevUnitNo;
+ __u8 ANAC;
+ __u8 NACs;
+ __u8 parityPerField;
+ __u8 discarded;
+} __attribute__((packed));
+
+struct inftl_unithead2 {
+ __u8 parityPerField;
+ __u8 ANAC;
+ __u16 prevUnitNo;
+ __u16 virtualUnitNo;
+ __u8 NACs;
+ __u8 discarded;
+} __attribute__((packed));
+
+struct inftl_unittail {
+ __u8 Reserved[4];
+ __u16 EraseMark;
+ __u16 EraseMark1;
+} __attribute__((packed));
+
+union inftl_uci {
+ struct inftl_unithead1 a;
+ struct inftl_unithead2 b;
+ struct inftl_unittail c;
+};
+
+struct inftl_oob {
+ struct inftl_bci b;
+ union inftl_uci u;
+};
+
+
+/* INFTL Media Header */
+
+struct INFTLPartition {
+ __u32 virtualUnits;
+ __u32 firstUnit;
+ __u32 lastUnit;
+ __u32 flags;
+ __u32 spareUnits;
+ __u32 Reserved0;
+ __u32 Reserved1;
+} __attribute__((packed));
+
+struct INFTLMediaHeader {
+ char bootRecordID[8];
+ __u32 NoOfBootImageBlocks;
+ __u32 NoOfBinaryPartitions;
+ __u32 NoOfBDTLPartitions;
+ __u32 BlockMultiplierBits;
+ __u32 FormatFlags;
+ __u32 OsakVersion;
+ __u32 PercentUsed;
+ struct INFTLPartition Partitions[4];
+} __attribute__((packed));
+
+/* Partition flag types */
+#define INFTL_BINARY 0x20000000
+#define INFTL_BDTL 0x40000000
+#define INFTL_LAST 0x80000000
+
+
#ifdef __KERNEL__
struct INFTLrecord {
struct mtd_blktrans_dev mbd;
- __u16 MediaUnit;
+ __u16 MediaUnit, SpareMediaUnit;
__u32 EraseSize;
struct INFTLMediaHeader MediaHdr;
int usecount;
unsigned int nb_blocks; /* number of physical blocks */
unsigned int nb_boot_blocks; /* number of blocks used by the bios */
struct erase_info instr;
- struct nand_oobinfo oobinfo;
};
int INFTL_mount(struct INFTLrecord *s);
/* Overhauled routines for dealing with different mmap regions of flash */
-/* $Id: map.h,v 1.43 2004/07/14 13:30:27 dwmw2 Exp $ */
+/* $Id: map.h,v 1.34 2003/05/28 12:42:22 dwmw2 Exp $ */
#ifndef __LINUX_MTD_MAP_H__
#define __LINUX_MTD_MAP_H__
#include <linux/config.h>
#include <linux/types.h>
#include <linux/list.h>
-#include <asm/unaligned.h>
#include <asm/system.h>
#include <asm/io.h>
-#include <asm/bug.h>
-
-#ifdef CONFIG_MTD_MAP_BANK_WIDTH_1
-#define map_bankwidth(map) 1
-#define map_bankwidth_is_1(map) (map_bankwidth(map) == 1)
-#define map_bankwidth_is_large(map) (0)
-#define map_words(map) (1)
-#define MAX_MAP_BANKWIDTH 1
-#else
-#define map_bankwidth_is_1(map) (0)
-#endif
-
-#ifdef CONFIG_MTD_MAP_BANK_WIDTH_2
-# ifdef map_bankwidth
-# undef map_bankwidth
-# define map_bankwidth(map) ((map)->bankwidth)
-# else
-# define map_bankwidth(map) 2
-# define map_bankwidth_is_large(map) (0)
-# define map_words(map) (1)
-# endif
-#define map_bankwidth_is_2(map) (map_bankwidth(map) == 2)
-#undef MAX_MAP_BANKWIDTH
-#define MAX_MAP_BANKWIDTH 2
-#else
-#define map_bankwidth_is_2(map) (0)
-#endif
-
-#ifdef CONFIG_MTD_MAP_BANK_WIDTH_4
-# ifdef map_bankwidth
-# undef map_bankwidth
-# define map_bankwidth(map) ((map)->bankwidth)
-# else
-# define map_bankwidth(map) 4
-# define map_bankwidth_is_large(map) (0)
-# define map_words(map) (1)
-# endif
-#define map_bankwidth_is_4(map) (map_bankwidth(map) == 4)
-#undef MAX_MAP_BANKWIDTH
-#define MAX_MAP_BANKWIDTH 4
-#else
-#define map_bankwidth_is_4(map) (0)
-#endif
-
-#ifdef CONFIG_MTD_MAP_BANK_WIDTH_8
-# ifdef map_bankwidth
-# undef map_bankwidth
-# define map_bankwidth(map) ((map)->bankwidth)
-# if BITS_PER_LONG < 64
-# undef map_bankwidth_is_large
-# define map_bankwidth_is_large(map) (map_bankwidth(map) > BITS_PER_LONG/8)
-# undef map_words
-# define map_words(map) (map_bankwidth(map) / sizeof(unsigned long))
-# endif
-# else
-# define map_bankwidth(map) 8
-# define map_bankwidth_is_large(map) (BITS_PER_LONG < 64)
-# define map_words(map) (map_bankwidth(map) / sizeof(unsigned long))
-# endif
-#define map_bankwidth_is_8(map) (map_bankwidth(map) == 8)
-#undef MAX_MAP_BANKWIDTH
-#define MAX_MAP_BANKWIDTH 8
-#else
-#define map_bankwidth_is_8(map) (0)
-#endif
-
-#ifdef CONFIG_MTD_MAP_BANK_WIDTH_16
-# ifdef map_bankwidth
-# undef map_bankwidth
-# define map_bankwidth(map) ((map)->bankwidth)
-# undef map_bankwidth_is_large
-# define map_bankwidth_is_large(map) (map_bankwidth(map) > BITS_PER_LONG/8)
-# undef map_words
-# define map_words(map) (map_bankwidth(map) / sizeof(unsigned long))
-# else
-# define map_bankwidth(map) 16
-# define map_bankwidth_is_large(map) (1)
-# define map_words(map) (map_bankwidth(map) / sizeof(unsigned long))
-# endif
-#define map_bankwidth_is_16(map) (map_bankwidth(map) == 16)
-#undef MAX_MAP_BANKWIDTH
-#define MAX_MAP_BANKWIDTH 16
-#else
-#define map_bankwidth_is_16(map) (0)
-#endif
-
-#ifdef CONFIG_MTD_MAP_BANK_WIDTH_32
-# ifdef map_bankwidth
-# undef map_bankwidth
-# define map_bankwidth(map) ((map)->bankwidth)
-# undef map_bankwidth_is_large
-# define map_bankwidth_is_large(map) (map_bankwidth(map) > BITS_PER_LONG/8)
-# undef map_words
-# define map_words(map) (map_bankwidth(map) / sizeof(unsigned long))
-# else
-# define map_bankwidth(map) 32
-# define map_bankwidth_is_large(map) (1)
-# define map_words(map) (map_bankwidth(map) / sizeof(unsigned long))
-# endif
-#define map_bankwidth_is_32(map) (map_bankwidth(map) == 32)
-#undef MAX_MAP_BANKWIDTH
-#define MAX_MAP_BANKWIDTH 32
-#else
-#define map_bankwidth_is_32(map) (0)
-#endif
-
-#ifndef map_bankwidth
-#error "No bus width supported. What's the point?"
-#endif
-
-static inline int map_bankwidth_supported(int w)
-{
- switch (w) {
-#ifdef CONFIG_MTD_MAP_BANK_WIDTH_1
- case 1:
-#endif
-#ifdef CONFIG_MTD_MAP_BANK_WIDTH_2
- case 2:
-#endif
-#ifdef CONFIG_MTD_MAP_BANK_WIDTH_4
- case 4:
-#endif
-#ifdef CONFIG_MTD_MAP_BANK_WIDTH_8
- case 8:
-#endif
-#ifdef CONFIG_MTD_MAP_BANK_WIDTH_16
- case 16:
-#endif
-#ifdef CONFIG_MTD_MAP_BANK_WIDTH_32
- case 32:
-#endif
- return 1;
-
- default:
- return 0;
- }
-}
-
-#define MAX_MAP_LONGS ( ((MAX_MAP_BANKWIDTH*8) + BITS_PER_LONG - 1) / BITS_PER_LONG )
-
-typedef union {
- unsigned long x[MAX_MAP_LONGS];
-} map_word;
/* The map stuff is very simple. You fill in your struct map_info with
a handful of routines for accessing the device, making sure they handle
paging etc. correctly if your device needs it. Then you pass it off
- to a chip probe routine -- either JEDEC or CFI probe or both -- via
- do_map_probe(). If a chip is recognised, the probe code will invoke the
- appropriate chip driver (if present) and return a struct mtd_info.
- At which point, you fill in the mtd->module with your own module
- address, and register it with the MTD core code. Or you could partition
- it and register the partitions instead, or keep it for your own private
- use; whatever.
+ to a chip driver which deals with a mapped device - generally either
+ do_cfi_probe() or do_ram_probe(), either of which will return a
+ struct mtd_info if they liked what they saw. At which point, you
+ fill in the mtd->module with your own module address, and register
+ it.
The mtd->priv field will point to the struct map_info, and any further
private data required by the chip driver is linked from the
unsigned long virt;
void *cached;
- int bankwidth; /* in octets. This isn't necessarily the width
- of actual bus cycles -- it's the repeat interval
- in bytes, before you are talking to the first chip again.
- */
+ int buswidth; /* in octets */
#ifdef CONFIG_MTD_COMPLEX_MAPPINGS
- map_word (*read)(struct map_info *, unsigned long);
+ u8 (*read8)(struct map_info *, unsigned long);
+ u16 (*read16)(struct map_info *, unsigned long);
+ u32 (*read32)(struct map_info *, unsigned long);
+ u64 (*read64)(struct map_info *, unsigned long);
+ /* If it returned a 'long' I'd call it readl.
+ * It doesn't.
+ * I won't.
+ * dwmw2 */
+
void (*copy_from)(struct map_info *, void *, unsigned long, ssize_t);
-
- void (*write)(struct map_info *, const map_word, unsigned long);
+ void (*write8)(struct map_info *, u8, unsigned long);
+ void (*write16)(struct map_info *, u16, unsigned long);
+ void (*write32)(struct map_info *, u32, unsigned long);
+ void (*write64)(struct map_info *, u64, unsigned long);
void (*copy_to)(struct map_info *, unsigned long, const void *, ssize_t);
/* We can perhaps put in 'point' and 'unpoint' methods, if we really
want to enable XIP for non-linear mappings. Not yet though. */
#endif
- /* It's possible for the map driver to use cached memory in its
- copy_from implementation (and _only_ with copy_from). However,
- when the chip driver knows some flash area has changed contents,
- it will signal it to the map driver through this routine to let
- the map driver invalidate the corresponding cache as needed.
- If there is no cache to care about this can be set to NULL. */
- void (*inval_cache)(struct map_info *, unsigned long, ssize_t);
-
/* set_vpp() must handle being reentered -- enable, enable, disable
must leave it enabled. */
void (*set_vpp)(struct map_info *, int);
#define ENABLE_VPP(map) do { if(map->set_vpp) map->set_vpp(map, 1); } while(0)
#define DISABLE_VPP(map) do { if(map->set_vpp) map->set_vpp(map, 0); } while(0)
-#define INVALIDATE_CACHED_RANGE(map, from, size) \
- do { if(map->inval_cache) map->inval_cache(map, from, size); } while(0)
+#ifdef CONFIG_MTD_COMPLEX_MAPPINGS
+#define map_read8(map, ofs) (map)->read8(map, ofs)
+#define map_read16(map, ofs) (map)->read16(map, ofs)
+#define map_read32(map, ofs) (map)->read32(map, ofs)
+#define map_read64(map, ofs) (map)->read64(map, ofs)
+#define map_copy_from(map, to, from, len) (map)->copy_from(map, to, from, len)
+#define map_write8(map, datum, ofs) (map)->write8(map, datum, ofs)
+#define map_write16(map, datum, ofs) (map)->write16(map, datum, ofs)
+#define map_write32(map, datum, ofs) (map)->write32(map, datum, ofs)
+#define map_write64(map, datum, ofs) (map)->write64(map, datum, ofs)
+#define map_copy_to(map, to, from, len) (map)->copy_to(map, to, from, len)
+extern void simple_map_init(struct map_info *);
+#define map_is_linear(map) (map->phys != NO_XIP)
-static inline int map_word_equal(struct map_info *map, map_word val1, map_word val2)
+#else
+static inline u8 map_read8(struct map_info *map, unsigned long ofs)
{
- int i;
- for (i=0; i<map_words(map); i++) {
- if (val1.x[i] != val2.x[i])
- return 0;
- }
- return 1;
+ return __raw_readb(map->virt + ofs);
}
-static inline map_word map_word_and(struct map_info *map, map_word val1, map_word val2)
+static inline u16 map_read16(struct map_info *map, unsigned long ofs)
{
- map_word r;
- int i;
-
- for (i=0; i<map_words(map); i++) {
- r.x[i] = val1.x[i] & val2.x[i];
- }
- return r;
+ return __raw_readw(map->virt + ofs);
}
-static inline map_word map_word_or(struct map_info *map, map_word val1, map_word val2)
+static inline u32 map_read32(struct map_info *map, unsigned long ofs)
{
- map_word r;
- int i;
-
- for (i=0; i<map_words(map); i++) {
- r.x[i] = val1.x[i] | val2.x[i];
- }
- return r;
+ return __raw_readl(map->virt + ofs);
}
-#define map_word_andequal(m, a, b, z) map_word_equal(m, z, map_word_and(m, a, b))
-static inline int map_word_bitsset(struct map_info *map, map_word val1, map_word val2)
+static inline u64 map_read64(struct map_info *map, unsigned long ofs)
{
- int i;
-
- for (i=0; i<map_words(map); i++) {
- if (val1.x[i] & val2.x[i])
- return 1;
- }
+#ifndef CONFIG_MTD_CFI_B8 /* 64-bit mappings */
+ BUG();
return 0;
-}
-
-static inline map_word map_word_load(struct map_info *map, const void *ptr)
-{
- map_word r;
-
- if (map_bankwidth_is_1(map))
- r.x[0] = *(unsigned char *)ptr;
- else if (map_bankwidth_is_2(map))
- r.x[0] = get_unaligned((uint16_t *)ptr);
- else if (map_bankwidth_is_4(map))
- r.x[0] = get_unaligned((uint32_t *)ptr);
-#if BITS_PER_LONG >= 64
- else if (map_bankwidth_is_8(map))
- r.x[0] = get_unaligned((uint64_t *)ptr);
+#else
+ return __raw_readll(map->virt + ofs);
#endif
- else if (map_bankwidth_is_large(map))
- memcpy(r.x, ptr, map->bankwidth);
-
- return r;
}
-static inline map_word map_word_load_partial(struct map_info *map, map_word orig, const unsigned char *buf, int start, int len)
+static inline void map_write8(struct map_info *map, u8 datum, unsigned long ofs)
{
- int i;
-
- if (map_bankwidth_is_large(map)) {
- char *dest = (char *)&orig;
- memcpy(dest+start, buf, len);
- } else {
- for (i=start; i < start+len; i++) {
- int bitpos;
-#ifdef __LITTLE_ENDIAN
- bitpos = i*8;
-#else /* __BIG_ENDIAN */
- bitpos = (map_bankwidth(map)-1-i)*8;
-#endif
- orig.x[0] &= ~(0xff << bitpos);
- orig.x[0] |= buf[i] << bitpos;
- }
- }
- return orig;
+ __raw_writeb(datum, map->virt + ofs);
+ mb();
}
-static inline map_word map_word_ff(struct map_info *map)
+static inline void map_write16(struct map_info *map, u16 datum, unsigned long ofs)
{
- map_word r;
- int i;
-
- for (i=0; i<map_words(map); i++) {
- r.x[i] = ~0UL;
- }
- return r;
+ __raw_writew(datum, map->virt + ofs);
+ mb();
}
-static inline map_word inline_map_read(struct map_info *map, unsigned long ofs)
-{
- map_word r;
-
- if (map_bankwidth_is_1(map))
- r.x[0] = __raw_readb(map->virt + ofs);
- else if (map_bankwidth_is_2(map))
- r.x[0] = __raw_readw(map->virt + ofs);
- else if (map_bankwidth_is_4(map))
- r.x[0] = __raw_readl(map->virt + ofs);
-#if BITS_PER_LONG >= 64
- else if (map_bankwidth_is_8(map))
- r.x[0] = __raw_readq(map->virt + ofs);
-#endif
- else if (map_bankwidth_is_large(map))
- memcpy_fromio(r.x, map->virt+ofs, map->bankwidth);
- return r;
+static inline void map_write32(struct map_info *map, u32 datum, unsigned long ofs)
+{
+ __raw_writel(datum, map->virt + ofs);
+ mb();
}
-static inline void inline_map_write(struct map_info *map, const map_word datum, unsigned long ofs)
+static inline void map_write64(struct map_info *map, u64 datum, unsigned long ofs)
{
- if (map_bankwidth_is_1(map))
- __raw_writeb(datum.x[0], map->virt + ofs);
- else if (map_bankwidth_is_2(map))
- __raw_writew(datum.x[0], map->virt + ofs);
- else if (map_bankwidth_is_4(map))
- __raw_writel(datum.x[0], map->virt + ofs);
-#if BITS_PER_LONG >= 64
- else if (map_bankwidth_is_8(map))
- __raw_writeq(datum.x[0], map->virt + ofs);
-#endif
- else if (map_bankwidth_is_large(map))
- memcpy_toio(map->virt+ofs, datum.x, map->bankwidth);
+#ifndef CONFIG_MTD_CFI_B8 /* 64-bit mappings */
+ BUG();
+#else
+ __raw_writell(datum, map->virt + ofs);
mb();
+#endif /* CFI_B8 */
}
-static inline void inline_map_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
+static inline void map_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
{
- if (map->cached)
- memcpy(to, (char *)map->cached + from, len);
- else
- memcpy_fromio(to, map->virt + from, len);
+ memcpy_fromio(to, map->virt + from, len);
}
-static inline void inline_map_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
+static inline void map_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
{
memcpy_toio(map->virt + to, from, len);
}
-#ifdef CONFIG_MTD_COMPLEX_MAPPINGS
-#define map_read(map, ofs) (map)->read(map, ofs)
-#define map_copy_from(map, to, from, len) (map)->copy_from(map, to, from, len)
-#define map_write(map, datum, ofs) (map)->write(map, datum, ofs)
-#define map_copy_to(map, to, from, len) (map)->copy_to(map, to, from, len)
-
-extern void simple_map_init(struct map_info *);
-#define map_is_linear(map) (map->phys != NO_XIP)
-
-#else
-#define map_read(map, ofs) inline_map_read(map, ofs)
-#define map_copy_from(map, to, from, len) inline_map_copy_from(map, to, from, len)
-#define map_write(map, datum, ofs) inline_map_write(map, datum, ofs)
-#define map_copy_to(map, to, from, len) inline_map_copy_to(map, to, from, len)
-
-
-#define simple_map_init(map) BUG_ON(!map_bankwidth_supported((map)->bankwidth))
+#define simple_map_init(map) do { } while (0)
#define map_is_linear(map) (1)
#endif /* !CONFIG_MTD_COMPLEX_MAPPINGS */
-/*
- * $Id: mtd.h,v 1.56 2004/08/09 18:46:04 dmarlin Exp $
- *
- * Copyright (C) 1999-2003 David Woodhouse <dwmw2@infradead.org> et al.
- *
- * Released under GPL
- */
+
+/* $Id: mtd.h,v 1.45 2003/05/20 21:56:40 dwmw2 Exp $ */
#ifndef __MTD_MTD_H__
#define __MTD_MTD_H__
-#ifndef __KERNEL__
-#error This is a kernel header. Perhaps include mtd-user.h instead?
-#endif
+#ifdef __KERNEL__
#include <linux/config.h>
#include <linux/version.h>
#include <linux/module.h>
#include <linux/uio.h>
-#include <linux/mtd/compatmac.h>
-#include <mtd/mtd-abi.h>
+#endif /* __KERNEL__ */
+
+struct erase_info_user {
+ u_int32_t start;
+ u_int32_t length;
+};
+
+struct mtd_oob_buf {
+ u_int32_t start;
+ u_int32_t length;
+ unsigned char __user *ptr;
+};
#define MTD_CHAR_MAJOR 90
#define MTD_BLOCK_MAJOR 31
#define MAX_MTD_DEVICES 16
+
+
+#define MTD_ABSENT 0
+#define MTD_RAM 1
+#define MTD_ROM 2
+#define MTD_NORFLASH 3
+#define MTD_NANDFLASH 4
+#define MTD_PEROM 5
+#define MTD_OTHER 14
+#define MTD_UNKNOWN 15
+
+
+
+#define MTD_CLEAR_BITS 1 // Bits can be cleared (flash)
+#define MTD_SET_BITS 2 // Bits can be set
+#define MTD_ERASEABLE 4 // Has an erase function
+#define MTD_WRITEB_WRITEABLE 8 // Direct IO is possible
+#define MTD_VOLATILE 16 // Set for RAMs
+#define MTD_XIP 32 // eXecute-In-Place possible
+#define MTD_OOB 64 // Out-of-band data (NAND flash)
+#define MTD_ECC 128 // Device capable of automatic ECC
+
+// Some common devices / combinations of capabilities
+#define MTD_CAP_ROM 0
+#define MTD_CAP_RAM (MTD_CLEAR_BITS|MTD_SET_BITS|MTD_WRITEB_WRITEABLE)
+#define MTD_CAP_NORFLASH (MTD_CLEAR_BITS|MTD_ERASEABLE)
+#define MTD_CAP_NANDFLASH (MTD_CLEAR_BITS|MTD_ERASEABLE|MTD_OOB)
+#define MTD_WRITEABLE (MTD_CLEAR_BITS|MTD_SET_BITS)
+
+
+// Types of automatic ECC/Checksum available
+#define MTD_ECC_NONE 0 // No automatic ECC available
+#define MTD_ECC_RS_DiskOnChip 1 // Automatic ECC on DiskOnChip
+#define MTD_ECC_SW 2 // SW ECC for Toshiba & Samsung devices
+
+struct mtd_info_user {
+ u_char type;
+ u_int32_t flags;
+ u_int32_t size; // Total size of the MTD
+ u_int32_t erasesize;
+ u_int32_t oobblock; // Size of OOB blocks (e.g. 512)
+ u_int32_t oobsize; // Amount of OOB data per block (e.g. 16)
+ u_int32_t ecctype;
+ u_int32_t eccsize;
+};
+
+struct region_info_user {
+ u_int32_t offset; /* At which this region starts,
+ * from the beginning of the MTD */
+ u_int32_t erasesize; /* For this region */
+ u_int32_t numblocks; /* Number of blocks in this region */
+ u_int32_t regionindex;
+};
+
+#define MEMGETINFO _IOR('M', 1, struct mtd_info_user)
+#define MEMERASE _IOW('M', 2, struct erase_info_user)
+#define MEMWRITEOOB _IOWR('M', 3, struct mtd_oob_buf)
+#define MEMREADOOB _IOWR('M', 4, struct mtd_oob_buf)
+#define MEMLOCK _IOW('M', 5, struct erase_info_user)
+#define MEMUNLOCK _IOW('M', 6, struct erase_info_user)
+#define MEMGETREGIONCOUNT _IOR('M', 7, int)
+#define MEMGETREGIONINFO _IOWR('M', 8, struct region_info_user)
+#define MEMSETOOBSEL _IOW('M', 9, struct nand_oobinfo)
+
+struct nand_oobinfo {
+ int useecc;
+ int eccpos[6];
+};
+
+
+#ifndef __KERNEL__
+
+typedef struct mtd_info_user mtd_info_t;
+typedef struct erase_info_user erase_info_t;
+typedef struct region_info_user region_info_t;
+typedef struct nand_oobinfo nand_oobinfo_t;
+
+ /* User-space ioctl definitions */
+
+#else /* __KERNEL__ */
+
+
#define MTD_ERASE_PENDING 0x01
#define MTD_ERASING 0x02
#define MTD_ERASE_SUSPEND 0x04
#define MTD_ERASE_DONE 0x08
#define MTD_ERASE_FAILED 0x10
-/* If the erase fails, fail_addr might indicate exactly which block failed. If
- fail_addr = 0xffffffff, the failure was not at the device level or was not
- specific to any particular block. */
struct erase_info {
struct mtd_info *mtd;
u_int32_t addr;
u_int32_t len;
- u_int32_t fail_addr;
u_long time;
u_long retries;
u_int dev;
u_int32_t oobblock; // Size of OOB blocks (e.g. 512)
u_int32_t oobsize; // Amount of OOB data per block (e.g. 16)
- u_int32_t oobavail; // Number of bytes in OOB area available for fs
u_int32_t ecctype;
u_int32_t eccsize;
int (*suspend) (struct mtd_info *mtd);
void (*resume) (struct mtd_info *mtd);
- /* Bad block management functions */
- int (*block_isbad) (struct mtd_info *mtd, loff_t ofs);
- int (*block_markbad) (struct mtd_info *mtd, loff_t ofs);
-
void *priv;
struct module *owner;
#define MTD_WRITEOOB(mtd, args...) (*(mtd->write_oob))(mtd, args)
#define MTD_SYNC(mtd) do { if (mtd->sync) (*(mtd->sync))(mtd); } while (0)
-
-#ifdef CONFIG_MTD_PARTITIONS
-void mtd_erase_callback(struct erase_info *instr);
-#else
-static inline void mtd_erase_callback(struct erase_info *instr)
-{
- if (instr->callback)
- instr->callback(instr);
-}
-#endif
-
/*
* Debugging macro and defines
*/
#endif /* CONFIG_MTD_DEBUG */
+#endif /* __KERNEL__ */
+
#endif /* __MTD_MTD_H__ */
* Steven J. Hill <sjhill@realitydiluted.com>
* Thomas Gleixner <tglx@linutronix.de>
*
- * $Id: nand.h,v 1.63 2004/07/07 16:29:43 gleixner Exp $
+ * $Id: nand.h,v 1.25 2003/05/21 15:15:02 dwmw2 Exp $
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* NAND_YAFFS_OOB
* 11-25-2002 tglx Added Manufacturer code FUJITSU, NATIONAL
* Split manufacturer and device ID structures
- *
- * 02-08-2004 tglx added option field to nand structure for chip anomalities
- * 05-25-2004 tglx added bad block table support, ST-MICRO manufacturer id
- * update of nand_chip structure description
*/
#ifndef __LINUX_MTD_NAND_H
#define __LINUX_MTD_NAND_H
#include <linux/config.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
-#include <linux/mtd/mtd.h>
struct mtd_info;
-/* Scan and identify a NAND device */
-extern int nand_scan (struct mtd_info *mtd, int max_chips);
-/* Free resources held by the NAND device */
-extern void nand_release (struct mtd_info *mtd);
-
-/* Read raw data from the device without ECC */
-extern int nand_read_raw (struct mtd_info *mtd, uint8_t *buf, loff_t from, size_t len, size_t ooblen);
-
-
-/* The maximum number of NAND chips in an array */
-#define NAND_MAX_CHIPS 8
-
-/* This constant declares the max. oobsize / page, which
- * is supported now. If you add a chip with bigger oobsize/page
- * adjust this accordingly.
+/*
+ * Searches for a NAND device
*/
-#define NAND_MAX_OOBSIZE 64
+extern int nand_scan (struct mtd_info *mtd);
/*
* Constants for hardware specific CLE/ALE/NCE function
*/
-/* Select the chip by setting nCE to low */
#define NAND_CTL_SETNCE 1
-/* Deselect the chip by setting nCE to high */
#define NAND_CTL_CLRNCE 2
-/* Select the command latch by setting CLE to high */
#define NAND_CTL_SETCLE 3
-/* Deselect the command latch by setting CLE to low */
#define NAND_CTL_CLRCLE 4
-/* Select the address latch by setting ALE to high */
#define NAND_CTL_SETALE 5
-/* Deselect the address latch by setting ALE to low */
#define NAND_CTL_CLRALE 6
-/* Set write protection by setting WP to high. Not used! */
-#define NAND_CTL_SETWP 7
-/* Clear write protection by setting WP to low. Not used! */
-#define NAND_CTL_CLRWP 8
/*
* Standard NAND flash commands
#define NAND_CMD_READOOB 0x50
#define NAND_CMD_ERASE1 0x60
#define NAND_CMD_STATUS 0x70
-#define NAND_CMD_STATUS_MULTI 0x71
#define NAND_CMD_SEQIN 0x80
#define NAND_CMD_READID 0x90
#define NAND_CMD_ERASE2 0xd0
#define NAND_CMD_RESET 0xff
-/* Extended commands for large page devices */
-#define NAND_CMD_READSTART 0x30
-#define NAND_CMD_CACHEDPROG 0x15
-
-/* Status bits */
-#define NAND_STATUS_FAIL 0x01
-#define NAND_STATUS_FAIL_N1 0x02
-#define NAND_STATUS_TRUE_READY 0x20
-#define NAND_STATUS_READY 0x40
-#define NAND_STATUS_WP 0x80
-
/*
* Constants for ECC_MODES
- */
-
-/* No ECC. Usage is not recommended ! */
+ *
+ * NONE: No ECC
+ * SOFT: Software ECC 3 byte ECC per 256 Byte data
+ * HW3_256: Hardware ECC 3 byte ECC per 256 Byte data
+ * HW3_512: Hardware ECC 3 byte ECC per 512 Byte data
+ *
+ *
+*/
#define NAND_ECC_NONE 0
-/* Software ECC 3 byte ECC per 256 Byte data */
#define NAND_ECC_SOFT 1
-/* Hardware ECC 3 byte ECC per 256 Byte data */
#define NAND_ECC_HW3_256 2
-/* Hardware ECC 3 byte ECC per 512 Byte data */
#define NAND_ECC_HW3_512 3
-/* Hardware ECC 3 byte ECC per 512 Byte data */
#define NAND_ECC_HW6_512 4
-/* Hardware ECC 8 byte ECC per 512 Byte data */
-#define NAND_ECC_HW8_512 6
+#define NAND_ECC_DISKONCHIP 5
/*
* Constants for Hardware ECC
*/
-/* Reset Hardware ECC for read */
#define NAND_ECC_READ 0
-/* Reset Hardware ECC for write */
#define NAND_ECC_WRITE 1
-/* Enable Hardware ECC before syndrom is read back from flash */
-#define NAND_ECC_READSYN 2
-
-/* Option constants for bizarre disfunctionality and real
-* features
-*/
-/* Chip can not auto increment pages */
-#define NAND_NO_AUTOINCR 0x00000001
-/* Buswitdh is 16 bit */
-#define NAND_BUSWIDTH_16 0x00000002
-/* Device supports partial programming without padding */
-#define NAND_NO_PADDING 0x00000004
-/* Chip has cache program function */
-#define NAND_CACHEPRG 0x00000008
-/* Chip has copy back function */
-#define NAND_COPYBACK 0x00000010
-/* AND Chip which has 4 banks and a confusing page / block
- * assignment. See Renesas datasheet for further information */
-#define NAND_IS_AND 0x00000020
-/* Chip has a array of 4 pages which can be read without
- * additional ready /busy waits */
-#define NAND_4PAGE_ARRAY 0x00000040
-
-/* Options valid for Samsung large page devices */
-#define NAND_SAMSUNG_LP_OPTIONS \
- (NAND_NO_PADDING | NAND_CACHEPRG | NAND_COPYBACK)
-
-/* Macros to identify the above */
-#define NAND_CANAUTOINCR(chip) (!(chip->options & NAND_NO_AUTOINCR))
-#define NAND_MUST_PAD(chip) (!(chip->options & NAND_NO_PADDING))
-#define NAND_HAS_CACHEPROG(chip) ((chip->options & NAND_CACHEPRG))
-#define NAND_HAS_COPYBACK(chip) ((chip->options & NAND_COPYBACK))
-
-/* Mask to zero out the chip options, which come from the id table */
-#define NAND_CHIPOPTIONS_MSK (0x0000ffff & ~NAND_NO_AUTOINCR)
-
-/* Non chip related options */
-/* Use a flash based bad block table. This option is passed to the
- * default bad block table function. */
-#define NAND_USE_FLASH_BBT 0x00010000
-/* The hw ecc generator provides a syndrome instead a ecc value on read
- * This can only work if we have the ecc bytes directly behind the
- * data bytes. Applies for DOC and AG-AND Renesas HW Reed Solomon generators */
-#define NAND_HWECC_SYNDROME 0x00020000
-
-
-/* Options set by nand scan */
-/* Nand scan has allocated oob_buf */
-#define NAND_OOBBUF_ALLOC 0x40000000
-/* Nand scan has allocated data_buf */
-#define NAND_DATABUF_ALLOC 0x80000000
-
-
+
/*
- * nand_state_t - chip states
* Enumeration for NAND flash chip state
*/
typedef enum {
FL_READING,
FL_WRITING,
FL_ERASING,
- FL_SYNCING,
- FL_CACHEDPRG,
+ FL_SYNCING
} nand_state_t;
-/**
- * struct nand_chip - NAND Private Flash Chip Data
- * @IO_ADDR_R: [BOARDSPECIFIC] address to read the 8 I/O lines of the flash device
- * @IO_ADDR_W: [BOARDSPECIFIC] address to write the 8 I/O lines of the flash device
- * @read_byte: [REPLACEABLE] read one byte from the chip
- * @write_byte: [REPLACEABLE] write one byte to the chip
- * @read_word: [REPLACEABLE] read one word from the chip
- * @write_word: [REPLACEABLE] write one word to the chip
- * @write_buf: [REPLACEABLE] write data from the buffer to the chip
- * @read_buf: [REPLACEABLE] read data from the chip into the buffer
- * @verify_buf: [REPLACEABLE] verify buffer contents against the chip data
- * @select_chip: [REPLACEABLE] select chip nr
- * @block_bad: [REPLACEABLE] check, if the block is bad
- * @block_markbad: [REPLACEABLE] mark the block bad
- * @hwcontrol: [BOARDSPECIFIC] hardwarespecific function for accesing control-lines
- * @dev_ready: [BOARDSPECIFIC] hardwarespecific function for accesing device ready/busy line
- * If set to NULL no access to ready/busy is available and the ready/busy information
- * is read from the chip status register
- * @cmdfunc: [REPLACEABLE] hardwarespecific function for writing commands to the chip
- * @waitfunc: [REPLACEABLE] hardwarespecific function for wait on ready
- * @calculate_ecc: [REPLACEABLE] function for ecc calculation or readback from ecc hardware
- * @correct_data: [REPLACEABLE] function for ecc correction, matching to ecc generator (sw/hw)
- * @enable_hwecc: [BOARDSPECIFIC] function to enable (reset) hardware ecc generator. Must only
- * be provided if a hardware ECC is available
- * @erase_cmd: [INTERN] erase command write function, selectable due to AND support
- * @scan_bbt: [REPLACEABLE] function to scan bad block table
- * @eccmode: [BOARDSPECIFIC] mode of ecc, see defines
- * @eccsize: [INTERN] databytes used per ecc-calculation
- * @eccsteps: [INTERN] number of ecc calculation steps per page
- * @chip_delay: [BOARDSPECIFIC] chip dependent delay for transfering data from array to read regs (tR)
- * @chip_lock: [INTERN] spinlock used to protect access to this structure and the chip
- * @wq: [INTERN] wait queue to sleep on if a NAND operation is in progress
- * @state: [INTERN] the current state of the NAND device
- * @page_shift: [INTERN] number of address bits in a page (column address bits)
- * @phys_erase_shift: [INTERN] number of address bits in a physical eraseblock
- * @bbt_erase_shift: [INTERN] number of address bits in a bbt entry
- * @chip_shift: [INTERN] number of address bits in one chip
- * @data_buf: [INTERN] internal buffer for one page + oob
- * @oob_buf: [INTERN] oob buffer for one eraseblock
- * @oobdirty: [INTERN] indicates that oob_buf must be reinitialized
- * @data_poi: [INTERN] pointer to a data buffer
- * @options: [BOARDSPECIFIC] various chip options. They can partly be set to inform nand_scan about
- * special functionality. See the defines for further explanation
- * @badblockpos: [INTERN] position of the bad block marker in the oob area
- * @numchips: [INTERN] number of physical chips
- * @chipsize: [INTERN] the size of one chip for multichip arrays
- * @pagemask: [INTERN] page number mask = number of (pages / chip) - 1
- * @pagebuf: [INTERN] holds the pagenumber which is currently in data_buf
- * @autooob: [REPLACEABLE] the default (auto)placement scheme
- * @bbt: [INTERN] bad block table pointer
- * @bbt_td: [REPLACEABLE] bad block table descriptor for flash lookup
- * @bbt_md: [REPLACEABLE] bad block table mirror descriptor
- * @priv: [OPTIONAL] pointer to private chip date
+/*
+ * NAND Private Flash Chip Data
+ *
+ * Structure overview:
+ *
+ * IO_ADDR_R - address to read the 8 I/O lines of the flash device
+ *
+ * IO_ADDR_W - address to write the 8 I/O lines of the flash device
+ *
+ * hwcontrol - hardwarespecific function for accesing control-lines
+ *
+ * dev_ready - hardwarespecific function for accesing device ready/busy line
+ *
+ * waitfunc - hardwarespecific function for wait on ready
+ *
+ * calculate_ecc - function for ecc calculation or readback from ecc hardware
+ *
+ * correct_data - function for ecc correction, matching to ecc generator (sw/hw)
+ *
+ * enable_hwecc - function to enable (reset) hardware ecc generator
+ *
+ * eccmod - mode of ecc: see constants
+ *
+ * eccsize - databytes used per ecc-calculation
+ *
+ * chip_delay - chip dependent delay for transfering data from array to read regs (tR)
+ *
+ * chip_lock - spinlock used to protect access to this structure
+ *
+ * wq - wait queue to sleep on if a NAND operation is in progress
+ *
+ * state - give the current state of the NAND device
+ *
+ * page_shift - number of address bits in a page (column address bits)
+ *
+ * data_buf - data buffer passed to/from MTD user modules
+ *
+ * data_cache - data cache for redundant page access and shadow for
+ * ECC failure
+ *
+ * cache_page - number of last valid page in page_cache
*/
-
struct nand_chip {
unsigned long IO_ADDR_R;
unsigned long IO_ADDR_W;
-
- u_char (*read_byte)(struct mtd_info *mtd);
- void (*write_byte)(struct mtd_info *mtd, u_char byte);
- u16 (*read_word)(struct mtd_info *mtd);
- void (*write_word)(struct mtd_info *mtd, u16 word);
-
- void (*write_buf)(struct mtd_info *mtd, const u_char *buf, int len);
- void (*read_buf)(struct mtd_info *mtd, u_char *buf, int len);
- int (*verify_buf)(struct mtd_info *mtd, const u_char *buf, int len);
- void (*select_chip)(struct mtd_info *mtd, int chip);
- int (*block_bad)(struct mtd_info *mtd, loff_t ofs, int getchip);
- int (*block_markbad)(struct mtd_info *mtd, loff_t ofs);
- void (*hwcontrol)(struct mtd_info *mtd, int cmd);
- int (*dev_ready)(struct mtd_info *mtd);
+ void (*hwcontrol)(int cmd);
+ int (*dev_ready)(void);
void (*cmdfunc)(struct mtd_info *mtd, unsigned command, int column, int page_addr);
int (*waitfunc)(struct mtd_info *mtd, struct nand_chip *this, int state);
- int (*calculate_ecc)(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code);
- int (*correct_data)(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc);
- void (*enable_hwecc)(struct mtd_info *mtd, int mode);
- void (*erase_cmd)(struct mtd_info *mtd, int page);
- int (*scan_bbt)(struct mtd_info *mtd);
+ void (*calculate_ecc)(const u_char *dat, u_char *ecc_code);
+ int (*correct_data)(u_char *dat, u_char *read_ecc, u_char *calc_ecc);
+ void (*enable_hwecc)(int mode);
int eccmode;
int eccsize;
- int eccsteps;
int chip_delay;
- spinlock_t chip_lock;
+ spinlock_t chip_lock;
wait_queue_head_t wq;
nand_state_t state;
int page_shift;
- int phys_erase_shift;
- int bbt_erase_shift;
- int chip_shift;
u_char *data_buf;
- u_char *oob_buf;
- int oobdirty;
u_char *data_poi;
- unsigned int options;
- int badblockpos;
- int numchips;
- unsigned long chipsize;
- int pagemask;
- int pagebuf;
- struct nand_oobinfo *autooob;
- uint8_t *bbt;
- struct nand_bbt_descr *bbt_td;
- struct nand_bbt_descr *bbt_md;
- void *priv;
};
/*
#define NAND_MFR_SAMSUNG 0xec
#define NAND_MFR_FUJITSU 0x04
#define NAND_MFR_NATIONAL 0x8f
-#define NAND_MFR_RENESAS 0x07
-#define NAND_MFR_STMICRO 0x20
-/**
- * struct nand_flash_dev - NAND Flash Device ID Structure
- *
- * @name: Identify the device type
- * @id: device ID code
- * @pagesize: Pagesize in bytes. Either 256 or 512 or 0
- * If the pagesize is 0, then the real pagesize
- * and the eraseize are determined from the
- * extended id bytes in the chip
- * @erasesize: Size of an erase block in the flash device.
- * @chipsize: Total chipsize in Mega Bytes
- * @options: Bitfield to store chip relevant options
+/*
+ * NAND Flash Device ID Structure
+ *
+ * Structure overview:
+ *
+ * name - Identify the device type
+ *
+ * id - device ID code
+ *
+ * chipshift - total number of address bits for the device which
+ * is used to calculate address offsets and the total
+ * number of bytes the device is capable of.
+ *
+ * page256 - denotes if flash device has 256 byte pages or not.
+ *
+ * pageadrlen - number of bytes minus one needed to hold the
+ * complete address into the flash array. Keep in
+ * mind that when a read or write is done to a
+ * specific address, the address is input serially
+ * 8 bits at a time. This structure member is used
+ * by the read/write routines as a loop index for
+ * shifting the address out 8 bits at a time.
+ *
+ * erasesize - size of an erase block in the flash device.
*/
struct nand_flash_dev {
- char *name;
+ char * name;
int id;
- unsigned long pagesize;
- unsigned long chipsize;
+ int chipshift;
unsigned long erasesize;
- unsigned long options;
+ char page256;
};
-/**
- * struct nand_manufacturers - NAND Flash Manufacturer ID Structure
- * @name: Manufacturer name
- * @id: manufacturer ID code of device.
+/*
+ * NAND Flash Manufacturer ID Structure
+ *
+ * name - Manufacturer name
+ *
+ * id - manufacturer ID code of device.
*/
struct nand_manufacturers {
int id;
extern struct nand_flash_dev nand_flash_ids[];
extern struct nand_manufacturers nand_manuf_ids[];
-/**
- * struct nand_bbt_descr - bad block table descriptor
- * @options: options for this descriptor
- * @pages: the page(s) where we find the bbt, used with option BBT_ABSPAGE
- * when bbt is searched, then we store the found bbts pages here.
- * Its an array and supports up to 8 chips now
- * @offs: offset of the pattern in the oob area of the page
- * @veroffs: offset of the bbt version counter in the oob are of the page
- * @version: version read from the bbt page during scan
- * @len: length of the pattern, if 0 no pattern check is performed
- * @maxblocks: maximum number of blocks to search for a bbt. This number of
- * blocks is reserved at the end of the device where the tables are
- * written.
- * @reserved_block_code: if non-0, this pattern denotes a reserved (rather than
- * bad) block in the stored bbt
- * @pattern: pattern to identify bad block table or factory marked good /
- * bad blocks, can be NULL, if len = 0
- *
- * Descriptor for the bad block table marker and the descriptor for the
- * pattern which identifies good and bad blocks. The assumption is made
- * that the pattern and the version count are always located in the oob area
- * of the first block.
- */
-struct nand_bbt_descr {
- int options;
- int pages[NAND_MAX_CHIPS];
- int offs;
- int veroffs;
- uint8_t version[NAND_MAX_CHIPS];
- int len;
- int maxblocks;
- int reserved_block_code;
- uint8_t *pattern;
-};
-
-/* Options for the bad block table descriptors */
-
-/* The number of bits used per block in the bbt on the device */
-#define NAND_BBT_NRBITS_MSK 0x0000000F
-#define NAND_BBT_1BIT 0x00000001
-#define NAND_BBT_2BIT 0x00000002
-#define NAND_BBT_4BIT 0x00000004
-#define NAND_BBT_8BIT 0x00000008
-/* The bad block table is in the last good block of the device */
-#define NAND_BBT_LASTBLOCK 0x00000010
-/* The bbt is at the given page, else we must scan for the bbt */
-#define NAND_BBT_ABSPAGE 0x00000020
-/* The bbt is at the given page, else we must scan for the bbt */
-#define NAND_BBT_SEARCH 0x00000040
-/* bbt is stored per chip on multichip devices */
-#define NAND_BBT_PERCHIP 0x00000080
-/* bbt has a version counter at offset veroffs */
-#define NAND_BBT_VERSION 0x00000100
-/* Create a bbt if none axists */
-#define NAND_BBT_CREATE 0x00000200
-/* Search good / bad pattern through all pages of a block */
-#define NAND_BBT_SCANALLPAGES 0x00000400
-/* Scan block empty during good / bad block scan */
-#define NAND_BBT_SCANEMPTY 0x00000800
-/* Write bbt if neccecary */
-#define NAND_BBT_WRITE 0x00001000
-/* Read and write back block contents when writing bbt */
-#define NAND_BBT_SAVECONTENT 0x00002000
-/* Search good / bad pattern on the first and the second page */
-#define NAND_BBT_SCAN2NDPAGE 0x00004000
-
-/* The maximum number of blocks to scan for a bbt */
-#define NAND_BBT_SCAN_MAXBLOCKS 4
-
-extern int nand_scan_bbt (struct mtd_info *mtd, struct nand_bbt_descr *bd);
-extern int nand_update_bbt (struct mtd_info *mtd, loff_t offs);
-extern int nand_default_bbt (struct mtd_info *mtd);
-extern int nand_isbad_bbt (struct mtd_info *mtd, loff_t offs, int allowbbt);
-extern int nand_erase_nand (struct mtd_info *mtd, struct erase_info *instr, int allowbbt);
-
/*
* Constants for oob configuration
*/
-#define NAND_SMALL_BADBLOCK_POS 5
-#define NAND_LARGE_BADBLOCK_POS 0
+#define NAND_BADBLOCK_POS 5
#endif /* __LINUX_MTD_NAND_H */
*
* Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
*
- * $Id: nand_ecc.h,v 1.4 2004/06/17 02:35:02 dbrown Exp $
+ * $Id: nand_ecc.h,v 1.2 2003/02/20 13:34:20 sjhill Exp $
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* This file is the header for the ECC algorithm.
*/
-#ifndef __MTD_NAND_ECC_H__
-#define __MTD_NAND_ECC_H__
-
-struct mtd_info;
+/*
+ * Creates non-inverted ECC code from line parity
+ */
+void nand_trans_result(u_char reg2, u_char reg3, u_char *ecc_code);
/*
* Calculate 3 byte ECC code for 256 byte block
*/
-int nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code);
+void nand_calculate_ecc (const u_char *dat, u_char *ecc_code);
/*
* Detect and correct a 1 bit error for 256 byte block
*/
-int nand_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc);
-
-#endif /* __MTD_NAND_ECC_H__ */
+int nand_correct_data (u_char *dat, u_char *read_ecc, u_char *calc_ecc);
/*
- * $Id: nftl.h,v 1.16 2004/06/30 14:49:00 dbrown Exp $
+ * $Id: nftl.h,v 1.13 2003/05/23 11:25:02 dwmw2 Exp $
*
* (C) 1999-2003 David Woodhouse <dwmw2@infradead.org>
*/
#include <linux/mtd/mtd.h>
#include <linux/mtd/blktrans.h>
-#include <mtd/nftl-user.h>
+/* Block Control Information */
+
+struct nftl_bci {
+ unsigned char ECCSig[6];
+ __u8 Status;
+ __u8 Status1;
+}__attribute__((packed));
+
+/* Unit Control Information */
+
+struct nftl_uci0 {
+ __u16 VirtUnitNum;
+ __u16 ReplUnitNum;
+ __u16 SpareVirtUnitNum;
+ __u16 SpareReplUnitNum;
+} __attribute__((packed));
+
+struct nftl_uci1 {
+ __u32 WearInfo;
+ __u16 EraseMark;
+ __u16 EraseMark1;
+} __attribute__((packed));
+
+struct nftl_uci2 {
+ __u16 FoldMark;
+ __u16 FoldMark1;
+ __u32 unused;
+} __attribute__((packed));
+
+union nftl_uci {
+ struct nftl_uci0 a;
+ struct nftl_uci1 b;
+ struct nftl_uci2 c;
+};
+
+struct nftl_oob {
+ struct nftl_bci b;
+ union nftl_uci u;
+};
+
+/* NFTL Media Header */
+
+struct NFTLMediaHeader {
+ char DataOrgID[6];
+ __u16 NumEraseUnits;
+ __u16 FirstPhysicalEUN;
+ __u32 FormattedSize;
+ unsigned char UnitSizeFactor;
+} __attribute__((packed));
+
+#define MAX_ERASE_ZONES (8192 - 512)
+
+#define ERASE_MARK 0x3c69
+#define SECTOR_FREE 0xff
+#define SECTOR_USED 0x55
+#define SECTOR_IGNORE 0x11
+#define SECTOR_DELETED 0x00
+
+#define FOLD_MARK_IN_PROGRESS 0x5555
+
+#define ZONE_GOOD 0xff
+#define ZONE_BAD_ORIGINAL 0
+#define ZONE_BAD_MARKED 7
+
+#ifdef __KERNEL__
/* these info are used in ReplUnitTable */
#define BLOCK_NIL 0xffff /* last block of a chain */
unsigned int nb_blocks; /* number of physical blocks */
unsigned int nb_boot_blocks; /* number of blocks used by the bios */
struct erase_info instr;
- struct nand_oobinfo oobinfo;
};
int NFTL_mount(struct NFTLrecord *s);
#define MAX_SECTORS_PER_UNIT 64
#define NFTL_PARTN_BITS 4
+#endif /* __KERNEL__ */
+
#endif /* __MTD_NFTL_H__ */
*
* This code is GPL
*
- * $Id: partitions.h,v 1.15 2003/07/09 11:15:43 dwmw2 Exp $
+ * $Id: partitions.h,v 1.14 2003/05/20 21:56:29 dwmw2 Exp $
*/
#ifndef MTD_PARTITIONS_H
#define MTDPART_SIZ_FULL (0)
-int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, int);
+int add_mtd_partitions(struct mtd_info *, struct mtd_partition *, int);
int del_mtd_partitions(struct mtd_info *);
/*
+++ /dev/null
-/*
- * For boards with physically mapped flash and using
- * drivers/mtd/maps/physmap.c mapping driver.
- *
- * $Id: physmap.h,v 1.3 2004/07/21 00:16:15 jwboyer Exp $
- *
- * Copyright (C) 2003 MontaVista Software Inc.
- * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- */
-
-#ifndef __LINUX_MTD_PHYSMAP__
-
-#include <linux/config.h>
-
-#if defined(CONFIG_MTD_PHYSMAP)
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-
-/*
- * The map_info for physmap. Board can override size, buswidth, phys,
- * (*set_vpp)(), etc in their initial setup routine.
- */
-extern struct map_info physmap_map;
-
-/*
- * Board needs to specify the exact mapping during their setup time.
- */
-static inline void physmap_configure(unsigned long addr, unsigned long size, int bankwidth, void (*set_vpp)(struct map_info *, int) )
-{
- physmap_map.phys = addr;
- physmap_map.size = size;
- physmap_map.bankwidth = bankwidth;
- physmap_map.set_vpp = set_vpp;
-}
-
-#if defined(CONFIG_MTD_PARTITIONS)
-
-/*
- * Machines that wish to do flash partition may want to call this function in
- * their setup routine.
- *
- * physmap_set_partitions(mypartitions, num_parts);
- *
- * Note that one can always override this hard-coded partition with
- * command line partition (you need to enable CONFIG_MTD_CMDLINE_PARTS).
- */
-void physmap_set_partitions(struct mtd_partition *parts, int num_parts);
-
-#endif /* defined(CONFIG_MTD_PARTITIONS) */
-#endif /* defined(CONFIG_MTD) */
-
-#endif /* __LINUX_MTD_PHYSMAP__ */
-
unsigned mt_segno; /* the segment to read or write */
unsigned mt_mode; /* modes for read/write (sync/async etc.) */
int mt_result; /* result of r/w request, not of the ioctl */
- void __user *mt_data; /* User space buffer: must be 29kb */
+ void *mt_data; /* User space buffer: must be 29kb */
};
/* get tape capacity (ftape/zftape)
int create_mode;
};
-enum { MAX_NESTED_LINKS = 8 };
+enum { MAX_NESTED_LINKS = 5 };
struct nameidata {
struct dentry *dentry;
#define LOOKUP_CONTINUE 4
#define LOOKUP_PARENT 16
#define LOOKUP_NOALT 32
-#define LOOKUP_ATOMIC 64
-
/*
* Intent data
*/
};
struct iovec;
-struct kvec;
extern int sock_wake_async(struct socket *sk, int how, int band);
extern int sock_register(struct net_proto_family *fam);
extern unsigned long net_random(void);
extern void net_srandom(unsigned long);
-extern int kernel_sendmsg(struct socket *sock, struct msghdr *msg,
- struct kvec *vec, size_t num, size_t len);
-extern int kernel_recvmsg(struct socket *sock, struct msghdr *msg,
- struct kvec *vec, size_t num,
- size_t len, int flags);
-
#ifndef CONFIG_SMP
#define SOCKOPS_WRAPPED(name) name
#define SOCKOPS_WRAP(name, fam)
struct Qdisc *qdisc;
struct Qdisc *qdisc_sleeping;
+ struct Qdisc *qdisc_list;
struct Qdisc *qdisc_ingress;
- struct list_head qdisc_list;
unsigned long tx_queue_len; /* Max frames per queue allowed */
/* ingress path synchronizer */
/* bridge stuff */
struct net_bridge_port *br_port;
+#ifdef CONFIG_NET_FASTROUTE
+#define NETDEV_FASTROUTE_HMASK 0xF
+ /* Semi-private data. Keep it at the end of device struct. */
+ rwlock_t fastpath_lock;
+ struct dst_entry *fastpath[NETDEV_FASTROUTE_HMASK+1];
+#endif
#ifdef CONFIG_NET_DIVERT
/* this will get initialized at each interface type init routine */
struct divert_blk *divert;
extern atomic_t netdev_dropping;
extern int netdev_set_master(struct net_device *dev, struct net_device *master);
extern int skb_checksum_help(struct sk_buff **pskb, int inward);
+#ifdef CONFIG_NET_FASTROUTE
+extern int netdev_fastroute;
+extern int netdev_fastroute_obstacles;
+extern void dev_clear_fastroute(struct net_device *dev);
+#endif
#ifdef CONFIG_SYSCTL
extern char *net_sysctl_strdup(const char *s);
#include <linux/netfilter_ipv4/ip_conntrack_tuple.h>
#include <linux/bitops.h>
#include <linux/compiler.h>
-#include <linux/vserver/context.h>
#include <asm/atomic.h>
enum ip_conntrack_info
} nat;
#endif /* CONFIG_IP_NF_NAT_NEEDED */
- /* VServer context id */
- xid_t xid[IP_CT_DIR_MAX];
-
};
/* get master conntrack via master expectation */
+++ /dev/null
-/* PPTP constants and structs */
-#ifndef _CONNTRACK_PPTP_H
-#define _CONNTRACK_PPTP_H
-
-/* state of the control session */
-enum pptp_ctrlsess_state {
- PPTP_SESSION_NONE, /* no session present */
- PPTP_SESSION_ERROR, /* some session error */
- PPTP_SESSION_STOPREQ, /* stop_sess request seen */
- PPTP_SESSION_REQUESTED, /* start_sess request seen */
- PPTP_SESSION_CONFIRMED, /* session established */
-};
-
-/* state of the call inside the control session */
-enum pptp_ctrlcall_state {
- PPTP_CALL_NONE,
- PPTP_CALL_ERROR,
- PPTP_CALL_OUT_REQ,
- PPTP_CALL_OUT_CONF,
- PPTP_CALL_IN_REQ,
- PPTP_CALL_IN_REP,
- PPTP_CALL_IN_CONF,
- PPTP_CALL_CLEAR_REQ,
-};
-
-
-/* conntrack private data */
-struct ip_ct_pptp_master {
- enum pptp_ctrlsess_state sstate; /* session state */
-
- /* everything below is going to be per-expectation in newnat,
- * since there could be more than one call within one session */
- enum pptp_ctrlcall_state cstate; /* call state */
- u_int16_t pac_call_id; /* call id of PAC, host byte order */
- u_int16_t pns_call_id; /* call id of PNS, host byte order */
-};
-
-/* conntrack_expect private member */
-struct ip_ct_pptp_expect {
- enum pptp_ctrlcall_state cstate; /* call state */
- u_int16_t pac_call_id; /* call id of PAC */
- u_int16_t pns_call_id; /* call id of PNS */
-};
-
-
-#ifdef __KERNEL__
-
-#include <linux/netfilter_ipv4/lockhelp.h>
-DECLARE_LOCK_EXTERN(ip_pptp_lock);
-
-#define IP_CONNTR_PPTP PPTP_CONTROL_PORT
-
-#define PPTP_CONTROL_PORT 1723
-
-#define PPTP_PACKET_CONTROL 1
-#define PPTP_PACKET_MGMT 2
-
-#define PPTP_MAGIC_COOKIE 0x1a2b3c4d
-
-struct pptp_pkt_hdr {
- __u16 packetLength;
- __u16 packetType;
- __u32 magicCookie;
-};
-
-/* PptpControlMessageType values */
-#define PPTP_START_SESSION_REQUEST 1
-#define PPTP_START_SESSION_REPLY 2
-#define PPTP_STOP_SESSION_REQUEST 3
-#define PPTP_STOP_SESSION_REPLY 4
-#define PPTP_ECHO_REQUEST 5
-#define PPTP_ECHO_REPLY 6
-#define PPTP_OUT_CALL_REQUEST 7
-#define PPTP_OUT_CALL_REPLY 8
-#define PPTP_IN_CALL_REQUEST 9
-#define PPTP_IN_CALL_REPLY 10
-#define PPTP_IN_CALL_CONNECT 11
-#define PPTP_CALL_CLEAR_REQUEST 12
-#define PPTP_CALL_DISCONNECT_NOTIFY 13
-#define PPTP_WAN_ERROR_NOTIFY 14
-#define PPTP_SET_LINK_INFO 15
-
-#define PPTP_MSG_MAX 15
-
-/* PptpGeneralError values */
-#define PPTP_ERROR_CODE_NONE 0
-#define PPTP_NOT_CONNECTED 1
-#define PPTP_BAD_FORMAT 2
-#define PPTP_BAD_VALUE 3
-#define PPTP_NO_RESOURCE 4
-#define PPTP_BAD_CALLID 5
-#define PPTP_REMOVE_DEVICE_ERROR 6
-
-struct PptpControlHeader {
- __u16 messageType;
- __u16 reserved;
-};
-
-/* FramingCapability Bitmap Values */
-#define PPTP_FRAME_CAP_ASYNC 0x1
-#define PPTP_FRAME_CAP_SYNC 0x2
-
-/* BearerCapability Bitmap Values */
-#define PPTP_BEARER_CAP_ANALOG 0x1
-#define PPTP_BEARER_CAP_DIGITAL 0x2
-
-struct PptpStartSessionRequest {
- __u16 protocolVersion;
- __u8 reserved1;
- __u8 reserved2;
- __u32 framingCapability;
- __u32 bearerCapability;
- __u16 maxChannels;
- __u16 firmwareRevision;
- __u8 hostName[64];
- __u8 vendorString[64];
-};
-
-/* PptpStartSessionResultCode Values */
-#define PPTP_START_OK 1
-#define PPTP_START_GENERAL_ERROR 2
-#define PPTP_START_ALREADY_CONNECTED 3
-#define PPTP_START_NOT_AUTHORIZED 4
-#define PPTP_START_UNKNOWN_PROTOCOL 5
-
-struct PptpStartSessionReply {
- __u16 protocolVersion;
- __u8 resultCode;
- __u8 generalErrorCode;
- __u32 framingCapability;
- __u32 bearerCapability;
- __u16 maxChannels;
- __u16 firmwareRevision;
- __u8 hostName[64];
- __u8 vendorString[64];
-};
-
-/* PptpStopReasons */
-#define PPTP_STOP_NONE 1
-#define PPTP_STOP_PROTOCOL 2
-#define PPTP_STOP_LOCAL_SHUTDOWN 3
-
-struct PptpStopSessionRequest {
- __u8 reason;
-};
-
-/* PptpStopSessionResultCode */
-#define PPTP_STOP_OK 1
-#define PPTP_STOP_GENERAL_ERROR 2
-
-struct PptpStopSessionReply {
- __u8 resultCode;
- __u8 generalErrorCode;
-};
-
-struct PptpEchoRequest {
- __u32 identNumber;
-};
-
-/* PptpEchoReplyResultCode */
-#define PPTP_ECHO_OK 1
-#define PPTP_ECHO_GENERAL_ERROR 2
-
-struct PptpEchoReply {
- __u32 identNumber;
- __u8 resultCode;
- __u8 generalErrorCode;
- __u16 reserved;
-};
-
-/* PptpFramingType */
-#define PPTP_ASYNC_FRAMING 1
-#define PPTP_SYNC_FRAMING 2
-#define PPTP_DONT_CARE_FRAMING 3
-
-/* PptpCallBearerType */
-#define PPTP_ANALOG_TYPE 1
-#define PPTP_DIGITAL_TYPE 2
-#define PPTP_DONT_CARE_BEARER_TYPE 3
-
-struct PptpOutCallRequest {
- __u16 callID;
- __u16 callSerialNumber;
- __u32 minBPS;
- __u32 maxBPS;
- __u32 bearerType;
- __u32 framingType;
- __u16 packetWindow;
- __u16 packetProcDelay;
- __u16 reserved1;
- __u16 phoneNumberLength;
- __u16 reserved2;
- __u8 phoneNumber[64];
- __u8 subAddress[64];
-};
-
-/* PptpCallResultCode */
-#define PPTP_OUTCALL_CONNECT 1
-#define PPTP_OUTCALL_GENERAL_ERROR 2
-#define PPTP_OUTCALL_NO_CARRIER 3
-#define PPTP_OUTCALL_BUSY 4
-#define PPTP_OUTCALL_NO_DIAL_TONE 5
-#define PPTP_OUTCALL_TIMEOUT 6
-#define PPTP_OUTCALL_DONT_ACCEPT 7
-
-struct PptpOutCallReply {
- __u16 callID;
- __u16 peersCallID;
- __u8 resultCode;
- __u8 generalErrorCode;
- __u16 causeCode;
- __u32 connectSpeed;
- __u16 packetWindow;
- __u16 packetProcDelay;
- __u32 physChannelID;
-};
-
-struct PptpInCallRequest {
- __u16 callID;
- __u16 callSerialNumber;
- __u32 callBearerType;
- __u32 physChannelID;
- __u16 dialedNumberLength;
- __u16 dialingNumberLength;
- __u8 dialedNumber[64];
- __u8 dialingNumber[64];
- __u8 subAddress[64];
-};
-
-/* PptpInCallResultCode */
-#define PPTP_INCALL_ACCEPT 1
-#define PPTP_INCALL_GENERAL_ERROR 2
-#define PPTP_INCALL_DONT_ACCEPT 3
-
-struct PptpInCallReply {
- __u16 callID;
- __u16 peersCallID;
- __u8 resultCode;
- __u8 generalErrorCode;
- __u16 packetWindow;
- __u16 packetProcDelay;
- __u16 reserved;
-};
-
-struct PptpInCallConnected {
- __u16 peersCallID;
- __u16 reserved;
- __u32 connectSpeed;
- __u16 packetWindow;
- __u16 packetProcDelay;
- __u32 callFramingType;
-};
-
-struct PptpClearCallRequest {
- __u16 callID;
- __u16 reserved;
-};
-
-struct PptpCallDisconnectNotify {
- __u16 callID;
- __u8 resultCode;
- __u8 generalErrorCode;
- __u16 causeCode;
- __u16 reserved;
- __u8 callStatistics[128];
-};
-
-struct PptpWanErrorNotify {
- __u16 peersCallID;
- __u16 reserved;
- __u32 crcErrors;
- __u32 framingErrors;
- __u32 hardwareOverRuns;
- __u32 bufferOverRuns;
- __u32 timeoutErrors;
- __u32 alignmentErrors;
-};
-
-struct PptpSetLinkInfo {
- __u16 peersCallID;
- __u16 reserved;
- __u32 sendAccm;
- __u32 recvAccm;
-};
-
-
-struct pptp_priv_data {
- __u16 call_id;
- __u16 mcall_id;
- __u16 pcall_id;
-};
-
-union pptp_ctrl_union {
- struct PptpStartSessionRequest sreq;
- struct PptpStartSessionReply srep;
- struct PptpStopSessionRequest streq;
- struct PptpStopSessionReply strep;
- struct PptpOutCallRequest ocreq;
- struct PptpOutCallReply ocack;
- struct PptpInCallRequest icreq;
- struct PptpInCallReply icack;
- struct PptpInCallConnected iccon;
- struct PptpClearCallRequest clrreq;
- struct PptpCallDisconnectNotify disc;
- struct PptpWanErrorNotify wanerr;
- struct PptpSetLinkInfo setlink;
-};
-
-#endif /* __KERNEL__ */
-#endif /* _CONNTRACK_PPTP_H */
+++ /dev/null
-#ifndef _CONNTRACK_PROTO_GRE_H
-#define _CONNTRACK_PROTO_GRE_H
-#include <asm/byteorder.h>
-
-/* GRE PROTOCOL HEADER */
-
-/* GRE Version field */
-#define GRE_VERSION_1701 0x0
-#define GRE_VERSION_PPTP 0x1
-
-/* GRE Protocol field */
-#define GRE_PROTOCOL_PPTP 0x880B
-
-/* GRE Flags */
-#define GRE_FLAG_C 0x80
-#define GRE_FLAG_R 0x40
-#define GRE_FLAG_K 0x20
-#define GRE_FLAG_S 0x10
-#define GRE_FLAG_A 0x80
-
-#define GRE_IS_C(f) ((f)&GRE_FLAG_C)
-#define GRE_IS_R(f) ((f)&GRE_FLAG_R)
-#define GRE_IS_K(f) ((f)&GRE_FLAG_K)
-#define GRE_IS_S(f) ((f)&GRE_FLAG_S)
-#define GRE_IS_A(f) ((f)&GRE_FLAG_A)
-
-/* GRE is a mess: Four different standards */
-struct gre_hdr {
-#if defined(__LITTLE_ENDIAN_BITFIELD)
- __u16 rec:3,
- srr:1,
- seq:1,
- key:1,
- routing:1,
- csum:1,
- version:3,
- reserved:4,
- ack:1;
-#elif defined(__BIG_ENDIAN_BITFIELD)
- __u16 csum:1,
- routing:1,
- key:1,
- seq:1,
- srr:1,
- rec:3,
- ack:1,
- reserved:4,
- version:3;
-#else
-#error "Adjust your <asm/byteorder.h> defines"
-#endif
- __u16 protocol;
-};
-
-/* modified GRE header for PPTP */
-struct gre_hdr_pptp {
- __u8 flags; /* bitfield */
- __u8 version; /* should be GRE_VERSION_PPTP */
- __u16 protocol; /* should be GRE_PROTOCOL_PPTP */
- __u16 payload_len; /* size of ppp payload, not inc. gre header */
- __u16 call_id; /* peer's call_id for this session */
- __u32 seq; /* sequence number. Present if S==1 */
- __u32 ack; /* seq number of highest packet recieved by */
- /* sender in this session */
-};
-
-
-/* this is part of ip_conntrack */
-struct ip_ct_gre {
- unsigned int stream_timeout;
- unsigned int timeout;
-};
-
-/* this is part of ip_conntrack_expect */
-struct ip_ct_gre_expect {
- struct ip_ct_gre_keymap *keymap_orig, *keymap_reply;
-};
-
-#ifdef __KERNEL__
-struct ip_conntrack_expect;
-
-/* structure for original <-> reply keymap */
-struct ip_ct_gre_keymap {
- struct list_head list;
-
- struct ip_conntrack_tuple tuple;
-};
-
-
-/* add new tuple->key_reply pair to keymap */
-int ip_ct_gre_keymap_add(struct ip_conntrack_expect *exp,
- struct ip_conntrack_tuple *t,
- int reply);
-
-/* change an existing keymap entry */
-void ip_ct_gre_keymap_change(struct ip_ct_gre_keymap *km,
- struct ip_conntrack_tuple *t);
-
-/* delete keymap entries */
-void ip_ct_gre_keymap_destroy(struct ip_conntrack_expect *exp);
-
-
-/* get pointer to gre key, if present */
-static inline u_int32_t *gre_key(struct gre_hdr *greh)
-{
- if (!greh->key)
- return NULL;
- if (greh->csum || greh->routing)
- return (u_int32_t *) (greh+sizeof(*greh)+4);
- return (u_int32_t *) (greh+sizeof(*greh));
-}
-
-/* get pointer ot gre csum, if present */
-static inline u_int16_t *gre_csum(struct gre_hdr *greh)
-{
- if (!greh->csum)
- return NULL;
- return (u_int16_t *) (greh+sizeof(*greh));
-}
-
-#endif /* __KERNEL__ */
-
-#endif /* _CONNTRACK_PROTO_GRE_H */
+++ /dev/null
-/* PPTP constants and structs */
-#ifndef _NAT_PPTP_H
-#define _NAT_PPTP_H
-
-/* conntrack private data */
-struct ip_nat_pptp {
- u_int16_t pns_call_id; /* NAT'ed PNS call id */
- u_int16_t pac_call_id; /* NAT'ed PAC call id */
-};
-
-#endif /* _NAT_PPTP_H */
#define NFS_MOUNT_BROKEN_SUID 0x0400 /* 4 */
#define NFS_MOUNT_STRICTLOCK 0x1000 /* reserved for NFSv4 */
#define NFS_MOUNT_SECFLAVOUR 0x2000 /* 5 */
-#define NFS_MOUNT_TAGXID 0x8000 /* tagxid */
#define NFS_MOUNT_FLAGMASK 0xFFFF
#endif
u32 c_vers;
unsigned long c_timestamp;
union {
- struct kvec u_vec;
+ struct iovec u_vec;
u32 u_status;
} c_u;
};
int, struct file *);
void nfsd_close(struct file *);
int nfsd_read(struct svc_rqst *, struct svc_fh *,
- loff_t, struct kvec *,int, unsigned long *);
+ loff_t, struct iovec *,int, unsigned long *);
int nfsd_write(struct svc_rqst *, struct svc_fh *,
- loff_t, struct kvec *,int, unsigned long, int *);
+ loff_t, struct iovec *,int, unsigned long, int *);
int nfsd_readlink(struct svc_rqst *, struct svc_fh *,
char *, int *);
int nfsd_symlink(struct svc_rqst *, struct svc_fh *,
struct svc_fh fh;
__u32 offset;
__u32 count;
- struct kvec vec[RPCSVC_MAXPAGES];
+ struct iovec vec[RPCSVC_MAXPAGES];
int vlen;
};
svc_fh fh;
__u32 offset;
int len;
- struct kvec vec[RPCSVC_MAXPAGES];
+ struct iovec vec[RPCSVC_MAXPAGES];
int vlen;
};
struct svc_fh fh;
__u64 offset;
__u32 count;
- struct kvec vec[RPCSVC_MAXPAGES];
+ struct iovec vec[RPCSVC_MAXPAGES];
int vlen;
};
__u32 count;
int stable;
int len;
- struct kvec vec[RPCSVC_MAXPAGES];
+ struct iovec vec[RPCSVC_MAXPAGES];
int vlen;
};
stateid_t rd_stateid; /* request */
u64 rd_offset; /* request */
u32 rd_length; /* request */
- struct kvec rd_iov[RPCSVC_MAXPAGES];
+ struct iovec rd_iov[RPCSVC_MAXPAGES];
int rd_vlen;
struct svc_rqst *rd_rqstp; /* response */
u64 wr_offset; /* request */
u32 wr_stable_how; /* request */
u32 wr_buflen; /* request */
- struct kvec wr_vec[RPCSVC_MAXPAGES]; /* request */
+ struct iovec wr_vec[RPCSVC_MAXPAGES]; /* request */
int wr_vlen;
u32 wr_bytes_written; /* response */
--- /dev/null
+#error THIS FILE SHOULD NO LONGER BE USED
+
+#ifndef _NX_INLINE_H
+#define _NX_INLINE_H
+
+
+// #define NX_DEBUG
+
+#include <linux/kernel.h>
+#include <linux/rcupdate.h>
+#include <linux/sched.h>
+
+#include "vserver/network.h"
+
+#if defined(NX_DEBUG)
+#define nxdprintk(x...) printk("nxd: " x)
+#else
+#define nxdprintk(x...)
+#endif
+
+
+extern int proc_pid_nx_info(struct task_struct *, char *);
+
+
+#define get_nx_info(i) __get_nx_info(i,__FILE__,__LINE__)
+
+static inline struct nx_info *__get_nx_info(struct nx_info *nxi,
+ const char *_file, int _line)
+{
+ if (!nxi)
+ return NULL;
+ nxdprintk("get_nx_info(%p[#%d.%d])\t%s:%d\n",
+ nxi, nxi?nxi->nx_id:0, nxi?atomic_read(&nxi->nx_usecnt):0,
+ _file, _line);
+ atomic_inc(&nxi->nx_usecnt);
+ return nxi;
+}
+
+
+#define free_nx_info(nxi) \
+ call_rcu(&nxi->nx_rcu, rcu_free_nx_info, nxi);
+
+#define put_nx_info(i) __put_nx_info(i,__FILE__,__LINE__)
+
+static inline void __put_nx_info(struct nx_info *nxi, const char *_file, int _line)
+{
+ if (!nxi)
+ return;
+ nxdprintk("put_nx_info(%p[#%d.%d])\t%s:%d\n",
+ nxi, nxi?nxi->nx_id:0, nxi?atomic_read(&nxi->nx_usecnt):0,
+ _file, _line);
+ if (atomic_dec_and_test(&nxi->nx_usecnt))
+ free_nx_info(nxi);
+}
+
+
+#define set_nx_info(p,i) __set_nx_info(p,i,__FILE__,__LINE__)
+
+static inline void __set_nx_info(struct nx_info **nxp, struct nx_info *nxi,
+ const char *_file, int _line)
+{
+ BUG_ON(*nxp);
+ if (!nxi)
+ return;
+ nxdprintk("set_nx_info(%p[#%d.%d.%d])\t%s:%d\n",
+ nxi, nxi?nxi->nx_id:0,
+ nxi?atomic_read(&nxi->nx_usecnt):0,
+ nxi?atomic_read(&nxi->nx_refcnt):0,
+ _file, _line);
+ atomic_inc(&nxi->nx_refcnt);
+ *nxp = __get_nx_info(nxi, _file, _line);
+}
+
+#define clr_nx_info(p) __clr_nx_info(p,__FILE__,__LINE__)
+
+static inline void __clr_nx_info(struct nx_info **nxp,
+ const char *_file, int _line)
+{
+ struct nx_info *nxo = *nxp;
+
+ if (!nxo)
+ return;
+ nxdprintk("clr_nx_info(%p[#%d.%d.%d])\t%s:%d\n",
+ nxo, nxo?nxo->nx_id:0,
+ nxo?atomic_read(&nxo->nx_usecnt):0,
+ nxo?atomic_read(&nxo->nx_refcnt):0,
+ _file, _line);
+ *nxp = NULL;
+ wmb();
+ if (nxo && atomic_dec_and_test(&nxo->nx_refcnt))
+ unhash_nx_info(nxo);
+ __put_nx_info(nxo, _file, _line);
+}
+
+
+#define task_get_nx_info(i) __task_get_nx_info(i,__FILE__,__LINE__)
+
+static __inline__ struct nx_info *__task_get_nx_info(struct task_struct *p,
+ const char *_file, int _line)
+{
+ struct nx_info *nxi;
+
+ task_lock(p);
+ nxi = __get_nx_info(p->nx_info, _file, _line);
+ task_unlock(p);
+ return nxi;
+}
+
+#define nx_verify_info(p,i) \
+ __nx_verify_info((p)->nx_info,i,__FILE__,__LINE__)
+
+static __inline__ void __nx_verify_info(
+ struct nx_info *ipa, struct nx_info *ipb,
+ const char *_file, int _line)
+{
+ if (ipa == ipb)
+ return;
+ printk(KERN_ERR "ip bad assumption (%p==%p) at %s:%d\n",
+ ipa, ipb, _file, _line);
+}
+
+
+#define nx_task_nid(t) ((t)->nid)
+
+#define nx_current_nid() nx_task_nid(current)
+
+#define nx_check(c,m) __nx_check(nx_current_nid(),c,m)
+
+#define nx_weak_check(c,m) ((m) ? nx_check(c,m) : 1)
+
+#undef nxdprintk
+#define nxdprintk(x...)
+
+
+#define __nx_flags(v,m,f) (((v) & (m)) ^ (f))
+
+#define __nx_task_flags(t,m,f) \
+ (((t) && ((t)->nx_info)) ? \
+ __nx_flags((t)->nx_info->nx_flags,(m),(f)) : 0)
+
+#define nx_current_flags() \
+ ((current->nx_info) ? current->nx_info->nx_flags : 0)
+
+#define nx_flags(m,f) __nx_flags(nx_current_flags(),(m),(f))
+
+
+#define nx_current_ncaps() \
+ ((current->nx_info) ? current->nx_info->nx_ncaps : 0)
+
+#define nx_ncaps(c) (nx_current_ncaps() & (c))
+
+
+
+#define sock_nx_init(s) do { \
+ (s)->sk_nid = 0; \
+ (s)->sk_nx_info = NULL; \
+ } while (0)
+
+
+
+#endif
#define PG_compound 19 /* Part of a compound page */
#define PG_anon 20 /* Anonymous: anon_vma in mapping */
-#define PG_ckrm_account 21 /* This page is accounted by CKRM */
/*
extern struct pci_dev *isa_bridge;
#endif
-struct msix_entry {
- u16 vector; /* kernel uses to write allocated vector */
- u16 entry; /* driver uses to specify entry, OS writes */
-};
-
-#ifndef CONFIG_PCI_MSI
+#ifndef CONFIG_PCI_USE_VECTOR
static inline void pci_scan_msi_device(struct pci_dev *dev) {}
static inline int pci_enable_msi(struct pci_dev *dev) {return -1;}
-static inline void pci_disable_msi(struct pci_dev *dev) {}
-static inline int pci_enable_msix(struct pci_dev* dev,
- struct msix_entry *entries, int nvec) {return -1;}
-static inline void pci_disable_msix(struct pci_dev *dev) {}
static inline void msi_remove_pci_irq_vectors(struct pci_dev *dev) {}
#else
extern void pci_scan_msi_device(struct pci_dev *dev);
extern int pci_enable_msi(struct pci_dev *dev);
-extern void pci_disable_msi(struct pci_dev *dev);
-extern int pci_enable_msix(struct pci_dev* dev,
- struct msix_entry *entries, int nvec);
-extern void pci_disable_msix(struct pci_dev *dev);
extern void msi_remove_pci_irq_vectors(struct pci_dev *dev);
+extern int msi_alloc_vectors(struct pci_dev* dev, int *vector, int nvec);
+extern int msi_free_vectors(struct pci_dev* dev, int *vector, int nvec);
#endif
#endif /* CONFIG_PCI */
#define PCI_DEVICE_ID_TTI_HPT302 0x0006
#define PCI_DEVICE_ID_TTI_HPT371 0x0007
#define PCI_DEVICE_ID_TTI_HPT374 0x0008
-#define PCI_DEVICE_ID_TTI_HPT372N 0x0009 // apparently a 372N variant?
#define PCI_VENDOR_ID_VIA 0x1106
#define PCI_DEVICE_ID_VIA_8763_0 0x0198
#define PCI_DEVICE_ID_VIA_8380_0 0x0204
-#define PCI_DEVICE_ID_VIA_3238_0 0x0238
#define PCI_DEVICE_ID_VIA_PX8X0_0 0x0259
-#define PCI_DEVICE_ID_VIA_3269_0 0x0269
#define PCI_DEVICE_ID_VIA_K8T800PRO_0 0x0282
#define PCI_DEVICE_ID_VIA_8363_0 0x0305
#define PCI_DEVICE_ID_VIA_8371_0 0x0391
#define PCI_DEVICE_ID_VIA_82C686_6 0x3068
#define PCI_DEVICE_ID_VIA_8233_0 0x3074
#define PCI_DEVICE_ID_VIA_8633_0 0x3091
-#define PCI_DEVICE_ID_VIA_8367_0 0x3099
+#define PCI_DEVICE_ID_VIA_8367_0 0x3099
#define PCI_DEVICE_ID_VIA_8653_0 0x3101
-#define PCI_DEVICE_ID_VIA_8622 0x3102
+#define PCI_DEVICE_ID_VIA_8622 0x3102
#define PCI_DEVICE_ID_VIA_8233C_0 0x3109
#define PCI_DEVICE_ID_VIA_8361 0x3112
#define PCI_DEVICE_ID_VIA_XM266 0x3116
#define PCI_DEVICE_ID_VIA_PT880 0x3258
#define PCI_DEVICE_ID_VIA_P4M400 0x3209
#define PCI_DEVICE_ID_VIA_8237 0x3227
-#define PCI_DEVICE_ID_VIA_3296_0 0x0296
#define PCI_DEVICE_ID_VIA_86C100A 0x6100
#define PCI_DEVICE_ID_VIA_8231 0x8231
#define PCI_DEVICE_ID_VIA_8231_4 0x8235
#define PCI_DEVICE_ID_INTEL_82865_IG 0x2572
#define PCI_DEVICE_ID_INTEL_82875_HB 0x2578
#define PCI_DEVICE_ID_INTEL_82875_IG 0x257b
-#define PCI_DEVICE_ID_INTEL_82915G_HB 0x2580
-#define PCI_DEVICE_ID_INTEL_82915G_IG 0x2582
#define PCI_DEVICE_ID_INTEL_ICH6_0 0x2640
#define PCI_DEVICE_ID_INTEL_ICH6_1 0x2641
#define PCI_DEVICE_ID_INTEL_ICH6_2 0x2642
*/
enum {
MMAP_PAGE_ZERO = 0x0100000,
- ADDR_COMPAT_LAYOUT = 0x0200000,
- READ_IMPLIES_EXEC = 0x0400000,
ADDR_LIMIT_32BIT = 0x0800000,
SHORT_INODE = 0x1000000,
WHOLE_SECONDS = 0x2000000,
ADDR_LIMIT_3GB = 0x8000000,
};
-/*
- * Security-relevant compatibility flags that must be
- * cleared upon setuid or setgid exec:
- */
-#define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC)
-
/*
* Personality types.
*
struct tc_police
{
__u32 index;
+#ifdef CONFIG_NET_CLS_ACT
+ int refcnt;
+ int bindcnt;
+#endif
+/* Turned off because it requires new tc
+ * to work (for now maintain ABI)
+ *
+#ifdef CONFIG_NET_CLS_ACT
+ __u32 capab;
+#endif
+*/
int action;
#define TC_POLICE_UNSPEC TC_ACT_UNSPEC
#define TC_POLICE_OK TC_ACT_OK
__u32 mtu;
struct tc_ratespec rate;
struct tc_ratespec peakrate;
- int refcnt;
- int bindcnt;
- __u32 capab;
};
struct tcf_t
TCA_U32_DIVISOR,
TCA_U32_SEL,
TCA_U32_POLICE,
+#ifdef CONFIG_NET_CLS_ACT
TCA_U32_ACT,
+#endif
+#ifdef CONFIG_NET_CLS_IND
TCA_U32_INDEV,
- TCA_U32_PCNT,
+#endif
__TCA_U32_MAX
};
__u32 val;
int off;
int offmask;
+#ifdef CONFIG_CLS_U32_PERF
+ unsigned long kcnt;
+#endif
};
struct tc_u32_sel
short hoff;
__u32 hmask;
+#ifdef CONFIG_CLS_U32_PERF
+ unsigned long rcnt;
+ unsigned long rhit;
+#endif
struct tc_u32_key keys[0];
};
-#ifdef CONFIG_CLS_U32_PERF
-struct tc_u32_pcnt
-{
- __u64 rcnt;
- __u64 rhit;
- __u64 kcnts[0];
-};
-#endif
/* Flags */
#define TC_U32_TERMINAL 1
TCA_FW_UNSPEC,
TCA_FW_CLASSID,
TCA_FW_POLICE,
- TCA_FW_INDEV, /* used by CONFIG_NET_CLS_IND */
- TCA_FW_ACT, /* used by CONFIG_NET_CLS_ACT */
+#ifdef CONFIG_NET_CLS_IND
+ TCA_FW_INDEV,
+#endif
+#ifdef CONFIG_NET_CLS_ACT
+ TCA_FW_ACT,
+#endif
__TCA_FW_MAX
};
}
static inline
-void set_fd_set(unsigned long nr, void __user *ufdset, unsigned long *fdset)
+int set_fd_set(unsigned long nr, void __user *ufdset, unsigned long *fdset)
{
+ int ret = 0;
if (ufdset)
- __copy_to_user(ufdset, fdset, FDS_BYTES(nr));
+ ret = __copy_to_user(ufdset, fdset, FDS_BYTES(nr));
+ if (ret)
+ return -EFAULT;
+ return 0;
}
static inline
extern struct file_operations random_fops, urandom_fops;
#endif
-unsigned int get_random_int(void);
-unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len);
-
#endif /* __KERNEL___ */
#endif /* _LINUX_RANDOM_H */
+++ /dev/null
-/* Rule-based Classification Engine (RBCE) module
- *
- * Copyright (C) Hubertus Franke, IBM Corp. 2003
- * (C) Chandra Seetharaman, IBM Corp. 2003
- *
- * Module for loading of classification policies and providing
- * a user API for Class-based Kernel Resource Management (CKRM)
- *
- * Latest version, more details at http://ckrm.sf.net
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2.1 of the GNU Lesser General Public License
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- *
- *
- */
-
-/* Changes
- *
- * 25 Mar 2004
- * Integrate RBCE and CRBE into a single module
- *
- */
-
-#ifndef RBCE_H
-#define RBCE_H
-
-// data types defined in main rbcemod.c
-struct rbce_private_data;
-struct rbce_class;
-struct ckrm_core_class;
-
-#ifndef RBCE_EXTENSION
-
-/****************************************************************************
- *
- * RBCE STANDALONE VERSION, NO CHOICE FOR DATA COLLECTION
- *
- ****************************************************************************/
-
-#ifdef RBCE_SHOW_INCL
-#warning " ... RBCE .."
-#endif
-
-#define RBCE_MOD_DESCR "Rule Based Classification Engine Module for CKRM"
-#define RBCE_MOD_NAME "rbce"
-
-/* extension to private data: NONE */
-struct rbce_ext_private_data {
- /* empty data */
-};
-static inline void init_ext_private_data(struct rbce_private_data *dst)
-{
-}
-
-/* sending notification to user: NONE */
-
-static void notify_class_action(struct rbce_class *cls, int action)
-{
-}
-static inline void send_fork_notification(struct task_struct *tsk,
- struct ckrm_core_class *cls)
-{
-}
-static inline void send_exit_notification(struct task_struct *tsk)
-{
-}
-static inline void send_manual_notification(struct task_struct *tsk)
-{
-}
-
-/* extension initialization and destruction at module init and exit */
-static inline int init_rbce_ext_pre(void)
-{
- return 0;
-}
-static inline int init_rbce_ext_post(void)
-{
- return 0;
-}
-static inline void exit_rbce_ext(void)
-{
-}
-
-#else
-
-/***************************************************************************
- *
- * RBCE with User Level Notification
- *
- ***************************************************************************/
-
-#ifdef RBCE_SHOW_INCL
-#warning " ... CRBCE .."
-#ifdef RBCE_DO_SAMPLE
-#warning " ... CRBCE doing sampling ..."
-#endif
-#ifdef RBCE_DO_DELAY
-#warning " ... CRBCE doing delay ..."
-#endif
-#endif
-
-#define RBCE_MOD_DESCR "Rule Based Classification Engine Module" \
- "with Data Sampling/Delivery for CKRM"
-#define RBCE_MOD_NAME "crbce"
-
-#include <linux/crbce.h>
-
-struct rbce_ext_private_data {
- struct task_sample_info sample;
-};
-
-static void notify_class_action(struct rbce_class *cls, int action);
-#if 0
-static void send_fork_notification(struct task_struct *tsk,
- struct ckrm_core_class *cls);
-static void send_exit_notification(struct task_struct *tsk);
-static void send_manual_notification(struct task_struct *tsk);
-#endif
-
-#endif
-
-#endif // RBCE_H
extern struct file_operations stats_fileops;
extern struct file_operations config_fileops;
extern struct file_operations members_fileops;
-extern struct file_operations reclassify_fileops;
extern struct file_operations rcfs_file_operations;
// Callbacks into rcfs from ckrm
static inline void set_le_key_k_offset (int version, struct key * key, loff_t offset)
{
(version == KEY_FORMAT_3_5) ?
- (void)(key->u.k_offset_v1.k_offset = cpu_to_le32 (offset)) : /* jdm check */
- (void)(set_offset_v2_k_offset( &(key->u.k_offset_v2), offset ));
+ (key->u.k_offset_v1.k_offset = cpu_to_le32 (offset)) : /* jdm check */
+ (set_offset_v2_k_offset( &(key->u.k_offset_v2), offset ));
}
static inline void set_le_key_k_type (int version, struct key * key, int type)
{
(version == KEY_FORMAT_3_5) ?
- (void)(key->u.k_offset_v1.k_uniqueness = cpu_to_le32(type2uniqueness(type))):
- (void)(set_offset_v2_k_type( &(key->u.k_offset_v2), type ));
+ (key->u.k_offset_v1.k_uniqueness = cpu_to_le32(type2uniqueness(type))):
+ (set_offset_v2_k_type( &(key->u.k_offset_v2), type ));
}
static inline void set_le_ih_k_type (struct item_head * ih, int type)
{
REISERFS_XATTRS,
REISERFS_XATTRS_USER,
REISERFS_POSIXACL,
- REISERFS_TAGXID,
REISERFS_TEST1,
REISERFS_TEST2,
extern struct semaphore rtnl_sem;
+#define rtnl_exlock() do { } while(0)
+#define rtnl_exunlock() do { } while(0)
+#define rtnl_exlock_nowait() (0)
+
#define rtnl_shlock() down(&rtnl_sem)
#define rtnl_shlock_nowait() down_trylock(&rtnl_sem)
extern int nr_threads;
extern int last_pid;
DECLARE_PER_CPU(unsigned long, process_counts);
-// DECLARE_PER_CPU(struct runqueue, runqueues); -- removed after ckrm cpu v7 merge
extern int nr_processes(void);
extern unsigned long nr_running(void);
extern unsigned long nr_uninterruptible(void);
#include <linux/aio.h>
-extern unsigned long
-arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
- unsigned long, unsigned long);
-
-extern unsigned long
-arch_get_unmapped_exec_area(struct file *, unsigned long, unsigned long,
- unsigned long, unsigned long);
-extern unsigned long
-arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
- unsigned long len, unsigned long pgoff,
- unsigned long flags);
-extern void arch_unmap_area(struct vm_area_struct *area);
-extern void arch_unmap_area_topdown(struct vm_area_struct *area);
-
-
struct mm_struct {
struct vm_area_struct * mmap; /* list of VMAs */
struct rb_root mm_rb;
struct vm_area_struct * mmap_cache; /* last find_vma result */
- unsigned long (*get_unmapped_area) (struct file *filp,
- unsigned long addr, unsigned long len,
- unsigned long pgoff, unsigned long flags);
- unsigned long (*get_unmapped_exec_area) (struct file *filp,
- unsigned long addr, unsigned long len,
- unsigned long pgoff, unsigned long flags);
- void (*unmap_area) (struct vm_area_struct *area);
- unsigned long mmap_base; /* base of mmap area */
unsigned long free_area_cache; /* first hole */
+ unsigned long non_executable_cache; /* last hole top */
+ unsigned long mmap_top; /* top of mmap area */
pgd_t * pgd;
atomic_t mm_users; /* How many users with user space? */
atomic_t mm_count; /* How many references to "struct mm_struct" (users count as 1) */
mm_context_t context;
struct vx_info *mm_vx_info;
- /* Token based thrashing protection. */
- unsigned long swap_token_time;
- char recent_pagein;
-
/* coredumping support */
int core_waiters;
struct completion *core_startup_done, core_done;
struct kioctx *ioctx_list;
struct kioctx default_kioctx;
-#ifdef CONFIG_CKRM_RES_MEM
- struct ckrm_mem_res *memclass;
- struct list_head tasklist; /* list of all tasks sharing this address space */
- spinlock_t peertask_lock; /* protect above tasklist */
-#endif
};
extern int mmlist_nr;
struct audit_context; /* See audit.c */
struct mempolicy;
-#ifdef CONFIG_CKRM_CPU_SCHEDULE
-/**
- * ckrm_cpu_demand_stat - used to track the cpu demand of a task/class
- * @run: how much time it has been running since the counter started
- * @total: total time since the counter started
- * @last_sleep: the last time it sleeps, last_sleep = 0 when not sleeping
- * @recalc_interval: how often do we recalculate the cpu_demand
- * @cpu_demand: moving average of run/total
- */
-struct ckrm_cpu_demand_stat {
- unsigned long long run;
- unsigned long long total;
- unsigned long long last_sleep;
- unsigned long long recalc_interval;
- unsigned long cpu_demand; /*estimated cpu demand */
-};
-#endif
-
-
struct task_struct {
volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
struct thread_info *thread_info;
unsigned int time_slice, first_time_slice;
struct list_head tasks;
- /*
- * ptrace_list/ptrace_children forms the list of my children
- * that were stolen by a ptracer.
- */
struct list_head ptrace_children;
struct list_head ptrace_list;
*/
struct task_struct *real_parent; /* real parent process (when being debugged) */
struct task_struct *parent; /* parent process */
- /*
- * children/sibling forms the list of my children plus the
- * tasks I'm ptracing.
- */
struct list_head children; /* list of my children */
struct list_head sibling; /* linkage in my parent's children list */
struct task_struct *group_leader; /* threadgroup leader */
/* signal handlers */
struct signal_struct *signal;
struct sighand_struct *sighand;
+
sigset_t blocked, real_blocked;
struct sigpending pending;
int (*notifier)(void *priv);
void *notifier_data;
sigset_t *notifier_mask;
-
- /* TUX state */
- void *tux_info;
- void (*tux_exit)(void);
-
void *security;
struct audit_context *audit_context;
struct io_context *io_context;
- int ioprio;
-
unsigned long ptrace_message;
siginfo_t *last_siginfo; /* For ptrace use. */
struct list_head taskclass_link;
#ifdef CONFIG_CKRM_CPU_SCHEDULE
struct ckrm_cpu_class *cpu_class;
- //track cpu demand of this task
- struct ckrm_cpu_demand_stat demand_stat;
-#endif //CONFIG_CKRM_CPU_SCHEDULE
+#endif
#endif // CONFIG_CKRM_TYPE_TASKCLASS
-#ifdef CONFIG_CKRM_RES_MEM
- struct list_head mm_peers; // list of tasks using same mm_struct
-#endif // CONFIG_CKRM_RES_MEM
#endif // CONFIG_CKRM
+
struct task_delay_info delays;
};
atomic_inc(&u->__count);
return u;
}
-
extern void free_uid(struct user_struct *);
extern void switch_uid(struct user_struct *);
}
#endif
-
/*
* Routines for handling mm_structs
*/
return mm;
}
-
+
/* set thread flags in other task's structures
* - see asm/thread_info.h for TIF_xxxx flags available
*/
#define def_delay_var(var) unsigned long long var
#define get_delay(tsk,field) ((tsk)->delays.field)
+#define delay_value(x) (((unsigned long)(x))/1000)
#define start_delay(var) ((var) = sched_clock())
#define start_delay_set(var,flg) (set_delay_flag(current,flg),(var) = sched_clock())
#define inc_delay(tsk,field) (((tsk)->delays.field)++)
+#define add_delay_ts(tsk,field,start_ts,end_ts) ((tsk)->delays.field += delay_value((end_ts)-(start_ts)))
+#define add_delay_clear(tsk,field,start_ts,flg) (add_delay_ts(tsk,field,start_ts,sched_clock()),clear_delay_flag(tsk,flg))
-/* because of hardware timer drifts in SMPs and task continue on different cpu
- * then where the start_ts was taken there is a possibility that
- * end_ts < start_ts by some usecs. In this case we ignore the diff
- * and add nothing to the total.
- */
-#ifdef CONFIG_SMP
-#define test_ts_integrity(start_ts,end_ts) (likely((end_ts) > (start_ts)))
-#else
-#define test_ts_integrity(start_ts,end_ts) (1)
-#endif
-
-#define add_delay_ts(tsk,field,start_ts,end_ts) \
- do { if (test_ts_integrity(start_ts,end_ts)) (tsk)->delays.field += ((end_ts)-(start_ts)); } while (0)
-
-#define add_delay_clear(tsk,field,start_ts,flg) \
- do { \
- unsigned long long now = sched_clock();\
- add_delay_ts(tsk,field,start_ts,now); \
- clear_delay_flag(tsk,flg); \
- } while (0)
-
-static inline void add_io_delay(unsigned long long dstart)
+static inline void add_io_delay(unsigned long dstart)
{
struct task_struct * tsk = current;
- unsigned long long now = sched_clock();
- unsigned long long val;
-
- if (test_ts_integrity(dstart,now))
- val = now - dstart;
- else
- val = 0;
+ unsigned long val = delay_value(sched_clock()-dstart);
if (test_delay_flag(tsk,PF_MEMIO)) {
tsk->delays.mem_iowait_total += val;
tsk->delays.num_memwaits++;
-#ifdef HAVE_ARCH_PICK_MMAP_LAYOUT
-extern void arch_pick_mmap_layout(struct mm_struct *mm);
-#else
-static inline void arch_pick_mmap_layout(struct mm_struct *mm)
-{
- mm->mmap_base = TASK_UNMAPPED_BASE;
- mm->get_unmapped_area = arch_get_unmapped_area;
- mm->unmap_area = arch_unmap_area;
-}
-#endif
-
#endif /* __KERNEL__ */
#endif
* reason (mips != alpha!)
*/
#define ALPHA_KLUDGE_MCR (UART_MCR_OUT2 | UART_MCR_OUT1)
-#elif defined(CONFIG_SBC8560)
-/*
- * WindRiver did something similarly broken on their SBC8560 board. The
- * UART tristates its IRQ output while OUT2 is clear, but they pulled
- * the interrupt line _up_ instead of down, so if we register the IRQ
- * while the UART is in that state, we die in an IRQ storm. */
-#define ALPHA_KLUDGE_MCR (UART_MCR_OUT2)
#else
#define ALPHA_KLUDGE_MCR 0
#endif
/* PPC CPM type number */
#define PORT_CPM 58
-/* MPC52xx type numbers */
-#define PORT_MPC52xx 59
-
#ifdef __KERNEL__
#include <linux/config.h>
time_t shm_ctim;
pid_t shm_cprid;
pid_t shm_lprid;
- struct user_struct *mlock_user;
+ struct user_struct * mlock_user;
};
/* shm_mode upper byte flags */
#define SHMEM_NR_DIRECT 16
-#define TMPFS_SUPER_MAGIC 0x01021994
-
-
struct shmem_inode_info {
spinlock_t lock;
unsigned long next_index;
* want to keep them across layers you have to do a skb_clone()
* first. This is owned by whoever has the skb queued ATM.
*/
- char cb[40];
+ char cb[48];
unsigned int len,
data_len,
#endif
#endif
- xid_t xid; /* VServer context ID */
/* These elements must be at the end, see alloc_skb() for details. */
extern void skb_init(void);
extern void skb_add_mtu(int mtu);
-struct skb_iter {
- /* Iteration functions set these */
- unsigned char *data;
- unsigned int len;
-
- /* Private to iteration */
- unsigned int nextfrag;
- struct sk_buff *fraglist;
-};
-
-/* Keep iterating until skb_iter_next returns false. */
-extern void skb_iter_first(const struct sk_buff *skb, struct skb_iter *i);
-extern int skb_iter_next(const struct sk_buff *skb, struct skb_iter *i);
-/* Call this if aborting loop before !skb_iter_next */
-extern void skb_iter_abort(const struct sk_buff *skb, struct skb_iter *i);
-
-struct tux_req_struct;
-
#ifdef CONFIG_NETFILTER
static inline void nf_conntrack_put(struct nf_ct_info *nfct)
{
+++ /dev/null
-/*
- * Definitions for MIBs
- *
- * Author: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
- */
-
-#ifndef _LINUX_SNMP_H
-#define _LINUX_SNMP_H
-
-/* ipstats mib definitions */
-/*
- * RFC 1213: MIB-II
- * RFC 2011 (updates 1213): SNMPv2-MIB-IP
- * RFC 2863: Interfaces Group MIB
- * RFC 2465: IPv6 MIB: General Group
- * draft-ietf-ipv6-rfc2011-update-10.txt: MIB for IP: IP Statistics Tables
- */
-enum
-{
- IPSTATS_MIB_NUM = 0,
- IPSTATS_MIB_INRECEIVES, /* InReceives */
- IPSTATS_MIB_INHDRERRORS, /* InHdrErrors */
- IPSTATS_MIB_INTOOBIGERRORS, /* InTooBigErrors */
- IPSTATS_MIB_INNOROUTES, /* InNoRoutes */
- IPSTATS_MIB_INADDRERRORS, /* InAddrErrors */
- IPSTATS_MIB_INUNKNOWNPROTOS, /* InUnknownProtos */
- IPSTATS_MIB_INTRUNCATEDPKTS, /* InTruncatedPkts */
- IPSTATS_MIB_INDISCARDS, /* InDiscards */
- IPSTATS_MIB_INDELIVERS, /* InDelivers */
- IPSTATS_MIB_OUTFORWDATAGRAMS, /* OutForwDatagrams */
- IPSTATS_MIB_OUTREQUESTS, /* OutRequests */
- IPSTATS_MIB_OUTDISCARDS, /* OutDiscards */
- IPSTATS_MIB_OUTNOROUTES, /* OutNoRoutes */
- IPSTATS_MIB_REASMTIMEOUT, /* ReasmTimeout */
- IPSTATS_MIB_REASMREQDS, /* ReasmReqds */
- IPSTATS_MIB_REASMOKS, /* ReasmOKs */
- IPSTATS_MIB_REASMFAILS, /* ReasmFails */
- IPSTATS_MIB_FRAGOKS, /* FragOKs */
- IPSTATS_MIB_FRAGFAILS, /* FragFails */
- IPSTATS_MIB_FRAGCREATES, /* FragCreates */
- IPSTATS_MIB_INMCASTPKTS, /* InMcastPkts */
- IPSTATS_MIB_OUTMCASTPKTS, /* OutMcastPkts */
- __IPSTATS_MIB_MAX
-};
-
-/* icmp mib definitions */
-/*
- * RFC 1213: MIB-II ICMP Group
- * RFC 2011 (updates 1213): SNMPv2 MIB for IP: ICMP group
- */
-enum
-{
- ICMP_MIB_NUM = 0,
- ICMP_MIB_INMSGS, /* InMsgs */
- ICMP_MIB_INERRORS, /* InErrors */
- ICMP_MIB_INDESTUNREACHS, /* InDestUnreachs */
- ICMP_MIB_INTIMEEXCDS, /* InTimeExcds */
- ICMP_MIB_INPARMPROBS, /* InParmProbs */
- ICMP_MIB_INSRCQUENCHS, /* InSrcQuenchs */
- ICMP_MIB_INREDIRECTS, /* InRedirects */
- ICMP_MIB_INECHOS, /* InEchos */
- ICMP_MIB_INECHOREPS, /* InEchoReps */
- ICMP_MIB_INTIMESTAMPS, /* InTimestamps */
- ICMP_MIB_INTIMESTAMPREPS, /* InTimestampReps */
- ICMP_MIB_INADDRMASKS, /* InAddrMasks */
- ICMP_MIB_INADDRMASKREPS, /* InAddrMaskReps */
- ICMP_MIB_OUTMSGS, /* OutMsgs */
- ICMP_MIB_OUTERRORS, /* OutErrors */
- ICMP_MIB_OUTDESTUNREACHS, /* OutDestUnreachs */
- ICMP_MIB_OUTTIMEEXCDS, /* OutTimeExcds */
- ICMP_MIB_OUTPARMPROBS, /* OutParmProbs */
- ICMP_MIB_OUTSRCQUENCHS, /* OutSrcQuenchs */
- ICMP_MIB_OUTREDIRECTS, /* OutRedirects */
- ICMP_MIB_OUTECHOS, /* OutEchos */
- ICMP_MIB_OUTECHOREPS, /* OutEchoReps */
- ICMP_MIB_OUTTIMESTAMPS, /* OutTimestamps */
- ICMP_MIB_OUTTIMESTAMPREPS, /* OutTimestampReps */
- ICMP_MIB_OUTADDRMASKS, /* OutAddrMasks */
- ICMP_MIB_OUTADDRMASKREPS, /* OutAddrMaskReps */
- __ICMP_MIB_MAX
-};
-
-/* icmp6 mib definitions */
-/*
- * RFC 2466: ICMPv6-MIB
- */
-enum
-{
- ICMP6_MIB_NUM = 0,
- ICMP6_MIB_INMSGS, /* InMsgs */
- ICMP6_MIB_INERRORS, /* InErrors */
- ICMP6_MIB_INDESTUNREACHS, /* InDestUnreachs */
- ICMP6_MIB_INPKTTOOBIGS, /* InPktTooBigs */
- ICMP6_MIB_INTIMEEXCDS, /* InTimeExcds */
- ICMP6_MIB_INPARMPROBLEMS, /* InParmProblems */
- ICMP6_MIB_INECHOS, /* InEchos */
- ICMP6_MIB_INECHOREPLIES, /* InEchoReplies */
- ICMP6_MIB_INGROUPMEMBQUERIES, /* InGroupMembQueries */
- ICMP6_MIB_INGROUPMEMBRESPONSES, /* InGroupMembResponses */
- ICMP6_MIB_INGROUPMEMBREDUCTIONS, /* InGroupMembReductions */
- ICMP6_MIB_INROUTERSOLICITS, /* InRouterSolicits */
- ICMP6_MIB_INROUTERADVERTISEMENTS, /* InRouterAdvertisements */
- ICMP6_MIB_INNEIGHBORSOLICITS, /* InNeighborSolicits */
- ICMP6_MIB_INNEIGHBORADVERTISEMENTS, /* InNeighborAdvertisements */
- ICMP6_MIB_INREDIRECTS, /* InRedirects */
- ICMP6_MIB_OUTMSGS, /* OutMsgs */
- ICMP6_MIB_OUTDESTUNREACHS, /* OutDestUnreachs */
- ICMP6_MIB_OUTPKTTOOBIGS, /* OutPktTooBigs */
- ICMP6_MIB_OUTTIMEEXCDS, /* OutTimeExcds */
- ICMP6_MIB_OUTPARMPROBLEMS, /* OutParmProblems */
- ICMP6_MIB_OUTECHOREPLIES, /* OutEchoReplies */
- ICMP6_MIB_OUTROUTERSOLICITS, /* OutRouterSolicits */
- ICMP6_MIB_OUTNEIGHBORSOLICITS, /* OutNeighborSolicits */
- ICMP6_MIB_OUTNEIGHBORADVERTISEMENTS, /* OutNeighborAdvertisements */
- ICMP6_MIB_OUTREDIRECTS, /* OutRedirects */
- ICMP6_MIB_OUTGROUPMEMBRESPONSES, /* OutGroupMembResponses */
- ICMP6_MIB_OUTGROUPMEMBREDUCTIONS, /* OutGroupMembReductions */
- __ICMP6_MIB_MAX
-};
-
-/* tcp mib definitions */
-/*
- * RFC 1213: MIB-II TCP group
- * RFC 2012 (updates 1213): SNMPv2-MIB-TCP
- */
-enum
-{
- TCP_MIB_NUM = 0,
- TCP_MIB_RTOALGORITHM, /* RtoAlgorithm */
- TCP_MIB_RTOMIN, /* RtoMin */
- TCP_MIB_RTOMAX, /* RtoMax */
- TCP_MIB_MAXCONN, /* MaxConn */
- TCP_MIB_ACTIVEOPENS, /* ActiveOpens */
- TCP_MIB_PASSIVEOPENS, /* PassiveOpens */
- TCP_MIB_ATTEMPTFAILS, /* AttemptFails */
- TCP_MIB_ESTABRESETS, /* EstabResets */
- TCP_MIB_CURRESTAB, /* CurrEstab */
- TCP_MIB_INSEGS, /* InSegs */
- TCP_MIB_OUTSEGS, /* OutSegs */
- TCP_MIB_RETRANSSEGS, /* RetransSegs */
- TCP_MIB_INERRS, /* InErrs */
- TCP_MIB_OUTRSTS, /* OutRsts */
- __TCP_MIB_MAX
-};
-
-/* udp mib definitions */
-/*
- * RFC 1213: MIB-II UDP group
- * RFC 2013 (updates 1213): SNMPv2-MIB-UDP
- */
-enum
-{
- UDP_MIB_NUM = 0,
- UDP_MIB_INDATAGRAMS, /* InDatagrams */
- UDP_MIB_NOPORTS, /* NoPorts */
- UDP_MIB_INERRORS, /* InErrors */
- UDP_MIB_OUTDATAGRAMS, /* OutDatagrams */
- __UDP_MIB_MAX
-};
-
-/* sctp mib definitions */
-/*
- * draft-ietf-sigtran-sctp-mib-07.txt
- */
-enum
-{
- SCTP_MIB_NUM = 0,
- SCTP_MIB_CURRESTAB, /* CurrEstab */
- SCTP_MIB_ACTIVEESTABS, /* ActiveEstabs */
- SCTP_MIB_PASSIVEESTABS, /* PassiveEstabs */
- SCTP_MIB_ABORTEDS, /* Aborteds */
- SCTP_MIB_SHUTDOWNS, /* Shutdowns */
- SCTP_MIB_OUTOFBLUES, /* OutOfBlues */
- SCTP_MIB_CHECKSUMERRORS, /* ChecksumErrors */
- SCTP_MIB_OUTCTRLCHUNKS, /* OutCtrlChunks */
- SCTP_MIB_OUTORDERCHUNKS, /* OutOrderChunks */
- SCTP_MIB_OUTUNORDERCHUNKS, /* OutUnorderChunks */
- SCTP_MIB_INCTRLCHUNKS, /* InCtrlChunks */
- SCTP_MIB_INORDERCHUNKS, /* InOrderChunks */
- SCTP_MIB_INUNORDERCHUNKS, /* InUnorderChunks */
- SCTP_MIB_FRAGUSRMSGS, /* FragUsrMsgs */
- SCTP_MIB_REASMUSRMSGS, /* ReasmUsrMsgs */
- SCTP_MIB_OUTSCTPPACKS, /* OutSCTPPacks */
- SCTP_MIB_INSCTPPACKS, /* InSCTPPacks */
- SCTP_MIB_RTOALGORITHM, /* RtoAlgorithm */
- SCTP_MIB_RTOMIN, /* RtoMin */
- SCTP_MIB_RTOMAX, /* RtoMax */
- SCTP_MIB_RTOINITIAL, /* RtoInitial */
- SCTP_MIB_VALCOOKIELIFE, /* ValCookieLife */
- SCTP_MIB_MAXINITRETR, /* MaxInitRetr */
- __SCTP_MIB_MAX
-};
-
-/* linux mib definitions */
-enum
-{
- LINUX_MIB_NUM = 0,
- LINUX_MIB_SYNCOOKIESSENT, /* SyncookiesSent */
- LINUX_MIB_SYNCOOKIESRECV, /* SyncookiesRecv */
- LINUX_MIB_SYNCOOKIESFAILED, /* SyncookiesFailed */
- LINUX_MIB_EMBRYONICRSTS, /* EmbryonicRsts */
- LINUX_MIB_PRUNECALLED, /* PruneCalled */
- LINUX_MIB_RCVPRUNED, /* RcvPruned */
- LINUX_MIB_OFOPRUNED, /* OfoPruned */
- LINUX_MIB_OUTOFWINDOWICMPS, /* OutOfWindowIcmps */
- LINUX_MIB_LOCKDROPPEDICMPS, /* LockDroppedIcmps */
- LINUX_MIB_ARPFILTER, /* ArpFilter */
- LINUX_MIB_TIMEWAITED, /* TimeWaited */
- LINUX_MIB_TIMEWAITRECYCLED, /* TimeWaitRecycled */
- LINUX_MIB_TIMEWAITKILLED, /* TimeWaitKilled */
- LINUX_MIB_PAWSPASSIVEREJECTED, /* PAWSPassiveRejected */
- LINUX_MIB_PAWSACTIVEREJECTED, /* PAWSActiveRejected */
- LINUX_MIB_PAWSESTABREJECTED, /* PAWSEstabRejected */
- LINUX_MIB_DELAYEDACKS, /* DelayedACKs */
- LINUX_MIB_DELAYEDACKLOCKED, /* DelayedACKLocked */
- LINUX_MIB_DELAYEDACKLOST, /* DelayedACKLost */
- LINUX_MIB_LISTENOVERFLOWS, /* ListenOverflows */
- LINUX_MIB_LISTENDROPS, /* ListenDrops */
- LINUX_MIB_TCPPREQUEUED, /* TCPPrequeued */
- LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, /* TCPDirectCopyFromBacklog */
- LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, /* TCPDirectCopyFromPrequeue */
- LINUX_MIB_TCPPREQUEUEDROPPED, /* TCPPrequeueDropped */
- LINUX_MIB_TCPHPHITS, /* TCPHPHits */
- LINUX_MIB_TCPHPHITSTOUSER, /* TCPHPHitsToUser */
- LINUX_MIB_TCPPUREACKS, /* TCPPureAcks */
- LINUX_MIB_TCPHPACKS, /* TCPHPAcks */
- LINUX_MIB_TCPRENORECOVERY, /* TCPRenoRecovery */
- LINUX_MIB_TCPSACKRECOVERY, /* TCPSackRecovery */
- LINUX_MIB_TCPSACKRENEGING, /* TCPSACKReneging */
- LINUX_MIB_TCPFACKREORDER, /* TCPFACKReorder */
- LINUX_MIB_TCPSACKREORDER, /* TCPSACKReorder */
- LINUX_MIB_TCPRENOREORDER, /* TCPRenoReorder */
- LINUX_MIB_TCPTSREORDER, /* TCPTSReorder */
- LINUX_MIB_TCPFULLUNDO, /* TCPFullUndo */
- LINUX_MIB_TCPPARTIALUNDO, /* TCPPartialUndo */
- LINUX_MIB_TCPDSACKUNDO, /* TCPDSACKUndo */
- LINUX_MIB_TCPLOSSUNDO, /* TCPLossUndo */
- LINUX_MIB_TCPLOSS, /* TCPLoss */
- LINUX_MIB_TCPLOSTRETRANSMIT, /* TCPLostRetransmit */
- LINUX_MIB_TCPRENOFAILURES, /* TCPRenoFailures */
- LINUX_MIB_TCPSACKFAILURES, /* TCPSackFailures */
- LINUX_MIB_TCPLOSSFAILURES, /* TCPLossFailures */
- LINUX_MIB_TCPFASTRETRANS, /* TCPFastRetrans */
- LINUX_MIB_TCPFORWARDRETRANS, /* TCPForwardRetrans */
- LINUX_MIB_TCPSLOWSTARTRETRANS, /* TCPSlowStartRetrans */
- LINUX_MIB_TCPTIMEOUTS, /* TCPTimeouts */
- LINUX_MIB_TCPRENORECOVERYFAIL, /* TCPRenoRecoveryFail */
- LINUX_MIB_TCPSACKRECOVERYFAIL, /* TCPSackRecoveryFail */
- LINUX_MIB_TCPSCHEDULERFAILED, /* TCPSchedulerFailed */
- LINUX_MIB_TCPRCVCOLLAPSED, /* TCPRcvCollapsed */
- LINUX_MIB_TCPDSACKOLDSENT, /* TCPDSACKOldSent */
- LINUX_MIB_TCPDSACKOFOSENT, /* TCPDSACKOfoSent */
- LINUX_MIB_TCPDSACKRECV, /* TCPDSACKRecv */
- LINUX_MIB_TCPDSACKOFORECV, /* TCPDSACKOfoRecv */
- LINUX_MIB_TCPABORTONSYN, /* TCPAbortOnSyn */
- LINUX_MIB_TCPABORTONDATA, /* TCPAbortOnData */
- LINUX_MIB_TCPABORTONCLOSE, /* TCPAbortOnClose */
- LINUX_MIB_TCPABORTONMEMORY, /* TCPAbortOnMemory */
- LINUX_MIB_TCPABORTONTIMEOUT, /* TCPAbortOnTimeout */
- LINUX_MIB_TCPABORTONLINGER, /* TCPAbortOnLinger */
- LINUX_MIB_TCPABORTFAILED, /* TCPAbortFailed */
- LINUX_MIB_TCPMEMORYPRESSURES, /* TCPMemoryPressures */
- __LINUX_MIB_MAX
-};
-
-#endif /* _LINUX_SNMP_H */
#define SOL_NETBEUI 267
#define SOL_LLC 268
-/* PlanetLab PL2525: reset the context ID of an existing socket */
-#define SO_SETXID SO_PEERCRED
-
/* IPX options */
#define IPX_TYPE 1
extern int move_addr_to_kernel(void __user *uaddr, int ulen, void *kaddr);
extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data);
-struct socket;
-struct file * sock_map_file(struct socket *sock);
-extern int sock_map_fd(struct socket *sock);
-extern struct socket *sockfd_lookup(int fd, int *err);
-
#endif
#endif /* not kernel and not glibc */
#endif /* _LINUX_SOCKET_H */
unsigned int nlink;
uid_t uid;
gid_t gid;
- xid_t xid;
dev_t rdev;
loff_t size;
struct timespec atime;
struct auth_cred {
uid_t uid;
gid_t gid;
- xid_t xid;
struct group_info *group_info;
};
cl_autobind : 1,/* use getport() */
cl_droppriv : 1,/* enable NFS suid hack */
cl_oneshot : 1,/* dispose after use */
- cl_dead : 1,/* abandoned */
- cl_tagxid : 1;/* do xid tagging */
+ cl_dead : 1;/* abandoned */
struct rpc_rtt * cl_rtt; /* RTO estimator data */
struct rpc_portmap * cl_pmap; /* port mapping */
* read responses (that have a header, and some data pages, and possibly
* a tail) and means we can share some client side routines.
*
- * The xdr_buf.head kvec always points to the first page in the rq_*pages
+ * The xdr_buf.head iovec always points to the first page in the rq_*pages
* list. The xdr_buf.pages pointer points to the second page on that
* list. xdr_buf.tail points to the end of the first page.
* This assumes that the non-page part of an rpc reply will fit
*/
#define RPCSVC_MAXPAGES ((RPCSVC_MAXPAYLOAD+PAGE_SIZE-1)/PAGE_SIZE + 2)
-static inline u32 svc_getu32(struct kvec *iov)
+static inline u32 svc_getu32(struct iovec *iov)
{
u32 val, *vp;
vp = iov->iov_base;
iov->iov_len -= sizeof(u32);
return val;
}
-static inline void svc_putu32(struct kvec *iov, u32 val)
+static inline void svc_putu32(struct iovec *iov, u32 val)
{
u32 *vp = iov->iov_base + iov->iov_len;
*vp = val;
xdr_argsize_check(struct svc_rqst *rqstp, u32 *p)
{
char *cp = (char *)p;
- struct kvec *vec = &rqstp->rq_arg.head[0];
+ struct iovec *vec = &rqstp->rq_arg.head[0];
return cp - (char*)vec->iov_base <= vec->iov_len;
}
static inline int
xdr_ressize_check(struct svc_rqst *rqstp, u32 *p)
{
- struct kvec *vec = &rqstp->rq_res.head[0];
+ struct iovec *vec = &rqstp->rq_res.head[0];
char *cp = (char*)p;
vec->iov_len = cp - (char*)vec->iov_base;
* operations and/or has a need for scatter/gather involving pages.
*/
struct xdr_buf {
- struct kvec head[1], /* RPC header + non-page data */
+ struct iovec head[1], /* RPC header + non-page data */
tail[1]; /* Appended after page data */
struct page ** pages; /* Array of contiguous pages */
}
/*
- * Adjust kvec to reflect end of xdr'ed data (RPC client XDR)
+ * Adjust iovec to reflect end of xdr'ed data (RPC client XDR)
*/
static inline int
-xdr_adjust_iovec(struct kvec *iov, u32 *p)
+xdr_adjust_iovec(struct iovec *iov, u32 *p)
{
return iov->iov_len = ((u8 *) p - (u8 *) iov->iov_base);
}
-void xdr_shift_iovec(struct kvec *, int, size_t);
+void xdr_shift_iovec(struct iovec *, int, size_t);
/*
* Maximum number of iov's we use.
/*
* XDR buffer helper functions
*/
-extern int xdr_kmap(struct kvec *, struct xdr_buf *, size_t);
+extern int xdr_kmap(struct iovec *, struct xdr_buf *, size_t);
extern void xdr_kunmap(struct xdr_buf *, size_t);
extern void xdr_shift_buf(struct xdr_buf *, size_t);
extern void _copy_from_pages(char *, struct page **, size_t, size_t);
-extern void xdr_buf_from_iov(struct kvec *, struct xdr_buf *);
+extern void xdr_buf_from_iov(struct iovec *, struct xdr_buf *);
extern int xdr_buf_subsegment(struct xdr_buf *, struct xdr_buf *, int, int);
extern int xdr_buf_read_netobj(struct xdr_buf *, struct xdr_netobj *, int);
extern int read_bytes_from_xdr_buf(struct xdr_buf *buf, int base, void *obj, int len);
struct xdr_buf *buf; /* XDR buffer to read/write */
uint32_t *end; /* end of available buffer space */
- struct kvec *iov; /* pointer to the current kvec */
+ struct iovec *iov; /* pointer to the current iovec */
};
extern void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, uint32_t *p);
#define vm_swap_full() (nr_swap_pages*2 < total_swap_pages)
/* linux/mm/oom_kill.c */
-extern void out_of_memory(int gfp_mask);
+extern void out_of_memory(void);
/* linux/mm/memory.c */
extern void swapin_readahead(swp_entry_t, unsigned long, struct vm_area_struct *);
extern struct page * lookup_swap_cache(swp_entry_t);
extern struct page * read_swap_cache_async(swp_entry_t, struct vm_area_struct *vma,
unsigned long addr);
-/* linux/mm/thrash.c */
-#ifdef CONFIG_SWAP
-extern struct mm_struct * swap_token_mm;
-extern void grab_swap_token(void);
-extern void __put_swap_token(struct mm_struct *);
-
-static inline int has_swap_token(struct mm_struct *mm)
-{
- return (mm == swap_token_mm);
-}
-
-static inline void put_swap_token(struct mm_struct *mm)
-{
- if (has_swap_token(mm))
- __put_swap_token(mm);
-}
-#else /* CONFIG_SWAP */
-#define put_swap_token(x) do { } while(0)
-#define grab_swap_token do { } while(0)
-#define has_swap_token 0
-#endif /* CONFIG_SWAP */
/* linux/mm/swapfile.c */
extern long total_swap_pages;
KERN_SPARC_SCONS_PWROFF=64, /* int: serial console power-off halt */
KERN_HZ_TIMER=65, /* int: hz timer on or off */
KERN_VSHELPER=66, /* string: path to vshelper policy agent */
- KERN_DUMP=67, /* dir: dump parameters */
};
VM_BLOCK_DUMP=24, /* block dump mode */
VM_HUGETLB_GROUP=25, /* permitted hugetlb group */
VM_VFS_CACHE_PRESSURE=26, /* dcache/icache reclaim pressure */
- VM_LEGACY_VA_LAYOUT=27, /* legacy/compatibility virtual address space layout */
};
NET_DECNET=15,
NET_ECONET=16,
NET_SCTP=17,
- NET_TUX=18,
};
/* /proc/sys/kernel/random */
NET_TCP_BIC_LOW_WINDOW=104,
NET_TCP_DEFAULT_WIN_SCALE=105,
NET_TCP_MODERATE_RCVBUF=106,
-#ifdef CONFIG_ICMP_IPOD
- NET_IPV4_ICMP_IPOD_VERSION,
- NET_IPV4_ICMP_IPOD_ENABLED,
- NET_IPV4_ICMP_IPOD_HOST,
- NET_IPV4_ICMP_IPOD_MASK,
- NET_IPV4_ICMP_IPOD_KEY,
-#endif
};
enum {
NET_BRIDGE_NF_FILTER_VLAN_TAGGED = 4,
};
-/* /proc/sys/net/tux/ */
-enum {
- NET_TUX_DOCROOT = 1,
- NET_TUX_LOGFILE = 2,
- NET_TUX_EXTCGI = 3,
- NET_TUX_STOP = 4,
- NET_TUX_CLIENTPORT = 5,
- NET_TUX_LOGGING = 6,
- NET_TUX_SERVERPORT = 7,
- NET_TUX_THREADS = 8,
- NET_TUX_KEEPALIVE_TIMEOUT = 9,
- NET_TUX_MAX_KEEPALIVE_BW = 10,
- NET_TUX_DEFER_ACCEPT = 11,
- NET_TUX_MAX_FREE_REQUESTS = 12,
- NET_TUX_MAX_CONNECT = 13,
- NET_TUX_MAX_BACKLOG = 14,
- NET_TUX_MODE_FORBIDDEN = 15,
- NET_TUX_MODE_ALLOWED = 16,
- NET_TUX_MODE_USERSPACE = 17,
- NET_TUX_MODE_CGI = 18,
- NET_TUX_CGI_UID = 19,
- NET_TUX_CGI_GID = 20,
- NET_TUX_CGIROOT = 21,
- NET_TUX_LOGENTRY_ALIGN_ORDER = 22,
- NET_TUX_NONAGLE = 23,
- NET_TUX_ACK_PINGPONG = 24,
- NET_TUX_PUSH_ALL = 25,
- NET_TUX_ZEROCOPY_PARSE = 26,
- NET_CONFIG_TUX_DEBUG_BLOCKING = 27,
- NET_TUX_PAGE_AGE_START = 28,
- NET_TUX_PAGE_AGE_ADV = 29,
- NET_TUX_PAGE_AGE_MAX = 30,
- NET_TUX_VIRTUAL_SERVER = 31,
- NET_TUX_MAX_OBJECT_SIZE = 32,
- NET_TUX_COMPRESSION = 33,
- NET_TUX_NOID = 34,
- NET_TUX_CGI_INHERIT_CPU = 35,
- NET_TUX_CGI_CPU_MASK = 36,
- NET_TUX_ZEROCOPY_HEADER = 37,
- NET_TUX_ZEROCOPY_SENDFILE = 38,
- NET_TUX_ALL_USERSPACE = 39,
- NET_TUX_REDIRECT_LOGGING = 40,
- NET_TUX_REFERER_LOGGING = 41,
- NET_TUX_MAX_HEADER_LEN = 42,
- NET_TUX_404_PAGE = 43,
- NET_TUX_MAX_KEEPALIVES = 44,
- NET_TUX_IGNORE_QUERY = 45,
-};
-
/* CTL_PROC names: */
/* CTL_FS names: */
void **context);
typedef int proc_handler (ctl_table *ctl, int write, struct file * filp,
- void __user *buffer, size_t *lenp, loff_t *ppos);
+ void __user *buffer, size_t *lenp);
extern int proc_dostring(ctl_table *, int, struct file *,
- void __user *, size_t *, loff_t *);
+ void __user *, size_t *);
extern int proc_dointvec(ctl_table *, int, struct file *,
- void __user *, size_t *, loff_t *);
+ void __user *, size_t *);
extern int proc_dointvec_bset(ctl_table *, int, struct file *,
- void __user *, size_t *, loff_t *);
+ void __user *, size_t *);
extern int proc_dointvec_minmax(ctl_table *, int, struct file *,
- void __user *, size_t *, loff_t *);
+ void __user *, size_t *);
extern int proc_dointvec_jiffies(ctl_table *, int, struct file *,
- void __user *, size_t *, loff_t *);
+ void __user *, size_t *);
extern int proc_dointvec_userhz_jiffies(ctl_table *, int, struct file *,
- void __user *, size_t *, loff_t *);
+ void __user *, size_t *);
extern int proc_doulongvec_minmax(ctl_table *, int, struct file *,
- void __user *, size_t *, loff_t *);
+ void __user *, size_t *);
extern int proc_doulongvec_ms_jiffies_minmax(ctl_table *table, int,
- struct file *, void __user *, size_t *, loff_t *);
+ struct file *, void __user *, size_t *);
extern int do_sysctl (int __user *name, int nlen,
void __user *oldval, size_t __user *oldlenp,
#ifndef _SYSFS_H_
#define _SYSFS_H_
-#define SYSFS_SUPER_MAGIC 0x62656572
-
struct kobject;
struct module;
#include <linux/types.h>
struct task_delay_info {
-#if defined CONFIG_DELAY_ACCT
+#ifdef CONFIG_DELAY_ACCT
/* delay statistics in usecs */
uint64_t waitcpu_total;
uint64_t runcpu_total;
uint32_t runs;
uint32_t num_iowaits;
uint32_t num_memwaits;
-#endif
+#endif
};
#endif // _LINUX_TASKDELAYS_H
/* FIFO of established children */
struct open_request *accept_queue;
#ifndef CONFIG_ACCEPT_QUEUES
- struct open_request *accept_queue_tail;
+ struct open_request *accept_queue_tail;
#endif
unsigned int keepalive_time; /* time before keep alive takes place */
unsigned int keepalive_intvl; /* time interval between keep alive probes */
__u32 cnt; /* increase cwnd by 1 after this number of ACKs */
__u32 last_max_cwnd; /* last maximium snd_cwnd */
__u32 last_cwnd; /* the last snd_cwnd */
- __u32 last_stamp; /* time when updated last_cwnd */
} bictcp;
#ifdef CONFIG_ACCEPT_QUEUES
typedef __kernel_gid32_t gid_t;
typedef __kernel_uid16_t uid16_t;
typedef __kernel_gid16_t gid16_t;
-
-/* The following two typedef's are for vserver */
typedef unsigned int xid_t;
typedef unsigned int nid_t;
--- /dev/null
+#error THIS FILE SHOULD NO LONGER BE USED
+
+
+#ifndef _VX_INLINE_H
+#define _VX_INLINE_H
+
+
+// #define VX_DEBUG
+
+#include <linux/kernel.h>
+#include <linux/rcupdate.h>
+#include <linux/sched.h>
+
+#include "vserver/context.h"
+#include "vserver/limit.h"
+#include "vserver/cvirt.h"
+
+#if defined(VX_DEBUG)
+#define vxdprintk(x...) printk("vxd: " x)
+#else
+#define vxdprintk(x...)
+#endif
+
+
+
+extern int proc_pid_vx_info(struct task_struct *, char *);
+
+
+#define get_vx_info(i) __get_vx_info(i,__FILE__,__LINE__)
+
+static inline struct vx_info *__get_vx_info(struct vx_info *vxi,
+ const char *_file, int _line)
+{
+ if (!vxi)
+ return NULL;
+ vxdprintk("get_vx_info(%p[#%d.%d])\t%s:%d\n",
+ vxi, vxi?vxi->vx_id:0, vxi?atomic_read(&vxi->vx_usecnt):0,
+ _file, _line);
+ atomic_inc(&vxi->vx_usecnt);
+ return vxi;
+}
+
+
+#define free_vx_info(vxi) \
+ call_rcu(&vxi->vx_rcu, rcu_free_vx_info, vxi);
+
+#define put_vx_info(i) __put_vx_info(i,__FILE__,__LINE__)
+
+static inline void __put_vx_info(struct vx_info *vxi, const char *_file, int _line)
+{
+ if (!vxi)
+ return;
+ vxdprintk("put_vx_info(%p[#%d.%d])\t%s:%d\n",
+ vxi, vxi?vxi->vx_id:0, vxi?atomic_read(&vxi->vx_usecnt):0,
+ _file, _line);
+ if (atomic_dec_and_test(&vxi->vx_usecnt))
+ free_vx_info(vxi);
+}
+
+#define set_vx_info(p,i) __set_vx_info(p,i,__FILE__,__LINE__)
+
+static inline void __set_vx_info(struct vx_info **vxp, struct vx_info *vxi,
+ const char *_file, int _line)
+{
+ BUG_ON(*vxp);
+ if (!vxi)
+ return;
+ vxdprintk("set_vx_info(%p[#%d.%d.%d])\t%s:%d\n",
+ vxi, vxi?vxi->vx_id:0,
+ vxi?atomic_read(&vxi->vx_usecnt):0,
+ vxi?atomic_read(&vxi->vx_refcnt):0,
+ _file, _line);
+ atomic_inc(&vxi->vx_refcnt);
+ *vxp = __get_vx_info(vxi, _file, _line);
+}
+
+#define clr_vx_info(p) __clr_vx_info(p,__FILE__,__LINE__)
+
+static inline void __clr_vx_info(struct vx_info **vxp,
+ const char *_file, int _line)
+{
+ struct vx_info *vxo = *vxp;
+
+ if (!vxo)
+ return;
+ vxdprintk("clr_vx_info(%p[#%d.%d.%d])\t%s:%d\n",
+ vxo, vxo?vxo->vx_id:0,
+ vxo?atomic_read(&vxo->vx_usecnt):0,
+ vxo?atomic_read(&vxo->vx_refcnt):0,
+ _file, _line);
+ *vxp = NULL;
+ wmb();
+ if (vxo && atomic_dec_and_test(&vxo->vx_refcnt))
+ unhash_vx_info(vxo);
+ __put_vx_info(vxo, _file, _line);
+}
+
+
+#define task_get_vx_info(i) __task_get_vx_info(i,__FILE__,__LINE__)
+
+static __inline__ struct vx_info *__task_get_vx_info(struct task_struct *p,
+ const char *_file, int _line)
+{
+ struct vx_info *vxi;
+
+ task_lock(p);
+ vxi = __get_vx_info(p->vx_info, _file, _line);
+ task_unlock(p);
+ return vxi;
+}
+
+
+#define vx_verify_info(p,i) \
+ __vx_verify_info((p)->vx_info,i,__FILE__,__LINE__)
+
+static __inline__ void __vx_verify_info(
+ struct vx_info *vxa, struct vx_info *vxb,
+ const char *_file, int _line)
+{
+ if (vxa == vxb)
+ return;
+ printk(KERN_ERR "vx bad assumption (%p==%p) at %s:%d\n",
+ vxa, vxb, _file, _line);
+}
+
+
+#define vx_task_xid(t) ((t)->xid)
+
+#define vx_current_xid() vx_task_xid(current)
+
+#define vx_check(c,m) __vx_check(vx_current_xid(),c,m)
+
+#define vx_weak_check(c,m) ((m) ? vx_check(c,m) : 1)
+
+
+/*
+ * check current context for ADMIN/WATCH and
+ * optionally against supplied argument
+ */
+static __inline__ int __vx_check(xid_t cid, xid_t id, unsigned int mode)
+{
+ if (mode & VX_ARG_MASK) {
+ if ((mode & VX_IDENT) &&
+ (id == cid))
+ return 1;
+ }
+ if (mode & VX_ATR_MASK) {
+ if ((mode & VX_DYNAMIC) &&
+ (id >= MIN_D_CONTEXT) &&
+ (id <= MAX_S_CONTEXT))
+ return 1;
+ if ((mode & VX_STATIC) &&
+ (id > 1) && (id < MIN_D_CONTEXT))
+ return 1;
+ }
+ return (((mode & VX_ADMIN) && (cid == 0)) ||
+ ((mode & VX_WATCH) && (cid == 1)));
+}
+
+
+#define __vx_flags(v,m,f) (((v) & (m)) ^ (f))
+
+#define __vx_task_flags(t,m,f) \
+ (((t) && ((t)->vx_info)) ? \
+ __vx_flags((t)->vx_info->vx_flags,(m),(f)) : 0)
+
+#define vx_current_flags() \
+ ((current->vx_info) ? current->vx_info->vx_flags : 0)
+
+#define vx_flags(m,f) __vx_flags(vx_current_flags(),(m),(f))
+
+
+#define vx_current_ccaps() \
+ ((current->vx_info) ? current->vx_info->vx_ccaps : 0)
+
+#define vx_ccaps(c) (vx_current_ccaps() & (c))
+
+#define vx_current_bcaps() \
+ (((current->vx_info) && !vx_flags(VXF_STATE_SETUP, 0)) ? \
+ current->vx_info->vx_bcaps : cap_bset)
+
+
+#define VX_DEBUG_ACC_RSS 0
+#define VX_DEBUG_ACC_VM 0
+#define VX_DEBUG_ACC_VML 0
+
+#undef vxdprintk
+#if (VX_DEBUG_ACC_RSS) || (VX_DEBUG_ACC_VM) || (VX_DEBUG_ACC_VML)
+#define vxdprintk(x...) printk("vxd: " x)
+#else
+#define vxdprintk(x...)
+#endif
+
+#define vx_acc_page(m, d, v, r) \
+ __vx_acc_page(&(m->v), m->mm_vx_info, r, d, __FILE__, __LINE__)
+
+static inline void __vx_acc_page(unsigned long *v, struct vx_info *vxi,
+ int res, int dir, char *file, int line)
+{
+ if (v) {
+ if (dir > 0)
+ ++(*v);
+ else
+ --(*v);
+ }
+ if (vxi) {
+ if (dir > 0)
+ atomic_inc(&vxi->limit.res[res]);
+ else
+ atomic_dec(&vxi->limit.res[res]);
+ }
+}
+
+
+#define vx_acc_pages(m, p, v, r) \
+ __vx_acc_pages(&(m->v), m->mm_vx_info, r, p, __FILE__, __LINE__)
+
+static inline void __vx_acc_pages(unsigned long *v, struct vx_info *vxi,
+ int res, int pages, char *file, int line)
+{
+ if ((res == RLIMIT_RSS && VX_DEBUG_ACC_RSS) ||
+ (res == RLIMIT_AS && VX_DEBUG_ACC_VM) ||
+ (res == RLIMIT_MEMLOCK && VX_DEBUG_ACC_VML))
+ vxdprintk("vx_acc_pages [%5d,%2d]: %5d += %5d in %s:%d\n",
+ (vxi?vxi->vx_id:-1), res,
+ (vxi?atomic_read(&vxi->limit.res[res]):0),
+ pages, file, line);
+ if (pages == 0)
+ return;
+ if (v)
+ *v += pages;
+ if (vxi)
+ atomic_add(pages, &vxi->limit.res[res]);
+}
+
+
+
+#define vx_acc_vmpage(m,d) vx_acc_page(m, d, total_vm, RLIMIT_AS)
+#define vx_acc_vmlpage(m,d) vx_acc_page(m, d, locked_vm, RLIMIT_MEMLOCK)
+#define vx_acc_rsspage(m,d) vx_acc_page(m, d, rss, RLIMIT_RSS)
+
+#define vx_acc_vmpages(m,p) vx_acc_pages(m, p, total_vm, RLIMIT_AS)
+#define vx_acc_vmlpages(m,p) vx_acc_pages(m, p, locked_vm, RLIMIT_MEMLOCK)
+#define vx_acc_rsspages(m,p) vx_acc_pages(m, p, rss, RLIMIT_RSS)
+
+#define vx_pages_add(s,r,p) __vx_acc_pages(0, s, r, p, __FILE__, __LINE__)
+#define vx_pages_sub(s,r,p) __vx_pages_add(s, r, -(p))
+
+#define vx_vmpages_inc(m) vx_acc_vmpage(m, 1)
+#define vx_vmpages_dec(m) vx_acc_vmpage(m,-1)
+#define vx_vmpages_add(m,p) vx_acc_vmpages(m, p)
+#define vx_vmpages_sub(m,p) vx_acc_vmpages(m,-(p))
+
+#define vx_vmlocked_inc(m) vx_acc_vmlpage(m, 1)
+#define vx_vmlocked_dec(m) vx_acc_vmlpage(m,-1)
+#define vx_vmlocked_add(m,p) vx_acc_vmlpages(m, p)
+#define vx_vmlocked_sub(m,p) vx_acc_vmlpages(m,-(p))
+
+#define vx_rsspages_inc(m) vx_acc_rsspage(m, 1)
+#define vx_rsspages_dec(m) vx_acc_rsspage(m,-1)
+#define vx_rsspages_add(m,p) vx_acc_rsspages(m, p)
+#define vx_rsspages_sub(m,p) vx_acc_rsspages(m,-(p))
+
+
+
+#define vx_pages_avail(m, p, r) \
+ __vx_pages_avail((m)->mm_vx_info, (r), (p), __FILE__, __LINE__)
+
+static inline int __vx_pages_avail(struct vx_info *vxi,
+ int res, int pages, char *file, int line)
+{
+ if ((res == RLIMIT_RSS && VX_DEBUG_ACC_RSS) ||
+ (res == RLIMIT_AS && VX_DEBUG_ACC_VM) ||
+ (res == RLIMIT_MEMLOCK && VX_DEBUG_ACC_VML))
+ printk("vx_pages_avail[%5d,%2d]: %5ld > %5d + %5d in %s:%d\n",
+ (vxi?vxi->vx_id:-1), res,
+ (vxi?vxi->limit.rlim[res]:1),
+ (vxi?atomic_read(&vxi->limit.res[res]):0),
+ pages, file, line);
+ if (!vxi)
+ return 1;
+ if (vxi->limit.rlim[res] == RLIM_INFINITY)
+ return 1;
+ if (atomic_read(&vxi->limit.res[res]) + pages < vxi->limit.rlim[res])
+ return 1;
+ return 0;
+}
+
+#define vx_vmpages_avail(m,p) vx_pages_avail(m, p, RLIMIT_AS)
+#define vx_vmlocked_avail(m,p) vx_pages_avail(m, p, RLIMIT_MEMLOCK)
+#define vx_rsspages_avail(m,p) vx_pages_avail(m, p, RLIMIT_RSS)
+
+/* file limits */
+
+#define VX_DEBUG_ACC_FILE 0
+#define VX_DEBUG_ACC_OPENFD 0
+
+#undef vxdprintk
+#if (VX_DEBUG_ACC_FILE) || (VX_DEBUG_ACC_OPENFD)
+#define vxdprintk(x...) printk("vxd: " x)
+#else
+#define vxdprintk(x...)
+#endif
+
+
+#define vx_acc_cres(v,d,r) \
+ __vx_acc_cres((v), (r), (d), __FILE__, __LINE__)
+
+static inline void __vx_acc_cres(struct vx_info *vxi,
+ int res, int dir, char *file, int line)
+{
+ if (vxi) {
+ if ((res == RLIMIT_NOFILE && VX_DEBUG_ACC_FILE) ||
+ (res == RLIMIT_OPENFD && VX_DEBUG_ACC_OPENFD))
+ printk("vx_acc_cres[%5d,%2d]: %5d%s in %s:%d\n",
+ (vxi?vxi->vx_id:-1), res,
+ (vxi?atomic_read(&vxi->limit.res[res]):0),
+ (dir>0)?"++":"--", file, line);
+ if (dir > 0)
+ atomic_inc(&vxi->limit.res[res]);
+ else
+ atomic_dec(&vxi->limit.res[res]);
+ }
+}
+
+#define vx_files_inc(f) vx_acc_cres(current->vx_info, 1, RLIMIT_NOFILE)
+#define vx_files_dec(f) vx_acc_cres(current->vx_info,-1, RLIMIT_NOFILE)
+
+#define vx_openfd_inc(f) vx_acc_cres(current->vx_info, 1, RLIMIT_OPENFD)
+#define vx_openfd_dec(f) vx_acc_cres(current->vx_info,-1, RLIMIT_OPENFD)
+
+#define vx_cres_avail(v,n,r) \
+ __vx_cres_avail((v), (r), (n), __FILE__, __LINE__)
+
+static inline int __vx_cres_avail(struct vx_info *vxi,
+ int res, int num, char *file, int line)
+{
+ if ((res == RLIMIT_NOFILE && VX_DEBUG_ACC_FILE) ||
+ (res == RLIMIT_OPENFD && VX_DEBUG_ACC_OPENFD))
+ printk("vx_cres_avail[%5d,%2d]: %5ld > %5d + %5d in %s:%d\n",
+ (vxi?vxi->vx_id:-1), res,
+ (vxi?vxi->limit.rlim[res]:1),
+ (vxi?atomic_read(&vxi->limit.res[res]):0),
+ num, file, line);
+ if (!vxi)
+ return 1;
+ if (vxi->limit.rlim[res] == RLIM_INFINITY)
+ return 1;
+ if (vxi->limit.rlim[res] < atomic_read(&vxi->limit.res[res]) + num)
+ return 0;
+ return 1;
+}
+
+#define vx_files_avail(n) \
+ vx_cres_avail(current->vx_info, (n), RLIMIT_NOFILE)
+
+#define vx_openfd_avail(n) \
+ vx_cres_avail(current->vx_info, (n), RLIMIT_OPENFD)
+
+/* socket limits */
+
+#define vx_sock_inc(f) vx_acc_cres(current->vx_info, 1, VLIMIT_SOCK)
+#define vx_sock_dec(f) vx_acc_cres(current->vx_info,-1, VLIMIT_SOCK)
+
+#define vx_sock_avail(n) \
+ vx_cres_avail(current->vx_info, (n), VLIMIT_SOCK)
+
+/* procfs ioctls */
+
+#define FIOC_GETXFLG _IOR('x', 5, long)
+#define FIOC_SETXFLG _IOW('x', 6, long)
+
+/* utsname virtualization */
+
+static inline struct new_utsname *vx_new_utsname(void)
+{
+ if (current->vx_info)
+		return &current->vx_info->cvirt.utsname;
+ return &system_utsname;
+}
+
+#define vx_new_uts(x) ((vx_new_utsname())->x)
+
+/* generic flag merging */
+
+#define vx_mask_flags(v,f,m) (((v) & ~(m)) | ((f) & (m)))
+
+#define vx_mask_mask(v,f,m) (((v) & ~(m)) | ((v) & (f) & (m)))
+
+
+/* socket accounting */
+
+#include <linux/socket.h>
+
+static inline int vx_sock_type(int family)
+{
+ int type = 4;
+
+ if (family > 0 && family < 3)
+ type = family;
+ else if (family == PF_INET6)
+ type = 3;
+ return type;
+}
+
+#define vx_acc_sock(v,f,p,s) \
+ __vx_acc_sock((v), (f), (p), (s), __FILE__, __LINE__)
+
+static inline void __vx_acc_sock(struct vx_info *vxi,
+ int family, int pos, int size, char *file, int line)
+{
+ if (vxi) {
+ int type = vx_sock_type(family);
+
+ atomic_inc(&vxi->cacct.sock[type][pos].count);
+ atomic_add(size, &vxi->cacct.sock[type][pos].total);
+ }
+}
+
+#define vx_sock_recv(sk,s) \
+ vx_acc_sock((sk)->sk_vx_info, (sk)->sk_family, 0, (s))
+#define vx_sock_send(sk,s) \
+ vx_acc_sock((sk)->sk_vx_info, (sk)->sk_family, 1, (s))
+#define vx_sock_fail(sk,s) \
+ vx_acc_sock((sk)->sk_vx_info, (sk)->sk_family, 2, (s))
+
+
+#define sock_vx_init(s) do { \
+ (s)->sk_xid = 0; \
+ (s)->sk_vx_info = NULL; \
+ } while (0)
+
+
+/* pid faking stuff */
+
+
+#define vx_map_tgid(v,p) \
+ __vx_map_tgid((v), (p), __FILE__, __LINE__)
+
+static inline int __vx_map_tgid(struct vx_info *vxi, int pid,
+ char *file, int line)
+{
+ if (vxi && __vx_flags(vxi->vx_flags, VXF_INFO_INIT, 0)) {
+ vxdprintk("vx_map_tgid: %p/%llx: %d -> %d in %s:%d\n",
+ vxi, vxi->vx_flags, pid,
+ (pid == vxi->vx_initpid)?1:pid,
+ file, line);
+ if (pid == vxi->vx_initpid)
+ return 1;
+ }
+ return pid;
+}
+
+#define vx_rmap_tgid(v,p) \
+ __vx_rmap_tgid((v), (p), __FILE__, __LINE__)
+
+static inline int __vx_rmap_tgid(struct vx_info *vxi, int pid,
+ char *file, int line)
+{
+ if (vxi && __vx_flags(vxi->vx_flags, VXF_INFO_INIT, 0)) {
+ vxdprintk("vx_rmap_tgid: %p/%llx: %d -> %d in %s:%d\n",
+ vxi, vxi->vx_flags, pid,
+ (pid == 1)?vxi->vx_initpid:pid,
+ file, line);
+ if ((pid == 1) && vxi->vx_initpid)
+ return vxi->vx_initpid;
+ }
+ return pid;
+}
+
+#undef vxdprintk
+#define vxdprintk(x...)
+
+#endif
#include "vserver/context.h"
+// #define VX_DEBUG
+
+
+#if defined(VX_DEBUG)
+#define vxdprintk(x...) printk("vxd: " x)
+#else
+#define vxdprintk(x...)
+#endif
+
#define vx_task_xid(t) ((t)->xid)
#define _VX_VS_CONTEXT_H
+// #define VX_DEBUG
+
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include "vserver/context.h"
-#include "vserver/debug.h"
+
+#undef vxdprintk
+#if defined(VX_DEBUG)
+#define vxdprintk(x...) printk("vxd: " x)
+#else
+#define vxdprintk(x...)
+#endif
+
extern int proc_pid_vx_info(struct task_struct *, char *);
{
if (!vxi)
return NULL;
- vxlprintk(VXD_CBIT(xid, 2), "get_vx_info(%p[#%d.%d])",
+ vxdprintk("get_vx_info(%p[#%d.%d])\t%s:%d\n",
vxi, vxi?vxi->vx_id:0, vxi?atomic_read(&vxi->vx_usecnt):0,
_file, _line);
atomic_inc(&vxi->vx_usecnt);
{
if (!vxi)
return;
- vxlprintk(VXD_CBIT(xid, 2), "put_vx_info(%p[#%d.%d])",
+ vxdprintk("put_vx_info(%p[#%d.%d])\t%s:%d\n",
vxi, vxi?vxi->vx_id:0, vxi?atomic_read(&vxi->vx_usecnt):0,
_file, _line);
if (atomic_dec_and_test(&vxi->vx_usecnt))
BUG_ON(*vxp);
if (!vxi)
return;
- vxlprintk(VXD_CBIT(xid, 3), "set_vx_info(%p[#%d.%d.%d])",
+ vxdprintk("set_vx_info(%p[#%d.%d.%d])\t%s:%d\n",
vxi, vxi?vxi->vx_id:0,
vxi?atomic_read(&vxi->vx_usecnt):0,
vxi?atomic_read(&vxi->vx_refcnt):0,
if (!vxo)
return;
- vxlprintk(VXD_CBIT(xid, 3), "clr_vx_info(%p[#%d.%d.%d])",
+ vxdprintk("clr_vx_info(%p[#%d.%d.%d])\t%s:%d\n",
vxo, vxo?vxo->vx_id:0,
vxo?atomic_read(&vxo->vx_usecnt):0,
vxo?atomic_read(&vxo->vx_refcnt):0,
struct vx_info *vxi;
task_lock(p);
- vxlprintk(VXD_CBIT(xid, 5), "task_get_vx_info(%p)",
- p, _file, _line);
vxi = __get_vx_info(p->vx_info, _file, _line);
task_unlock(p);
return vxi;
}
+#undef vxdprintk
+#define vxdprintk(x...)
+
#else
#warning duplicate inclusion
#endif
#ifndef _VX_VS_CVIRT_H
#define _VX_VS_CVIRT_H
+
+// #define VX_DEBUG
+
#include "vserver/cvirt.h"
-#include "vserver/debug.h"
#include "vs_base.h"
+#if defined(VX_DEBUG)
+#define vxdprintk(x...) printk("vxd: " x)
+#else
+#define vxdprintk(x...)
+#endif
+
/* utsname virtualization */
char *file, int line)
{
if (vxi && __vx_flags(vxi->vx_flags, VXF_INFO_INIT, 0)) {
- vxlprintk(VXD_CBIT(cvirt, 2),
- "vx_map_tgid: %p/%llx: %d -> %d",
+ vxdprintk("vx_map_tgid: %p/%llx: %d -> %d in %s:%d\n",
vxi, vxi->vx_flags, pid,
(pid == vxi->vx_initpid)?1:pid,
file, line);
char *file, int line)
{
if (vxi && __vx_flags(vxi->vx_flags, VXF_INFO_INIT, 0)) {
- vxlprintk(VXD_CBIT(cvirt, 2),
- "vx_rmap_tgid: %p/%llx: %d -> %d",
+ vxdprintk("vx_rmap_tgid: %p/%llx: %d -> %d in %s:%d\n",
vxi, vxi->vx_flags, pid,
(pid == 1)?vxi->vx_initpid:pid,
file, line);
return pid;
}
+#undef vxdprintk
+#define vxdprintk(x...)
#else
#warning duplicate inclusion
#ifndef _VX_VS_DLIMIT_H
#define _VX_VS_DLIMIT_H
+
+// #define VX_DEBUG
+
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include "vserver/context.h"
#include "vserver/dlimit.h"
-#include "vserver/debug.h"
+
+#if defined(VX_DEBUG)
+#define vxdprintk(x...) printk("vxd: " x)
+#else
+#define vxdprintk(x...)
+#endif
#define get_dl_info(i) __get_dl_info(i,__FILE__,__LINE__)
{
if (!dli)
return NULL;
- vxlprintk(VXD_CBIT(dlim, 4), "get_dl_info(%p[#%d.%d])",
+ vxdprintk("get_dl_info(%p[#%d.%d])\t%s:%d\n",
dli, dli?dli->dl_xid:0, dli?atomic_read(&dli->dl_usecnt):0,
_file, _line);
atomic_inc(&dli->dl_usecnt);
#define put_dl_info(i) __put_dl_info(i,__FILE__,__LINE__)
-static inline void __put_dl_info(struct dl_info *dli,
- const char *_file, int _line)
+static inline void __put_dl_info(struct dl_info *dli, const char *_file, int _line)
{
if (!dli)
return;
- vxlprintk(VXD_CBIT(dlim, 4), "put_dl_info(%p[#%d.%d])",
+ vxdprintk("put_dl_info(%p[#%d.%d])\t%s:%d\n",
dli, dli?dli->dl_xid:0, dli?atomic_read(&dli->dl_usecnt):0,
_file, _line);
if (atomic_dec_and_test(&dli->dl_usecnt))
}
+extern int vx_debug_dlimit;
+
#define __dlimit_char(d) ((d)?'*':' ')
static inline int __dl_alloc_space(struct super_block *sb,
spin_unlock(&dli->dl_lock);
put_dl_info(dli);
out:
- vxlprintk(VXD_CBIT(dlim, 1),
- "ALLOC (%p,#%d)%c %lld bytes (%d)",
- sb, xid, __dlimit_char(dli), nr, ret, file, line);
+ if (vx_debug_dlimit)
+ printk("ALLOC (%p,#%d)%c %lld bytes (%d)@ %s:%d\n",
+ sb, xid, __dlimit_char(dli), nr, ret, file, line);
return ret;
}
static inline void __dl_free_space(struct super_block *sb,
- xid_t xid, dlsize_t nr, const char *_file, int _line)
+ xid_t xid, dlsize_t nr, const char *file, int line)
{
struct dl_info *dli = NULL;
goto out;
spin_lock(&dli->dl_lock);
- if (dli->dl_space_used > nr)
- dli->dl_space_used -= nr;
- else
- dli->dl_space_used = 0;
+ dli->dl_space_used -= nr;
spin_unlock(&dli->dl_lock);
put_dl_info(dli);
out:
- vxlprintk(VXD_CBIT(dlim, 1),
- "FREE (%p,#%d)%c %lld bytes",
- sb, xid, __dlimit_char(dli), nr, _file, _line);
+ if (vx_debug_dlimit)
+ printk("FREE (%p,#%d)%c %lld bytes @ %s:%d\n",
+ sb, xid, __dlimit_char(dli), nr, file, line);
}
static inline int __dl_alloc_inode(struct super_block *sb,
- xid_t xid, const char *_file, int _line)
+ xid_t xid, const char *file, int line)
{
struct dl_info *dli;
int ret = 0;
ret = (dli->dl_inodes_used >= dli->dl_inodes_total);
if (!ret)
dli->dl_inodes_used++;
-#if 0
- else
- printk("VSW: DLIMIT hit (%p,#%d), inode %d>=%d @ %s:%d\n",
- sb, xid,
- dli->dl_inodes_used, dli->dl_inodes_total,
- file, line);
-#endif
spin_unlock(&dli->dl_lock);
put_dl_info(dli);
out:
- vxlprintk(VXD_CBIT(dlim, 0),
- "ALLOC (%p,#%d)%c inode (%d)",
- sb, xid, __dlimit_char(dli), ret, _file, _line);
+ if (vx_debug_dlimit)
+ printk("ALLOC (%p,#%d)%c inode (%d)@ %s:%d\n",
+ sb, xid, __dlimit_char(dli), ret, file, line);
return ret;
}
static inline void __dl_free_inode(struct super_block *sb,
- xid_t xid, const char *_file, int _line)
+ xid_t xid, const char *file, int line)
{
struct dl_info *dli;
goto out;
spin_lock(&dli->dl_lock);
- if (dli->dl_inodes_used > 1)
- dli->dl_inodes_used--;
- else
- dli->dl_inodes_used = 0;
+ dli->dl_inodes_used--;
spin_unlock(&dli->dl_lock);
put_dl_info(dli);
out:
- vxlprintk(VXD_CBIT(dlim, 0),
- "FREE (%p,#%d)%c inode",
- sb, xid, __dlimit_char(dli), _file, _line);
+ if (vx_debug_dlimit)
+ printk("FREE (%p,#%d)%c inode @ %s:%d\n",
+ sb, xid, __dlimit_char(dli), file, line);
}
-static inline void __dl_adjust_block(struct super_block *sb, xid_t xid,
- unsigned int *free_blocks, unsigned int *root_blocks,
- const char *_file, int _line)
-{
- struct dl_info *dli;
- uint64_t broot, bfree;
-
- dli = locate_dl_info(sb, xid);
- if (!dli)
- return;
-
- spin_lock(&dli->dl_lock);
- broot = (dli->dl_space_total -
- (dli->dl_space_total >> 10) * dli->dl_nrlmult)
- >> sb->s_blocksize_bits;
- bfree = (dli->dl_space_total - dli->dl_space_used)
- >> sb->s_blocksize_bits;
- spin_unlock(&dli->dl_lock);
-
- vxlprintk(VXD_CBIT(dlim, 2),
- "ADJUST: %lld,%lld on %d,%d [mult=%d]",
- bfree, broot, *free_blocks, *root_blocks,
- dli->dl_nrlmult, _file, _line);
- if (free_blocks) {
- if (*free_blocks > bfree)
- *free_blocks = bfree;
- }
- if (root_blocks) {
- if (*root_blocks > broot)
- *root_blocks = broot;
- }
- put_dl_info(dli);
-}
#define DLIMIT_ALLOC_BLOCK(sb, xid, nr) \
__dl_free_inode(sb, xid, __FILE__, __LINE__ )
-#define DLIMIT_ADJUST_BLOCK(sb, xid, fb, rb) \
- __dl_adjust_block(sb, xid, fb, rb, __FILE__, __LINE__ )
+#define DLIMIT_ADJUST_BLOCK(sb, xid, fb, rb)
#else
#ifndef _VX_VS_LIMIT_H
#define _VX_VS_LIMIT_H
+
+// #define VX_DEBUG
+
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include "vserver/context.h"
#include "vserver/limit.h"
-#include "vserver/debug.h"
/* file limits */
+#define VX_DEBUG_ACC_FILE 0
+#define VX_DEBUG_ACC_OPENFD 0
+
+#if (VX_DEBUG_ACC_FILE) || (VX_DEBUG_ACC_OPENFD)
+#define vxdprintk(x...) printk("vxd: " x)
+#else
+#define vxdprintk(x...)
+#endif
+
+
+#define vx_acc_cres(v,d,r) \
+ __vx_acc_cres((v), (r), (d), __FILE__, __LINE__)
static inline void __vx_acc_cres(struct vx_info *vxi,
- int res, int dir, void *_data, char *_file, int _line)
+ int res, int dir, char *file, int line)
{
- if (VXD_RLIMIT(res, RLIMIT_NOFILE) ||
- VXD_RLIMIT(res, RLIMIT_NPROC) ||
- VXD_RLIMIT(res, VLIMIT_NSOCK))
- vxlprintk(1, "vx_acc_cres[%5d,%s,%2d]: %5d%s (%p)",
- (vxi?vxi->vx_id:-1), vlimit_name[res], res,
- (vxi?atomic_read(&vxi->limit.rcur[res]):0),
- (dir>0)?"++":"--", _data, _file, _line);
if (vxi) {
+ if ((res == RLIMIT_NOFILE && VX_DEBUG_ACC_FILE) ||
+ (res == RLIMIT_OPENFD && VX_DEBUG_ACC_OPENFD))
+ printk("vx_acc_cres[%5d,%2d]: %5d%s in %s:%d\n",
+ (vxi?vxi->vx_id:-1), res,
+ (vxi?atomic_read(&vxi->limit.rcur[res]):0),
+ (dir>0)?"++":"--", file, line);
if (dir > 0)
atomic_inc(&vxi->limit.rcur[res]);
else
}
}
-#define vx_acc_cres(v,d,p,r) \
- __vx_acc_cres((v), (r), (d), (p), __FILE__, __LINE__)
+#define vx_nproc_inc(p) vx_acc_cres(current->vx_info, 1, RLIMIT_NPROC)
+#define vx_nproc_dec(p) vx_acc_cres(current->vx_info,-1, RLIMIT_NPROC)
-#define vx_nproc_inc(p) \
- vx_acc_cres(current->vx_info, 1, (p), RLIMIT_NPROC)
-#define vx_nproc_dec(p) \
- vx_acc_cres(current->vx_info,-1, (p), RLIMIT_NPROC)
+#define vx_files_inc(f) vx_acc_cres(current->vx_info, 1, RLIMIT_NOFILE)
+#define vx_files_dec(f) vx_acc_cres(current->vx_info,-1, RLIMIT_NOFILE)
-#define vx_files_inc(f) \
- vx_acc_cres(current->vx_info, 1, (f), RLIMIT_NOFILE)
-#define vx_files_dec(f) \
- vx_acc_cres(current->vx_info,-1, (f), RLIMIT_NOFILE)
+#define vx_openfd_inc(f) vx_acc_cres(current->vx_info, 1, RLIMIT_OPENFD)
+#define vx_openfd_dec(f) vx_acc_cres(current->vx_info,-1, RLIMIT_OPENFD)
/*
#define vx_openfd_inc(f) do { \
__vx_cres_avail((v), (r), (n), __FILE__, __LINE__)
static inline int __vx_cres_avail(struct vx_info *vxi,
- int res, int num, char *_file, int _line)
+ int res, int num, char *file, int line)
{
unsigned long value;
- if (VXD_RLIMIT(res, RLIMIT_NOFILE) ||
- VXD_RLIMIT(res, RLIMIT_NPROC) ||
- VXD_RLIMIT(res, VLIMIT_NSOCK))
- vxlprintk(1, "vx_cres_avail[%5d,%s,%2d]: %5ld > %5d + %5d",
- (vxi?vxi->vx_id:-1), vlimit_name[res], res,
+ if ((res == RLIMIT_NOFILE && VX_DEBUG_ACC_FILE) ||
+ (res == RLIMIT_OPENFD && VX_DEBUG_ACC_OPENFD))
+ printk("vx_cres_avail[%5d,%2d]: %5ld > %5d + %5d in %s:%d\n",
+ (vxi?vxi->vx_id:-1), res,
(vxi?vxi->limit.rlim[res]:1),
(vxi?atomic_read(&vxi->limit.rcur[res]):0),
- num, _file, _line);
+ num, file, line);
if (!vxi)
return 1;
value = atomic_read(&vxi->limit.rcur[res]);
#define vx_files_avail(n) \
vx_cres_avail(current->vx_info, (n), RLIMIT_NOFILE)
+#define vx_openfd_avail(n) \
+ vx_cres_avail(current->vx_info, (n), RLIMIT_OPENFD)
+
/* socket limits */
-#define vx_sock_inc(s) \
- vx_acc_cres((s)->sk_vx_info, 1, (s), VLIMIT_NSOCK)
-#define vx_sock_dec(s) \
- vx_acc_cres((s)->sk_vx_info,-1, (s), VLIMIT_NSOCK)
+#define vx_sock_inc(f) vx_acc_cres(current->vx_info, 1, VLIMIT_SOCK)
+#define vx_sock_dec(f) vx_acc_cres(current->vx_info,-1, VLIMIT_SOCK)
#define vx_sock_avail(n) \
- vx_cres_avail(current->vx_info, (n), VLIMIT_NSOCK)
+ vx_cres_avail(current->vx_info, (n), VLIMIT_SOCK)
+
#else
#warning duplicate inclusion
#ifndef _VX_VS_MEMORY_H
#define _VX_VS_MEMORY_H
+
+// #define VX_DEBUG
+
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include "vserver/context.h"
#include "vserver/limit.h"
-#include "vserver/debug.h"
+#define VX_DEBUG_ACC_RSS 0
+#define VX_DEBUG_ACC_VM 0
+#define VX_DEBUG_ACC_VML 0
+
+#if (VX_DEBUG_ACC_RSS) || (VX_DEBUG_ACC_VM) || (VX_DEBUG_ACC_VML)
+#define vxdprintk(x...) printk("vxd: " x)
+#else
+#define vxdprintk(x...)
+#endif
+
#define vx_acc_page(m, d, v, r) \
__vx_acc_page(&(m->v), m->mm_vx_info, r, d, __FILE__, __LINE__)
static inline void __vx_acc_page(unsigned long *v, struct vx_info *vxi,
int res, int dir, char *file, int line)
{
- if (VXD_RLIMIT(res, RLIMIT_RSS) ||
- VXD_RLIMIT(res, RLIMIT_AS) ||
- VXD_RLIMIT(res, RLIMIT_MEMLOCK))
- vxlprintk(1, "vx_acc_page[%5d,%s,%2d]: %5d%s",
- (vxi?vxi->vx_id:-1), vlimit_name[res], res,
- (vxi?atomic_read(&vxi->limit.rcur[res]):0),
- (dir?"++":"--"), file, line);
if (v) {
if (dir > 0)
++(*v);
__vx_acc_pages(&(m->v), m->mm_vx_info, r, p, __FILE__, __LINE__)
static inline void __vx_acc_pages(unsigned long *v, struct vx_info *vxi,
- int res, int pages, char *_file, int _line)
+ int res, int pages, char *file, int line)
{
- if (VXD_RLIMIT(res, RLIMIT_RSS) ||
- VXD_RLIMIT(res, RLIMIT_AS) ||
- VXD_RLIMIT(res, RLIMIT_MEMLOCK))
- vxlprintk(1, "vx_acc_pages[%5d,%s,%2d]: %5d += %5d",
- (vxi?vxi->vx_id:-1), vlimit_name[res], res,
- (vxi?atomic_read(&vxi->limit.rcur[res]):0),
- pages, _file, _line);
+ if ((res == RLIMIT_RSS && VX_DEBUG_ACC_RSS) ||
+ (res == RLIMIT_AS && VX_DEBUG_ACC_VM) ||
+ (res == RLIMIT_MEMLOCK && VX_DEBUG_ACC_VML))
+ vxdprintk("vx_acc_pages [%5d,%2d]: %5d += %5d in %s:%d\n",
+ (vxi?vxi->vx_id:-1), res,
+ (vxi?atomic_read(&vxi->limit.res[res]):0),
+ pages, file, line);
if (pages == 0)
return;
if (v)
#define vx_acc_rsspages(m,p) vx_acc_pages(m, p, rss, RLIMIT_RSS)
#define vx_pages_add(s,r,p) __vx_acc_pages(0, s, r, p, __FILE__, __LINE__)
-#define vx_pages_sub(s,r,p) vx_pages_add(s, r, -(p))
+#define vx_pages_sub(s,r,p) __vx_pages_add(s, r, -(p))
#define vx_vmpages_inc(m) vx_acc_vmpage(m, 1)
#define vx_vmpages_dec(m) vx_acc_vmpage(m,-1)
__vx_pages_avail((m)->mm_vx_info, (r), (p), __FILE__, __LINE__)
static inline int __vx_pages_avail(struct vx_info *vxi,
- int res, int pages, char *_file, int _line)
+ int res, int pages, char *file, int line)
{
unsigned long value;
- if (VXD_RLIMIT(res, RLIMIT_RSS) ||
- VXD_RLIMIT(res, RLIMIT_AS) ||
- VXD_RLIMIT(res, RLIMIT_MEMLOCK))
- vxlprintk(1, "vx_pages_avail[%5d,%s,%2d]: %5ld > %5d + %5d",
- (vxi?vxi->vx_id:-1), vlimit_name[res], res,
+ if ((res == RLIMIT_RSS && VX_DEBUG_ACC_RSS) ||
+ (res == RLIMIT_AS && VX_DEBUG_ACC_VM) ||
+ (res == RLIMIT_MEMLOCK && VX_DEBUG_ACC_VML))
+ printk("vx_pages_avail[%5d,%2d]: %5ld > %5d + %5d in %s:%d\n",
+ (vxi?vxi->vx_id:-1), res,
(vxi?vxi->limit.rlim[res]:1),
(vxi?atomic_read(&vxi->limit.rcur[res]):0),
- pages, _file, _line);
+ pages, file, line);
if (!vxi)
return 1;
value = atomic_read(&vxi->limit.rcur[res]);
#ifndef _NX_VS_NETWORK_H
#define _NX_VS_NETWORK_H
+
+// #define NX_DEBUG
+
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include "vserver/network.h"
-#include "vserver/debug.h"
+
+#if defined(NX_DEBUG)
+#define nxdprintk(x...) printk("nxd: " x)
+#else
+#define nxdprintk(x...)
+#endif
extern int proc_pid_nx_info(struct task_struct *, char *);
{
if (!nxi)
return NULL;
- vxlprintk(VXD_CBIT(nid, 2), "get_nx_info(%p[#%d.%d])",
+ nxdprintk("get_nx_info(%p[#%d.%d])\t%s:%d\n",
nxi, nxi?nxi->nx_id:0, nxi?atomic_read(&nxi->nx_usecnt):0,
_file, _line);
atomic_inc(&nxi->nx_usecnt);
}
-#define free_nx_info(i) \
- call_rcu(&i->nx_rcu, rcu_free_nx_info);
+#define free_nx_info(nxi) \
+ call_rcu(&nxi->nx_rcu, rcu_free_nx_info);
#define put_nx_info(i) __put_nx_info(i,__FILE__,__LINE__)
{
if (!nxi)
return;
- vxlprintk(VXD_CBIT(nid, 2), "put_nx_info(%p[#%d.%d])",
+ nxdprintk("put_nx_info(%p[#%d.%d])\t%s:%d\n",
nxi, nxi?nxi->nx_id:0, nxi?atomic_read(&nxi->nx_usecnt):0,
_file, _line);
if (atomic_dec_and_test(&nxi->nx_usecnt))
BUG_ON(*nxp);
if (!nxi)
return;
- vxlprintk(VXD_CBIT(nid, 3), "set_nx_info(%p[#%d.%d.%d])",
+ nxdprintk("set_nx_info(%p[#%d.%d.%d])\t%s:%d\n",
nxi, nxi?nxi->nx_id:0,
nxi?atomic_read(&nxi->nx_usecnt):0,
nxi?atomic_read(&nxi->nx_refcnt):0,
if (!nxo)
return;
- vxlprintk(VXD_CBIT(nid, 3), "clr_nx_info(%p[#%d.%d.%d])",
+ nxdprintk("clr_nx_info(%p[#%d.%d.%d])\t%s:%d\n",
nxo, nxo?nxo->nx_id:0,
nxo?atomic_read(&nxo->nx_usecnt):0,
nxo?atomic_read(&nxo->nx_refcnt):0,
task_lock(p);
nxi = __get_nx_info(p->nx_info, _file, _line);
- vxlprintk(VXD_CBIT(nid, 5), "task_get_nx_info(%p)",
- p, _file, _line);
task_unlock(p);
return nxi;
}
#define nx_weak_check(c,m) ((m) ? nx_check(c,m) : 1)
+#undef nxdprintk
+#define nxdprintk(x...)
+
#define __nx_flags(v,m,f) (((v) & (m)) ^ (f))
-#ifndef _VX_VS_SOCKET_H
-#define _VX_VS_SOCKET_H
+#ifndef _VX_VS_LIMIT_H
+#define _VX_VS_LIMIT_H
// #define VX_DEBUG
#include "vserver/context.h"
#include "vserver/network.h"
-#include "vserver/debug.h"
/* socket accounting */
#define VX_ATR_MASK 0x0F00
-struct rcu_head;
-
extern void rcu_free_vx_info(struct rcu_head *);
extern void unhash_vx_info(struct vx_info *);
-/* _VX_CVIRT_H defined below */
-
#if defined(__KERNEL__) && defined(_VX_INFO_DEF_)
#include <linux/utsname.h>
struct _vx_cvirt {
int max_threads;
+ unsigned int bias_cswtch;
struct timespec bias_idle;
+ struct timespec bias_tp;
uint64_t bias_jiffies;
struct new_utsname utsname;
{
uint64_t idle_jiffies = vx_idle_jiffies();
+ // new->virt.bias_cswtch = kstat.context_swtch;
cvirt->bias_jiffies = get_jiffies_64();
+
jiffies_to_timespec(idle_jiffies, &cvirt->bias_idle);
+ do_posix_clock_monotonic_gettime(&cvirt->bias_tp);
down_read(&uts_sem);
cvirt->utsname = system_utsname;
static inline int vx_info_proc_cvirt(struct _vx_cvirt *cvirt, char *buffer)
{
int length = 0;
- length += sprintf(buffer + length,
- "BiasJiffies:\t%lld\n", (long long int)cvirt->bias_jiffies);
- length += sprintf(buffer + length,
- "SysName:\t%.*s\n"
- "NodeName:\t%.*s\n"
- "Release:\t%.*s\n"
- "Version:\t%.*s\n"
- "Machine:\t%.*s\n"
- "DomainName:\t%.*s\n"
- ,__NEW_UTS_LEN, cvirt->utsname.sysname
- ,__NEW_UTS_LEN, cvirt->utsname.nodename
- ,__NEW_UTS_LEN, cvirt->utsname.release
- ,__NEW_UTS_LEN, cvirt->utsname.version
- ,__NEW_UTS_LEN, cvirt->utsname.machine
- ,__NEW_UTS_LEN, cvirt->utsname.domainname
- );
return length;
}
+++ /dev/null
-#ifndef _VX_DEBUG_H
-#define _VX_DEBUG_H
-
-
-extern unsigned int vx_debug_switch;
-extern unsigned int vx_debug_xid;
-extern unsigned int vx_debug_nid;
-extern unsigned int vx_debug_net;
-extern unsigned int vx_debug_limit;
-extern unsigned int vx_debug_dlim;
-extern unsigned int vx_debug_cvirt;
-
-
-#define VXD_CBIT(n,m) (vx_debug_ ## n & (1 << (m)))
-#define VXD_CMIN(n,m) (vx_debug_ ## n > (m))
-#define VXD_MASK(n,m) (vx_debug_ ## n & (m))
-
-// #define VXD_HERE __FILE__, __LINE__
-
-
-#ifdef CONFIG_VSERVER_DEBUG
-
-#define VX_LOGLEVEL "vxD: "
-
-#define vxdprintk(c,f,x...) \
- do { \
- if (c) \
- printk(VX_LOGLEVEL f "\n", x); \
- } while (0)
-
-#define vxlprintk(c,f,x...) \
- do { \
- if (c) \
- printk(VX_LOGLEVEL f " @%s:%d\n", x); \
- } while (0)
-
-#else
-
-#define vxdprintk(x...) do { } while (0)
-#define vxlprintk(x...) do { } while (0)
-
-#endif
-
-
-
-#endif /* _VX_DEBUG_H */
unsigned int dl_nrlmult; /* non root limit mult */
};
-struct rcu_head;
-
extern void rcu_free_dl_info(struct rcu_head *);
extern void unhash_dl_info(struct dl_info *);
#define IATTR_IMMUTABLE 0x00040000
-#ifdef CONFIG_VSERVER_PROC_SECURE
+#ifdef CONFIG_PROC_SECURE
#define IATTR_PROC_DEFAULT ( IATTR_ADMIN | IATTR_HIDE )
#define IATTR_PROC_SYMLINK ( IATTR_ADMIN )
#else
extern int vc_get_iattr(uint32_t, void __user *);
extern int vc_set_iattr(uint32_t, void __user *);
-extern int vc_iattr_ioctl(struct dentry *de,
- unsigned int cmd,
- unsigned long arg);
-
#endif /* __KERNEL__ */
/* inode ioctls */
#define FIOC_GETXFLG _IOR('x', 5, long)
#define FIOC_SETXFLG _IOW('x', 6, long)
-#define FIOC_GETIATTR _IOR('x', 7, long)
-#define FIOC_SETIATTR _IOR('x', 8, long)
-
#endif /* _VX_INODE_H */
-/* _VX_LIMIT_H defined below */
-
#if defined(__KERNEL__) && defined(_VX_INFO_DEF_)
#include <asm/atomic.h>
/* context sub struct */
-#define NUM_LIMITS 20
+#define RLIMIT_OPENFD 12
+
+#define NUM_RLIMITS 16
-#define VLIMIT_NSOCK 16
+#define VLIMIT_SOCK 16
-extern const char *vlimit_name[NUM_LIMITS];
struct _vx_limit {
atomic_t ticks;
- unsigned long rlim[NUM_LIMITS]; /* Context limit */
- unsigned long rmax[NUM_LIMITS]; /* Context maximum */
- atomic_t rcur[NUM_LIMITS]; /* Current value */
- atomic_t lhit[NUM_LIMITS]; /* Limit hits */
+ unsigned long rlim[NUM_RLIMITS]; /* Context limit */
+ unsigned long rmax[NUM_RLIMITS]; /* Context maximum */
+ atomic_t rcur[NUM_RLIMITS]; /* Current value */
+ atomic_t lhit[NUM_RLIMITS]; /* Limit hits */
};
static inline void vx_info_init_limit(struct _vx_limit *limit)
{
int lim;
- for (lim=0; lim<NUM_LIMITS; lim++) {
+ for (lim=0; lim<NUM_RLIMITS; lim++) {
limit->rlim[lim] = RLIM_INFINITY;
limit->rmax[lim] = 0;
atomic_set(&limit->rcur[lim], 0);
}
}
+extern unsigned int vx_debug_limit;
+
static inline void vx_info_exit_limit(struct _vx_limit *limit)
{
-#ifdef CONFIG_VSERVER_DEBUG
unsigned long value;
unsigned int lim;
- for (lim=0; lim<NUM_LIMITS; lim++) {
+ if (!vx_debug_limit)
+ return;
+ for (lim=0; lim<NUM_RLIMITS; lim++) {
value = atomic_read(&limit->rcur[lim]);
if (value)
- printk("!!! limit: %p[%s,%d] = %ld on exit.\n",
- limit, vlimit_name[lim], lim, value);
+ printk("!!! limit: %p[%d] = %ld on exit.\n",
+ limit, lim, value);
}
-#endif
}
static inline void vx_limit_fixup(struct _vx_limit *limit)
unsigned long value;
unsigned int lim;
- for (lim=0; lim<NUM_LIMITS; lim++) {
+ for (lim=0; lim<NUM_RLIMITS; lim++) {
value = atomic_read(&limit->rcur[lim]);
if (value > limit->rmax[lim])
limit->rmax[lim] = value;
"VML" VX_LIMIT_FMT
"RSS" VX_LIMIT_FMT
"FILES" VX_LIMIT_FMT
- "SOCK" VX_LIMIT_FMT
+ "OFD" VX_LIMIT_FMT
VX_LIMIT_ARG(RLIMIT_NPROC)
VX_LIMIT_ARG(RLIMIT_AS)
VX_LIMIT_ARG(RLIMIT_MEMLOCK)
VX_LIMIT_ARG(RLIMIT_RSS)
VX_LIMIT_ARG(RLIMIT_NOFILE)
- VX_LIMIT_ARG(VLIMIT_NSOCK)
+ VX_LIMIT_ARG(RLIMIT_OPENFD)
);
}
#include "switch.h"
-#define VXD_RLIMIT(r,l) (VXD_CBIT(limit, (l)) && ((r) == (l)))
-
/* rlimit vserver commands */
#define VCMD_get_rlimit VC_CMD(RLIMIT, 1, 0)
#endif /* _VX_LIMIT_H */
#endif
-
-
};
-struct rcu_head;
-
extern void rcu_free_nx_info(struct rcu_head *);
extern void unhash_nx_info(struct nx_info *);
-/* _VX_SCHED_H defined below */
-
#if defined(__KERNEL__) && defined(_VX_INFO_DEF_)
#include <linux/spinlock.h>
#include <linux/jiffies.h>
-#include <linux/cpumask.h>
#include <asm/atomic.h>
#include <asm/param.h>
+#include <linux/cpumask.h>
/* context sub struct */
/* interface version */
-#define VCI_VERSION 0x00010020
+#define VCI_VERSION 0x00010016
/* query version */
-#ifndef _VX_XID_H
-#define _VX_XID_H
-
-
-#define XID_TAG(in) (!(in) || \
- (((struct inode *)in)->i_sb && \
- (((struct inode *)in)->i_sb->s_flags & MS_TAGXID)))
-
+#ifndef _LINUX_XID_H_
+#define _LINUX_XID_H_
#ifdef CONFIG_INOXID_NONE
#define MAX_UID 0xFFFFFFFF
#define MAX_GID 0xFFFFFFFF
-#define INOXID_XID(tag, uid, gid, xid) (0)
+#define INOXID_XID(uid, gid, xid) (0)
-#define XIDINO_UID(tag, uid, xid) (uid)
-#define XIDINO_GID(tag, gid, xid) (gid)
+#define XIDINO_UID(uid, xid) (uid)
+#define XIDINO_GID(gid, xid) (gid)
#endif
#define MAX_UID 0xFFFFFFFF
#define MAX_GID 0x0000FFFF
-#define INOXID_XID(tag, uid, gid, xid) \
- ((tag) ? (((gid) >> 16) & 0xFFFF) : 0)
+#define INOXID_XID(uid, gid, xid) (((gid) >> 16) & 0xFFFF)
+
+#define XIDINO_UID(uid, xid) (uid)
+#define XIDINO_GID(gid, xid) (((gid) & 0xFFFF) | ((xid) << 16))
-#define XIDINO_UID(tag, uid, xid) (uid)
-#define XIDINO_GID(tag, gid, xid) \
- ((tag) ? (((gid) & 0xFFFF) | ((xid) << 16)) : (gid))
#endif
-#ifdef CONFIG_INOXID_UGID24
+#ifdef CONFIG_INOXID_GID24
#define MAX_UID 0x00FFFFFF
#define MAX_GID 0x00FFFFFF
-#define INOXID_XID(tag, uid, gid, xid) \
- ((tag) ? ((((uid) >> 16) & 0xFF00) | (((gid) >> 24) & 0xFF)) : 0)
+#define INOXID_XID(uid, gid, xid) ((((uid) >> 16) & 0xFF00) | (((gid) >> 24) & 0xFF))
-#define XIDINO_UID(tag, uid, xid) \
- ((tag) ? (((uid) & 0xFFFFFF) | (((xid) & 0xFF00) << 16)) : (uid))
-#define XIDINO_GID(tag, gid, xid) \
- ((tag) ? (((gid) & 0xFFFFFF) | (((xid) & 0x00FF) << 24)) : (gid))
+#define XIDINO_UID(uid, xid) (((uid) & 0xFFFFFF) | (((xid) & 0xFF00) << 16))
+#define XIDINO_GID(gid, xid) (((gid) & 0xFFFFFF) | (((xid) & 0x00FF) << 24))
#endif
-#ifdef CONFIG_INOXID_UID16
-
-#define MAX_UID 0x0000FFFF
-#define MAX_GID 0xFFFFFFFF
-
-#define INOXID_XID(tag, uid, gid, xid) \
- ((tag) ? ((uid) >> 16) & 0xFFFF) : 0)
-
-#define XIDINO_UID(tag, uid, xid) \
- ((tag) ? (((uid) & 0xFFFF) | ((xid) << 16)) : (uid))
-#define XIDINO_GID(tag, gid, xid) (gid)
-
-#endif
-
-
-#ifdef CONFIG_INOXID_INTERN
+#ifdef CONFIG_INOXID_GID32
#define MAX_UID 0xFFFFFFFF
#define MAX_GID 0xFFFFFFFF
-#define INOXID_XID(tag, uid, gid, xid) \
- ((tag) ? (xid) : 0)
+#define INOXID_XID(uid, gid, xid) (xid)
-#define XIDINO_UID(tag, uid, xid) (uid)
-#define XIDINO_GID(tag, gid, xid) (gid)
+#define XIDINO_UID(uid, xid) (uid)
+#define XIDINO_GID(gid, xid) (gid)
#endif
#define MAX_UID 0xFFFFFFFF
#define MAX_GID 0xFFFFFFFF
-#define INOXID_XID(tag, uid, gid, xid) (0)
+#define INOXID_XID(uid, gid, xid) (0)
-#define XIDINO_UID(tag, uid, xid) (uid)
-#define XIDINO_GID(tag, gid, xid) (gid)
+#define XIDINO_UID(uid, xid) (uid)
+#define XIDINO_GID(gid, xid) (gid)
#endif
-#define INOXID_UID(tag, uid, gid) \
- ((tag) ? ((uid) & MAX_UID) : (uid))
-#define INOXID_GID(tag, uid, gid) \
- ((tag) ? ((gid) & MAX_GID) : (gid))
-
+#define INOXID_UID(uid, gid) ((uid) & MAX_UID)
+#define INOXID_GID(uid, gid) ((gid) & MAX_GID)
static inline uid_t vx_map_uid(uid_t uid)
{
#define FIOC_SETXIDJ _IOW('x', 3, long)
#endif
-#endif /* _VX_XID_H */
+#endif /* _LINUX_XID_H_ */
void unblank_screen(void);
void poke_blanked_console(void);
int con_font_op(int currcons, struct console_font_op *op);
-int con_font_set(int currcons, struct console_font_op *op);
-int con_font_get(int currcons, struct console_font_op *op);
-int con_font_default(int currcons, struct console_font_op *op);
-int con_font_copy(int currcons, struct console_font_op *op);
int con_set_cmap(unsigned char __user *cmap);
int con_get_cmap(unsigned char __user *cmap);
void scrollback(int);
struct ctl_table;
struct file;
int dirty_writeback_centisecs_handler(struct ctl_table *, int, struct file *,
- void __user *, size_t *, loff_t *);
+ void __user *, size_t *);
void page_writeback_init(void);
void balance_dirty_pages_ratelimited(struct address_space *mapping);
+++ /dev/null
-/*
- * $Id: inftl-user.h,v 1.1 2004/05/05 15:17:00 dwmw2 Exp $
- *
- * Parts of INFTL headers shared with userspace
- *
- */
-
-#ifndef __MTD_INFTL_USER_H__
-#define __MTD_INFTL_USER_H__
-
-#define OSAK_VERSION 0x5120
-#define PERCENTUSED 98
-
-#define SECTORSIZE 512
-
-/* Block Control Information */
-
-struct inftl_bci {
- uint8_t ECCsig[6];
- uint8_t Status;
- uint8_t Status1;
-} __attribute__((packed));
-
-struct inftl_unithead1 {
- uint16_t virtualUnitNo;
- uint16_t prevUnitNo;
- uint8_t ANAC;
- uint8_t NACs;
- uint8_t parityPerField;
- uint8_t discarded;
-} __attribute__((packed));
-
-struct inftl_unithead2 {
- uint8_t parityPerField;
- uint8_t ANAC;
- uint16_t prevUnitNo;
- uint16_t virtualUnitNo;
- uint8_t NACs;
- uint8_t discarded;
-} __attribute__((packed));
-
-struct inftl_unittail {
- uint8_t Reserved[4];
- uint16_t EraseMark;
- uint16_t EraseMark1;
-} __attribute__((packed));
-
-union inftl_uci {
- struct inftl_unithead1 a;
- struct inftl_unithead2 b;
- struct inftl_unittail c;
-};
-
-struct inftl_oob {
- struct inftl_bci b;
- union inftl_uci u;
-};
-
-
-/* INFTL Media Header */
-
-struct INFTLPartition {
- __u32 virtualUnits;
- __u32 firstUnit;
- __u32 lastUnit;
- __u32 flags;
- __u32 spareUnits;
- __u32 Reserved0;
- __u32 Reserved1;
-} __attribute__((packed));
-
-struct INFTLMediaHeader {
- char bootRecordID[8];
- __u32 NoOfBootImageBlocks;
- __u32 NoOfBinaryPartitions;
- __u32 NoOfBDTLPartitions;
- __u32 BlockMultiplierBits;
- __u32 FormatFlags;
- __u32 OsakVersion;
- __u32 PercentUsed;
- struct INFTLPartition Partitions[4];
-} __attribute__((packed));
-
-/* Partition flag types */
-#define INFTL_BINARY 0x20000000
-#define INFTL_BDTL 0x40000000
-#define INFTL_LAST 0x80000000
-
-#endif /* __MTD_INFTL_USER_H__ */
-
-
+++ /dev/null
-/*
- * $Id: jffs2-user.h,v 1.1 2004/05/05 11:57:54 dwmw2 Exp $
- *
- * JFFS2 definitions for use in user space only
- */
-
-#ifndef __JFFS2_USER_H__
-#define __JFFS2_USER_H__
-
-/* This file is blessed for inclusion by userspace */
-#include <linux/jffs2.h>
-#include <endian.h>
-#include <byteswap.h>
-
-#undef cpu_to_je16
-#undef cpu_to_je32
-#undef cpu_to_jemode
-#undef je16_to_cpu
-#undef je32_to_cpu
-#undef jemode_to_cpu
-
-extern int target_endian;
-
-#define t16(x) ({ uint16_t __b = (x); (target_endian==__BYTE_ORDER)?__b:bswap_16(__b); })
-#define t32(x) ({ uint32_t __b = (x); (target_endian==__BYTE_ORDER)?__b:bswap_32(__b); })
-
-#define cpu_to_je16(x) ((jint16_t){t16(x)})
-#define cpu_to_je32(x) ((jint32_t){t32(x)})
-#define cpu_to_jemode(x) ((jmode_t){t32(x)})
-
-#define je16_to_cpu(x) (t16((x).v16))
-#define je32_to_cpu(x) (t32((x).v32))
-#define jemode_to_cpu(x) (t32((x).m))
-
-#endif /* __JFFS2_USER_H__ */
+++ /dev/null
-/*
- * $Id: mtd-abi.h,v 1.5 2004/06/22 09:29:35 gleixner Exp $
- *
- * Portions of MTD ABI definition which are shared by kernel and user space
- */
-
-#ifndef __MTD_ABI_H__
-#define __MTD_ABI_H__
-
-struct erase_info_user {
- uint32_t start;
- uint32_t length;
-};
-
-struct mtd_oob_buf {
- uint32_t start;
- uint32_t length;
- unsigned char __user *ptr;
-};
-
-#define MTD_ABSENT 0
-#define MTD_RAM 1
-#define MTD_ROM 2
-#define MTD_NORFLASH 3
-#define MTD_NANDFLASH 4
-#define MTD_PEROM 5
-#define MTD_OTHER 14
-#define MTD_UNKNOWN 15
-
-#define MTD_CLEAR_BITS 1 // Bits can be cleared (flash)
-#define MTD_SET_BITS 2 // Bits can be set
-#define MTD_ERASEABLE 4 // Has an erase function
-#define MTD_WRITEB_WRITEABLE 8 // Direct IO is possible
-#define MTD_VOLATILE 16 // Set for RAMs
-#define MTD_XIP 32 // eXecute-In-Place possible
-#define MTD_OOB 64 // Out-of-band data (NAND flash)
-#define MTD_ECC 128 // Device capable of automatic ECC
-
-// Some common devices / combinations of capabilities
-#define MTD_CAP_ROM 0
-#define MTD_CAP_RAM (MTD_CLEAR_BITS|MTD_SET_BITS|MTD_WRITEB_WRITEABLE)
-#define MTD_CAP_NORFLASH (MTD_CLEAR_BITS|MTD_ERASEABLE)
-#define MTD_CAP_NANDFLASH (MTD_CLEAR_BITS|MTD_ERASEABLE|MTD_OOB)
-#define MTD_WRITEABLE (MTD_CLEAR_BITS|MTD_SET_BITS)
-
-
-// Types of automatic ECC/Checksum available
-#define MTD_ECC_NONE 0 // No automatic ECC available
-#define MTD_ECC_RS_DiskOnChip 1 // Automatic ECC on DiskOnChip
-#define MTD_ECC_SW 2 // SW ECC for Toshiba & Samsung devices
-
-/* ECC byte placement */
-#define MTD_NANDECC_OFF 0 // Switch off ECC (Not recommended)
-#define MTD_NANDECC_PLACE 1 // Use the given placement in the structure (YAFFS1 legacy mode)
-#define MTD_NANDECC_AUTOPLACE 2 // Use the default placement scheme
-#define MTD_NANDECC_PLACEONLY 3 // Use the given placement in the structure (Do not store ecc result on read)
-
-struct mtd_info_user {
- uint8_t type;
- uint32_t flags;
- uint32_t size; // Total size of the MTD
- uint32_t erasesize;
- uint32_t oobblock; // Size of OOB blocks (e.g. 512)
- uint32_t oobsize; // Amount of OOB data per block (e.g. 16)
- uint32_t ecctype;
- uint32_t eccsize;
-};
-
-struct region_info_user {
- uint32_t offset; /* At which this region starts,
- * from the beginning of the MTD */
- uint32_t erasesize; /* For this region */
- uint32_t numblocks; /* Number of blocks in this region */
- uint32_t regionindex;
-};
-
-#define MEMGETINFO _IOR('M', 1, struct mtd_info_user)
-#define MEMERASE _IOW('M', 2, struct erase_info_user)
-#define MEMWRITEOOB _IOWR('M', 3, struct mtd_oob_buf)
-#define MEMREADOOB _IOWR('M', 4, struct mtd_oob_buf)
-#define MEMLOCK _IOW('M', 5, struct erase_info_user)
-#define MEMUNLOCK _IOW('M', 6, struct erase_info_user)
-#define MEMGETREGIONCOUNT _IOR('M', 7, int)
-#define MEMGETREGIONINFO _IOWR('M', 8, struct region_info_user)
-#define MEMSETOOBSEL _IOW('M', 9, struct nand_oobinfo)
-#define MEMGETOOBSEL _IOR('M', 10, struct nand_oobinfo)
-#define MEMGETBADBLOCK _IOW('M', 11, loff_t)
-#define MEMSETBADBLOCK _IOW('M', 12, loff_t)
-
-struct nand_oobinfo {
- uint32_t useecc;
- uint32_t eccbytes;
- uint32_t oobfree[8][2];
- uint32_t eccpos[32];
-};
-
-#endif /* __MTD_ABI_H__ */
+++ /dev/null
-/*
- * $Id: mtd-user.h,v 1.2 2004/05/05 14:44:57 dwmw2 Exp $
- *
- * MTD ABI header for use by user space only.
- */
-
-#ifndef __MTD_USER_H__
-#define __MTD_USER_H__
-
-#include <stdint.h>
-
-/* This file is blessed for inclusion by userspace */
-#include <mtd/mtd-abi.h>
-
-typedef struct mtd_info_user mtd_info_t;
-typedef struct erase_info_user erase_info_t;
-typedef struct region_info_user region_info_t;
-typedef struct nand_oobinfo nand_oobinfo_t;
-
-#endif /* __MTD_USER_H__ */
+++ /dev/null
-/*
- * $Id: nftl-user.h,v 1.1 2004/05/05 14:44:57 dwmw2 Exp $
- *
- * Parts of NFTL headers shared with userspace
- *
- */
-
-#ifndef __MTD_NFTL_USER_H__
-#define __MTD_NFTL_USER_H__
-
-/* Block Control Information */
-
-struct nftl_bci {
- unsigned char ECCSig[6];
- uint8_t Status;
- uint8_t Status1;
-}__attribute__((packed));
-
-/* Unit Control Information */
-
-struct nftl_uci0 {
- uint16_t VirtUnitNum;
- uint16_t ReplUnitNum;
- uint16_t SpareVirtUnitNum;
- uint16_t SpareReplUnitNum;
-} __attribute__((packed));
-
-struct nftl_uci1 {
- uint32_t WearInfo;
- uint16_t EraseMark;
- uint16_t EraseMark1;
-} __attribute__((packed));
-
-struct nftl_uci2 {
- uint16_t FoldMark;
- uint16_t FoldMark1;
- uint32_t unused;
-} __attribute__((packed));
-
-union nftl_uci {
- struct nftl_uci0 a;
- struct nftl_uci1 b;
- struct nftl_uci2 c;
-};
-
-struct nftl_oob {
- struct nftl_bci b;
- union nftl_uci u;
-};
-
-/* NFTL Media Header */
-
-struct NFTLMediaHeader {
- char DataOrgID[6];
- uint16_t NumEraseUnits;
- uint16_t FirstPhysicalEUN;
- uint32_t FormattedSize;
- unsigned char UnitSizeFactor;
-} __attribute__((packed));
-
-#define MAX_ERASE_ZONES (8192 - 512)
-
-#define ERASE_MARK 0x3c69
-#define SECTOR_FREE 0xff
-#define SECTOR_USED 0x55
-#define SECTOR_IGNORE 0x11
-#define SECTOR_DELETED 0x00
-
-#define FOLD_MARK_IN_PROGRESS 0x5555
-
-#define ZONE_GOOD 0xff
-#define ZONE_BAD_ORIGINAL 0
-#define ZONE_BAD_MARKED 7
-
-
-#endif /* __MTD_NFTL_USER_H__ */
inet6_ifa_finish_destroy(ifp);
}
-#define __in6_ifa_put(ifp) atomic_dec(&(ifp)->refcnt)
-#define in6_ifa_hold(ifp) atomic_inc(&(ifp)->refcnt)
+#define __in6_ifa_put(idev) atomic_dec(&(idev)->refcnt)
+#define in6_ifa_hold(idev) atomic_inc(&(idev)->refcnt)
extern void addrconf_forwarding_on(void);
* This will include the IEEE address token on links that support it.
*/
- word = addr->s6_addr32[2] ^ addr->s6_addr32[3];
- word ^= (word >> 16);
+ word = addr->s6_addr[2] ^ addr->s6_addr32[3];
+ word ^= (word>>16);
word ^= (word >> 8);
return ((word ^ (word >> 4)) & 0x0f);
__u8 pscan_period_mode;
__u8 dev_class[3];
__u16 clock_offset;
- __s8 rssi;
+ __u8 rssi;
} __attribute__ ((packed));
#define HCI_EV_CONN_COMPLETE 0x03
#define ICMP_INC_STATS(field) SNMP_INC_STATS(icmp_statistics, field)
#define ICMP_INC_STATS_BH(field) SNMP_INC_STATS_BH(icmp_statistics, field)
#define ICMP_INC_STATS_USER(field) SNMP_INC_STATS_USER(icmp_statistics, field)
+#define ICMP_INC_STATS_FIELD(offt) \
+ (*((unsigned long *) ((void *) \
+ per_cpu_ptr(icmp_statistics[!in_softirq()],\
+ smp_processor_id()) + offt)))++
+#define ICMP_INC_STATS_BH_FIELD(offt) \
+ (*((unsigned long *) ((void *) \
+ per_cpu_ptr(icmp_statistics[0], \
+ smp_processor_id()) + offt)))++
+#define ICMP_INC_STATS_USER_FIELD(offt) \
+ (*((unsigned long *) ((void *) \
+ per_cpu_ptr(icmp_statistics[1], \
+ smp_processor_id()) + offt)))++
extern void icmp_send(struct sk_buff *skb_in, int type, int code, u32 info);
extern int icmp_rcv(struct sk_buff *skb);
#include <linux/ip.h>
-enum {
- INET_ECN_NOT_ECT = 0,
- INET_ECN_ECT_1 = 1,
- INET_ECN_ECT_0 = 2,
- INET_ECN_CE = 3,
- INET_ECN_MASK = 3,
-};
-
static inline int INET_ECN_is_ce(__u8 dsfield)
{
- return (dsfield & INET_ECN_MASK) == INET_ECN_CE;
+ return (dsfield&3) == 3;
}
static inline int INET_ECN_is_not_ce(__u8 dsfield)
{
- return (dsfield & INET_ECN_MASK) == INET_ECN_ECT_0;
+ return (dsfield&3) == 2;
}
static inline int INET_ECN_is_capable(__u8 dsfield)
{
- return (dsfield & INET_ECN_ECT_0);
+ return (dsfield&2);
}
static inline __u8 INET_ECN_encapsulate(__u8 outer, __u8 inner)
{
- outer &= ~INET_ECN_MASK;
+ outer &= ~3;
if (INET_ECN_is_capable(inner))
- outer |= (inner & INET_ECN_MASK);
+ outer |= (inner & 3);
return outer;
}
-#define INET_ECN_xmit(sk) do { inet_sk(sk)->tos |= INET_ECN_ECT_0; } while (0)
-#define INET_ECN_dontxmit(sk) \
- do { inet_sk(sk)->tos &= ~INET_ECN_MASK; } while (0)
+#define INET_ECN_xmit(sk) do { inet_sk(sk)->tos |= 2; } while (0)
+#define INET_ECN_dontxmit(sk) do { inet_sk(sk)->tos &= ~3; } while (0)
-#define IP6_ECN_flow_init(label) do { \
- (label) &= ~htonl(INET_ECN_MASK << 20); \
+#define IP6_ECN_flow_init(label) do { \
+ (label) &= ~htonl(3<<20); \
} while (0)
-#define IP6_ECN_flow_xmit(sk, label) do { \
- if (INET_ECN_is_capable(inet_sk(sk)->tos)) \
- (label) |= __constant_htons(INET_ECN_ECT_0 << 4); \
+#define IP6_ECN_flow_xmit(sk, label) do { \
+ if (INET_ECN_is_capable(inet_sk(sk)->tos)) \
+ (label) |= __constant_htons(2 << 4); \
} while (0)
static inline void IP_ECN_set_ce(struct iphdr *iph)
u32 check = iph->check;
check += __constant_htons(0xFFFE);
iph->check = check + (check>=0xFFFF);
- iph->tos |= INET_ECN_CE;
+ iph->tos |= 1;
}
static inline void IP_ECN_clear(struct iphdr *iph)
{
- iph->tos &= ~INET_ECN_MASK;
+ iph->tos &= ~3;
}
struct ipv6hdr;
static inline void IP6_ECN_set_ce(struct ipv6hdr *iph)
{
- *(u32*)iph |= htonl(INET_ECN_CE << 20);
+ *(u32*)iph |= htonl(1<<20);
}
static inline void IP6_ECN_clear(struct ipv6hdr *iph)
{
- *(u32*)iph &= ~htonl(INET_ECN_MASK << 20);
+ *(u32*)iph &= ~htonl(3<<20);
}
#define ip6_get_dsfield(iph) ((ntohs(*(u16*)(iph)) >> 4) & 0xFF)
extern int ip_push_pending_frames(struct sock *sk);
extern void ip_flush_pending_frames(struct sock *sk);
-/* datagram.c */
-extern int ip4_datagram_connect(struct sock *sk,
- struct sockaddr *uaddr, int addr_len);
/*
* Map a multicast IP onto multicast MAC for type Token Ring.
}
struct ip_reply_arg {
- struct kvec iov[1];
- u32 csum;
- int csumoffset; /* u16 offset of csum in iov[0].iov_base */
- /* -1 if not needed */
+ struct iovec iov[1];
+ u32 csum;
+ int csumoffset; /* u16 offset of csum in iov[0].iov_base */
+ /* -1 if not needed */
};
void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *arg,
*/
int ipv4_doint_and_flush(ctl_table *ctl, int write,
struct file* filp, void __user *buffer,
- size_t *lenp, loff_t *ppos);
+ size_t *lenp);
int ipv4_doint_and_flush_strategy(ctl_table *table, int __user *name, int nlen,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen,
/*
* Store a destination cache entry in a socket
+ * For UDP/RAW sockets this is done on udp_connect.
*/
+
static inline void ip6_dst_store(struct sock *sk, struct dst_entry *dst,
struct in6_addr *daddr)
{
extern void ipv6_packet_cleanup(void);
-extern int ip6_datagram_connect(struct sock *sk,
- struct sockaddr *addr, int addr_len);
-
extern int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len);
extern void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, u16 port,
u32 info, u8 *payload);
return(irlap_is_primary(self->lsap->lap->irlap));
}
+extern struct irttp_cb *irttp;
+
#endif /* IRTTP_H */
int write,
struct file * filp,
void __user *buffer,
- size_t *lenp,
- loff_t *ppos);
+ size_t *lenp);
#endif
extern void inet6_ifinfo_notify(int event,
* nr_node & nr_neigh lists, refcounting and locking
*********************************************************************/
+extern struct hlist_head nr_node_list;
+extern struct hlist_head nr_neigh_list;
+
#define nr_node_hold(__nr_node) \
atomic_inc(&((__nr_node)->refcount))
#ifndef __NET_PKT_SCHED_H
#define __NET_PKT_SCHED_H
+#define PSCHED_GETTIMEOFDAY 1
+#define PSCHED_JIFFIES 2
+#define PSCHED_CPU 3
+
+#define PSCHED_CLOCK_SOURCE PSCHED_JIFFIES
+
#include <linux/config.h>
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
+#ifdef CONFIG_X86_TSC
+#include <asm/msr.h>
+#endif
+
+
struct rtattr;
struct Qdisc;
#define TCQ_F_BUILTIN 1
#define TCQ_F_THROTTLED 2
#define TCQ_F_INGRES 4
- int padded;
struct Qdisc_ops *ops;
+ struct Qdisc *next;
u32 handle;
atomic_t refcnt;
struct sk_buff_head q;
struct net_device *dev;
- struct list_head list;
struct tc_stats stats;
spinlock_t *stats_lock;
* and it will live until better solution will be invented.
*/
struct Qdisc *__parent;
-};
-
-#define QDISC_ALIGN 32
-#define QDISC_ALIGN_CONST (QDISC_ALIGN - 1)
-static inline void *qdisc_priv(struct Qdisc *q)
-{
- return (char *)q + ((sizeof(struct Qdisc) + QDISC_ALIGN_CONST)
- & ~QDISC_ALIGN_CONST);
-}
+ char data[0];
+};
struct qdisc_rate_table
{
int refcnt;
};
-extern void qdisc_lock_tree(struct net_device *dev);
-extern void qdisc_unlock_tree(struct net_device *dev);
+static inline void sch_tree_lock(struct Qdisc *q)
+{
+ write_lock(&qdisc_tree_lock);
+ spin_lock_bh(&q->dev->queue_lock);
+}
+
+static inline void sch_tree_unlock(struct Qdisc *q)
+{
+ spin_unlock_bh(&q->dev->queue_lock);
+ write_unlock(&qdisc_tree_lock);
+}
+
+static inline void tcf_tree_lock(struct tcf_proto *tp)
+{
+ write_lock(&qdisc_tree_lock);
+ spin_lock_bh(&tp->q->dev->queue_lock);
+}
+
+static inline void tcf_tree_unlock(struct tcf_proto *tp)
+{
+ spin_unlock_bh(&tp->q->dev->queue_lock);
+ write_unlock(&qdisc_tree_lock);
+}
+
+
+static inline unsigned long
+cls_set_class(struct tcf_proto *tp, unsigned long *clp, unsigned long cl)
+{
+ unsigned long old_cl;
-#define sch_tree_lock(q) qdisc_lock_tree((q)->dev)
-#define sch_tree_unlock(q) qdisc_unlock_tree((q)->dev)
-#define tcf_tree_lock(tp) qdisc_lock_tree((tp)->q->dev)
-#define tcf_tree_unlock(tp) qdisc_unlock_tree((tp)->q->dev)
+ tcf_tree_lock(tp);
+ old_cl = *clp;
+ *clp = cl;
+ tcf_tree_unlock(tp);
+ return old_cl;
+}
-#define cls_set_class(tp, clp, cl) tcf_set_class(tp, clp, cl)
static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
The reason is that, when it is not the same thing as
gettimeofday, it returns invalid timestamp, which is
not updated, when net_bh is active.
+
+ So, use PSCHED_CLOCK_SOURCE = PSCHED_CPU on alpha and pentiums
+ with rtdsc. And PSCHED_JIFFIES on all other architectures, including [34]86
+ and pentiums without rtdsc.
+ You can use PSCHED_GETTIMEOFDAY on another architectures,
+ which have fast and precise clock source, but it is too expensive.
*/
/* General note about internal clock.
Any clock source returns time intervals, measured in units
- close to 1usec. With source CONFIG_NET_SCH_CLK_GETTIMEOFDAY it is precisely
+ close to 1usec. With source PSCHED_GETTIMEOFDAY it is precisely
microseconds, otherwise something close but different chosen to minimize
arithmetic cost. Ratio usec/internal untis in form nominator/denominator
may be read from /proc/net/psched.
*/
-#ifdef CONFIG_NET_SCH_CLK_GETTIMEOFDAY
+#if PSCHED_CLOCK_SOURCE == PSCHED_GETTIMEOFDAY
typedef struct timeval psched_time_t;
typedef long psched_tdiff_t;
#define PSCHED_US2JIFFIE(usecs) (((usecs)+(1000000/HZ-1))/(1000000/HZ))
#define PSCHED_JIFFIE2US(delay) ((delay)*(1000000/HZ))
-#else /* !CONFIG_NET_SCH_CLK_GETTIMEOFDAY */
+#else /* PSCHED_CLOCK_SOURCE != PSCHED_GETTIMEOFDAY */
typedef u64 psched_time_t;
typedef long psched_tdiff_t;
-#ifdef CONFIG_NET_SCH_CLK_JIFFIES
+extern psched_time_t psched_time_base;
+
+#if PSCHED_CLOCK_SOURCE == PSCHED_JIFFIES
#if HZ < 96
#define PSCHED_JSCALE 14
#define PSCHED_US2JIFFIE(delay) (((delay)+(1<<PSCHED_JSCALE)-1)>>PSCHED_JSCALE)
#define PSCHED_JIFFIE2US(delay) ((delay)<<PSCHED_JSCALE)
-#endif /* CONFIG_NET_SCH_CLK_JIFFIES */
-#ifdef CONFIG_NET_SCH_CLK_CPU
-#include <asm/timex.h>
+#elif PSCHED_CLOCK_SOURCE == PSCHED_CPU
extern psched_tdiff_t psched_clock_per_hz;
extern int psched_clock_scale;
-extern psched_time_t psched_time_base;
-extern cycles_t psched_time_mark;
-
-#define PSCHED_GET_TIME(stamp) \
-do { \
- cycles_t cur = get_cycles(); \
- if (sizeof(cycles_t) == sizeof(u32)) { \
- if (cur <= psched_time_mark) \
- psched_time_base += 0x100000000ULL; \
- psched_time_mark = cur; \
- (stamp) = (psched_time_base + cur)>>psched_clock_scale; \
- } else { \
- (stamp) = cur>>psched_clock_scale; \
- } \
-} while (0)
+
#define PSCHED_US2JIFFIE(delay) (((delay)+psched_clock_per_hz-1)/psched_clock_per_hz)
#define PSCHED_JIFFIE2US(delay) ((delay)*psched_clock_per_hz)
-#endif /* CONFIG_NET_SCH_CLK_CPU */
+#ifdef CONFIG_X86_TSC
+
+#define PSCHED_GET_TIME(stamp) \
+({ u64 __cur; \
+ rdtscll(__cur); \
+ (stamp) = __cur>>psched_clock_scale; \
+})
+
+#elif defined (__alpha__)
+
+#define PSCHED_WATCHER u32
-#endif /* !CONFIG_NET_SCH_CLK_GETTIMEOFDAY */
+extern PSCHED_WATCHER psched_time_mark;
-#ifdef CONFIG_NET_SCH_CLK_GETTIMEOFDAY
+#define PSCHED_GET_TIME(stamp) \
+({ u32 __res; \
+ __asm__ __volatile__ ("rpcc %0" : "r="(__res)); \
+ if (__res <= psched_time_mark) psched_time_base += 0x100000000UL; \
+ psched_time_mark = __res; \
+ (stamp) = (psched_time_base + __res)>>psched_clock_scale; \
+})
+
+#else
+
+#error PSCHED_CLOCK_SOURCE=PSCHED_CPU is not supported on this arch.
+
+#endif /* ARCH */
+
+#endif /* PSCHED_CLOCK_SOURCE == PSCHED_JIFFIES */
+
+#endif /* PSCHED_CLOCK_SOURCE == PSCHED_GETTIMEOFDAY */
+
+#if PSCHED_CLOCK_SOURCE == PSCHED_GETTIMEOFDAY
#define PSCHED_TDIFF(tv1, tv2) \
({ \
int __delta_sec = (tv1).tv_sec - (tv2).tv_sec; \
#define PSCHED_AUDIT_TDIFF(t) ({ if ((t) > 2000000) (t) = 2000000; })
-#else /* !CONFIG_NET_SCH_CLK_GETTIMEOFDAY */
+#else
#define PSCHED_TDIFF(tv1, tv2) (long)((tv1) - (tv2))
#define PSCHED_TDIFF_SAFE(tv1, tv2, bound) \
#define PSCHED_IS_PASTPERFECT(t) ((t) == 0)
#define PSCHED_AUDIT_TDIFF(t)
-#endif /* !CONFIG_NET_SCH_CLK_GETTIMEOFDAY */
+#endif
struct tcf_police
{
extern int tcf_act_police(struct sk_buff **skb, struct tc_action *a);
#endif
-extern unsigned long tcf_set_class(struct tcf_proto *tp, unsigned long *clp,
- unsigned long cl);
extern int tcf_police(struct sk_buff *skb, struct tcf_police *p);
extern int qdisc_copy_stats(struct sk_buff *skb, struct tc_stats *st, spinlock_t *lock);
extern void tcf_police_destroy(struct tcf_police *p);
extern int qdisc_restart(struct net_device *dev);
+static inline void qdisc_run(struct net_device *dev)
+{
+ while (!netif_queue_stopped(dev) &&
+ qdisc_restart(dev)<0)
+ /* NOTHING */;
+}
+
/* Calculate maximal size of packet seen by hard_start_xmit
routine of this device.
*/
#include <linux/vs_context.h>
#include <linux/vs_network.h>
+
#ifndef __KERNEL__
#warning This file is not supposed to be used outside of kernel.
#endif
return ip_tos2prio[IPTOS_TOS(tos)>>1];
}
+#define IPI_LOOPBACK 0x0100007f
+
+static inline int ip_find_src(struct nx_info *nxi, struct rtable **rp, struct flowi *fl)
+{
+ int err;
+ int i, n = nxi->nbipv4;
+ u32 ipv4root = nxi->ipv4[0];
+
+ if (ipv4root == 0)
+ return 0;
+
+ if (fl->fl4_src == 0) {
+ if (n > 1) {
+ u32 foundsrc;
+
+ err = __ip_route_output_key(rp, fl);
+ if (err) {
+ fl->fl4_src = ipv4root;
+ err = __ip_route_output_key(rp, fl);
+ }
+ if (err)
+ return err;
+
+ foundsrc = (*rp)->rt_src;
+ ip_rt_put(*rp);
+
+ for (i=0; i<n; i++){
+ u32 mask = nxi->mask[i];
+ u32 ipv4 = nxi->ipv4[i];
+ u32 net4 = ipv4 & mask;
+
+ if (foundsrc == ipv4) {
+ fl->fl4_src = ipv4;
+ break;
+ }
+ if (!fl->fl4_src && (foundsrc & mask) == net4)
+ fl->fl4_src = ipv4;
+ }
+ }
+ if (fl->fl4_src == 0)
+ fl->fl4_src = (fl->fl4_dst == IPI_LOOPBACK)
+ ? IPI_LOOPBACK : ipv4root;
+ } else {
+ for (i=0; i<n; i++) {
+ if (nxi->ipv4[i] == fl->fl4_src)
+ break;
+ }
+ if (i == n)
+ return -EPERM;
+ }
+ return 0;
+}
+
static inline int ip_route_connect(struct rtable **rp, u32 dst,
u32 src, u32 tos, int oif, u8 protocol,
u16 sport, u16 dport, struct sock *sk)
.dport = dport } } };
int err;
- if (!dst || !src) {
+ struct nx_info *nx_info = current->nx_info;
+
+ if (sk)
+ nx_info = sk->sk_nx_info;
+ vxdprintk("ip_route_connect(%p) %p,%p;%lx\n",
+ sk, nx_info, sk->sk_socket,
+ (sk->sk_socket?sk->sk_socket->flags:0));
+
+ if (nx_info) {
+ err = ip_find_src(nx_info, rp, &fl);
+ if (err)
+ return err;
+ if (fl.fl4_dst == IPI_LOOPBACK && !vx_check(0, VX_ADMIN))
+ fl.fl4_dst = nx_info->ipv4[0];
+ }
+ if (!fl.fl4_dst || !fl.fl4_src) {
err = __ip_route_output_key(rp, &fl);
if (err)
return err;
SCTP_CMD_REPORT_FWDTSN, /* Report new cumulative TSN Ack. */
SCTP_CMD_PROCESS_FWDTSN, /* Skips were reported, so process further. */
SCTP_CMD_CLEAR_INIT_TAG, /* Clears association peer's inittag. */
- SCTP_CMD_DEL_NON_PRIMARY, /* Removes non-primary peer transports. */
- SCTP_CMD_T3_RTX_TIMERS_STOP, /* Stops T3-rtx pending timers */
- SCTP_CMD_FORCE_PRIM_RETRAN, /* Forces retrans. over primary path. */
SCTP_CMD_LAST
} sctp_verb_t;
SCTP_IERROR_BAD_TAG,
SCTP_IERROR_BIG_GAP,
SCTP_IERROR_DUP_TSN,
- SCTP_IERROR_HIGH_TSN,
- SCTP_IERROR_IGNORE_TSN,
- SCTP_IERROR_NO_DATA,
- SCTP_IERROR_BAD_STREAM,
} sctp_ierror_t;
const struct sctp_chunk *chunk,
sctp_cmd_seq_t *commands,
struct sctp_chunk *err_chunk);
-int sctp_eat_data(const struct sctp_association *asoc,
- struct sctp_chunk *chunk,
- sctp_cmd_seq_t *commands);
/* 3rd level prototypes */
__u32 sctp_generate_tag(const struct sctp_endpoint *);
#define _SNMP_H
#include <linux/cache.h>
-#include <linux/snmp.h>
-
-/*
- * Mibs are stored in array of unsigned long.
- */
+
/*
- * struct snmp_mib{}
- * - list of entries for particular API (such as /proc/net/snmp)
- * - name of entries.
+ * We use all unsigned longs. Linux will soon be so reliable that even these
+ * will rapidly get too small 8-). Seriously consider the IpInReceives count
+ * on the 20Gb/s + networks people expect in a few years time!
*/
-struct snmp_mib {
+
+/*
+ * The rule for padding:
+ * Best is power of two because then the right structure can be found by a simple
+ * shift. The structure should be always cache line aligned.
+ * gcc needs n=alignto(cachelinesize, popcnt(sizeof(bla_mib))) shift/add instructions
+ * to emulate multiply in case it is not power-of-two. Currently n is always <=3 for
+ * all sizes so simple cache line alignment is enough.
+ *
+ * The best solution would be a global CPU local area , especially on 64 and 128byte
+ * cacheline machine it makes a *lot* of sense -AK
+ */
+
+struct snmp_item {
char *name;
- int entry;
+ int offset;
};
-#define SNMP_MIB_ITEM(_name,_entry) { \
- .name = _name, \
- .entry = _entry, \
+#define SNMP_ITEM(mib,entry,procname) { \
+ .name = procname, \
+ .offset = offsetof(mib, entry), \
}
-#define SNMP_MIB_SENTINEL { \
- .name = NULL, \
- .entry = 0, \
+#define SNMP_ITEM_SENTINEL { \
+ .name = NULL, \
+ .offset = 0, \
}
/*
- * We use all unsigned longs. Linux will soon be so reliable that even
- * these will rapidly get too small 8-). Seriously consider the IpInReceives
- * count on the 20Gb/s + networks people expect in a few years time!
+ * RFC 1213: MIB-II
+ * RFC 2011 (updates 1213): SNMPv2-MIB-IP
+ * RFC 2863: Interfaces Group MIB
+ * RFC 2465: IPv6 MIB: General Group
+ * draft-ietf-ipv6-rfc2011-update-10.txt: MIB for IP: IP Statistics Tables
*/
+struct ipstats_mib
+{
+ unsigned long InReceives;
+ unsigned long InHdrErrors;
+ unsigned long InTooBigErrors;
+ unsigned long InNoRoutes;
+ unsigned long InAddrErrors;
+ unsigned long InUnknownProtos;
+ unsigned long InTruncatedPkts;
+ unsigned long InDiscards;
+ unsigned long InDelivers;
+ unsigned long OutForwDatagrams;
+ unsigned long OutRequests;
+ unsigned long OutDiscards;
+ unsigned long OutNoRoutes;
+ unsigned long ReasmTimeout;
+ unsigned long ReasmReqds;
+ unsigned long ReasmOKs;
+ unsigned long ReasmFails;
+ unsigned long FragOKs;
+ unsigned long FragFails;
+ unsigned long FragCreates;
+ unsigned long InMcastPkts;
+ unsigned long OutMcastPkts;
+ unsigned long __pad[0];
+};
+
+/*
+ * RFC 1213: MIB-II ICMP Group
+ * RFC 2011 (updates 1213): SNMPv2 MIB for IP: ICMP group
+ */
+struct icmp_mib
+{
+ unsigned long IcmpInMsgs;
+ unsigned long IcmpInErrors;
+ unsigned long IcmpInDestUnreachs;
+ unsigned long IcmpInTimeExcds;
+ unsigned long IcmpInParmProbs;
+ unsigned long IcmpInSrcQuenchs;
+ unsigned long IcmpInRedirects;
+ unsigned long IcmpInEchos;
+ unsigned long IcmpInEchoReps;
+ unsigned long IcmpInTimestamps;
+ unsigned long IcmpInTimestampReps;
+ unsigned long IcmpInAddrMasks;
+ unsigned long IcmpInAddrMaskReps;
+ unsigned long IcmpOutMsgs;
+ unsigned long IcmpOutErrors;
+ unsigned long IcmpOutDestUnreachs;
+ unsigned long IcmpOutTimeExcds;
+ unsigned long IcmpOutParmProbs;
+ unsigned long IcmpOutSrcQuenchs;
+ unsigned long IcmpOutRedirects;
+ unsigned long IcmpOutEchos;
+ unsigned long IcmpOutEchoReps;
+ unsigned long IcmpOutTimestamps;
+ unsigned long IcmpOutTimestampReps;
+ unsigned long IcmpOutAddrMasks;
+ unsigned long IcmpOutAddrMaskReps;
+ unsigned long dummy;
+ unsigned long __pad[0];
+};
-/*
- * The rule for padding:
- * Best is power of two because then the right structure can be found by a
- * simple shift. The structure should be always cache line aligned.
- * gcc needs n=alignto(cachelinesize, popcnt(sizeof(bla_mib))) shift/add
- * instructions to emulate multiply in case it is not power-of-two.
- * Currently n is always <=3 for all sizes so simple cache line alignment
- * is enough.
- *
- * The best solution would be a global CPU local area , especially on 64
- * and 128byte cacheline machine it makes a *lot* of sense -AK
- */
+/*
+ * RFC 2466: ICMPv6-MIB
+ */
+struct icmpv6_mib
+{
+ unsigned long Icmp6InMsgs;
+ unsigned long Icmp6InErrors;
+
+ unsigned long Icmp6InDestUnreachs;
+ unsigned long Icmp6InPktTooBigs;
+ unsigned long Icmp6InTimeExcds;
+ unsigned long Icmp6InParmProblems;
+
+ unsigned long Icmp6InEchos;
+ unsigned long Icmp6InEchoReplies;
+ unsigned long Icmp6InGroupMembQueries;
+ unsigned long Icmp6InGroupMembResponses;
+ unsigned long Icmp6InGroupMembReductions;
+ unsigned long Icmp6InRouterSolicits;
+ unsigned long Icmp6InRouterAdvertisements;
+ unsigned long Icmp6InNeighborSolicits;
+ unsigned long Icmp6InNeighborAdvertisements;
+ unsigned long Icmp6InRedirects;
+
+ unsigned long Icmp6OutMsgs;
+
+ unsigned long Icmp6OutDestUnreachs;
+ unsigned long Icmp6OutPktTooBigs;
+ unsigned long Icmp6OutTimeExcds;
+ unsigned long Icmp6OutParmProblems;
+
+ unsigned long Icmp6OutEchoReplies;
+ unsigned long Icmp6OutRouterSolicits;
+ unsigned long Icmp6OutNeighborSolicits;
+ unsigned long Icmp6OutNeighborAdvertisements;
+ unsigned long Icmp6OutRedirects;
+ unsigned long Icmp6OutGroupMembResponses;
+ unsigned long Icmp6OutGroupMembReductions;
+ unsigned long __pad[0];
+};
+
+/*
+ * RFC 1213: MIB-II TCP group
+ * RFC 2012 (updates 1213): SNMPv2-MIB-TCP
+ */
+struct tcp_mib
+{
+ unsigned long TcpRtoAlgorithm;
+ unsigned long TcpRtoMin;
+ unsigned long TcpRtoMax;
+ unsigned long TcpMaxConn;
+ unsigned long TcpActiveOpens;
+ unsigned long TcpPassiveOpens;
+ unsigned long TcpAttemptFails;
+ unsigned long TcpEstabResets;
+ unsigned long TcpCurrEstab;
+ unsigned long TcpInSegs;
+ unsigned long TcpOutSegs;
+ unsigned long TcpRetransSegs;
+ unsigned long TcpInErrs;
+ unsigned long TcpOutRsts;
+ unsigned long __pad[0];
+};
+
+/*
+ * RFC 1213: MIB-II UDP group
+ * RFC 2013 (updates 1213): SNMPv2-MIB-UDP
+ */
+struct udp_mib
+{
+ unsigned long UdpInDatagrams;
+ unsigned long UdpNoPorts;
+ unsigned long UdpInErrors;
+ unsigned long UdpOutDatagrams;
+ unsigned long __pad[0];
+};
+
+/* draft-ietf-sigtran-sctp-mib-07.txt */
+struct sctp_mib
+{
+ unsigned long SctpCurrEstab;
+ unsigned long SctpActiveEstabs;
+ unsigned long SctpPassiveEstabs;
+ unsigned long SctpAborteds;
+ unsigned long SctpShutdowns;
+ unsigned long SctpOutOfBlues;
+ unsigned long SctpChecksumErrors;
+ unsigned long SctpOutCtrlChunks;
+ unsigned long SctpOutOrderChunks;
+ unsigned long SctpOutUnorderChunks;
+ unsigned long SctpInCtrlChunks;
+ unsigned long SctpInOrderChunks;
+ unsigned long SctpInUnorderChunks;
+ unsigned long SctpFragUsrMsgs;
+ unsigned long SctpReasmUsrMsgs;
+ unsigned long SctpOutSCTPPacks;
+ unsigned long SctpInSCTPPacks;
+ unsigned long SctpRtoAlgorithm;
+ unsigned long SctpRtoMin;
+ unsigned long SctpRtoMax;
+ unsigned long SctpRtoInitial;
+ unsigned long SctpValCookieLife;
+ unsigned long SctpMaxInitRetr;
+ unsigned long __pad[0];
+};
-#define __SNMP_MIB_ALIGN__ ____cacheline_aligned
-
-/* IPstats */
-#define IPSTATS_MIB_MAX __IPSTATS_MIB_MAX
-struct ipstats_mib {
- unsigned long mibs[IPSTATS_MIB_MAX];
-} __SNMP_MIB_ALIGN__;
-
-/* ICMP */
-#define ICMP_MIB_DUMMY __ICMP_MIB_MAX
-#define ICMP_MIB_MAX (__ICMP_MIB_MAX + 1)
-
-struct icmp_mib {
- unsigned long mibs[ICMP_MIB_MAX];
-} __SNMP_MIB_ALIGN__;
-
-/* ICMP6 (IPv6-ICMP) */
-#define ICMP6_MIB_MAX __ICMP6_MIB_MAX
-struct icmpv6_mib {
- unsigned long mibs[ICMP6_MIB_MAX];
-} __SNMP_MIB_ALIGN__;
-
-/* TCP */
-#define TCP_MIB_MAX __TCP_MIB_MAX
-struct tcp_mib {
- unsigned long mibs[TCP_MIB_MAX];
-} __SNMP_MIB_ALIGN__;
-
-/* UDP */
-#define UDP_MIB_MAX __UDP_MIB_MAX
-struct udp_mib {
- unsigned long mibs[UDP_MIB_MAX];
-} __SNMP_MIB_ALIGN__;
-
-/* SCTP */
-#define SCTP_MIB_MAX __SCTP_MIB_MAX
-struct sctp_mib {
- unsigned long mibs[SCTP_MIB_MAX];
-} __SNMP_MIB_ALIGN__;
-
-/* Linux */
-#define LINUX_MIB_MAX __LINUX_MIB_MAX
-struct linux_mib {
- unsigned long mibs[LINUX_MIB_MAX];
+struct linux_mib
+{
+ unsigned long SyncookiesSent;
+ unsigned long SyncookiesRecv;
+ unsigned long SyncookiesFailed;
+ unsigned long EmbryonicRsts;
+ unsigned long PruneCalled;
+ unsigned long RcvPruned;
+ unsigned long OfoPruned;
+ unsigned long OutOfWindowIcmps;
+ unsigned long LockDroppedIcmps;
+ unsigned long ArpFilter;
+ unsigned long TimeWaited;
+ unsigned long TimeWaitRecycled;
+ unsigned long TimeWaitKilled;
+ unsigned long PAWSPassiveRejected;
+ unsigned long PAWSActiveRejected;
+ unsigned long PAWSEstabRejected;
+ unsigned long DelayedACKs;
+ unsigned long DelayedACKLocked;
+ unsigned long DelayedACKLost;
+ unsigned long ListenOverflows;
+ unsigned long ListenDrops;
+ unsigned long TCPPrequeued;
+ unsigned long TCPDirectCopyFromBacklog;
+ unsigned long TCPDirectCopyFromPrequeue;
+ unsigned long TCPPrequeueDropped;
+ unsigned long TCPHPHits;
+ unsigned long TCPHPHitsToUser;
+ unsigned long TCPPureAcks;
+ unsigned long TCPHPAcks;
+ unsigned long TCPRenoRecovery;
+ unsigned long TCPSackRecovery;
+ unsigned long TCPSACKReneging;
+ unsigned long TCPFACKReorder;
+ unsigned long TCPSACKReorder;
+ unsigned long TCPRenoReorder;
+ unsigned long TCPTSReorder;
+ unsigned long TCPFullUndo;
+ unsigned long TCPPartialUndo;
+ unsigned long TCPDSACKUndo;
+ unsigned long TCPLossUndo;
+ unsigned long TCPLoss;
+ unsigned long TCPLostRetransmit;
+ unsigned long TCPRenoFailures;
+ unsigned long TCPSackFailures;
+ unsigned long TCPLossFailures;
+ unsigned long TCPFastRetrans;
+ unsigned long TCPForwardRetrans;
+ unsigned long TCPSlowStartRetrans;
+ unsigned long TCPTimeouts;
+ unsigned long TCPRenoRecoveryFail;
+ unsigned long TCPSackRecoveryFail;
+ unsigned long TCPSchedulerFailed;
+ unsigned long TCPRcvCollapsed;
+ unsigned long TCPDSACKOldSent;
+ unsigned long TCPDSACKOfoSent;
+ unsigned long TCPDSACKRecv;
+ unsigned long TCPDSACKOfoRecv;
+ unsigned long TCPAbortOnSyn;
+ unsigned long TCPAbortOnData;
+ unsigned long TCPAbortOnClose;
+ unsigned long TCPAbortOnMemory;
+ unsigned long TCPAbortOnTimeout;
+ unsigned long TCPAbortOnLinger;
+ unsigned long TCPAbortFailed;
+ unsigned long TCPMemoryPressures;
+ unsigned long __pad[0];
};
/*
- * FIXME: On x86 and some other CPUs the split into user and softirq parts
- * is not needed because addl $1,memory is atomic against interrupts (but
- * atomic_inc would be overkill because of the lock cycles). Wants new
- * nonlocked_atomic_inc() primitives -AK
+ * FIXME: On x86 and some other CPUs the split into user and softirq parts is not needed because
+ * addl $1,memory is atomic against interrupts (but atomic_inc would be overkill because of the lock
+ * cycles). Wants new nonlocked_atomic_inc() primitives -AK
*/
#define DEFINE_SNMP_STAT(type, name) \
__typeof__(type) *name[2]
#define SNMP_STAT_USRPTR(name) (name[1])
#define SNMP_INC_STATS_BH(mib, field) \
- (per_cpu_ptr(mib[0], smp_processor_id())->mibs[field]++)
+ (per_cpu_ptr(mib[0], smp_processor_id())->field++)
#define SNMP_INC_STATS_OFFSET_BH(mib, field, offset) \
- (per_cpu_ptr(mib[0], smp_processor_id())->mibs[field + (offset)]++)
+ ((*((&per_cpu_ptr(mib[0], smp_processor_id())->field) + (offset)))++)
#define SNMP_INC_STATS_USER(mib, field) \
- (per_cpu_ptr(mib[1], smp_processor_id())->mibs[field]++)
+ (per_cpu_ptr(mib[1], smp_processor_id())->field++)
#define SNMP_INC_STATS(mib, field) \
- (per_cpu_ptr(mib[!in_softirq()], smp_processor_id())->mibs[field]++)
+ (per_cpu_ptr(mib[!in_softirq()], smp_processor_id())->field++)
#define SNMP_DEC_STATS(mib, field) \
- (per_cpu_ptr(mib[!in_softirq()], smp_processor_id())->mibs[field]--)
+ (per_cpu_ptr(mib[!in_softirq()], smp_processor_id())->field--)
#define SNMP_ADD_STATS_BH(mib, field, addend) \
- (per_cpu_ptr(mib[0], smp_processor_id())->mibs[field] += addend)
+ (per_cpu_ptr(mib[0], smp_processor_id())->field += addend)
#define SNMP_ADD_STATS_USER(mib, field, addend) \
- (per_cpu_ptr(mib[1], smp_processor_id())->mibs[field] += addend)
-
+ (per_cpu_ptr(mib[1], smp_processor_id())->field += addend)
+
#endif
*/
/* Define this to get the sk->sk_debug debugging facility. */
-//#define SOCK_DEBUGGING
+#define SOCK_DEBUGGING
#ifdef SOCK_DEBUGGING
#define SOCK_DEBUG(sk, msg...) do { if ((sk) && ((sk)->sk_debug)) \
printk(KERN_DEBUG msg); } while (0)
* @sk_timer - sock cleanup timer
* @sk_stamp - time stamp of last packet received
* @sk_socket - Identd and reporting IO signals
- * @sk_user_data - RPC and Tux layer private data
+ * @sk_user_data - RPC layer private data
* @sk_owner - module that owns this socket
* @sk_sndmsg_page - cached page for sendmsg
* @sk_sndmsg_off - cached offset for sendmsg
* @sk_data_ready - callback to indicate there is data to be processed
* @sk_write_space - callback to indicate there is bf sending space available
* @sk_error_report - callback to indicate errors (e.g. %MSG_ERRQUEUE)
- * @sk_create_child - callback to get new socket events
* @sk_backlog_rcv - callback to process the backlog
* @sk_destruct - called at sock freeing time, i.e. when all refcnt == 0
*/
void (*sk_error_report)(struct sock *sk);
int (*sk_backlog_rcv)(struct sock *sk,
struct sk_buff *skb);
- void (*sk_create_child)(struct sock *sk, struct sock *newsk);
void (*sk_destruct)(struct sock *sk);
};
{
return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
}
+
#endif
/*
extern void sk_stop_timer(struct sock *sk, struct timer_list* timer);
-extern struct proto_ops inet_stream_ops;
-
-extern int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
-
static inline int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
int err = 0;
int skb_len;
- /* Silently drop if VNET is active (if INET bind() has been
- * overridden) and the context is not entitled to read the
- * packet.
- */
- if (inet_stream_ops.bind != inet_bind &&
- (int) sk->sk_xid > 0 && sk->sk_xid != skb->xid) {
- err = -EPERM;
- goto out;
- }
-
/* Cast skb->rcvbuf to unsigned... It's pointless, but reduces
number of warnings when compiling with -W --ANK
*/
extern void tcp_push_one(struct sock *, unsigned mss_now);
extern void tcp_send_ack(struct sock *sk);
extern void tcp_send_delayed_ack(struct sock *sk);
-extern void cleanup_rbuf(struct sock *sk, int copied);
/* tcp_timer.c */
extern void tcp_init_xmit_timers(struct sock *);
/* Return 0, if packet can be sent now without violation Nagle's rules:
1. It is full sized.
2. Or it contains FIN.
- 3. Or higher layers meant to force a packet boundary, hence the PSH bit.
- 4. Or TCP_NODELAY was set.
- 5. Or TCP_CORK is not set, and all sent packets are ACKed.
+ 3. Or TCP_NODELAY was set.
+ 4. Or TCP_CORK is not set, and all sent packets are ACKed.
With Minshall's modification: all sent small packets are ACKed.
*/
while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
sk->sk_backlog_rcv(sk, skb1);
- NET_INC_STATS_BH(LINUX_MIB_TCPPREQUEUEDROPPED);
+ NET_INC_STATS_BH(TCPPrequeueDropped);
}
tp->ucopy.memory = 0;
switch (state) {
case TCP_ESTABLISHED:
if (oldstate != TCP_ESTABLISHED)
- TCP_INC_STATS(TCP_MIB_CURRESTAB);
+ TCP_INC_STATS(TcpCurrEstab);
break;
case TCP_CLOSE:
if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
- TCP_INC_STATS(TCP_MIB_ESTABRESETS);
+ TCP_INC_STATS(TcpEstabResets);
sk->sk_prot->unhash(sk);
if (tcp_sk(sk)->bind_hash &&
/* fall through */
default:
if (oldstate==TCP_ESTABLISHED)
- TCP_DEC_STATS(TCP_MIB_CURRESTAB);
+ TCP_DEC_STATS(TcpCurrEstab);
}
/* Change state AFTER socket is unhashed to avoid closed
static inline void tcp_mib_init(void)
{
/* See RFC 2012 */
- TCP_ADD_STATS_USER(TCP_MIB_RTOALGORITHM, 1);
- TCP_ADD_STATS_USER(TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
- TCP_ADD_STATS_USER(TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
- TCP_ADD_STATS_USER(TCP_MIB_MAXCONN, -1);
+ TCP_ADD_STATS_USER(TcpRtoAlgorithm, 1);
+ TCP_ADD_STATS_USER(TcpRtoMin, TCP_RTO_MIN*1000/HZ);
+ TCP_ADD_STATS_USER(TcpRtoMax, TCP_RTO_MAX*1000/HZ);
+ TCP_ADD_STATS_USER(TcpMaxConn, -1);
}
/* /proc */
+++ /dev/null
-#ifndef _NET_TUX_H
-#define _NET_TUX_H
-
-/*
- * TUX - Integrated Application Protocols Layer and Object Cache
- *
- * Copyright (C) 2000, 2001, Ingo Molnar <mingo@redhat.com>
- *
- * tux.h: main structure definitions and function prototypes
- */
-
-#define __KERNEL_SYSCALLS__
-
-#include <linux/mm.h>
-#include <linux/net.h>
-#include <linux/wait.h>
-#include <linux/namei.h>
-#include <linux/file.h>
-#include <linux/mman.h>
-#include <linux/swap.h>
-#include <linux/ctype.h>
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/unistd.h>
-#include <linux/sysctl.h>
-#include <linux/proc_fs.h>
-#include <linux/pagemap.h>
-#include <linux/vmalloc.h>
-#include <linux/utsname.h>
-#include <linux/smp_lock.h>
-#include <linux/kernel_stat.h>
-#include <linux/kernel_stat.h>
-#include <linux/time.h>
-#include <asm/div64.h>
-#include <asm/unaligned.h>
-#include <linux/compiler.h>
-#include <linux/mount.h>
-#include <linux/zlib.h>
-
-#include <net/tcp.h>
-#include <net/tux_u.h>
-
-/* Maximum number of threads: */
-#define CONFIG_TUX_NUMTHREADS 8
-
-/* Number of cachemiss/IO threads: */
-#define NR_IO_THREADS 32
-
-/* Maximum number of listen sockets per thread: */
-#define CONFIG_TUX_NUMSOCKETS 16
-
-extern spinlock_t tux_module_lock;
-extern struct module *tux_module;
-extern asmlinkage long (*sys_tux_ptr) (unsigned int action, user_req_t *u_info);
-
-#undef Dprintk
-
-extern int tux_TDprintk;
-extern int tux_Dprintk;
-
-#define TUX_DEBUG CONFIG_TUX_DEBUG
-#if CONFIG_TUX_DEBUG
-# define TUX_BUG() BUG()
-
-# define TUX_DPRINTK 1
-# define TDprintk(x...) do { if (tux_TDprintk) { printk("<%ld:%s:%d>: ", jiffies, __FILE__, __LINE__); printk(x); } } while (0)
-# define Dprintk(x...) do { if (tux_Dprintk == 1) TDprintk(x); } while (0)
-#else
-# define TUX_DPRINTK 0
-# define Dprintk(x...) do { } while (0)
-# define TDprintk(x...) do { } while (0)
-//# define TUX_BUG() BUG()
-# define TUX_BUG() do { } while (0)
-#endif
-
-#if 1
-# define INC_STAT(x) do { } while (0)
-# define DEC_STAT(x) do { } while (0)
-# define ADD_STAT(x,y) do { } while (0)
-# define SUB_STAT(x,y) do { } while (0)
-#else
-# define INC_STAT(x) atomic_inc((atomic_t *)&kstat.x)
-# define DEC_STAT(x) atomic_dec((atomic_t *)&kstat.x)
-# define ADD_STAT(y,x) atomic_add(y,(atomic_t *)&kstat.x)
-# define SUB_STAT(y,x) atomic_sub(y,(atomic_t *)&kstat.x)
-#endif
-
-// lru needs this:
-
-# define DEBUG_DEL_LIST(x...) do { INIT_LIST_HEAD((x)); } while (0)
-
-
-#define LOG_LEN (8*1024*1024UL)
-
-struct tux_req_struct;
-typedef struct tux_req_struct tux_req_t;
-typedef struct tux_threadinfo threadinfo_t;
-
-extern struct address_space_operations url_aops;
-
-typedef struct tcapi_template_s {
- char *vfs_name;
- struct list_head modules;
- int (*query) (tux_req_t *req);
- struct module *mod;
- unsigned int userspace_id;
-} tcapi_template_t;
-
-typedef struct mimetype_s {
- struct list_head list;
-
- char *ext;
- unsigned int ext_len;
- char *type;
- unsigned int type_len;
- char *expire_str;
- unsigned int expire_str_len;
-
- unsigned int special;
-} mimetype_t;
-
-typedef struct tux_attribute_s {
- mimetype_t *mime;
- tcapi_template_t *tcapi;
-} tux_attribute_t;
-
-#define MAX_TUX_ATOMS 8
-
-typedef void (atom_func_t)(tux_req_t *req, int cachemiss);
-
-typedef struct tux_proto_s
-{
- unsigned int defer_accept;
- unsigned int can_redirect;
- void (*got_request) (tux_req_t *req);
- int (*parse_message) (tux_req_t *req, const int total_len);
- atom_func_t *illegal_request;
- atom_func_t *request_timeout;
- void (*pre_log) (tux_req_t *req);
- int (*check_req_err) (tux_req_t *req, int cachemiss);
- char * (*print_dir_line) (tux_req_t *req, char *tmp, char *d_name, int d_len, int d_type, struct dentry *dentry, struct inode *inode);
- const char *name;
- struct nameidata main_docroot;
-} tux_proto_t;
-
-typedef struct tux_socket_s {
- tux_proto_t *proto;
- unsigned int ip;
- unsigned short port;
- struct proc_dir_entry *entry;
-} tux_socket_t;
-
-extern tux_socket_t tux_listen [CONFIG_TUX_NUMTHREADS][CONFIG_TUX_NUMSOCKETS];
-
-
-typedef struct abuf_s {
- struct page *page;
- char *buf;
- unsigned int size;
- unsigned int max_len;
- unsigned int offset;
- unsigned int left;
- unsigned long flags;
-} abuf_t;
-
-struct linux_dirent64 {
- u64 d_ino;
- s64 d_off;
- unsigned short d_reclen;
- unsigned char d_type;
- char d_name[0];
-};
-
-struct getdents_callback64 {
- struct linux_dirent64 * current_dir;
- struct linux_dirent64 * previous;
- int count;
- int error;
-};
-
-#define TUX_MAGIC 0x12457801
-
-#define MAX_TUX_ATOMS 8
-
-struct tux_req_struct
-{
- tux_proto_t *proto;
-
- int atom_idx;
- atom_func_t *atoms [MAX_TUX_ATOMS];
- struct list_head work;
-
- struct list_head all;
- struct list_head free;
- struct list_head lru;
-
- unsigned long idle_input;
- unsigned long wait_output_space;
-
- struct socket *sock;
- struct dentry *dentry;
- struct vfsmount *mnt;
- struct dentry *docroot_dentry;
- struct vfsmount *docroot_mnt;
- struct dentry *cwd_dentry;
- struct vfsmount *cwd_mnt;
-
- struct file in_file;
- int fd;
- read_descriptor_t desc;
- u32 client_addr;
- u32 client_port;
- unsigned int virtual;
-
- loff_t total_file_len;
- unsigned int lendigits;
- loff_t offset_start;
- loff_t offset_end;
- loff_t output_len;
-
- loff_t ftp_offset_start;
-
- time_t mtime;
- unsigned int etaglen;
- char etag [40];
-
- char usermode;
- unsigned int usermodule_idx;
- struct dentry *module_dentry;
- struct vfsmount *module_mnt;
- char *userbuf;
- unsigned int userlen;
-
- tux_attribute_t *attr;
-
- threadinfo_t *ti;
- wait_queue_t sleep;
- wait_queue_t ftp_sleep;
-
- abuf_t abuf;
- /*
- * Parsed request fields. In-line strings are zero-delimited.
- */
- const char *headers;
- unsigned int headers_len;
-
- unsigned int parsed_len;
-
- // FTP part
- ftp_command_t ftp_command;
- u32 ftp_user_addr;
- u16 ftp_user_port;
-
- struct socket *data_sock;
- unsigned int prev_pos;
-
- // ls handing:
- struct linux_dirent64 *dirp0;
- unsigned int curroff, total;
-
-#define MAX_USERNAME_LEN 16
- char username[MAX_USERNAME_LEN];
- unsigned int username_len;
-
- // HTTP part
- http_method_t method;
- const char *method_str;
- unsigned int method_len;
-
- http_version_t version;
- const char *version_str;
- unsigned int version_len;
-
- /* requested URI: */
-
- const char *uri_str;
- unsigned int uri_len;
-
- /* Objectname (filename/scriptname) this URI refers to: */
-
-#define MAX_OBJECTNAME_LEN 256
- char objectname[MAX_OBJECTNAME_LEN + 4]; // space for .gz as well
- unsigned int objectname_len;
-
- /* Query string within the URI: */
-
- const char *query_str;
- unsigned int query_len;
-
- /* Cookies: */
-
- const char *cookies_str;
- unsigned int cookies_len;
- unsigned int parse_cookies;
-
- /* Content-TYpe */
- const char *content_type_str;
- unsigned int content_type_len;
-
- /* Content-Length: */
-
- const char *contentlen_str;
- unsigned int contentlen_len;
- unsigned int content_len;
-
- /* User-Agent: */
-
- const char *user_agent_str;
- unsigned int user_agent_len;
-
- /* Accept: */
-
- const char *accept_str;
- unsigned int accept_len;
-
- /* Accept-Charset: */
-
- const char *accept_charset_str;
- unsigned int accept_charset_len;
-
- /* Accept-Language: */
-
- const char *accept_language_str;
- unsigned int accept_language_len;
-
- /* Cache-Control: */
-
- const char *cache_control_str;
- unsigned int cache_control_len;
-
- /* If-Modified-Since: */
-
- const char *if_modified_since_str;
- unsigned int if_modified_since_len;
-
- /* If-None-Match: */
- const char *if_none_match_str;
- unsigned int if_none_match_len;
-
- /* If-Range: */
-
- const char *if_range_str;
- unsigned int if_range_len;
-
- /* Negotiate: */
-
- const char *negotiate_str;
- unsigned int negotiate_len;
-
- /* Pragma: */
-
- const char *pragma_str;
- unsigned int pragma_len;
-
- /* Referer: */
-
- const char *referer_str;
- unsigned int referer_len;
-
- /* Accept-Encoding: */
-
- const char *accept_encoding_str;
- unsigned int accept_encoding_len;
- unsigned int may_send_gzip;
- unsigned int content_gzipped;
-
- /* Host */
-
-#define MAX_HOST_LEN 128
- char host[MAX_HOST_LEN];
- unsigned int host_len;
-
- /* POSTed data: */
-
- const char *post_data_str;
- unsigned int post_data_len;
-
- unsigned int status;
-
- /* the file being sent */
-
- unsigned int bytes_sent;
-#if CONFIG_TUX_DEBUG
- unsigned int bytes_expected;
-#endif
- unsigned long first_timestamp;
- unsigned int body_len;
-
- unsigned int user_error;
-
- char error;
- char postponed;
-
- char had_cachemiss;
- char lookup_dir;
- char lookup_404;
-
- char keep_alive;
- struct timer_list keepalive_timer;
- unsigned int total_bytes;
- struct timer_list output_timer;
-
- unsigned int nr_keepalives;
-
- unsigned int event;
- u64 private;
-
- unsigned int magic;
- void (*real_data_ready)(struct sock *sk, int space);
- void (*real_state_change)(struct sock *sk);
- void (*real_write_space)(struct sock *sk);
- void (*real_error_report)(struct sock *sk);
- void (*real_destruct)(struct sock *sk);
-
- void (*ftp_real_data_ready)(struct sock *sk, int space);
- void (*ftp_real_state_change)(struct sock *sk);
- void (*ftp_real_write_space)(struct sock *sk);
- void (*ftp_real_error_report)(struct sock *sk);
- void (*ftp_real_create_child)(struct sock *sk, struct sock *newsk);
- void (*ftp_real_destruct)(struct sock *sk);
-
-#if CONFIG_TUX_EXTENDED_LOG
- unsigned long accept_timestamp;
- unsigned long parse_timestamp;
- unsigned long output_timestamp;
- unsigned long flush_timestamp;
-# define SET_TIMESTAMP(x) do { (x) = jiffies; } while (0)
-#else
-# define SET_TIMESTAMP(x) do { } while (0)
-#endif
-
-};
-
-extern void add_tux_atom (tux_req_t *req, atom_func_t *event_done);
-extern void del_tux_atom (tux_req_t *req);
-extern void tux_schedule_atom (tux_req_t *req, int cachemiss);
-extern void add_req_to_workqueue (tux_req_t *req);
-
-
-typedef struct iothread_s
-{
- spinlock_t async_lock;
- threadinfo_t *ti;
- struct list_head async_queue;
- wait_queue_head_t async_sleep;
- unsigned int nr_async_pending;
- unsigned int threads;
- unsigned int shutdown;
- wait_queue_head_t wait_shutdown;
-} iothread_t;
-
-typedef struct tux_listen_s
-{
- tux_proto_t *proto;
- struct socket *sock;
- unsigned int cloned;
-} tux_listen_t;
-
-struct tux_threadinfo
-{
- tux_req_t *userspace_req;
- unsigned int started;
- struct task_struct *thread;
- iothread_t *iot;
- wait_queue_t wait_event [CONFIG_TUX_NUMSOCKETS];
- wait_queue_t stop;
- unsigned int pid;
-
- struct page *header_cache;
- unsigned int header_offset;
-
- unsigned int nr_requests;
- struct list_head all_requests;
-
- unsigned int nr_free_requests;
- spinlock_t free_requests_lock;
- struct list_head free_requests;
-
- spinlock_t work_lock;
- struct list_head work_pending;
- struct list_head lru;
- unsigned int nr_lru;
-
- unsigned int listen_error;
- tux_listen_t listen[CONFIG_TUX_NUMSOCKETS];
-
- struct semaphore gzip_sem;
- z_stream gzip_state;
-
- unsigned int cpu;
- unsigned int __padding[16];
-};
-
-typedef enum special_mimetypes {
- NORMAL_MIME_TYPE,
- MIME_TYPE_REDIRECT,
- MIME_TYPE_CGI,
- MIME_TYPE_MODULE,
-} special_mimetypes_t;
-
-#if CONFIG_TUX_DEBUG
-#if 0
-extern inline void url_hist_hit (int size)
-{
- unsigned int idx = size/1024;
-
- if (idx >= URL_HIST_SIZE)
- idx = URL_HIST_SIZE-1;
- kstat.url_hist_hits[idx]++;
-}
-extern inline void url_hist_miss (int size)
-{
- unsigned int idx = size/1024;
-
- if (idx >= URL_HIST_SIZE)
- idx = URL_HIST_SIZE-1;
- kstat.url_hist_misses[idx]++;
-}
-#endif
-extern void __check_req_list (tux_req_t *req, struct list_head *list);
-# define check_req_list __check_req_list
-#else
-# define check_req_list(req, list) do { } while (0)
-#endif
-
-#define url_hist_hit(size) do { } while (0)
-#define url_hist_miss(size) do { } while (0)
-
-extern char tux_common_docroot[200];
-extern char tux_http_subdocroot[200];
-extern char tux_ftp_subdocroot[200];
-extern char tux_logfile[200];
-extern char tux_cgiroot[200];
-extern char tux_404_page[200];
-extern char tux_default_vhost[200];
-extern char tux_extra_html_header[600];
-extern unsigned int tux_extra_html_header_size;
-extern int tux_cgi_uid;
-extern int tux_cgi_gid;
-extern unsigned int tux_clientport;
-extern unsigned int tux_logging;
-extern unsigned int tux_threads;
-extern unsigned int tux_keepalive_timeout;
-extern unsigned int tux_max_output_bandwidth;
-extern unsigned int tux_max_backlog;
-extern unsigned int tux_max_connect;
-extern unsigned int tux_mode_forbidden;
-extern unsigned int tux_mode_allowed;
-extern unsigned int tux_logentry_align_order;
-extern unsigned int tux_nonagle;
-extern unsigned int tux_ack_pingpong;
-extern unsigned int tux_push_all;
-extern unsigned int tux_zerocopy_parse;
-extern unsigned int tux_generate_etags;
-extern unsigned int tux_generate_last_mod;
-extern unsigned int tux_generate_cache_control;
-extern unsigned int tux_ip_logging;
-extern unsigned int tux_ftp_wait_close;
-extern unsigned int tux_ftp_log_retr_only;
-extern unsigned int tux_hide_unreadable;
-
-typedef enum virtual_server {
- TUX_VHOST_NONE,
- TUX_VHOST_HOST,
- TUX_VHOST_IP,
- TUX_VHOST_IP_HOST,
-} virtual_server_t;
-
-extern unsigned int tux_virtual_server;
-extern unsigned int mass_hosting_hash;
-extern unsigned int strip_host_tail;
-extern unsigned int tux_ftp_virtual_server;
-
-extern unsigned int tux_max_object_size;
-extern unsigned int tux_max_free_requests;
-extern unsigned int tux_defer_accept;
-
-extern struct socket * start_listening(tux_socket_t *listen, int nr);
-extern void stop_listening(struct socket **sock);
-extern void start_sysctl(void);
-extern void end_sysctl(void);
-extern void flush_request (tux_req_t *req, int cachemiss);
-extern void unlink_tux_socket (tux_req_t *req);
-extern void unlink_tux_data_socket (tux_req_t *req);
-extern void unlink_tux_listen_socket (tux_req_t *req);
-extern void link_tux_ftp_accept_socket (tux_req_t *req, struct socket *sock);
-extern void link_tux_data_socket (tux_req_t *req, struct socket *sock);
-extern void tux_push_req (tux_req_t *req);
-extern int send_sync_buf (tux_req_t *req, struct socket *sock, const char *buf, const size_t length, unsigned long flags);
-extern void __send_async_message (tux_req_t *req, const char *message, int status, unsigned int size, int push);
-#define send_async_message(req,str,status,push) \
- __send_async_message(req,str,status,strlen(str),push)
-
-extern void send_success (tux_req_t *req, struct socket *sock);
-extern void send_async_err_not_found (tux_req_t *req);
-extern void send_async_timed_out (tux_req_t *req);
-
-extern void kfree_req (tux_req_t *req);
-extern int accept_requests (threadinfo_t *ti);
-extern int process_requests (threadinfo_t *ti, tux_req_t **user_req);
-extern int flush_freequeue (threadinfo_t * ti);
-extern int tux_flush_workqueue (threadinfo_t *ti);
-extern tux_req_t * pick_userspace_req (threadinfo_t *ti);
-extern atom_func_t redirect_request;
-extern atom_func_t parse_request;
-extern void queue_cachemiss (tux_req_t *req);
-extern int start_cachemiss_threads (threadinfo_t *ti);
-extern void stop_cachemiss_threads (threadinfo_t *ti);
-struct file * tux_open_file(char *filename, int mode);
-extern void start_log_thread (void);
-extern void stop_log_thread (void);
-extern void add_mimetype (char *new_ext, char *new_type, char *new_expire);
-extern void free_mimetypes (void);
-extern int lookup_object (tux_req_t *req, const unsigned int flag);
-extern int handle_gzip_req (tux_req_t *req, unsigned int flags);
-extern struct dentry * tux_lookup (tux_req_t *req, const char *filename, const unsigned int flag, struct vfsmount **mnt);
-extern tcapi_template_t * lookup_tuxmodule (const char *filename);
-extern int register_tuxmodule (tcapi_template_t *tcapi);
-extern tcapi_template_t * unregister_tuxmodule (char *vfs_name);
-extern tcapi_template_t * get_first_usermodule (void);
-extern int user_register_module (user_req_t *u_info);
-extern int user_unregister_module (user_req_t *u_info);
-extern void unregister_all_tuxmodules (void);
-
-typedef struct exec_param_s {
- char *command;
- char **argv;
- char **envp;
- unsigned int pipe_fds;
-} exec_param_t;
-
-extern pid_t tux_exec_process (char *command, char **argv, char **envp, int pipe_fds, exec_param_t *param, int wait);
-
-extern void start_external_cgi (tux_req_t *req);
-extern tcapi_template_t extcgi_tcapi;
-
-extern void queue_output_req (tux_req_t *req, threadinfo_t *ti);
-extern void queue_userspace_req (tux_req_t *req, threadinfo_t *ti);
-
-
-extern void __log_request (tux_req_t *req);
-extern inline void log_request (tux_req_t *req)
-{
- if (tux_logging)
- __log_request(req);
-}
-
-extern int __connection_too_fast (tux_req_t *req);
-
-#define connection_too_fast(req) \
- ({ \
- int __ret = 1; \
- if (unlikely(tux_max_output_bandwidth)) \
- __ret = __connection_too_fast(req); \
- __ret; \
- })
-
-extern void trunc_headers (tux_req_t *req);
-extern int generic_send_file (tux_req_t *req, struct socket *sock, int cachemiss);
-extern int tux_fetch_file (tux_req_t *req, int nonblock);
-
-extern void postpone_request (tux_req_t *req);
-extern int continue_request (int fd);
-extern void tux_push_pending (struct sock *sk);
-extern void zap_request (tux_req_t *req, int cachemiss);
-extern int add_output_space_event (tux_req_t *req, struct socket *sock);
-
-extern void reap_kids (void);
-extern void unuse_frag (struct sk_buff *skb, skb_frag_t *frag);
-extern skb_frag_t * build_dynbuf_frag (tux_req_t *req, unsigned int size);
-extern int tux_permission (struct inode *inode);
-extern void flush_all_signals (void);
-
-#define D() Dprintk("{%s:%d}\n", __FILE__, __LINE__)
-
-extern int nr_async_io_pending (void);
-
-extern void __add_keepalive_timer (tux_req_t *req);
-#define add_keepalive_timer(req) \
-do { \
- if (tux_keepalive_timeout) { \
- Dprintk("add_keepalive_timer(%p).\n", (req)); \
- __add_keepalive_timer(req); \
- } \
-} while (0)
-extern void __del_keepalive_timer (tux_req_t *req);
-#define del_keepalive_timer(req) \
-do { \
- if (tux_keepalive_timeout) { \
- Dprintk("del_keepalive_timer(%p).\n", (req)); \
- __del_keepalive_timer(req); \
- } \
-} while (0)
-
-extern void del_output_timer (tux_req_t *req);
-extern void output_timeout (tux_req_t *req);
-
-extern void print_req (tux_req_t *req);
-
-extern char tux_date [DATE_LEN];
-
-
-extern int nr_async_io_pending (void);
-extern void tux_exit (void);
-extern char * get_abuf (tux_req_t *req, unsigned int max_size);
-extern void send_abuf (tux_req_t *req, unsigned int size, unsigned long flags);
-
-
-extern int idle_event (tux_req_t *req);
-extern int output_space_event (tux_req_t *req);
-extern unsigned int log_cpu_mask;
-extern unsigned int tux_compression;
-extern unsigned int tux_noid;
-extern unsigned int tux_cgi_inherit_cpu;
-extern unsigned int tux_zerocopy_header;
-extern unsigned int tux_zerocopy_sendfile;
-extern unsigned int tux_cgi_cpu_mask;
-extern tux_proto_t tux_proto_http;
-extern tux_proto_t tux_proto_ftp;
-extern unsigned int tux_all_userspace;
-extern unsigned int tux_ignore_query;
-extern unsigned int tux_redirect_logging;
-extern unsigned int tux_referer_logging;
-extern unsigned int tux_log_incomplete;
-extern unsigned int tux_max_header_len;
-extern unsigned int tux_cpu_offset;
-extern unsigned int tux_ftp_login_message;
-
-extern void drop_permissions (void);
-extern int query_extcgi (tux_req_t *req);
-extern int tux_chroot (char *dir);
-
-extern void install_req_dentry (tux_req_t *req, struct dentry *dentry, struct vfsmount *mnt);
-extern void release_req_dentry (tux_req_t *req);
-extern void unidle_req (tux_req_t *req);
-extern int nr_requests_used (void);
-
-#define req_err(req) do { (req)->error = 1; Dprintk("request %p error at %s:%d.\n", req, __FILE__, __LINE__); } while (0)
-
-#define enough_wspace(sk) (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
-#define clear_keepalive(req) do { (req)->keep_alive = 0; Dprintk("keepalive cleared for req %p.\n", req); } while (0)
-
-extern int print_all_requests (threadinfo_t *ti);
-extern unsigned int tux_max_keepalives;
-extern int time_unix2ls (time_t zulu, char *buf);
-extern void last_mod_time(char * curr, const time_t t);
-extern int mdtm_time(char * curr, const time_t t);
-extern time_t parse_time(const char *str, const int str_len);
-
-extern unsigned int nr_tux_threads;
-extern threadinfo_t threadinfo[CONFIG_TUX_NUMTHREADS];
-
-#define switch_docroot(req) do { if (((req)->docroot_dentry != current->fs->root) || ((req)->docroot_mnt != current->fs->rootmnt)) __switch_docroot(req); } while (0)
-extern void __switch_docroot(tux_req_t *req);
-extern void list_directory (tux_req_t *req, int cachemiss);
-extern char * tux_print_path (tux_req_t *req, struct dentry *dentry, struct vfsmount *mnt, char *buf, unsigned int max_len);
-
-extern unsigned int tux_http_dir_indexing;
-
-int tux_gzip_compress (tux_req_t *req, unsigned char *data_in, unsigned char *data_out, __u32 *in_len, __u32 *out_len);
-
-struct dentry * __tux_lookup (tux_req_t *req, const char *filename,
- struct nameidata *base, struct vfsmount **mnt);
-
-/* error codes for req->error */
-#define TUX_ERROR_REDIRECT 1
-#define TUX_ERROR_UNUSED 2
-#define TUX_ERROR_CONN_CLOSE 3
-#define TUX_ERROR_CONN_TIMEOUT 4
-
-extern void __put_data_sock (tux_req_t *req);
-
-static inline void put_data_sock (tux_req_t *req)
-{
- if (req->data_sock)
- __put_data_sock(req);
-}
-
-#define socket_input(sock) \
- (!skb_queue_empty(&(sock)->sk->sk_receive_queue) || \
- !skb_queue_empty(&(sock)->sk->sk_error_queue))
-
-#define tux_kmalloc(size) \
-({ \
- void *__ptr; \
- \
- while (!(__ptr = kmalloc(size, GFP_KERNEL))) { \
- if (net_ratelimit()) \
- printk(KERN_WARNING "tux: OOM at %s:%d (%d bytes).\n", \
- __FILE__, __LINE__, size); \
- current->state = TASK_UNINTERRUPTIBLE; \
- schedule_timeout(1); \
- } \
- __ptr; \
-})
-
-extern long tux_close(unsigned int fd);
-
-#endif
+++ /dev/null
-#ifndef _NET_TUX_U_H
-#define _NET_TUX_U_H
-
-/*
- * TUX - Integrated Application Protocols Layer and Object Cache
- *
- * Copyright (C) 2000, 2001, Ingo Molnar <mingo@redhat.com>
- *
- * tux_u.h: HTTP module API - HTTP interface to user-space
- */
-
-/*
- * Different major versions are not compatible.
- * Different minor versions are only downward compatible.
- * Different patchlevel versions are downward and upward compatible.
- */
-#define TUX_MAJOR_VERSION 3
-#define TUX_MINOR_VERSION 0
-#define TUX_PATCHLEVEL_VERSION 0
-
-#define __KERNEL_SYSCALLS__
-
-typedef enum http_versions {
- HTTP_1_0,
- HTTP_1_1
-} http_version_t;
-
-/*
- * Request methods known to HTTP:
- */
-typedef enum http_methods {
- METHOD_NONE,
- METHOD_GET,
- METHOD_HEAD,
- METHOD_POST,
- METHOD_PUT,
- NR_METHODS
-} http_method_t;
-
-enum user_req {
- TUX_ACTION_STARTUP = 1,
- TUX_ACTION_SHUTDOWN = 2,
- TUX_ACTION_STARTTHREAD = 3,
- TUX_ACTION_STOPTHREAD = 4,
- TUX_ACTION_EVENTLOOP = 5,
- TUX_ACTION_GET_OBJECT = 6,
- TUX_ACTION_SEND_OBJECT = 7,
- TUX_ACTION_READ_OBJECT = 8,
- TUX_ACTION_FINISH_REQ = 9,
- TUX_ACTION_FINISH_CLOSE_REQ = 10,
- TUX_ACTION_REGISTER_MODULE = 11,
- TUX_ACTION_UNREGISTER_MODULE = 12,
- TUX_ACTION_CURRENT_DATE = 13,
- TUX_ACTION_REGISTER_MIMETYPE = 14,
- TUX_ACTION_READ_HEADERS = 15,
- TUX_ACTION_POSTPONE_REQ = 16,
- TUX_ACTION_CONTINUE_REQ = 17,
- TUX_ACTION_REDIRECT_REQ = 18,
- TUX_ACTION_READ_POST_DATA = 19,
- TUX_ACTION_SEND_BUFFER = 20,
- TUX_ACTION_WATCH_PROXY_SOCKET = 21,
- TUX_ACTION_WAIT_PROXY_SOCKET = 22,
- TUX_ACTION_QUERY_VERSION = 23,
- MAX_TUX_ACTION
-};
-
-enum tux_ret {
- TUX_ERROR = -1,
- TUX_RETURN_USERSPACE_REQUEST = 0,
- TUX_RETURN_EXIT = 1,
- TUX_RETURN_SIGNAL = 2,
- TUX_CONTINUE_EVENTLOOP = 3,
-};
-
-#define MAX_URI_LEN 256
-#define MAX_COOKIE_LEN 128
-#define MAX_FIELD_LEN 64
-#define DATE_LEN 30
-
-typedef struct user_req_s {
- u32 version_major;
- u32 version_minor;
- u32 version_patch;
- u32 http_version;
- u32 http_method;
- u32 http_status;
-
- u32 sock;
- u32 event;
- u32 error;
- u32 thread_nr;
- u32 bytes_sent;
- u32 client_host;
- u32 objectlen;
- u32 module_index;
- u32 keep_alive;
- u32 cookies_len;
-
- u64 id;
- u64 priv;
- u64 object_addr;
-
- u8 query[MAX_URI_LEN];
- u8 objectname[MAX_URI_LEN];
- u8 cookies[MAX_COOKIE_LEN];
- u8 content_type[MAX_FIELD_LEN];
- u8 user_agent[MAX_FIELD_LEN];
- u8 accept[MAX_FIELD_LEN];
- u8 accept_charset[MAX_FIELD_LEN];
- u8 accept_encoding[MAX_FIELD_LEN];
- u8 accept_language[MAX_FIELD_LEN];
- u8 cache_control[MAX_FIELD_LEN];
- u8 if_modified_since[MAX_FIELD_LEN];
- u8 negotiate[MAX_FIELD_LEN];
- u8 pragma[MAX_FIELD_LEN];
- u8 referer[MAX_FIELD_LEN];
- u8 new_date[DATE_LEN];
- u8 pad[2];
-
-} user_req_t;
-
-typedef enum ftp_commands {
- FTP_COMM_NONE,
- FTP_COMM_USER,
- FTP_COMM_PASS,
- FTP_COMM_ACCT,
- FTP_COMM_CWD,
- FTP_COMM_CDUP,
- FTP_COMM_SMNT,
- FTP_COMM_QUIT,
- FTP_COMM_REIN,
- FTP_COMM_PORT,
- FTP_COMM_PASV,
- FTP_COMM_TYPE,
- FTP_COMM_STRU,
- FTP_COMM_MODE,
- FTP_COMM_RETR,
- FTP_COMM_SIZE,
- FTP_COMM_MDTM,
- FTP_COMM_STOR,
- FTP_COMM_STOU,
- FTP_COMM_APPE,
- FTP_COMM_ALLO,
- FTP_COMM_REST,
- FTP_COMM_RNFR,
- FTP_COMM_RNTO,
- FTP_COMM_ABOR,
- FTP_COMM_DELE,
- FTP_COMM_RMD,
- FTP_COMM_MKD,
- FTP_COMM_PWD,
- FTP_COMM_LIST,
- FTP_COMM_NLST,
- FTP_COMM_SITE,
- FTP_COMM_SYST,
- FTP_COMM_STAT,
- FTP_COMM_HELP,
- FTP_COMM_NOOP,
- FTP_COMM_FEAT,
- FTP_COMM_CLNT,
-} ftp_command_t;
-
-#endif
extern void udp_err(struct sk_buff *, u32);
+extern int udp_connect(struct sock *sk,
+ struct sockaddr *usin, int addr_len);
extern int udp_sendmsg(struct kiocb *iocb, struct sock *sk,
struct msghdr *msg, size_t len);
extern int xfrm4_output(struct sk_buff **pskb);
extern int xfrm4_tunnel_register(struct xfrm_tunnel *handler);
extern int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler);
+extern int xfrm4_tunnel_check_size(struct sk_buff *skb);
extern int xfrm6_rcv(struct sk_buff **pskb, unsigned int *nhoffp);
extern int xfrm6_tunnel_register(struct xfrm6_tunnel *handler);
extern int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler);
+extern int xfrm6_tunnel_check_size(struct sk_buff *skb);
extern u32 xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr);
extern void xfrm6_tunnel_free_spi(xfrm_address_t *saddr);
extern u32 xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr);
-extern int xfrm6_output(struct sk_buff **pskb);
#ifdef CONFIG_XFRM
extern int xfrm4_rcv_encap(struct sk_buff *skb, __u16 encap_type);
*/
struct ccs_modesel_head {
- __u8 _r1; /* reserved */
- __u8 medium; /* device-specific medium type */
- __u8 _r2; /* reserved */
- __u8 block_desc_length; /* block descriptor length */
- __u8 density; /* device-specific density code */
- __u8 number_blocks_hi; /* number of blocks in this block desc */
- __u8 number_blocks_med;
- __u8 number_blocks_lo;
- __u8 _r3;
- __u8 block_length_hi; /* block length for blocks in this desc */
- __u8 block_length_med;
- __u8 block_length_lo;
+ u8 _r1; /* reserved */
+ u8 medium; /* device-specific medium type */
+ u8 _r2; /* reserved */
+ u8 block_desc_length; /* block descriptor length */
+ u8 density; /* device-specific density code */
+ u8 number_blocks_hi; /* number of blocks in this block desc */
+ u8 number_blocks_med;
+ u8 number_blocks_lo;
+ u8 _r3;
+ u8 block_length_hi; /* block length for blocks in this desc */
+ u8 block_length_med;
+ u8 block_length_lo;
};
/*
* ScsiLun: 8 byte LUN.
*/
struct scsi_lun {
- __u8 scsi_lun[8];
+ u8 scsi_lun[8];
};
/*
container_of(d, struct Scsi_Host, shost_classdev)
extern struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *, int);
-extern int __must_check scsi_add_host(struct Scsi_Host *, struct device *);
+extern int scsi_add_host(struct Scsi_Host *, struct device *);
extern void scsi_scan_host(struct Scsi_Host *);
extern void scsi_remove_host(struct Scsi_Host *);
extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *);
#ifndef _SCSI_GENERIC_H
#define _SCSI_GENERIC_H
-#include <linux/compiler.h>
-
/*
History:
Started: Aug 9 by Lawrence Foard (entropy@world.std.com), to allow user
http://www.torque.net/sg/p/scsi-generic_long.txt
A version of this document (potentially out of date) may also be found in
the kernel source tree, probably at:
- Documentation/scsi/scsi-generic.txt .
+ /usr/src/linux/Documentation/scsi/scsi-generic.txt .
Utility and test programs are available at the sg web site. They are
bundled as sg_utils (for the lk 2.2 series) and sg3_utils (for the
#include <linux/time.h>
#include <asm/byteorder.h>
-#ifdef __LITTLE_ENDIAN
+#if __LITTLE_ENDIAN == 1234
#define SNDRV_LITTLE_ENDIAN
-#else
-#ifdef __BIG_ENDIAN
+#elif __BIG_ENDIAN == 4321
#define SNDRV_BIG_ENDIAN
#else
#error "Unsupported endian..."
#endif
-#endif
#else /* !__KERNEL__ */
int (*release) (snd_info_entry_t * entry,
unsigned short mode, void *file_private_data);
long (*read) (snd_info_entry_t *entry, void *file_private_data,
- struct file * file, char __user *buf,
- unsigned long count, unsigned long pos);
+ struct file * file, char __user *buf, long count);
long (*write) (snd_info_entry_t *entry, void *file_private_data,
- struct file * file, const char __user *buf,
- unsigned long count, unsigned long pos);
+ struct file * file, const char __user *buf, long count);
long long (*llseek) (snd_info_entry_t *entry, void *file_private_data,
struct file * file, long long offset, int orig);
unsigned int (*poll) (snd_info_entry_t *entry, void *file_private_data,
/*
* FIXME
* Ugh, we don't have PCI space, so map readb() and friends to use Zorro space
- * for MMIO accesses. This should make cirrusfb work again on Amiga
+ * for MMIO accesses. This should make clgenfb work again on Amiga
*/
-#undef inb_p
-#undef inw_p
-#undef outb_p
-#undef outw
-#undef readb
-#undef writeb
-#undef writew
#define inb_p(port) 0
#define inw_p(port) 0
#define outb_p(port, val) do { } while (0)
If unsure, say Y
+config STANDALONE
+ bool "Select only drivers that don't need compile-time external firmware" if EXPERIMENTAL
+ default y
+ help
+ Select this option if you don't have magic firmware for drivers that
+ need it.
+
+ If unsure, say Y.
+
config BROKEN
bool
depends on !CLEAN_COMPILE
up to the user level program to do useful things with this
information. This is generally a good idea, so say Y.
-config BSD_PROCESS_ACCT_V3
- bool "BSD Process Accounting version 3 file format"
- depends on BSD_PROCESS_ACCT
- default n
- help
- If you say Y here, the process accounting information is written
- in a new file format that also logs the process IDs of each
- process and it's parent. Note that this file format is incompatible
- with previous v0/v1/v2 file formats, so you will need updated tools
- for processing it. A preliminary version of these tools is available
- at <http://http://www.de.kernel.org/pub/linux/utils/acct/>.
-
menu "Class Based Kernel Resource Management"
config CKRM
config CKRM_CPU_SCHEDULE
bool "CKRM CPU scheduler"
depends on CKRM_TYPE_TASKCLASS
- default y
+ default m
help
Use CKRM CPU scheduler instead of Linux Scheduler
Say N if unsure, Y to use the feature.
-config CKRM_RES_BLKIO
- tristate " Disk I/O Resource Controller"
- depends on CKRM_TYPE_TASKCLASS && IOSCHED_CFQ
+config CKRM_CPU_MONITOR
+ tristate "CKRM CPU Resoure Monitor"
+ depends on CKRM_CPU_SCHEDULE
default m
help
- Provides a resource controller for best-effort block I/O
- bandwidth control. The controller attempts this by proportional
- servicing of requests in the I/O scheduler. However, seek
- optimizations and reordering by device drivers/disk controllers may
- alter the actual bandwidth delivered to a class.
+ Monitor CPU Resource Usage of the classes
Say N if unsure, Y to use the feature.
-config CKRM_RES_MEM
- bool "Class based physical memory controller"
- default y
- depends on CKRM
- help
- Provide the basic support for collecting physical memory usage information
- among classes. Say Y if you want to know the memory usage of each class.
-
-config CKRM_MEM_LRUORDER_CHANGE
- bool "Change the LRU ordering of scanned pages"
- default n
- depends on CKRM_RES_MEM
- help
- While trying to free pages, by default(n), scanned pages are left were they
- are found if they belong to relatively under-used class. In this case the
- LRU ordering of the memory subsystemis left intact. If this option is chosen,
- then the scanned pages are moved to the tail of the list(active or inactive).
- Changing this to yes reduces the checking overhead but violates the approximate
- LRU order that is maintained by the paging subsystem.
-
config CKRM_TYPE_SOCKETCLASS
bool "Class Manager for socket groups"
depends on CKRM
endmenu
+config BSD_PROCESS_ACCT_V3
+ bool "BSD Process Accounting version 3 file format"
+ depends on BSD_PROCESS_ACCT
+ default n
+ help
+ If you say Y here, the process accounting information is written
+ in a new file format that also logs the process IDs of each
+ process and its parent. Note that this file format is incompatible
+ with previous v0/v1/v2 file formats, so you will need updated tools
+ for processing it. A preliminary version of these tools is available
+ at <http://www.de.kernel.org/pub/linux/utils/acct/>.
+
+
config SYSCTL
bool "Sysctl support"
---help---
This option enables access to the kernel configuration file
through /proc/config.gz.
-config OOM_PANIC
- bool "OOM Panic"
- default y
- ---help---
- This option enables panic() to be called when a system is out of
- memory. This feature along with /proc/sys/kernel/panic allows a
- different behavior on out-of-memory conditions when the standard
- behavior (killing processes in an attempt to recover) does not
- make sense.
-
- If unsure, say N.
-
-config OOM_KILL
- bool
- depends on !OOM_PANIC
- default y
menuconfig EMBEDDED
bool "Configure standard kernel features (for small systems)"
mounts-$(CONFIG_BLK_DEV_INITRD) += do_mounts_initrd.o
mounts-$(CONFIG_BLK_DEV_MD) += do_mounts_md.o
-extra-$(subst m,y,$(CONFIG_CRASH_DUMP)) += kerntypes.o
-CFLAGS_kerntypes.o := -gstabs
-
# files to be removed upon make clean
clean-files := ../include/linux/compile.h
+++ /dev/null
-/*
- * kerntypes.c
- *
- * Copyright (C) 2000 Tom Morano (tjm@sgi.com) and
- * Matt D. Robinson (yakker@alacritech.com)
- *
- * Dummy module that includes headers for all kernel types of interest.
- * The kernel type information is used by the lcrash utility when
- * analyzing system crash dumps or the live system. Using the type
- * information for the running system, rather than kernel header files,
- * makes for a more flexible and robust analysis tool.
- *
- * This source code is released under version 2 of the GNU GPL.
- */
-
-#include <linux/compile.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/config.h>
-#include <linux/utsname.h>
-#include <linux/dump.h>
-
-#ifdef LINUX_COMPILE_VERSION_ID_TYPE
-/* Define version type for version validation of dump and kerntypes */
-LINUX_COMPILE_VERSION_ID_TYPE;
-#endif
-
-void
-kerntypes_dummy(void)
-{
-}
#include <asm/setup.h>
#include <linux/ckrm.h>
-#ifdef CONFIG_CKRM_CPU_SCHEDULE
int __init init_ckrm_sched_res(void);
-#else
-#define init_ckrm_sched_res() ((void)0)
-#endif
-//#include <linux/ckrm_sched.h>
+
/*
* This is one of the first .c files built. Error out early
enum system_states system_state;
EXPORT_SYMBOL(system_state);
-/*
- * The kernel_magic value represents the address of _end, which allows
- * namelist tools to "match" each other respectively. That way a tool
- * that looks at /dev/mem can verify that it is using the right System.map
- * file -- if kernel_magic doesn't equal the namelist value of _end,
- * something's wrong.
- */
-extern unsigned long _end;
-unsigned long *kernel_magic = &_end;
-
/*
* Boot command-line arguments
*/
* printk() and can access its per-cpu storage.
*/
smp_prepare_boot_cpu();
-
/*
* Set up the scheduler prior starting any interrupts (such as the
* timer interrupt). Full topology setup happens at smp_init()
* firmware files.
*/
populate_rootfs();
-
do_basic_setup();
-
init_ckrm_sched_res();
sched_init_smp();
#include <linux/uts.h>
#include <linux/utsname.h>
#include <linux/version.h>
-#include <linux/stringify.h>
#define version(a) Version_ ## a
#define version_string(a) version(a)
const char *linux_banner =
"Linux version " UTS_RELEASE " (" LINUX_COMPILE_BY "@"
LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION "\n";
-
-const char *LINUX_COMPILE_VERSION_ID = __stringify(LINUX_COMPILE_VERSION_ID);
-LINUX_COMPILE_VERSION_ID_TYPE;
goto out_inode;
}
/* all is ok */
+#warning MEF PLANETLAB: info->user = get_uid(u); is something new in Fedora Core.
info->user = get_uid(u);
} else if (S_ISDIR(mode)) {
inode->i_nlink++;
shm_unlock(shp);
if (!is_file_hugepages(shp->shm_file))
shmem_lock(shp->shm_file, 0, shp->mlock_user);
- else
- user_shm_unlock(shp->shm_file->f_dentry->d_inode->i_size,
- shp->mlock_user);
fput (shp->shm_file);
security_shm_free(shp);
ipc_rcu_free(shp, sizeof(struct shmid_kernel));
shp->shm_perm.key = key;
shp->shm_perm.xid = current->xid;
shp->shm_flags = (shmflg & S_IRWXUGO);
- shp->mlock_user = NULL;
shp->shm_perm.security = NULL;
error = security_shm_alloc(shp);
return error;
}
- if (shmflg & SHM_HUGETLB) {
- /* hugetlb_zero_setup takes care of mlock user accounting */
+ if (shmflg & SHM_HUGETLB)
file = hugetlb_zero_setup(size);
- shp->mlock_user = current->user;
- } else {
+ else {
sprintf (name, "SYSV%08x", key);
file = shmem_file_setup(name, size, VM_ACCOUNT);
}
shp->shm_nattch = 0;
shp->id = shm_buildid(id,shp->shm_perm.seq);
shp->shm_file = file;
+ shp->mlock_user = NULL;
file->f_dentry->d_inode->i_ino = shp->id;
if (shmflg & SHM_HUGETLB)
set_file_hugepages(file);
case SHM_UNLOCK:
{
/* Allow superuser to lock segment in memory */
- if (!can_do_mlock() && cmd == SHM_LOCK) {
+ if (!can_do_mlock()) {
err = -EPERM;
goto out;
}
goto out_unlock;
if(cmd==SHM_LOCK) {
- struct user_struct * user = current->user;
if (!is_file_hugepages(shp->shm_file)) {
- err = shmem_lock(shp->shm_file, 1, user);
- if (!err) {
+ err = shmem_lock(shp->shm_file, 1, current->user);
+ if (!err)
shp->shm_flags |= SHM_LOCKED;
- shp->mlock_user = user;
- }
}
- } else if (!is_file_hugepages(shp->shm_file)) {
- shmem_lock(shp->shm_file, 0, shp->mlock_user);
+ } else {
+ if (!is_file_hugepages(shp->shm_file))
+ shmem_lock(shp->shm_file, 0, shp->mlock_user);
shp->shm_flags &= ~SHM_LOCKED;
- shp->mlock_user = NULL;
}
shm_unlock(shp);
goto out;
#include <asm/unistd.h>
-#include <asm/unistd.h>
-
#include "util.h"
/**
granted_mode >>= 3;
/* is there some bit set in requested_mode but not in granted_mode? */
if ((requested_mode & ~granted_mode & 0007) &&
- !capable(CAP_IPC_OWNER))
- return -1;
+ !capable(CAP_IPC_OWNER)) {
+ if (!can_do_mlock()) {
+ return -1;
+ }
+ }
return security_ipc_permission(ipcp, flag);
}
obj-$(CONFIG_IKCONFIG) += configs.o
obj-$(CONFIG_IKCONFIG_PROC) += configs.o
obj-$(CONFIG_STOP_MACHINE) += stop_machine.o
-obj-$(CONFIG_CKRM_CPU_SCHEDULE) += ckrm_classqueue.o ckrm_sched.o
+obj-$(CONFIG_CKRM_CPU_SCHEDULE) += ckrm_classqueue.o
+obj-$(CONFIG_CKRM_CPU_SCHEDULE) += ckrm_sched.o
obj-$(CONFIG_AUDIT) += audit.o
obj-$(CONFIG_AUDITSYSCALL) += auditsc.o
+obj-$(CONFIG_KGDB) += kgdbstub.o
+
ifneq ($(CONFIG_IA64),y)
# According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is
*/
memset((caddr_t)&ac, 0, sizeof(acct_t));
- ac.ac_version = ACCT_VERSION | ACCT_BYTEORDER;
+ ac.ac_version = ACCT_VERSION;
strlcpy(ac.ac_comm, current->comm, sizeof(ac.ac_comm));
elapsed = jiffies_64_to_AHZ(get_jiffies_64() - current->start_time);
old_encode_dev(tty_devnum(current->signal->tty)) : 0;
read_unlock(&tasklist_lock);
- ac.ac_flag = 0;
+ /* ABYTESEX is always set to allow byte order detection */
+ ac.ac_flag = ABYTESEX;
if (current->flags & PF_FORKNOEXEC)
ac.ac_flag |= AFORK;
if (current->flags & PF_SUPERPRIV)
#
ifeq ($(CONFIG_CKRM),y)
- obj-y = ckrm.o ckrmutils.o ckrm_numtasks_stub.o rbce/
+ obj-y = ckrm.o ckrmutils.o ckrm_tasks_stub.o rbce/
endif
obj-$(CONFIG_CKRM_TYPE_TASKCLASS) += ckrm_tc.o
- obj-$(CONFIG_CKRM_RES_NUMTASKS) += ckrm_numtasks.o
+ obj-$(CONFIG_CKRM_RES_NUMTASKS) += ckrm_tasks.o
obj-$(CONFIG_CKRM_TYPE_SOCKETCLASS) += ckrm_sockc.o
- obj-$(CONFIG_CKRM_RES_LISTENAQ) += ckrm_laq.o
- obj-$(CONFIG_CKRM_CPU_SCHEDULE) += ckrm_cpu_class.o ckrm_cpu_monitor.o
- obj-$(CONFIG_CKRM_RES_MEM) += ckrm_mem.o
+ obj-$(CONFIG_CKRM_RES_LISTENAQ) += ckrm_listenaq.o
+ obj-$(CONFIG_CKRM_CPU_SCHEDULE) += ckrm_cpu_class.o
+ obj-$(CONFIG_CKRM_CPU_MONITOR) += ckrm_cpu_monitor.o
static inline void set_callbacks_active(struct ckrm_classtype *ctype)
{
- ctype->ce_cb_active = ((atomic_read(&ctype->ce_regd) > 0) &&
+ ctype->ce_cb_active = ((atomic_read(&ctype->ce_nr_users) > 0) &&
(ctype->ce_callbacks.always_callback
|| (ctype->num_classes > 1)));
}
if (ctype == NULL)
return (-ENOENT);
- atomic_inc(&ctype->ce_regd);
-
- /* another engine registered or trying to register ? */
- if (atomic_read(&ctype->ce_regd) != 1) {
- atomic_dec(&ctype->ce_regd);
+ ce_protect(ctype);
+ if (atomic_read(&ctype->ce_nr_users) != 1) {
+ // Some engine is active, deregister it first.
+ ce_release(ctype);
return (-EBUSY);
}
if (!(((ecbs->classify) && (ecbs->class_delete)) || (ecbs->notify)) ||
(ecbs->c_interest && ecbs->classify == NULL) ||
(ecbs->n_interest && ecbs->notify == NULL)) {
- atomic_dec(&ctype->ce_regd);
+ ce_release(ctype);
return (-EINVAL);
}
+ /* Is any other engine registered for this classtype ? */
+ if (ctype->ce_regd) {
+ ce_release(ctype);
+ return (-EINVAL);
+ }
+
+ ctype->ce_regd = 1;
ctype->ce_callbacks = *ecbs;
set_callbacks_active(ctype);
ctype->ce_cb_active = 0;
- if (atomic_read(&ctype->ce_nr_users) > 1) {
+ if (atomic_dec_and_test(&ctype->ce_nr_users) != 1) {
// Somebody is currently using the engine, cannot deregister.
- return (-EAGAIN);
+ atomic_inc(&ctype->ce_nr_users);
+ return (-EBUSY);
}
- atomic_set(&ctype->ce_regd, 0);
+ ctype->ce_regd = 0;
memset(&ctype->ce_callbacks, 0, sizeof(ckrm_eng_callback_t));
return 0;
}
CLS_DEBUG("name %s => %p\n", name ? name : "default", dcore);
if ((dcore != clstype->default_class) && (!ckrm_is_core_valid(parent))){
- printk(KERN_DEBUG "error not a valid parent %p\n", parent);
+ printk("error not a valid parent %p\n", parent);
return -EINVAL;
}
#if 0
(void **)kmalloc(clstype->max_resid * sizeof(void *),
GFP_KERNEL);
if (dcore->res_class == NULL) {
- printk(KERN_DEBUG "error no mem\n");
+ printk("error no mem\n");
return -ENOMEM;
}
}
parent->name);
if (core->delayed) {
/* this core was marked as late */
- printk(KERN_DEBUG "class <%s> finally deleted %lu\n", core->name, jiffies);
+ printk("class <%s> finally deleted %lu\n", core->name, jiffies);
}
if (ckrm_remove_child(core) == 0) {
- printk(KERN_DEBUG "Core class removal failed. Chilren present\n");
+ printk("Core class removal failed. Chilren present\n");
}
for (i = 0; i < clstype->max_resid; i++) {
*/
read_lock(&ckrm_class_lock);
list_for_each_entry(core, &clstype->classes, clslist) {
- printk(KERN_INFO "CKRM .. create res clsobj for resouce <%s>"
+ printk("CKRM .. create res clsobj for resouce <%s>"
"class <%s> par=%p\n", rcbs->res_name,
core->name, core->hnode.parent);
ckrm_alloc_res_class(core, core->hnode.parent, resid);
}
#define ECC_PRINTK(fmt, args...) \
-// printk(KERN_DEBUG "%s: " fmt, __FUNCTION__ , ## args)
+// printk("%s: " fmt, __FUNCTION__ , ## args)
void ckrm_invoke_event_cb_chain(enum ckrm_event ev, void *arg)
{
void __init ckrm_init(void)
{
- printk(KERN_DEBUG "CKRM Initialization\n");
+ printk("CKRM Initialization\n");
// register/initialize the Metatypes
#endif
// prepare init_task and then rely on inheritance of properties
ckrm_cb_newtask(&init_task);
- printk(KERN_DEBUG "CKRM Initialization done\n");
+ printk("CKRM Initialization done\n");
}
EXPORT_SYMBOL(ckrm_register_engine);
#include <linux/ckrm_classqueue.h>
#include <linux/seq_file.h>
-struct ckrm_res_ctlr cpu_rcbs;
-/**
- * insert_cpu_class - insert a class to active_cpu_class list
- *
- * insert the class in decreasing order of class weight
- */
-static inline void insert_cpu_class(struct ckrm_cpu_class *cls)
-{
- list_add(&cls->links,&active_cpu_classes);
-}
+struct ckrm_res_ctlr cpu_rcbs;
/*
* initialize a class object and its local queues
*/
-void init_cpu_class(struct ckrm_cpu_class *cls,ckrm_shares_t* shares)
+ static void init_cpu_class(struct ckrm_cpu_class *cls,ckrm_shares_t* shares)
{
int i,j,k;
prio_array_t *array;
- ckrm_lrq_t* queue;
-
- cls->shares = *shares;
- cls->cnt_lock = SPIN_LOCK_UNLOCKED;
- ckrm_cpu_stat_init(&cls->stat);
- ckrm_usage_init(&cls->usage);
- cls->magic = CKRM_CPU_CLASS_MAGIC;
+ struct ckrm_local_runqueue* queue;
for (i = 0 ; i < NR_CPUS ; i++) {
queue = &cls->local_queues[i];
queue->top_priority = MAX_PRIO;
cq_node_init(&queue->classqueue_linkobj);
queue->local_cvt = 0;
- queue->lrq_load = 0;
- queue->local_weight = cpu_class_weight(cls);
+ queue->uncounted_cvt = 0;
queue->uncounted_ns = 0;
- queue->savings = 0;
queue->magic = 0x43FF43D7;
}
+ cls->shares = *shares;
+ cls->global_cvt = 0;
+ cls->cnt_lock = SPIN_LOCK_UNLOCKED;
+ ckrm_cpu_stat_init(&cls->stat);
+
// add to class list
write_lock(&class_list_lock);
- insert_cpu_class(cls);
+ list_add(&cls->links,&active_cpu_classes);
write_unlock(&class_list_lock);
}
static inline void set_default_share(ckrm_shares_t *shares)
{
shares->my_guarantee = 0;
- shares->total_guarantee = CKRM_SHARE_DFLT_TOTAL_GUARANTEE;
- shares->unused_guarantee = CKRM_SHARE_DFLT_TOTAL_GUARANTEE;
shares->my_limit = CKRM_SHARE_DFLT_MAX_LIMIT;
+ shares->total_guarantee = CKRM_SHARE_DFLT_TOTAL_GUARANTEE;
shares->max_limit = CKRM_SHARE_DFLT_MAX_LIMIT;
- shares->cur_max_limit = 0;
+ shares->unused_guarantee = CKRM_SHARE_DFLT_TOTAL_GUARANTEE;
+ shares->cur_max_limit = CKRM_SHARE_DFLT_MAX_LIMIT;
}
-struct ckrm_cpu_class * ckrm_get_cpu_class(struct ckrm_core_class *core)
-{
- struct ckrm_cpu_class * cls;
- cls = ckrm_get_res_class(core, cpu_rcbs.resid, struct ckrm_cpu_class);
- if (valid_cpu_class(cls))
- return cls;
- else
- return NULL;
+struct ckrm_cpu_class * ckrm_get_cpu_class(struct ckrm_core_class *core) {
+ return ckrm_get_res_class(core, cpu_rcbs.resid, struct ckrm_cpu_class);
}
struct ckrm_cpu_class *cls;
if (! parent) /*root class*/
- cls = get_default_cpu_class();
+ cls = default_cpu_class;
else
cls = (struct ckrm_cpu_class *) kmalloc(sizeof(struct ckrm_cpu_class),GFP_ATOMIC);
cls->parent = parent;
}
} else
- printk(KERN_ERR"alloc_cpu_class failed\n");
+ printk("alloc_cpu_class failed GFP_ATOMIC\n");
return cls;
}
return;
/*the default class can't be freed*/
- if (cls == get_default_cpu_class())
+ if (cls == default_cpu_class)
return;
// Assuming there will be no children when this function is called
write_unlock(&class_list_lock);
kfree(cls);
-
- //call ckrm_cpu_monitor after class removed
- ckrm_cpu_monitor(0);
}
/*
parres = NULL;
}
- /*
- * hzheng: CKRM_SHARE_DONTCARE should be handled
- */
- if (new_share->my_guarantee == CKRM_SHARE_DONTCARE)
- new_share->my_guarantee = 0;
-
rc = set_shares(new_share, cur, par);
- if (cur->my_limit == CKRM_SHARE_DONTCARE)
- cur->my_limit = cur->max_limit;
-
spin_unlock(&cls->cnt_lock);
if (cls->parent) {
spin_unlock(&parres->cnt_lock);
}
-
- //call ckrm_cpu_monitor after changes are changed
- ckrm_cpu_monitor(0);
-
return rc;
}
+/*
+ * translate the global_CVT to ticks
+ */
static int ckrm_cpu_get_share(void *my_res,
struct ckrm_shares *shares)
{
int ckrm_cpu_get_stats(void *my_res, struct seq_file * sfile)
{
struct ckrm_cpu_class *cls = my_res;
- struct ckrm_cpu_class_stat* stat = &cls->stat;
- ckrm_lrq_t* lrq;
- int i;
if (!cls)
return -EINVAL;
seq_printf(sfile, "-------- CPU Class Status Start---------\n");
- seq_printf(sfile, "Share:\n\tgrt= %d limit= %d total_grt= %d max_limit= %d\n",
+ seq_printf(sfile, " gua= %d limit= %d\n",
cls->shares.my_guarantee,
- cls->shares.my_limit,
+ cls->shares.my_limit);
+ seq_printf(sfile, " total_gua= %d limit= %d\n",
cls->shares.total_guarantee,
cls->shares.max_limit);
- seq_printf(sfile, "\tunused_grt= %d cur_max_limit= %d\n",
+ seq_printf(sfile, " used_gua= %d cur_limit= %d\n",
cls->shares.unused_guarantee,
cls->shares.cur_max_limit);
- seq_printf(sfile, "Effective:\n\tegrt= %d\n",stat->egrt);
- seq_printf(sfile, "\tmegrt= %d\n",stat->megrt);
- seq_printf(sfile, "\tehl= %d\n",stat->ehl);
- seq_printf(sfile, "\tmehl= %d\n",stat->mehl);
- seq_printf(sfile, "\teshare= %d\n",stat->eshare);
- seq_printf(sfile, "\tmeshare= %d\n",cpu_class_weight(cls));
- seq_printf(sfile, "\tmax_demand= %lu\n",stat->max_demand);
- seq_printf(sfile, "\ttotal_ns= %llu\n",stat->total_ns);
- seq_printf(sfile, "\tusage(2,10,60)= %d %d %d\n",
- get_ckrm_usage(cls,2*HZ),
- get_ckrm_usage(cls,10*HZ),
- get_ckrm_usage(cls,60*HZ)
- );
- for_each_online_cpu(i) {
- lrq = get_ckrm_lrq(cls,i);
- seq_printf(sfile, "\tlrq %d demand= %lu weight= %d lrq_load= %lu cvt= %llu sav= %llu\n",i,stat->local_stats[i].cpu_demand,local_class_weight(lrq),lrq->lrq_load,lrq->local_cvt,lrq->savings);
- }
-
+ seq_printf(sfile, " Share= %d\n",cpu_class_weight(cls));
+ seq_printf(sfile, " cvt= %llu\n",cls->local_queues[0].local_cvt);
+ seq_printf(sfile, " total_ns= %llu\n",cls->stat.total_ns);
+ seq_printf(sfile, " prio= %d\n",cls->local_queues[0].classqueue_linkobj.prio);
+ seq_printf(sfile, " index= %d\n",cls->local_queues[0].classqueue_linkobj.index);
+ seq_printf(sfile, " run= %llu\n",cls->stat.local_stats[0].run);
+ seq_printf(sfile, " total= %llu\n",cls->stat.local_stats[0].total);
+ seq_printf(sfile, " cpu_demand= %lu\n",cls->stat.cpu_demand);
+
+ seq_printf(sfile, " effective_guarantee= %d\n",cls->stat.effective_guarantee);
+ seq_printf(sfile, " effective_limit= %d\n",cls->stat.effective_limit);
+ seq_printf(sfile, " effective_share= %d\n",cls->stat.effective_share);
seq_printf(sfile, "-------- CPU Class Status END ---------\n");
+
return 0;
}
/*
* task will remain in the same cpu but on a different local runqueue
*/
-void ckrm_cpu_change_class(void *task, void *old, void *new)
+static void ckrm_cpu_change_class(void *task, void *old, void *new)
{
struct task_struct *tsk = task;
struct ckrm_cpu_class *newcls = new;
+ unsigned long flags;
+ struct runqueue *rq;
+ prio_array_t *array;
/*sanity checking*/
if (!task || ! old || !new)
return;
- _ckrm_cpu_change_class(tsk,newcls);
+ rq = task_rq_lock(tsk,&flags);
+ array = tsk->array;
+ if (array) {
+ dequeue_task(tsk,array);
+ tsk->cpu_class = newcls;
+ enqueue_task(tsk,rq_active(tsk,rq));
+ } else {
+ tsk->cpu_class = newcls;
+ }
+ task_rq_unlock(rq,&flags);
}
/*dummy function, not used*/
if (!cls)
return -EINVAL;
- printk(KERN_DEBUG "ckrm_cpu config='%s'\n",cfgstr);
+ printk("ckrm_cpu config='%s'\n",cfgstr);
return 0;
}
struct ckrm_res_ctlr cpu_rcbs = {
- .res_name = "cpu",
+ .res_name = "CKRM CPU Class",
.res_hdepth = 1,
.resid = -1,
.res_alloc = ckrm_alloc_cpu_class,
if (resid == -1) { /*not registered */
resid = ckrm_register_res_ctlr(clstype,&cpu_rcbs);
- printk(KERN_DEBUG "........init_ckrm_sched_res , resid= %d\n",resid);
+ printk("........init_ckrm_sched_res , resid= %d\n",resid);
}
return 0;
}
//init classqueues for each processor
for (i=0; i < NR_CPUS; i++)
classqueue_init(get_cpu_classqueue(i));
-
- /*
- * hzheng: initialize the default cpu class
- * required for E14/E15 since ckrm_init is called after sched_init
- */
+/*
+ * hzheng: initialize the default cpu class
+ * required for E14 since ckrm_init is called after sched_init
+ */
ckrm_alloc_cpu_class(NULL,NULL);
}
#include <asm/div64.h>
#include <linux/ckrm_sched.h>
-#define CPU_MONITOR_INTERVAL (HZ) /*how often do we adjust the shares*/
+#define CPU_MONITOR_INTERVAL (4*HZ) /*how often do we adjust the shares*/
+#define CKRM_SHARE_ACCURACY 7
#define CKRM_SHARE_MAX (1<<CKRM_SHARE_ACCURACY)
-#define CKRM_CPU_DEMAND_RUN 0
-#define CKRM_CPU_DEMAND_SLEEP 1
-//sample task cpu demand every 64ms
-#define CPU_DEMAND_TASK_RECALC (64000000LL)
-#define CPU_DEMAND_CLASS_RECALC (256000000LL)
-#define CPU_DEMAND_TP_CLASS 0
-#define CPU_DEMAND_TP_TASK 1
-
extern struct ckrm_cpu_class *ckrm_get_cpu_class(struct ckrm_core_class *core);
-void update_ckrm_idle(unsigned long surplus);
-
-/*interface to share definition*/
-static inline int get_soft_limit(struct ckrm_cpu_class *cls)
-{
- return cls->shares.my_limit;
-}
-
-static inline int get_mysoft_limit(struct ckrm_cpu_class *cls)
-{
- return cls->shares.total_guarantee;
-}
-
-static inline int get_hard_limit(struct ckrm_cpu_class *cls)
-{
- return cls->shares.total_guarantee;
-}
-
-static inline int get_myhard_limit(struct ckrm_cpu_class *cls)
-{
- return cls->shares.total_guarantee;
-}
-
-
-static inline void cpu_demand_stat_init(struct ckrm_cpu_demand_stat* local_stat, int type)
-{
- unsigned long long now = sched_clock();
-
- local_stat->run = 0;
- local_stat->total = 0;
- local_stat->last_sleep = now;
- switch (type) {
- case CPU_DEMAND_TP_CLASS:
- local_stat->recalc_interval = CPU_DEMAND_CLASS_RECALC;
- local_stat->cpu_demand = 0;
- break;
- case CPU_DEMAND_TP_TASK:
- local_stat->recalc_interval = CPU_DEMAND_TASK_RECALC;
- //for task, the init cpu_demand is copied from its parent
- break;
- default:
- BUG();
- }
-}
void ckrm_cpu_stat_init(struct ckrm_cpu_class_stat *stat)
{
int i;
+ struct ckrm_cpu_class_local_stat* local_stat;
+ unsigned long long now = sched_clock();
stat->stat_lock = SPIN_LOCK_UNLOCKED;
stat->total_ns = 0;
- stat->max_demand = 0;
+ stat->cpu_demand = 0;
for (i=0; i< NR_CPUS; i++) {
- cpu_demand_stat_init(&stat->local_stats[i],CPU_DEMAND_TP_CLASS);
+ local_stat = &stat->local_stats[i];
+ local_stat->run = 0;
+ local_stat->total = 0;
+ local_stat->last_sleep = now;
+ local_stat->cpu_demand = 0;
}
- stat->egrt = 0;
- stat->megrt = 0;
- stat->ehl = CKRM_SHARE_MAX; /*default: no limit*/
- stat->mehl = CKRM_SHARE_MAX; /*default: no limit */
-
- stat->eshare = CKRM_SHARE_MAX;
- stat->meshare = CKRM_SHARE_MAX;
+ stat->effective_guarantee = 0;
+ stat->effective_limit = 0;
+ stat->glut = 0;
+ stat->effective_share = 100;
+ stat->self_effective_share = 100;
}
-
/**********************************************/
/* cpu demand */
/**********************************************/
*/
/**
- * update_cpu_demand_stat -
+ * update_cpu_demand - update a state change
*
- * should be called whenever the state of a task/task local queue changes
+ * should be called whenever the state of a local queue changes
* -- when deschedule : report how much run
* -- when enqueue: report how much sleep
*
- * how often should we recalculate the cpu demand
- * the number is in ns
+ * to deal with excessive long run/sleep state
+ * -- whenever the the ckrm_cpu_monitor is called, check if the class is in sleep state, if yes, then update sleep record
*/
-static inline void update_cpu_demand_stat(struct ckrm_cpu_demand_stat* local_stat,int state, unsigned long long len)
+#define CKRM_CPU_DEMAND_RUN 0
+#define CKRM_CPU_DEMAND_SLEEP 1
+//how often should we recalculate the cpu demand, in ns
+#define CPU_DEMAND_CAL_THRESHOLD (1000000000LL)
+static inline void update_local_cpu_demand(struct ckrm_cpu_class_local_stat* local_stat,int state, unsigned long long len)
{
local_stat->total += len;
if (state == CKRM_CPU_DEMAND_RUN)
local_stat->run += len;
- if (local_stat->total >= local_stat->recalc_interval) {
+ if (local_stat->total >= CPU_DEMAND_CAL_THRESHOLD) {
local_stat->total >>= CKRM_SHARE_ACCURACY;
- if (unlikely(local_stat->run > 0xFFFFFFFF))
- local_stat->run = 0xFFFFFFFF;
-
- if (local_stat->total > 0xFFFFFFFF)
+ if (local_stat->total > 0xFFFFFFFF)
local_stat->total = 0xFFFFFFFF;
-
- do_div(local_stat->run,(unsigned long)local_stat->total);
- if (local_stat->total > 0xFFFFFFFF) //happens after very long sleep
- local_stat->cpu_demand = local_stat->run;
- else {
- local_stat->cpu_demand += local_stat->run;
- local_stat->cpu_demand >>= 1;
- }
+ do_div(local_stat->run,(unsigned long)local_stat->total);
+ local_stat->cpu_demand +=local_stat->run;
+ local_stat->cpu_demand >>= 1;
local_stat->total = 0;
local_stat->run = 0;
}
}
+static inline void cpu_demand_update_run(struct ckrm_cpu_class_local_stat* local_stat, unsigned long long len)
+{
+ update_local_cpu_demand(local_stat,CKRM_CPU_DEMAND_RUN,len);
+}
+
+static inline void cpu_demand_update_sleep(struct ckrm_cpu_class_local_stat* local_stat, unsigned long long len)
+{
+ update_local_cpu_demand(local_stat,CKRM_CPU_DEMAND_SLEEP,len);
+}
+
+#define CPU_DEMAND_ENQUEUE 0
+#define CPU_DEMAND_DEQUEUE 1
+#define CPU_DEMAND_DESCHEDULE 2
+
/**
* cpu_demand_event - and cpu_demand event occured
* @event: one of the following three events:
* CPU_DEMAND_DESCHEDULE: one task belong a certain local class deschedule
* @len: valid only for CPU_DEMAND_DESCHEDULE, how long the task has been run
*/
-void cpu_demand_event(struct ckrm_cpu_demand_stat* local_stat, int event, unsigned long long len)
+void cpu_demand_event(struct ckrm_cpu_class_local_stat* local_stat, int event, unsigned long long len)
{
switch (event) {
case CPU_DEMAND_ENQUEUE:
len = sched_clock() - local_stat->last_sleep;
local_stat->last_sleep = 0;
- update_cpu_demand_stat(local_stat,CKRM_CPU_DEMAND_SLEEP,len);
+ cpu_demand_update_sleep(local_stat,len);
break;
case CPU_DEMAND_DEQUEUE:
- if (! local_stat->last_sleep) {
- local_stat->last_sleep = sched_clock();
- }
+ local_stat->last_sleep = sched_clock();
break;
case CPU_DEMAND_DESCHEDULE:
- update_cpu_demand_stat(local_stat,CKRM_CPU_DEMAND_RUN,len);
- break;
- case CPU_DEMAND_INIT: //for task init only
- cpu_demand_stat_init(local_stat,CPU_DEMAND_TP_TASK);
+ cpu_demand_update_run(local_stat,len);
break;
default:
BUG();
/**
* check all the class local queue
- *
- * to deal with excessive long run/sleep state
- * -- whenever the the ckrm_cpu_monitor is called, check if the class is in sleep state, if yes, then update sleep record
+ * if local queueu is not in runqueue, then it's in sleep state
+ * if compare to last sleep,
*/
static inline void cpu_demand_check_sleep(struct ckrm_cpu_class_stat *stat, int cpu)
{
- struct ckrm_cpu_demand_stat * local_stat = &stat->local_stats[cpu];
+ struct ckrm_cpu_class_local_stat * local_stat = &stat->local_stats[cpu];
unsigned long long sleep,now;
if (local_stat->last_sleep) {
now = sched_clock();
sleep = now - local_stat->last_sleep;
local_stat->last_sleep = now;
- update_cpu_demand_stat(local_stat,CKRM_CPU_DEMAND_SLEEP,sleep);
+ cpu_demand_update_sleep(local_stat,sleep);
}
}
*
* self_cpu_demand = sum(cpu demand of all local queues)
*/
-static inline unsigned long get_self_cpu_demand(struct ckrm_cpu_class_stat *stat)
+static unsigned long get_self_cpu_demand(struct ckrm_cpu_class_stat
+ *stat)
{
int cpu_demand = 0;
int i;
- int cpuonline = 0;
for_each_online_cpu(i) {
cpu_demand_check_sleep(stat,i);
cpu_demand += stat->local_stats[i].cpu_demand;
- cpuonline ++;
}
- return (cpu_demand/cpuonline);
+ if (cpu_demand > CKRM_SHARE_MAX)
+ cpu_demand = CKRM_SHARE_MAX;
+ return cpu_demand;
}
/*
- * my max demand = min(cpu_demand, my effective hard limit)
+ * update effective cpu demand for each class
+ * assume the root_core->parent == NULL
*/
-static inline unsigned long get_mmax_demand(struct ckrm_cpu_class_stat* stat)
-{
- unsigned long mmax_demand = get_self_cpu_demand(stat);
- if (mmax_demand > stat->mehl)
- mmax_demand = stat->mehl;
-
- return mmax_demand;
-}
-
-/**
- * update_max_demand: update effective cpu demand for each class
- * return -1 on error
- *
- * Assume: the root_core->parent == NULL
- */
-static int update_max_demand(struct ckrm_core_class *root_core)
+static void update_cpu_demand(struct ckrm_core_class *root_core)
{
struct ckrm_core_class *cur_core, *child_core;
- struct ckrm_cpu_class *cls,*c_cls;
- int ret = -1;
+ struct ckrm_cpu_class *cls;
cur_core = root_core;
child_core = NULL;
-
- repeat:
- if (!cur_core) { //normal exit
- ret = 0;
- goto out;
- }
+ /*
+ * iterate the tree
+ * update cpu_demand of each node
+ */
+ repeat:
+ if (!cur_core)
+ return;
cls = ckrm_get_cpu_class(cur_core);
- if (! cls) //invalid c_cls, abort
- goto out;
-
if (!child_core) //first child
- cls->stat.max_demand = get_mmax_demand(&cls->stat);
+ cls->stat.cpu_demand = get_self_cpu_demand(&cls->stat);
else {
- c_cls = ckrm_get_cpu_class(child_core);
- if (c_cls)
- cls->stat.max_demand += c_cls->stat.max_demand;
- else //invalid c_cls, abort
- goto out;
+ cls->stat.cpu_demand +=
+ ckrm_get_cpu_class(child_core)->stat.cpu_demand;
+ if (cls->stat.cpu_demand > CKRM_SHARE_MAX)
+ cls->stat.cpu_demand = CKRM_SHARE_MAX;
}
- //check class hard limit
- if (cls->stat.max_demand > cls->stat.ehl)
- cls->stat.max_demand = cls->stat.ehl;
-
//next child
child_core = ckrm_get_next_child(cur_core, child_core);
if (child_core) {
cur_core = child_core->hnode.parent;
}
goto repeat;
- out:
- return ret;
}
/**********************************************/
/* effective guarantee & limit */
/**********************************************/
-static inline void set_eshare(struct ckrm_cpu_class_stat *stat,
+static inline void set_effective_share(struct ckrm_cpu_class_stat *stat,
int new_share)
{
if (!new_share)
new_share = 1;
-
- BUG_ON(new_share < 0);
- stat->eshare = new_share;
+ stat->effective_share = new_share;
}
-static inline void set_meshare(struct ckrm_cpu_class_stat *stat,
+static inline void set_self_effective_share(struct ckrm_cpu_class_stat *stat,
int new_share)
{
if (!new_share)
new_share = 1;
-
- BUG_ON(new_share < 0);
- stat->meshare = new_share;
+ stat->self_effective_share = new_share;
}
-/**
- *update_child_effective - update egrt, ehl, mehl for all children of parent
- *@parent: the parent node
- *return -1 if anything wrong
- *
- */
-static int update_child_effective(struct ckrm_core_class *parent)
+static inline void update_child_effective(struct ckrm_core_class *parent)
{
struct ckrm_cpu_class *p_cls = ckrm_get_cpu_class(parent);
- struct ckrm_core_class *child_core;
- int ret = -1;
-
- if (! p_cls)
- return ret;
+ struct ckrm_core_class *child_core = ckrm_get_next_child(parent, NULL);
- child_core = ckrm_get_next_child(parent, NULL);
while (child_core) {
struct ckrm_cpu_class *c_cls = ckrm_get_cpu_class(child_core);
- if (! c_cls)
- return ret;
- c_cls->stat.egrt =
- p_cls->stat.egrt *
+ c_cls->stat.effective_guarantee =
+ p_cls->stat.effective_guarantee *
c_cls->shares.my_guarantee / p_cls->shares.total_guarantee;
-
- c_cls->stat.megrt = c_cls->stat.egrt * c_cls->shares.unused_guarantee
- / c_cls->shares.total_guarantee;
-
- c_cls->stat.ehl =
- p_cls->stat.ehl *
- get_hard_limit(c_cls) / p_cls->shares.total_guarantee;
-
- c_cls->stat.mehl =
- c_cls->stat.ehl *
- get_myhard_limit(c_cls) / c_cls->shares.total_guarantee;
-
- set_eshare(&c_cls->stat,c_cls->stat.egrt);
- set_meshare(&c_cls->stat,c_cls->stat.megrt);
-
+ c_cls->stat.effective_limit =
+ p_cls->stat.effective_guarantee * c_cls->shares.my_limit /
+ p_cls->shares.total_guarantee;
child_core = ckrm_get_next_child(parent, child_core);
};
- return 0;
+
}
-/**
- * update_effectives: update egrt, ehl, mehl for the whole tree
+/*
+ * update effective guarantee and effective limit
+ * -- effective share = parent->effective->share * share/parent->total_share
+ * -- effective limit = parent->effective->share * limit/parent->total_share
* should be called only when class structure changed
- *
- * return -1 if anything wrong happened (eg: the structure changed during the process)
*/
-static int update_effectives(struct ckrm_core_class *root_core)
+static void update_effective_guarantee_limit(struct ckrm_core_class *root_core)
{
- struct ckrm_core_class *cur_core, *child_core;
+ struct ckrm_core_class *cur_core, *child_core = NULL;
struct ckrm_cpu_class *cls;
- int ret = -1;
cur_core = root_core;
- child_core = NULL;
cls = ckrm_get_cpu_class(cur_core);
+ cls->stat.effective_guarantee = CKRM_SHARE_MAX;
+ cls->stat.effective_limit = cls->stat.effective_guarantee;
- //initialize the effectives for root
- cls->stat.egrt = CKRM_SHARE_MAX; /*egrt of the root is always 100% */
- cls->stat.megrt = cls->stat.egrt * cls->shares.unused_guarantee
- / cls->shares.total_guarantee;
- cls->stat.ehl = CKRM_SHARE_MAX * get_hard_limit(cls)
- / cls->shares.total_guarantee;
- cls->stat.mehl = cls->stat.ehl * get_myhard_limit(cls)
- / cls->shares.total_guarantee;
- set_eshare(&cls->stat,cls->stat.egrt);
- set_meshare(&cls->stat,cls->stat.megrt);
-
- repeat:
+ repeat:
//check exit
if (!cur_core)
- return 0;
+ return;
- //visit this node only once
- if (! child_core)
- if (update_child_effective(cur_core) < 0)
- return ret; //invalid cur_core node
-
+ //visit this node
+ update_child_effective(cur_core);
//next child
child_core = ckrm_get_next_child(cur_core, child_core);
-
if (child_core) {
- //go down to the next hier
+ //go down
cur_core = child_core;
child_core = NULL;
- } else { //no more child, go back
+ goto repeat;
+ } else { //no more child, go back
child_core = cur_core;
cur_core = child_core->hnode.parent;
}
/**********************************************/
/*
- * surplus = egrt - demand
+ * surplus = my_effective_share - demand
* if surplus < 0, surplus = 0
*/
static inline int get_node_surplus(struct ckrm_cpu_class *cls)
{
- int surplus = cls->stat.egrt - cls->stat.max_demand;
+ int surplus = cls->stat.effective_guarantee - cls->stat.cpu_demand;
if (surplus < 0)
surplus = 0;
return surplus;
}
-static inline int get_my_node_surplus(struct ckrm_cpu_class *cls)
-{
- int surplus = cls->stat.megrt - get_mmax_demand(&cls->stat);
-
- if (surplus < 0)
- surplus = 0;
-
- return surplus;
-}
-
-/**
- * consume_surplus: decides how much surplus a node can consume
- * @ckeck_sl: if check_sl is set, then check soft_limitx
+/*
+ * consume the surplus
* return how much consumed
- *
- * implements all the CKRM Scheduling Requirement
- * assume c_cls is valid
+ * set glut when necessary
*/
-static inline int consume_surplus(int surplus,
- struct ckrm_cpu_class *c_cls,
- struct ckrm_cpu_class *p_cls,
- int check_sl
- )
+static inline int node_surplus_consume(int old_surplus,
+ struct ckrm_core_class *child_core,
+ struct ckrm_cpu_class *p_cls)
{
int consumed = 0;
int inc_limit;
- int total_grt = p_cls->shares.total_guarantee;
- BUG_ON(surplus < 0);
+ struct ckrm_cpu_class *c_cls = ckrm_get_cpu_class(child_core);
- /*can't consume more than demand or hard limit*/
- if (c_cls->stat.eshare >= c_cls->stat.max_demand)
+ if (c_cls->stat.glut)
goto out;
- //the surplus allocation is propotional to grt
- consumed =
- surplus * c_cls->shares.my_guarantee / total_grt;
-
- if (! consumed) //no more share
+ //check demand
+ if (c_cls->stat.effective_share >= c_cls->stat.cpu_demand) {
+ c_cls->stat.glut = 1;
goto out;
-
- //hard limit and demand limit
- inc_limit = c_cls->stat.max_demand - c_cls->stat.eshare;
-
- if (check_sl) {
- int esl = p_cls->stat.eshare * get_soft_limit(c_cls)
- /total_grt;
- if (esl < c_cls->stat.max_demand)
- inc_limit = esl - c_cls->stat.eshare;
}
- if (consumed > inc_limit)
- consumed = inc_limit;
-
- BUG_ON(consumed < 0);
- out:
- return consumed;
-}
-
-/*
- * how much a node can consume for itself?
- */
-static inline int consume_self_surplus(int surplus,
- struct ckrm_cpu_class *p_cls,
- int check_sl
- )
-{
- int consumed = 0;
- int inc_limit;
- int total_grt = p_cls->shares.total_guarantee;
- int max_demand = get_mmax_demand(&p_cls->stat);
-
- BUG_ON(surplus < 0);
-
- /*can't consume more than demand or hard limit*/
- if (p_cls->stat.meshare >= max_demand)
- goto out;
-
- //the surplus allocation is propotional to grt
consumed =
- surplus * p_cls->shares.unused_guarantee / total_grt;
-
- if (! consumed) //no more share
- goto out;
-
- //hard limit and demand limit
- inc_limit = max_demand - p_cls->stat.meshare;
+ old_surplus * c_cls->shares.my_guarantee /
+ p_cls->shares.total_guarantee;
- if (check_sl) {
- int mesl = p_cls->stat.eshare * get_mysoft_limit(p_cls)
- /total_grt;
- if (mesl < max_demand)
- inc_limit = mesl - p_cls->stat.meshare;
- }
-
- if (consumed > inc_limit)
+ //check limit
+ inc_limit = c_cls->stat.effective_limit - c_cls->stat.effective_share;
+ if (inc_limit <= consumed) {
+ c_cls->stat.glut = 1;
consumed = inc_limit;
+ }
- BUG_ON(consumed < 0);
- out:
+ c_cls->stat.effective_share += consumed;
+ out:
return consumed;
}
-
/*
- * allocate surplus to all its children and also its default class
- */
-static int alloc_surplus_single_round(
- int surplus,
- struct ckrm_core_class *parent,
- struct ckrm_cpu_class *p_cls,
- int check_sl)
-{
- struct ckrm_cpu_class *c_cls;
- struct ckrm_core_class *child_core = NULL;
- int total_consumed = 0,consumed;
-
- //first allocate to the default class
- consumed =
- consume_self_surplus(surplus,p_cls,check_sl);
-
- if (consumed > 0) {
- set_meshare(&p_cls->stat,p_cls->stat.meshare + consumed);
- total_consumed += consumed;
- }
-
- do {
- child_core = ckrm_get_next_child(parent, child_core);
- if (child_core) {
- c_cls = ckrm_get_cpu_class(child_core);
- if (! c_cls)
- return -1;
-
- consumed =
- consume_surplus(surplus, c_cls,
- p_cls,check_sl);
- if (consumed > 0) {
- set_eshare(&c_cls->stat,c_cls->stat.eshare + consumed);
- total_consumed += consumed;
- }
- }
- } while (child_core);
-
- return total_consumed;
-}
-
-/**
- * alloc_surplus_node: re-allocate the shares for children under parent
- * @parent: parent node
- * return the remaining surplus
- *
+ * re-allocate the shares for all the childs under this node
* task:
* 1. get total surplus
* 2. allocate surplus
* 3. set the effective_share of each node
*/
-static int alloc_surplus_node(struct ckrm_core_class *parent)
+static void alloc_surplus_node(struct ckrm_core_class *parent)
{
- struct ckrm_cpu_class *p_cls,*c_cls;
- int total_surplus,consumed;
- int check_sl;
- int ret = -1;
+ int total_surplus = 0, old_surplus = 0;
+ struct ckrm_cpu_class *p_cls = ckrm_get_cpu_class(parent);
struct ckrm_core_class *child_core = NULL;
-
- p_cls = ckrm_get_cpu_class(parent);
- if (! p_cls)
- goto realloc_out;
+ int self_share;
/*
- * get total surplus
+ * calculate surplus
+ * total_surplus = sum(child_surplus)
+ * reset glut flag
+ * initialize effective_share
*/
- total_surplus = p_cls->stat.eshare - p_cls->stat.egrt;
- BUG_ON(total_surplus < 0);
- total_surplus += get_my_node_surplus(p_cls);
-
do {
child_core = ckrm_get_next_child(parent, child_core);
if (child_core) {
- c_cls = ckrm_get_cpu_class(child_core);
- if (! c_cls)
- goto realloc_out;
+ struct ckrm_cpu_class *c_cls =
+ ckrm_get_cpu_class(child_core);
+ ckrm_stat_t *stat = &c_cls->stat;
total_surplus += get_node_surplus(c_cls);
+ stat->glut = 0;
+ set_effective_share(stat, stat->effective_guarantee);
}
} while (child_core);
-
- if (! total_surplus) {
- ret = 0;
- goto realloc_out;
- }
-
- /*
- * distributing the surplus
- * first with the check_sl enabled
- * once all the tasks has research the soft limit, disable check_sl and try again
- */
-
- check_sl = 1;
+ /*distribute the surplus */
+ child_core = NULL;
do {
- consumed = alloc_surplus_single_round(total_surplus,parent,p_cls,check_sl);
- if (consumed < 0) //something is wrong
- goto realloc_out;
+ if (!child_core) //keep the surplus of last round
+ old_surplus = total_surplus;
- if (! consumed)
- check_sl = 0;
- else
- total_surplus -= consumed;
+ child_core = ckrm_get_next_child(parent, child_core);
+ if (child_core) {
+ total_surplus -=
+ node_surplus_consume(old_surplus, child_core,
+ p_cls);
+ }
+ //start a new round if something is allocated in the last round
+ } while (child_core || (total_surplus != old_surplus));
- } while ((total_surplus > 0) && (consumed || check_sl) );
+ //any remaining surplus goes to the default class
+ self_share = p_cls->stat.effective_share *
+ p_cls->shares.unused_guarantee / p_cls->shares.total_guarantee;
+ self_share += total_surplus;
- ret = 0;
-
- realloc_out:
- return ret;
+ set_self_effective_share(&p_cls->stat, self_share);
}
/**
* alloc_surplus - reallocate unused shares
*
* class A's usused share should be allocated to its siblings
- * the re-allocation goes downward from the top
*/
-static int alloc_surplus(struct ckrm_core_class *root_core)
+static void alloc_surplus(struct ckrm_core_class *root_core)
{
- struct ckrm_core_class *cur_core, *child_core;
- // struct ckrm_cpu_class *cls;
- int ret = -1;
+ struct ckrm_core_class *cur_core, *child_core = NULL;
+ struct ckrm_cpu_class *cls;
- /*initialize*/
cur_core = root_core;
- child_core = NULL;
- // cls = ckrm_get_cpu_class(cur_core);
-
- /*the ckrm idle tasks get all what's remaining*/
- /*hzheng: uncomment the following like for hard limit support */
- // update_ckrm_idle(CKRM_SHARE_MAX - cls->stat.max_demand);
-
- repeat:
+ cls = ckrm_get_cpu_class(cur_core);
+ cls->stat.glut = 0;
+ set_effective_share(&cls->stat, cls->stat.effective_guarantee);
+ repeat:
//check exit
if (!cur_core)
- return 0;
-
- //visit this node only once
- if (! child_core)
- if ( alloc_surplus_node(cur_core) < 0 )
- return ret;
+ return;
+ //visit this node
+ alloc_surplus_node(cur_core);
//next child
child_core = ckrm_get_next_child(cur_core, child_core);
if (child_core) {
goto repeat;
}
-/**********************************************/
-/* CKRM Idle Tasks */
-/**********************************************/
-struct ckrm_cpu_class ckrm_idle_class_obj, *ckrm_idle_class;
-struct task_struct* ckrm_idle_tasks[NR_CPUS];
-
-/*how many ckrm idle tasks should I wakeup*/
-static inline int get_nr_idle(unsigned long surplus)
-{
- int cpu_online = cpus_weight(cpu_online_map);
- int nr_idle = 0;
-
- nr_idle = surplus * cpu_online;
- nr_idle >>= CKRM_SHARE_ACCURACY;
-
- if (surplus)
- nr_idle ++;
-
- if (nr_idle > cpu_online)
- nr_idle = cpu_online;
-
- return nr_idle;
-}
-
-/**
- * update_ckrm_idle: update the status of the idle class according to the new surplus
- * surplus: new system surplus
- *
- * Task:
- * -- update share of the idle class
- * -- wakeup idle tasks according to surplus
- */
-void update_ckrm_idle(unsigned long surplus)
-{
- int nr_idle = get_nr_idle(surplus);
- int i;
- struct task_struct* idle_task;
-
- set_eshare(&ckrm_idle_class->stat,surplus);
- set_meshare(&ckrm_idle_class->stat,surplus);
- /*wake up nr_idle idle tasks*/
- for_each_online_cpu(i) {
- idle_task = ckrm_idle_tasks[i];
- if (unlikely(idle_task->cpu_class != ckrm_idle_class)) {
- ckrm_cpu_change_class(idle_task,
- idle_task->cpu_class,
- ckrm_idle_class);
- }
- if (! idle_task)
- continue;
- if (i < nr_idle) {
- //activate it
- wake_up_process(idle_task);
- } else {
- //deactivate it
- idle_task->state = TASK_INTERRUPTIBLE;
- set_tsk_need_resched(idle_task);
- }
- }
-}
-
-static int ckrm_cpu_idled(void *nothing)
-{
- set_user_nice(current,19);
- daemonize("ckrm_idle_task");
-
- //deactivate it, it will be awakened by ckrm_cpu_monitor
- current->state = TASK_INTERRUPTIBLE;
- schedule();
-
- /*similar to cpu_idle */
- while (1) {
- while (!need_resched()) {
- ckrm_cpu_monitor(1);
- if (current_cpu_data.hlt_works_ok) {
- local_irq_disable();
- if (!need_resched()) {
- set_tsk_need_resched(current);
- safe_halt();
- } else
- local_irq_enable();
- }
- }
- schedule();
- }
- return 0;
-}
-
-/**
- * ckrm_start_ckrm_idle:
- * create the ckrm_idle_class and starts the idle tasks
- *
- */
-void ckrm_start_ckrm_idle(void)
-{
- int i;
- int ret;
- ckrm_shares_t shares;
-
- ckrm_idle_class = &ckrm_idle_class_obj;
- memset(ckrm_idle_class,0,sizeof(shares));
- /*don't care about the shares */
- init_cpu_class(ckrm_idle_class,&shares);
- printk(KERN_INFO"ckrm idle class %x created\n",(int)ckrm_idle_class);
-
- for_each_online_cpu(i) {
- ret = kernel_thread(ckrm_cpu_idled, 0, CLONE_KERNEL);
-
- /*warn on error, but the system should still work without it*/
- if (ret < 0)
- printk(KERN_ERR"Warn: can't start ckrm idle tasks\n");
- else {
- ckrm_idle_tasks[i] = find_task_by_pid(ret);
- if (!ckrm_idle_tasks[i])
- printk(KERN_ERR"Warn: can't find ckrm idle tasks %d\n",ret);
- }
- }
-}
-
-/**********************************************/
-/* Local Weight */
-/**********************************************/
-/**
- * adjust_class_local_weight: adjust the local weight for each cpu
- *
- * lrq->weight = lpr->pressure * class->weight / total_pressure
- */
-static void adjust_lrq_weight(struct ckrm_cpu_class *clsptr, int cpu_online)
-{
- unsigned long total_pressure = 0;
- ckrm_lrq_t* lrq;
- int i;
- unsigned long class_weight;
- unsigned long long lw;
-
- //get total pressure
- for_each_online_cpu(i) {
- lrq = get_ckrm_lrq(clsptr,i);
- total_pressure += lrq->lrq_load;
- }
-
- if (! total_pressure)
- return;
-
- class_weight = cpu_class_weight(clsptr) * cpu_online;
-
- /*
- * update weight for each cpu, minimun is 1
- */
- for_each_online_cpu(i) {
- lrq = get_ckrm_lrq(clsptr,i);
- if (! lrq->lrq_load)
- /*give idle class a high share to boost interactiveness */
- lw = cpu_class_weight(clsptr);
- else {
- lw = lrq->lrq_load * class_weight;
- do_div(lw,total_pressure);
- if (!lw)
- lw = 1;
- else if (lw > CKRM_SHARE_MAX)
- lw = CKRM_SHARE_MAX;
- }
-
- lrq->local_weight = lw;
- }
-}
-
-/*
- * assume called with class_list_lock read lock held
- */
-void adjust_local_weight(void)
-{
- static spinlock_t lock = SPIN_LOCK_UNLOCKED;
- struct ckrm_cpu_class *clsptr;
- int cpu_online;
-
- //do nothing if someone already holding the lock
- if (! spin_trylock(&lock))
- return;
-
- cpu_online = cpus_weight(cpu_online_map);
-
- //class status: demand, share,total_ns prio, index
- list_for_each_entry(clsptr,&active_cpu_classes,links) {
- adjust_lrq_weight(clsptr,cpu_online);
- }
-
- spin_unlock(&lock);
-}
-
-/**********************************************/
-/* Main */
-/**********************************************/
/**
*ckrm_cpu_monitor - adjust relative shares of the classes based on their progress
- *@check_min: if check_min is set, the call can't be within 100ms of last call
*
* this function is called every CPU_MONITOR_INTERVAL
* it computes the cpu demand of each class
* and re-allocate the un-used shares to other classes
*/
-void ckrm_cpu_monitor(int check_min)
+void ckrm_cpu_monitor(void)
{
- static spinlock_t lock = SPIN_LOCK_UNLOCKED;
- static unsigned long long last_check = 0;
- struct ckrm_core_class *root_core = get_default_cpu_class()->core;
- unsigned long long now;
-#define MIN_CPU_MONITOR_INTERVAL 100000000UL
-
+ struct ckrm_core_class *root_core = default_cpu_class->core;
if (!root_core)
return;
- //do nothing if someone already holding the lock
- if (! spin_trylock(&lock))
- return;
-
- read_lock(&class_list_lock);
-
- now = sched_clock();
-
- //consecutive check should be at least 100ms apart
- if (check_min && ((now - last_check) < MIN_CPU_MONITOR_INTERVAL))
- goto outunlock;
-
- last_check = now;
-
- if (update_effectives(root_core) != 0)
- goto outunlock;
-
- if (update_max_demand(root_core) != 0)
- goto outunlock;
-
-#ifndef ALLOC_SURPLUS_SUPPORT
-#warning "MEF taking out alloc_surplus"
-#else
- if (alloc_surplus(root_core) != 0)
- goto outunlock;
-#endif
-
- adjust_local_weight();
-
- outunlock:
- read_unlock(&class_list_lock);
- spin_unlock(&lock);
+ update_effective_guarantee_limit(root_core);
+ update_cpu_demand(root_core);
+ alloc_surplus(root_core);
}
/*****************************************************/
static int ckrm_cpu_monitord(void *nothing)
{
+ wait_queue_head_t wait;
+
+ init_waitqueue_head(&wait);
+
daemonize("ckrm_cpu_ctrld");
for (;;) {
/*sleep for sometime before next try*/
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(CPU_MONITOR_INTERVAL);
- ckrm_cpu_monitor(1);
+ interruptible_sleep_on_timeout(&wait, CPU_MONITOR_INTERVAL);
+ ckrm_cpu_monitor();
if (thread_exit) {
break;
}
}
cpu_monitor_pid = -1;
thread_exit = 2;
- printk(KERN_DEBUG "cpu_monitord exit\n");
+ printk("cpu_monitord exit\n");
return 0;
}
{
cpu_monitor_pid = kernel_thread(ckrm_cpu_monitord, 0, CLONE_KERNEL);
if (cpu_monitor_pid < 0) {
- printk(KERN_DEBUG "ckrm_cpu_monitord for failed\n");
+ printk("ckrm_cpu_monitord for failed\n");
}
}
void ckrm_kill_monitor(void)
{
- printk(KERN_DEBUG "killing process %d\n", cpu_monitor_pid);
+ wait_queue_head_t wait;
+ int interval = HZ;
+ init_waitqueue_head(&wait);
+
+ printk("killing process %d\n", cpu_monitor_pid);
if (cpu_monitor_pid > 0) {
thread_exit = 1;
while (thread_exit != 2) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(CPU_MONITOR_INTERVAL);
+ interruptible_sleep_on_timeout(&wait, interval);
}
}
}
int ckrm_cpu_monitor_init(void)
{
ckrm_start_monitor();
- /*hzheng: uncomment the following like for hard limit support */
- // ckrm_start_ckrm_idle();
return 0;
}
+++ /dev/null
-/* ckrm_socketaq.c - accept queue resource controller
- *
- * Copyright (C) Vivek Kashyap, IBM Corp. 2004
- *
- * Latest version, more details at http://ckrm.sf.net
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- */
-
-/* Changes
- * Initial version
- */
-
-/* Code Description: TBD
- *
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <asm/errno.h>
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <linux/ckrm.h>
-#include <linux/ckrm_rc.h>
-#include <net/tcp.h>
-
-#include <linux/ckrm_net.h>
-
-#define hnode_2_core(ptr) \
- ((ptr) ? container_of(ptr, struct ckrm_core_class, hnode) : NULL)
-
-#define CKRM_SAQ_MAX_DEPTH 3 // 0 => /rcfs
- // 1 => socket_aq
- // 2 => socket_aq/listen_class
- // 3 => socket_aq/listen_class/accept_queues
- // 4 => Not allowed
-
-typedef struct ckrm_laq_res {
- spinlock_t reslock;
- atomic_t refcnt;
- struct ckrm_shares shares;
- struct ckrm_core_class *core;
- struct ckrm_core_class *pcore;
- int my_depth;
- int my_id;
- unsigned int min_ratio;
-} ckrm_laq_res_t;
-
-static int my_resid = -1;
-
-extern struct ckrm_core_class *rcfs_create_under_netroot(char *, int, int);
-extern struct ckrm_core_class *rcfs_make_core(struct dentry *,
- struct ckrm_core_class *);
-
-void laq_res_hold(struct ckrm_laq_res *res)
-{
- atomic_inc(&res->refcnt);
- return;
-}
-
-void laq_res_put(struct ckrm_laq_res *res)
-{
- if (atomic_dec_and_test(&res->refcnt))
- kfree(res);
- return;
-}
-
-/* Initialize rescls values
- */
-static void laq_res_initcls(void *my_res)
-{
- ckrm_laq_res_t *res = my_res;
-
- res->shares.my_guarantee = CKRM_SHARE_DONTCARE;
- res->shares.my_limit = CKRM_SHARE_DONTCARE;
- res->shares.total_guarantee = CKRM_SHARE_DFLT_TOTAL_GUARANTEE;
- res->shares.max_limit = CKRM_SHARE_DFLT_MAX_LIMIT;
- res->shares.unused_guarantee = CKRM_SHARE_DFLT_TOTAL_GUARANTEE;
- res->shares.cur_max_limit = 0;
-}
-
-static int atoi(char *s)
-{
- int k = 0;
- while (*s)
- k = *s++ - '0' + (k * 10);
- return k;
-}
-
-static char *laq_get_name(struct ckrm_core_class *c)
-{
- char *p = (char *)c->name;
-
- while (*p)
- p++;
- while (*p != '/' && p != c->name)
- p--;
-
- return ++p;
-}
-
-static void *laq_res_alloc(struct ckrm_core_class *core,
- struct ckrm_core_class *parent)
-{
- ckrm_laq_res_t *res, *pres;
- int pdepth;
-
- if (parent)
- pres = ckrm_get_res_class(parent, my_resid, ckrm_laq_res_t);
- else
- pres = NULL;
-
- if (core == core->classtype->default_class)
- pdepth = 1;
- else {
- if (!parent)
- return NULL;
- pdepth = 1 + pres->my_depth;
- }
-
- res = kmalloc(sizeof(ckrm_laq_res_t), GFP_ATOMIC);
- if (res) {
- memset(res, 0, sizeof(res));
- spin_lock_init(&res->reslock);
- laq_res_hold(res);
- res->my_depth = pdepth;
- if (pdepth == 2) // listen class
- res->my_id = 0;
- else if (pdepth == 3)
- res->my_id = atoi(laq_get_name(core));
- res->core = core;
- res->pcore = parent;
-
- // rescls in place, now initialize contents other than
- // hierarchy pointers
- laq_res_initcls(res); // acts as initialising value
- }
-
- return res;
-}
-
-static void laq_res_free(void *my_res)
-{
- ckrm_laq_res_t *res = (ckrm_laq_res_t *) my_res;
- ckrm_laq_res_t *parent;
-
- if (!res)
- return;
-
- if (res->my_depth != 3) {
- kfree(res);
- return;
- }
-
- parent = ckrm_get_res_class(res->pcore, my_resid, ckrm_laq_res_t);
- if (!parent) // Should never happen
- return;
-
- spin_lock(&parent->reslock);
- spin_lock(&res->reslock);
-
- // return child's guarantee to parent node
- // Limits have no meaning for accept queue control
- child_guarantee_changed(&parent->shares, res->shares.my_guarantee, 0);
-
- spin_unlock(&res->reslock);
- laq_res_put(res);
- spin_unlock(&parent->reslock);
- return;
-}
-
-/**************************************************************************
- * SHARES ***
- **************************************************************************/
-
-void laq_set_aq_value(struct ckrm_net_struct *ns, unsigned int *aq_ratio)
-{
- int i;
- struct tcp_opt *tp;
-
- tp = tcp_sk(ns->ns_sk);
- for (i = 0; i < NUM_ACCEPT_QUEUES; i++)
- tp->acceptq[i].aq_ratio = aq_ratio[i];
- return;
-}
-void laq_set_aq_values(ckrm_laq_res_t * parent, unsigned int *aq_ratio)
-{
-
- struct ckrm_net_struct *ns;
- struct ckrm_core_class *core = parent->core;
-
- class_lock(core);
- list_for_each_entry(ns, &core->objlist, ckrm_link) {
- laq_set_aq_value(ns, aq_ratio);
- }
- class_unlock(core);
- return;
-}
-
-static void calculate_aq_ratios(ckrm_laq_res_t * res, unsigned int *aq_ratio)
-{
- struct ckrm_hnode *chnode;
- ckrm_laq_res_t *child;
- unsigned int min;
- int i;
-
- min = aq_ratio[0] = (unsigned int)res->shares.unused_guarantee;
-
- list_for_each_entry(chnode, &res->core->hnode.children, siblings) {
- child = hnode_2_core(chnode)->res_class[my_resid];
-
- aq_ratio[child->my_id] =
- (unsigned int)child->shares.my_guarantee;
- if (aq_ratio[child->my_id] == CKRM_SHARE_DONTCARE)
- aq_ratio[child->my_id] = 0;
- if (aq_ratio[child->my_id] &&
- ((unsigned int)aq_ratio[child->my_id] < min))
- min = (unsigned int)child->shares.my_guarantee;
- }
-
- if (min == 0) {
- min = 1;
- // default takes all if nothing specified
- aq_ratio[0] = 1;
- }
- res->min_ratio = min;
-
- for (i = 0; i < NUM_ACCEPT_QUEUES; i++)
- aq_ratio[i] = aq_ratio[i] / min;
-}
-
-static int laq_set_share_values(void *my_res, struct ckrm_shares *shares)
-{
- ckrm_laq_res_t *res = my_res;
- ckrm_laq_res_t *parent;
- unsigned int aq_ratio[NUM_ACCEPT_QUEUES];
- int rc = 0;
-
- if (!res)
- return -EINVAL;
-
- if (!res->pcore) {
- // something is badly wrong
- printk(KERN_ERR "socketaq internal inconsistency\n");
- return -EBADF;
- }
-
- parent = ckrm_get_res_class(res->pcore, my_resid, ckrm_laq_res_t);
- if (!parent) // socketclass does not have a share interface
- return -EINVAL;
-
- // Ensure that we ignore limit values
- shares->my_limit = CKRM_SHARE_DONTCARE;
- shares->max_limit = CKRM_SHARE_UNCHANGED;
-
- if (res->my_depth == 0) {
- printk(KERN_ERR "socketaq bad entry\n");
- return -EBADF;
- } else if (res->my_depth == 1) {
- // can't be written to. This is an internal default.
- return -EINVAL;
- } else if (res->my_depth == 2) {
- //nothin to inherit
- if (!shares->total_guarantee) {
- return -EINVAL;
- }
- parent = res;
- shares->my_guarantee = CKRM_SHARE_DONTCARE;
- } else if (res->my_depth == 3) {
- // accept queue itself.
- shares->total_guarantee = CKRM_SHARE_UNCHANGED;
- }
-
- ckrm_lock_hier(parent->pcore);
- spin_lock(&parent->reslock);
- rc = set_shares(shares, &res->shares,
- (parent == res) ? NULL : &parent->shares);
- if (rc) {
- spin_unlock(&res->reslock);
- ckrm_unlock_hier(res->pcore);
- return rc;
- }
- calculate_aq_ratios(parent, aq_ratio);
- laq_set_aq_values(parent, aq_ratio);
- spin_unlock(&parent->reslock);
- ckrm_unlock_hier(parent->pcore);
-
- return rc;
-}
-
-static int laq_get_share_values(void *my_res, struct ckrm_shares *shares)
-{
- ckrm_laq_res_t *res = my_res;
-
- if (!res)
- return -EINVAL;
- *shares = res->shares;
- return 0;
-}
-
-/**************************************************************************
- * STATS ***
- **************************************************************************/
-
-void
-laq_print_aq_stats(struct seq_file *sfile, struct tcp_acceptq_info *taq, int i)
-{
- seq_printf(sfile, "Class %d connections:\n\taccepted: %u\n\t"
- "queued: %u\n\twait_time: %u\n",
- i, taq->acceptq_count, taq->acceptq_qcount,
- jiffies_to_msecs(taq->acceptq_wait_time));
-
- if (i)
- return;
-
- for (i = 1; i < NUM_ACCEPT_QUEUES; i++) {
- taq[0].acceptq_wait_time += taq[i].acceptq_wait_time;
- taq[0].acceptq_qcount += taq[i].acceptq_qcount;
- taq[0].acceptq_count += taq[i].acceptq_count;
- }
-
- seq_printf(sfile, "Totals :\n\taccepted: %u\n\t"
- "queued: %u\n\twait_time: %u\n",
- taq->acceptq_count, taq->acceptq_qcount,
- jiffies_to_msecs(taq->acceptq_wait_time));
-
- return;
-}
-
-void
-laq_get_aq_stats(ckrm_laq_res_t * pres, ckrm_laq_res_t * mres,
- struct tcp_acceptq_info *taq)
-{
- struct ckrm_net_struct *ns;
- struct ckrm_core_class *core = pres->core;
- struct tcp_opt *tp;
- int a = mres->my_id;
- int z;
-
- if (a == 0)
- z = NUM_ACCEPT_QUEUES;
- else
- z = a + 1;
-
- // XXX Instead of holding a class_lock introduce a rw
- // lock to be write locked by listen callbacks and read locked here.
- // - VK
- class_lock(pres->core);
- list_for_each_entry(ns, &core->objlist, ckrm_link) {
- tp = tcp_sk(ns->ns_sk);
- for (; a < z; a++) {
- taq->acceptq_wait_time += tp->acceptq[a].aq_wait_time;
- taq->acceptq_qcount += tp->acceptq[a].aq_qcount;
- taq->acceptq_count += tp->acceptq[a].aq_count;
- taq++;
- }
- }
- class_unlock(pres->core);
-}
-
-static int laq_get_stats(void *my_res, struct seq_file *sfile)
-{
- ckrm_laq_res_t *res = my_res;
- ckrm_laq_res_t *parent;
- struct tcp_acceptq_info taq[NUM_ACCEPT_QUEUES];
- int rc = 0;
-
- if (!res)
- return -EINVAL;
-
- if (!res->pcore) {
- // something is badly wrong
- printk(KERN_ERR "socketaq internal inconsistency\n");
- return -EBADF;
- }
-
- parent = ckrm_get_res_class(res->pcore, my_resid, ckrm_laq_res_t);
- if (!parent) { // socketclass does not have a stat interface
- printk(KERN_ERR "socketaq internal fs inconsistency\n");
- return -EINVAL;
- }
-
- memset(taq, 0, sizeof(struct tcp_acceptq_info) * NUM_ACCEPT_QUEUES);
-
- switch (res->my_depth) {
-
- default:
- case 0:
- printk(KERN_ERR "socket class bad entry\n");
- rc = -EBADF;
- break;
-
- case 1: // can't be read from. this is internal default.
- // return -EINVAL
- rc = -EINVAL;
- break;
-
- case 2: // return the default and total
- ckrm_lock_hier(res->core); // block any deletes
- laq_get_aq_stats(res, res, &taq[0]);
- laq_print_aq_stats(sfile, &taq[0], 0);
- ckrm_unlock_hier(res->core); // block any deletes
- break;
-
- case 3:
- ckrm_lock_hier(parent->core); // block any deletes
- laq_get_aq_stats(parent, res, &taq[res->my_id]);
- laq_print_aq_stats(sfile, &taq[res->my_id], res->my_id);
- ckrm_unlock_hier(parent->core); // block any deletes
- break;
- }
-
- return rc;
-}
-
-/*
- * The network connection is reclassified to this class. Update its shares.
- * The socket lock is held.
- */
-static void laq_change_resclass(void *n, void *old, void *r)
-{
- struct ckrm_net_struct *ns = (struct ckrm_net_struct *)n;
- struct ckrm_laq_res *res = (struct ckrm_laq_res *)r;
- unsigned int aq_ratio[NUM_ACCEPT_QUEUES];
-
- if (res->my_depth != 2)
- return;
-
- // a change to my_depth == 3 ie. the accept classes cannot happen.
- // there is no target file
- if (res->my_depth == 2) { // it is one of the socket classes
- ckrm_lock_hier(res->pcore);
- // share rule: hold parent resource lock. then self.
- // However, since my_depth == 1 is a generic class it is not
- // needed here. Self lock is enough.
- spin_lock(&res->reslock);
- calculate_aq_ratios(res, aq_ratio);
- class_lock(res->pcore);
- laq_set_aq_value(ns, aq_ratio);
- class_unlock(res->pcore);
- spin_unlock(&res->reslock);
- ckrm_unlock_hier(res->pcore);
- }
-
- return;
-}
-
-struct ckrm_res_ctlr laq_rcbs = {
- .res_name = "laq",
- .resid = -1, // dynamically assigned
- .res_alloc = laq_res_alloc,
- .res_free = laq_res_free,
- .set_share_values = laq_set_share_values,
- .get_share_values = laq_get_share_values,
- .get_stats = laq_get_stats,
- .change_resclass = laq_change_resclass,
- //.res_initcls = laq_res_initcls, //HUBERTUS: unnecessary !!
-};
-
-int __init init_ckrm_laq_res(void)
-{
- struct ckrm_classtype *clstype;
- int resid;
-
- clstype = ckrm_find_classtype_by_name("socketclass");
- if (clstype == NULL) {
- printk(KERN_INFO " Unknown ckrm classtype<socketclass>");
- return -ENOENT;
- }
-
- if (my_resid == -1) {
- resid = ckrm_register_res_ctlr(clstype, &laq_rcbs);
- if (resid >= 0)
- my_resid = resid;
- printk(KERN_DEBUG "........init_ckrm_listen_aq_res -> %d\n", my_resid);
- }
- return 0;
-
-}
-
-void __exit exit_ckrm_laq_res(void)
-{
- ckrm_unregister_res_ctlr(&laq_rcbs);
- my_resid = -1;
-}
-
-module_init(init_ckrm_laq_res)
- module_exit(exit_ckrm_laq_res)
-
- MODULE_LICENSE("GPL");
+++ /dev/null
-/* ckrm_mem.c - Memory Resource Manager for CKRM
- *
- * Copyright (C) Chandra Seetharaman, IBM Corp. 2004
- *
- * Provides a Memory Resource controller for CKRM
- *
- * Latest version, more details at http://ckrm.sf.net
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- */
-
-/* Code Description: TBD
- *
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <asm/errno.h>
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <linux/pagemap.h>
-#include <linux/swap.h>
-#include <linux/swapops.h>
-#include <linux/cache.h>
-#include <linux/percpu.h>
-#include <linux/pagevec.h>
-
-#include <linux/ckrm_mem_inline.h>
-
-#include <asm/uaccess.h>
-#include <asm/pgtable.h>
-
-#define MEM_NAME "mem"
-
-#define CKRM_MEM_MAX_HIERARCHY 2 // allows only upto 2 levels - 0, 1 & 2
-
-/* all 1-level memory_share_class are chained together */
-static LIST_HEAD(ckrm_memclass_list);
-LIST_HEAD(ckrm_shrink_list);
-EXPORT_SYMBOL(ckrm_shrink_list);
-spinlock_t ckrm_mem_lock = SPIN_LOCK_UNLOCKED; // protects both lists above
-EXPORT_SYMBOL(ckrm_mem_lock);
-unsigned int ckrm_tot_lru_pages; // total # of pages in the system
- // currently doesn't handle memory add/remove
-EXPORT_SYMBOL(ckrm_tot_lru_pages);
-
-static ckrm_mem_res_t *ckrm_mem_root_class;
-atomic_t ckrm_mem_real_count = ATOMIC_INIT(0);
-EXPORT_SYMBOL(ckrm_mem_real_count);
-static void ckrm_mem_evaluate_all_pages(void);
-
-/* Initialize rescls values
- * May be called on each rcfs unmount or as part of error recovery
- * to make share values sane.
- * Does not traverse hierarchy reinitializing children.
- */
-
-static void
-set_ckrm_tot_pages(void)
-{
- struct zone *zone;
- int tot_lru_pages = 0;
-
- for_each_zone(zone) {
- tot_lru_pages += zone->nr_active;
- tot_lru_pages += zone->nr_inactive;
- tot_lru_pages += zone->free_pages;
- }
- ckrm_tot_lru_pages = tot_lru_pages;
-}
-
-static void
-mem_res_initcls_one(void *my_res)
-{
- ckrm_mem_res_t *res = my_res;
-
- memset(res, 0, sizeof(ckrm_mem_res_t));
-
- res->shares.my_guarantee = CKRM_SHARE_DONTCARE;
- res->shares.my_limit = CKRM_SHARE_DONTCARE;
- res->shares.total_guarantee = CKRM_SHARE_DFLT_TOTAL_GUARANTEE;
- res->shares.max_limit = CKRM_SHARE_DFLT_MAX_LIMIT;
- res->shares.unused_guarantee = CKRM_SHARE_DFLT_TOTAL_GUARANTEE;
- res->shares.cur_max_limit = 0;
-
- res->pg_guar = CKRM_SHARE_DONTCARE;
- res->pg_limit = CKRM_SHARE_DONTCARE;
- res->pg_unused = 0;
-}
-
-static void *
-mem_res_alloc(struct ckrm_core_class *core, struct ckrm_core_class *parent)
-{
- ckrm_mem_res_t *res, *parres;
-
- if (mem_rcbs.resid == -1) {
- return NULL;
- }
-
- parres = ckrm_get_res_class(parent, mem_rcbs.resid, ckrm_mem_res_t);
- if (parres && (parres->hier == CKRM_MEM_MAX_HIERARCHY)) {
- // allows only upto CKRM_MEM_MAX_HIERARCHY
- return NULL;
- }
-
- if (unlikely((parent == NULL) && (ckrm_mem_root_class != NULL))) {
- printk(KERN_ERR "MEM_RC: Only one root class is allowed\n");
- return NULL;
- }
-
- if (unlikely((parent != NULL) && (ckrm_mem_root_class == NULL))) {
- printk(KERN_ERR "MEM_RC: creating child class without root class\n");
- return NULL;
- }
-
- res = kmalloc(sizeof(ckrm_mem_res_t), GFP_ATOMIC);
-
- if (res) {
- mem_res_initcls_one(res);
- res->core = core;
- res->parent = parent;
- spin_lock(&ckrm_mem_lock);
- list_add(&res->mcls_list, &ckrm_memclass_list);
- spin_unlock(&ckrm_mem_lock);
- if (parent == NULL) {
- // I am part of the root class. So, set the max to
- // number of pages available
- res->pg_guar = ckrm_tot_lru_pages;
- res->pg_unused = ckrm_tot_lru_pages;
- res->pg_limit = ckrm_tot_lru_pages;
- res->hier = 0;
- ckrm_mem_root_class = res;
- } else {
- res->hier = parres->hier + 1;
- }
- mem_class_get(res);
- }
- else
- printk(KERN_ERR "mem_res_alloc: failed GFP_ATOMIC alloc\n");
- return res;
-}
-
-/*
- * It is the caller's responsibility to make sure that the parent only
- * has chilren that are to be accounted. i.e if a new child is added
- * this function should be called after it has been added, and if a
- * child is deleted this should be called after the child is removed.
- */
-static void
-child_maxlimit_changed_local(ckrm_mem_res_t *parres)
-{
- int maxlimit = 0;
- ckrm_mem_res_t *childres;
- ckrm_core_class_t *child = NULL;
-
- // run thru parent's children and get the new max_limit of the parent
- ckrm_lock_hier(parres->core);
- while ((child = ckrm_get_next_child(parres->core, child)) != NULL) {
- childres = ckrm_get_res_class(child, mem_rcbs.resid,
- ckrm_mem_res_t);
- if (maxlimit < childres->shares.my_limit) {
- maxlimit = childres->shares.my_limit;
- }
- }
- ckrm_unlock_hier(parres->core);
- parres->shares.cur_max_limit = maxlimit;
-}
-
-static void
-mem_res_free(void *my_res)
-{
- ckrm_mem_res_t *res = my_res;
- ckrm_mem_res_t *parres;
-
- if (!res)
- return;
-
- res->shares.my_guarantee = 0;
- res->shares.my_limit = 0;
- res->pg_guar = 0;
- res->pg_limit = 0;
- res->pg_unused = 0;
-
- parres = ckrm_get_res_class(res->parent, mem_rcbs.resid, ckrm_mem_res_t);
- // return child's limit/guarantee to parent node
- if (parres) {
- child_guarantee_changed(&parres->shares, res->shares.my_guarantee, 0);
- child_maxlimit_changed_local(parres);
- }
- ckrm_mem_evaluate_all_pages();
- res->core = NULL;
-
- spin_lock(&ckrm_mem_lock);
- list_del(&res->mcls_list);
- spin_unlock(&ckrm_mem_lock);
- mem_class_put(res);
- return;
-}
-
-/*
- * Recalculate the guarantee and limit in # of pages... and propagate the
- * same to children.
- * Caller is responsible for protecting res and for the integrity of parres
- */
-static void
-recalc_and_propagate(ckrm_mem_res_t * res, ckrm_mem_res_t * parres)
-{
- ckrm_core_class_t *child = NULL;
- ckrm_mem_res_t *childres;
- int resid = mem_rcbs.resid;
- struct ckrm_shares *self = &res->shares;
-
- if (parres) {
- struct ckrm_shares *par = &parres->shares;
-
- // calculate pg_guar and pg_limit
- //
- if (parres->pg_guar == CKRM_SHARE_DONTCARE ||
- self->my_guarantee == CKRM_SHARE_DONTCARE) {
- res->pg_guar = CKRM_SHARE_DONTCARE;
- } else if (par->total_guarantee) {
- u64 temp = (u64) self->my_guarantee * parres->pg_guar;
- do_div(temp, par->total_guarantee);
- res->pg_guar = (int) temp;
- } else {
- res->pg_guar = 0;
- }
-
- if (parres->pg_limit == CKRM_SHARE_DONTCARE ||
- self->my_limit == CKRM_SHARE_DONTCARE) {
- res->pg_limit = CKRM_SHARE_DONTCARE;
- } else if (par->max_limit) {
- u64 temp = (u64) self->my_limit * parres->pg_limit;
- do_div(temp, par->max_limit);
- res->pg_limit = (int) temp;
- } else {
- res->pg_limit = 0;
- }
- }
-
- // Calculate unused units
- if (res->pg_guar == CKRM_SHARE_DONTCARE) {
- res->pg_unused = CKRM_SHARE_DONTCARE;
- } else if (self->total_guarantee) {
- u64 temp = (u64) self->unused_guarantee * res->pg_guar;
- do_div(temp, self->total_guarantee);
- res->pg_unused = (int) temp;
- } else {
- res->pg_unused = 0;
- }
-
- // propagate to children
- ckrm_lock_hier(res->core);
- while ((child = ckrm_get_next_child(res->core, child)) != NULL) {
- childres = ckrm_get_res_class(child, resid, ckrm_mem_res_t);
- recalc_and_propagate(childres, res);
- }
- ckrm_unlock_hier(res->core);
- return;
-}
-
-static int
-mem_set_share_values(void *my_res, struct ckrm_shares *shares)
-{
- ckrm_mem_res_t *res = my_res;
- ckrm_mem_res_t *parres;
- int rc = EINVAL;
-
- if (!res)
- return -EINVAL;
-
- parres = ckrm_get_res_class(res->parent, mem_rcbs.resid, ckrm_mem_res_t);
-
- rc = set_shares(shares, &res->shares, parres ? &parres->shares : NULL);
-
- if ((rc == 0) && (parres != NULL)) {
- child_maxlimit_changed_local(parres);
- recalc_and_propagate(parres, NULL);
- }
- return rc;
-}
-
-static int
-mem_get_share_values(void *my_res, struct ckrm_shares *shares)
-{
- ckrm_mem_res_t *res = my_res;
-
- if (!res)
- return -EINVAL;
- *shares = res->shares;
- return 0;
-}
-
-static int
-mem_get_stats(void *my_res, struct seq_file *sfile)
-{
- ckrm_mem_res_t *res = my_res;
-
- if (!res)
- return -EINVAL;
-
-#if 0
- seq_printf(sfile, "tot %6d;gua %6d;lmt %6d;unu %6d;"
- "lnt %6d;bor %6d;rlt %6d\n", atomic_read(&res->pg_total),
- res->pg_guar, res->pg_limit, res->pg_unused, res->pg_lent,
- res->pg_borrowed, atomic_read(&ckrm_mem_real_count));
-#endif
-
-
- seq_printf(sfile, "----------- Memory Resource stats start -----------\n");
- seq_printf(sfile, "Number of pages used(including pages lent to children):"
- " %d\n", atomic_read(&res->pg_total));
- seq_printf(sfile, "Number of pages guaranteed: %d\n",
- res->pg_guar);
- seq_printf(sfile, "Maximum limit of pages: %d\n",
- res->pg_limit);
- seq_printf(sfile, "Total number of pages available"
- "(after serving guarantees to children): %d\n",
- res->pg_unused);
- seq_printf(sfile, "Number of pages lent to children: %d\n",
- res->pg_lent);
- seq_printf(sfile, "Number of pages borrowed from the parent: %d\n",
- res->pg_borrowed);
- seq_printf(sfile, "----------- Memory Resource stats end -----------\n");
-
- return 0;
-}
-
-static void
-mem_change_resclass(void *tsk, void *old, void *new)
-{
- struct mm_struct *mm;
- struct task_struct *task = tsk, *t1;
- struct ckrm_mem_res *prev_mmcls;
-
- if (!task->mm || (new == old) || (old == (void *) -1))
- return;
-
- mm = task->active_mm;
- spin_lock(&mm->peertask_lock);
- prev_mmcls = mm->memclass;
-
- if (new == NULL) {
- list_del_init(&task->mm_peers);
- } else {
- int found = 0;
- list_for_each_entry(t1, &mm->tasklist, mm_peers) {
- if (t1 == task) {
- found++;
- break;
- }
- }
- if (!found) {
- list_del_init(&task->mm_peers);
- list_add_tail(&task->mm_peers, &mm->tasklist);
- }
- }
-
- spin_unlock(&mm->peertask_lock);
- ckrm_mem_evaluate_mm(mm);
- /*
- printk("chg_cls: task <%s:%d> mm %p oldmm %s newmm %s o %s n %s\n",
- task->comm, task->pid, mm, prev_mmcls ? prev_mmcls->core->name:
- "NULL", mm->memclass ? mm->memclass->core->name : "NULL",
- o ? o->core->name: "NULL", n ? n->core->name: "NULL");
- */
- return;
-}
-
-// config file is available only at the root level,
-// so assuming my_res to be the system level class
-static int
-mem_set_config(void *my_res, const char *cfgstr)
-{
- ckrm_mem_res_t *res = my_res;
-
- printk(KERN_INFO "%s class of %s is called with config<%s>\n",
- MEM_NAME, res->core->name, cfgstr);
- return 0;
-}
-
-static int
-mem_show_config(void *my_res, struct seq_file *sfile)
-{
- struct zone *zone;
- ckrm_mem_res_t *res = my_res;
- int active = 0, inactive = 0, fr = 0;
-
- if (!res)
- return -EINVAL;
-
- for_each_zone(zone) {
- active += zone->nr_active;
- inactive += zone->nr_inactive;
- fr += zone->free_pages;
- }
- seq_printf(sfile, "res=%s;tot_pages=%d,active=%d,inactive=%d,free=%d\n",
- MEM_NAME, ckrm_tot_lru_pages,active,inactive,fr);
-
-
- return 0;
-}
-
-static int
-mem_reset_stats(void *my_res)
-{
- ckrm_mem_res_t *res = my_res;
- printk(KERN_INFO " memclass of %s called for reset\n", res->core->name);
- return 0;
-}
-
-struct ckrm_res_ctlr mem_rcbs = {
- .res_name = MEM_NAME,
- .res_hdepth = CKRM_MEM_MAX_HIERARCHY,
- .resid = -1,
- .res_alloc = mem_res_alloc,
- .res_free = mem_res_free,
- .set_share_values = mem_set_share_values,
- .get_share_values = mem_get_share_values,
- .get_stats = mem_get_stats,
- .change_resclass = mem_change_resclass,
- .show_config = mem_show_config,
- .set_config = mem_set_config,
- .reset_stats = mem_reset_stats,
-};
-
-EXPORT_SYMBOL(mem_rcbs);
-
-int __init
-init_ckrm_mem_res(void)
-{
- struct ckrm_classtype *clstype;
- int resid = mem_rcbs.resid;
-
- set_ckrm_tot_pages();
- clstype = ckrm_find_classtype_by_name("taskclass");
- if (clstype == NULL) {
- printk(KERN_INFO " Unknown ckrm classtype<taskclass>");
- return -ENOENT;
- }
-
- if (resid == -1) {
- resid = ckrm_register_res_ctlr(clstype, &mem_rcbs);
- if (resid != -1) {
- mem_rcbs.classtype = clstype;
- }
- }
- return ((resid < 0) ? resid : 0);
-}
-
-void __exit
-exit_ckrm_mem_res(void)
-{
- ckrm_unregister_res_ctlr(&mem_rcbs);
- mem_rcbs.resid = -1;
-}
-
-module_init(init_ckrm_mem_res)
-module_exit(exit_ckrm_mem_res)
-
-static void
-set_flags_of_children(ckrm_mem_res_t *parres, unsigned int flag)
-{
- ckrm_mem_res_t *childres;
- ckrm_core_class_t *child = NULL;
-
- parres->reclaim_flags |= flag;
- ckrm_lock_hier(parres->core);
- while ((child = ckrm_get_next_child(parres->core, child)) != NULL) {
- childres = ckrm_get_res_class(child, mem_rcbs.resid,
- ckrm_mem_res_t);
- set_flags_of_children(childres, flag);
- }
- ckrm_unlock_hier(parres->core);
- return;
-}
-
-// FIXME: more attention is needed to this function
-static unsigned int
-set_usage_flags(ckrm_mem_res_t *res)
-{
- int tot_usage, cls_usage, range, guar;
-
- if (res->pg_limit == CKRM_SHARE_DONTCARE) {
- // No limit is set for the class. don't bother it
- res->reclaim_flags = 0;
- return res->reclaim_flags;
- }
-
- tot_usage = atomic_read(&res->pg_total);
- cls_usage = tot_usage - res->pg_lent;
- guar = (res->pg_guar > 0) ? res->pg_guar : 0;
- range = res->pg_limit - guar;
-
- if ((tot_usage > (guar + ((110 * range) / 100))) &&
- (res->pg_lent > (guar + ((25 * range) / 100)))) {
- set_flags_of_children(res, CLS_PARENT_OVER);
- }
-
- if (cls_usage > (guar + ((110 * range) / 100))) {
- res->reclaim_flags |= CLS_OVER_110;
- } else if (cls_usage > (guar + range)) {
- res->reclaim_flags |= CLS_OVER_100;
- } else if (cls_usage > (guar + ((3 * range) / 4))) {
- res->reclaim_flags |= CLS_OVER_75;
- } else if (cls_usage > (guar + (range / 2))) {
- res->reclaim_flags |= CLS_OVER_50;
- } else if (cls_usage > (guar + (range / 4))) {
- res->reclaim_flags |= CLS_OVER_25;
- } else if (cls_usage > guar) {
- res->reclaim_flags |= CLS_OVER_GUAR;
- } else {
- res->reclaim_flags = 0;
- }
- return res->reclaim_flags;
-}
-
-/*
- * The functions ckrm_setup_reclamation(), ckrm_teardown_reclamation(),
- * ckrm_get_reclaim_bits() and the macro ckrm_kick_page() along with the
- * macros CLS_* define how the pages are reclaimed.
- * Keeping this logic thru these interface eliminate the necessity to
- * change the reclaimation code in VM if we want to change the logic.
- */
-unsigned int
-ckrm_setup_reclamation(void)
-{
- ckrm_mem_res_t *res;
- unsigned int ret = 0;
-
- spin_lock(&ckrm_mem_lock);
- set_ckrm_tot_pages();
- ckrm_mem_root_class->pg_guar = ckrm_tot_lru_pages;
- ckrm_mem_root_class->pg_unused = ckrm_tot_lru_pages;
- ckrm_mem_root_class->pg_limit = ckrm_tot_lru_pages;
- recalc_and_propagate(ckrm_mem_root_class, NULL);
- list_for_each_entry(res, &ckrm_memclass_list, mcls_list) {
- ret |= set_usage_flags(res);
- }
- spin_unlock(&ckrm_mem_lock);
- return ret;
-}
-
-void
-ckrm_teardown_reclamation(void)
-{
- ckrm_mem_res_t *res;
- spin_lock(&ckrm_mem_lock);
- list_for_each_entry(res, &ckrm_memclass_list, mcls_list) {
- res->reclaim_flags = 0;
- }
- spin_unlock(&ckrm_mem_lock);
-}
-
-void
-ckrm_get_reclaim_bits(unsigned int *flags, unsigned int *extract)
-{
- int i, j, mask = 0;
-
- if (*flags == 0) {
- *extract = 0;
- return;
- }
-
- if (*flags & CLS_SHRINK) {
- *extract = CLS_SHRINK;
- *flags = 0;
- return;
- }
-
- i = fls(*flags);
- for (j = i-1; j > 0; j--) {
- mask = (mask<<1) | 1;
- }
- *extract = (CLS_FLAGS_ALL & ~mask);
- *flags &= ~*extract;
- return;
-}
-
-void
-ckrm_at_limit(ckrm_mem_res_t *cls)
-{
-#ifndef AT_LIMIT_SUPPORT
-#warning "ckrm_at_limit disabled due to problems with memory hog tests"
-#else
- struct zone *zone;
- unsigned long now = jiffies;
-
- if (!cls || (cls->pg_limit == CKRM_SHARE_DONTCARE) ||
- ((cls->flags & MEM_AT_LIMIT) == MEM_AT_LIMIT)) {
- return;
- }
- if ((cls->last_shrink + (10 * HZ)) < now) { // 10 seconds since last ?
- cls->last_shrink = now;
- cls->shrink_count = 0;
- }
- cls->shrink_count++;
- if (cls->shrink_count > 10) {
- return;
- }
- spin_lock(&ckrm_mem_lock);
- list_add(&cls->shrink_list, &ckrm_shrink_list);
- spin_unlock(&ckrm_mem_lock);
- cls->flags |= MEM_AT_LIMIT;
- for_each_zone(zone) {
- wakeup_kswapd(zone);
- break; // only once is enough
- }
-#endif // AT_LIMIT_SUPPORT
-}
-
-static int unmapped = 0, changed = 0, unchanged = 0, maxnull = 0,
-anovma = 0, fnovma = 0;
-static void
-ckrm_mem_evaluate_page_anon(struct page* page)
-{
- ckrm_mem_res_t* pgcls = page_class(page);
- ckrm_mem_res_t* maxshareclass = NULL;
- struct anon_vma *anon_vma = (struct anon_vma *) page->mapping;
- struct vm_area_struct *vma;
- struct mm_struct* mm;
- int v = 0;
-
- spin_lock(&anon_vma->lock);
- BUG_ON(list_empty(&anon_vma->head));
- list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
- v++;
- mm = vma->vm_mm;
- if (!maxshareclass ||
- ckrm_mem_share_compare(maxshareclass, mm->memclass) < 0) {
- maxshareclass = mm->memclass;
- }
- }
- spin_unlock(&anon_vma->lock);
- if (!v)
- anovma++;
-
- if (!maxshareclass)
- maxnull++;
- if (maxshareclass && (pgcls != maxshareclass)) {
- ckrm_change_page_class(page, maxshareclass);
- changed++;
- } else
- unchanged++;
- return;
-}
-
-static void
-ckrm_mem_evaluate_page_file(struct page* page)
-{
- ckrm_mem_res_t* pgcls = page_class(page);
- ckrm_mem_res_t* maxshareclass = NULL;
- struct address_space *mapping = page->mapping;
- struct vm_area_struct *vma = NULL;
- pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
- struct prio_tree_iter iter;
- struct mm_struct* mm;
- int v = 0;
-
- if (!mapping)
- return;
-
- if (!spin_trylock(&mapping->i_mmap_lock))
- return;
-
- while ((vma = vma_prio_tree_next(vma, &mapping->i_mmap,
- &iter, pgoff, pgoff)) != NULL) {
- v++;
- mm = vma->vm_mm;
- if (!maxshareclass || ckrm_mem_share_compare(maxshareclass,mm->memclass)<0)
- maxshareclass = mm->memclass;
- }
- spin_unlock(&mapping->i_mmap_lock);
-
- if (!v)
- fnovma++;
- if (!maxshareclass)
- maxnull++;
-
- if (maxshareclass && pgcls != maxshareclass) {
- ckrm_change_page_class(page, maxshareclass);
- changed++;
- } else
- unchanged++;
- return;
-}
-
-static void
-ckrm_mem_evaluate_page(struct page* page)
-{
- if (page->mapping) {
- if (PageAnon(page))
- ckrm_mem_evaluate_page_anon(page);
- else
- ckrm_mem_evaluate_page_file(page);
- } else
- unmapped++;
- return;
-}
-
-static void
-ckrm_mem_evaluate_all_pages()
-{
- struct page *page;
- struct zone *zone;
- int active = 0, inactive = 0, cleared = 0;
- int act_cnt, inact_cnt, idx;
- ckrm_mem_res_t *res;
-
- spin_lock(&ckrm_mem_lock);
- list_for_each_entry(res, &ckrm_memclass_list, mcls_list) {
- res->tmp_cnt = 0;
- }
- spin_unlock(&ckrm_mem_lock);
-
- for_each_zone(zone) {
- spin_lock_irq(&zone->lru_lock);
- list_for_each_entry(page, &zone->inactive_list, lru) {
- ckrm_mem_evaluate_page(page);
- active++;
- page_class(page)->tmp_cnt++;
- if (!test_bit(PG_ckrm_account, &page->flags))
- cleared++;
- }
- list_for_each_entry(page, &zone->active_list, lru) {
- ckrm_mem_evaluate_page(page);
- inactive++;
- page_class(page)->tmp_cnt++;
- if (!test_bit(PG_ckrm_account, &page->flags))
- cleared++;
- }
- spin_unlock_irq(&zone->lru_lock);
- }
- printk(KERN_DEBUG "all_pages: active %d inactive %d cleared %d\n",
- active, inactive, cleared);
- spin_lock(&ckrm_mem_lock);
- list_for_each_entry(res, &ckrm_memclass_list, mcls_list) {
- act_cnt = 0; inact_cnt = 0; idx = 0;
- for_each_zone(zone) {
- act_cnt += res->nr_active[idx];
- inact_cnt += res->nr_inactive[idx];
- idx++;
- }
- printk(KERN_DEBUG "all_pages: %s: tmp_cnt %d; act_cnt %d inact_cnt %d\n",
- res->core->name, res->tmp_cnt, act_cnt, inact_cnt);
- }
- spin_unlock(&ckrm_mem_lock);
-
- // check all mm's in the system to see which memclass they are attached
- // to.
- return;
-}
-
-static /*inline*/ int
-class_migrate_pmd(struct mm_struct* mm, struct vm_area_struct* vma,
- pmd_t* pmdir, unsigned long address, unsigned long end)
-{
- pte_t *pte, *orig_pte;
- unsigned long pmd_end;
-
- if (pmd_none(*pmdir))
- return 0;
- BUG_ON(pmd_bad(*pmdir));
-
- orig_pte = pte = pte_offset_map(pmdir,address);
- pmd_end = (address+PMD_SIZE)&PMD_MASK;
- if (end>pmd_end)
- end = pmd_end;
-
- do {
- if (pte_present(*pte)) {
- BUG_ON(mm->memclass == NULL);
- ckrm_change_page_class(pte_page(*pte), mm->memclass);
- // ckrm_mem_evaluate_page(pte_page(*pte));
- }
- address += PAGE_SIZE;
- pte++;
- } while(address && (address<end));
- pte_unmap(orig_pte);
- return 0;
-}
-
-static /*inline*/ int
-class_migrate_pgd(struct mm_struct* mm, struct vm_area_struct* vma,
- pgd_t* pgdir, unsigned long address, unsigned long end)
-{
- pmd_t* pmd;
- unsigned long pgd_end;
-
- if (pgd_none(*pgdir))
- return 0;
- BUG_ON(pgd_bad(*pgdir));
-
- pmd = pmd_offset(pgdir,address);
- pgd_end = (address+PGDIR_SIZE)&PGDIR_MASK;
-
- if (pgd_end && (end>pgd_end))
- end = pgd_end;
-
- do {
- class_migrate_pmd(mm,vma,pmd,address,end);
- address = (address+PMD_SIZE)&PMD_MASK;
- pmd++;
- } while (address && (address<end));
- return 0;
-}
-
-static /*inline*/ int
-class_migrate_vma(struct mm_struct* mm, struct vm_area_struct* vma)
-{
- pgd_t* pgdir;
- unsigned long address, end;
-
- address = vma->vm_start;
- end = vma->vm_end;
-
- pgdir = pgd_offset(vma->vm_mm, address);
- do {
- class_migrate_pgd(mm,vma,pgdir,address,end);
- address = (address + PGDIR_SIZE) & PGDIR_MASK;
- pgdir++;
- } while(address && (address<end));
- return 0;
-}
-
-/* this function is called with mm->peertask_lock hold */
-void
-ckrm_mem_evaluate_mm(struct mm_struct* mm)
-{
- struct task_struct *task;
- struct ckrm_mem_res *maxshareclass = NULL;
- struct vm_area_struct *vma;
-
- if (list_empty(&mm->tasklist)) {
- /* We leave the mm->memclass untouched since we believe that one
- * mm with no task associated will be deleted soon or attach
- * with another task later.
- */
- return;
- }
-
- list_for_each_entry(task, &mm->tasklist, mm_peers) {
- ckrm_mem_res_t* cls = GET_MEM_CLASS(task);
- if (!cls)
- continue;
- if (!maxshareclass || ckrm_mem_share_compare(maxshareclass,cls)<0 )
- maxshareclass = cls;
- }
-
- if (maxshareclass && (mm->memclass != (void *)maxshareclass)) {
- if (mm->memclass)
- mem_class_put(mm->memclass);
- mm->memclass = maxshareclass;
- mem_class_get(maxshareclass);
-
- /* Go through all VMA to migrate pages */
- down_read(&mm->mmap_sem);
- vma = mm->mmap;
- while(vma) {
- class_migrate_vma(mm, vma);
- vma = vma->vm_next;
- }
- up_read(&mm->mmap_sem);
- }
- return;
-}
-
-void
-ckrm_init_mm_to_task(struct mm_struct * mm, struct task_struct *task)
-{
- spin_lock(&mm->peertask_lock);
- if (!list_empty(&task->mm_peers)) {
- printk(KERN_ERR "CKRM_MEM: Task list should be empty, but is not!!\n");
- list_del_init(&task->mm_peers);
- }
- list_add_tail(&task->mm_peers, &mm->tasklist);
- spin_unlock(&mm->peertask_lock);
- if (mm->memclass != GET_MEM_CLASS(task))
- ckrm_mem_evaluate_mm(mm);
- return;
-}
-
-int
-ckrm_memclass_valid(ckrm_mem_res_t *cls)
-{
- ckrm_mem_res_t *tmp;
-
- spin_lock(&ckrm_mem_lock);
- list_for_each_entry(tmp, &ckrm_memclass_list, mcls_list) {
- if (tmp == cls) {
- spin_unlock(&ckrm_mem_lock);
- return 1;
- }
- }
- spin_unlock(&ckrm_mem_lock);
- return 0;
-}
-
-MODULE_LICENSE("GPL");
static struct ckrm_sock_class sockclass_dflt_class = {
};
-#define SOCKET_CLASS_TYPE_NAME "socketclass"
+#define SOCKET_CLASS_TYPE_NAME "socket_class"
const char *dflt_sockclass_name = SOCKET_CLASS_TYPE_NAME;
if (!options)
return -EINVAL;
- if (target == NULL) {
- unsigned long id = simple_strtol(options,NULL,0);
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- if (id != 0)
- return -EINVAL;
- printk(KERN_DEBUG "sock_class: reclassify all not net implemented\n");
- return 0;
- }
-
while ((p = strsep((char **)&options, ",")) != NULL) {
substring_t args[MAX_OPT_ARGS];
int token;
void __init ckrm_meta_init_sockclass(void)
{
- printk(KERN_DEBUG "...... Initializing ClassType<%s> ........\n",
+ printk("...... Initializing ClassType<%s> ........\n",
CT_sockclass.name);
// intialize the default class
ckrm_init_core_class(&CT_sockclass, class_core(&sockclass_dflt_class),
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/errno.h>
-#include <asm/div64.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/ckrm.h>
res = ckrm_get_res_class(core, resid, ckrm_numtasks_t);
if (res == NULL)
return;
- if (unlikely(atomic_read(&res->cnt_cur_alloc) == 0)) {
- printk(KERN_WARNING "numtasks_put_ref: Trying to decrement "
- "counter below 0\n");
- return;
- }
atomic_dec(&res->cnt_cur_alloc);
if (atomic_read(&res->cnt_borrowed) > 0) {
atomic_dec(&res->cnt_borrowed);
parres = ckrm_get_res_class(res->parent, resid, ckrm_numtasks_t);
- if (unlikely(atomic_read(&res->cnt_cur_alloc) < 0)) {
- printk(KERN_WARNING "numtasks_res: counter below 0\n");
- }
- if (unlikely(atomic_read(&res->cnt_cur_alloc) > 0 ||
- atomic_read(&res->cnt_borrowed) > 0)) {
- printk(KERN_WARNING "numtasks_res_free: resource still "
- "alloc'd %p\n", res);
+ if (unlikely(atomic_read(&res->cnt_cur_alloc) != 0 ||
+ atomic_read(&res->cnt_borrowed))) {
+ printk(KERN_ERR
+ "numtasks_res_free: resource still alloc'd %p\n", res);
if ((borrowed = atomic_read(&res->cnt_borrowed)) > 0) {
for (i = 0; i < borrowed; i++) {
numtasks_put_ref_local(parres->core);
if (parres->cnt_guarantee == CKRM_SHARE_DONTCARE) {
res->cnt_guarantee = CKRM_SHARE_DONTCARE;
} else if (par->total_guarantee) {
- u64 temp = (u64) self->my_guarantee * parres->cnt_guarantee;
- do_div(temp, par->total_guarantee);
- res->cnt_guarantee = (int) temp;
+ res->cnt_guarantee =
+ (self->my_guarantee * parres->cnt_guarantee)
+ / par->total_guarantee;
} else {
res->cnt_guarantee = 0;
}
if (parres->cnt_limit == CKRM_SHARE_DONTCARE) {
res->cnt_limit = CKRM_SHARE_DONTCARE;
} else if (par->max_limit) {
- u64 temp = (u64) self->my_limit * parres->cnt_limit;
- do_div(temp, par->max_limit);
- res->cnt_limit = (int) temp;
+ res->cnt_limit = (self->my_limit * parres->cnt_limit)
+ / par->max_limit;
} else {
res->cnt_limit = 0;
}
if (res->cnt_guarantee == CKRM_SHARE_DONTCARE) {
res->cnt_unused = CKRM_SHARE_DONTCARE;
} else if (self->total_guarantee) {
- u64 temp = (u64) self->unused_guarantee * res->cnt_guarantee;
- do_div(temp, self->total_guarantee);
- res->cnt_unused = (int) temp;
+ res->cnt_unused = (self->unused_guarantee *
+ res->cnt_guarantee) /
+ self->total_guarantee;
} else {
res->cnt_unused = 0;
}
if (parres->cnt_guarantee == CKRM_SHARE_DONTCARE) {
parres->cnt_unused = CKRM_SHARE_DONTCARE;
} else if (par->total_guarantee) {
- u64 temp = (u64) par->unused_guarantee * parres->cnt_guarantee;
- do_div(temp, par->total_guarantee);
- parres->cnt_unused = (int) temp;
+ parres->cnt_unused = (par->unused_guarantee *
+ parres->cnt_guarantee) /
+ par->total_guarantee;
} else {
parres->cnt_unused = 0;
}
#ifdef NUMTASKS_DEBUG
seq_printf(sfile,
"cur_alloc %d; borrowed %d; cnt_guar %d; cnt_limit %d "
- "cnt_unused %d, unused_guarantee %d, cur_max_limit %d\n",
+ "unused_guarantee %d, cur_max_limit %d\n",
atomic_read(&res->cnt_cur_alloc),
atomic_read(&res->cnt_borrowed), res->cnt_guarantee,
- res->cnt_limit, res->cnt_unused,
- res->shares.unused_guarantee,
+ res->cnt_limit, res->shares.unused_guarantee,
res->shares.cur_max_limit);
#endif
if (!res)
return -EINVAL;
- printk(KERN_DEBUG "numtasks config='%s'\n", cfgstr);
+ printk("numtasks config='%s'\n", cfgstr);
return 0;
}
if (resid == -1) {
resid = ckrm_register_res_ctlr(clstype, &numtasks_rcbs);
- printk(KERN_DEBUG "........init_ckrm_numtasks_res -> %d\n", resid);
+ printk("........init_ckrm_numtasks_res -> %d\n", resid);
if (resid != -1) {
ckrm_numtasks_register(numtasks_get_ref_local,
numtasks_put_ref_local);
ckrm_task_unlock(tsk->parent);
}
if (!list_empty(&tsk->taskclass_link))
- printk(KERN_WARNING "BUG in cb_fork.. tsk (%s:%d> already linked\n",
+ printk("BUG in cb_fork.. tsk (%s:%d> already linked\n",
tsk->comm, tsk->pid);
ckrm_set_taskclass(tsk, cls, NULL, CKRM_EVENT_FORK);
CE_CLASSIFY_TASK_PROTECT(CKRM_EVENT_GID, current);
}
-static void
-cb_taskclass_xid(struct task_struct *tsk)
-{
- ECB_PRINTK("%p:%d:%s\n",current,current->pid,current->comm);
- CE_CLASSIFY_TASK_PROTECT(CKRM_EVENT_XID, tsk);
-}
-
static struct ckrm_event_spec taskclass_events_callbacks[] = {
CKRM_EVENT_SPEC(NEWTASK, cb_taskclass_newtask),
CKRM_EVENT_SPEC(EXEC, cb_taskclass_exec),
CKRM_EVENT_SPEC(EXIT, cb_taskclass_exit),
CKRM_EVENT_SPEC(UID, cb_taskclass_uid),
CKRM_EVENT_SPEC(GID, cb_taskclass_gid),
- CKRM_EVENT_SPEC(XID, cb_taskclass_xid),
{-1}
};
* We use a hybrid by comparing ratio nr_threads/pidmax
*/
-static int ckrm_reclassify_all_tasks(void)
+static void ckrm_reclassify_all_tasks(void)
{
extern int pid_max;
int ratio;
int use_bitmap;
- /* Check permissions */
- if ((!capable(CAP_SYS_NICE)) && (!capable(CAP_SYS_RESOURCE))) {
- return -EPERM;
- }
-
ratio = curpidmax / nr_threads;
if (curpidmax <= PID_MAX_DEFAULT) {
use_bitmap = 1;
ce_protect(&CT_taskclass);
retry:
-
if (use_bitmap == 0) {
// go through it in one walk
read_lock(&tasklist_lock);
} else {
read_unlock(&tasklist_lock);
}
- pos++;
}
}
}
ce_release(&CT_taskclass);
- return 0;
+}
+
+int ckrm_reclassify(int pid)
+{
+ struct task_struct *tsk;
+ int rc = 0;
+
+ down(&async_serializer); // protect again race condition
+ if (pid < 0) {
+ // TODO: treat a negative pid as a process group and reclassify it
+ rc = -EINVAL;
+ } else if (pid == 0) {
+ // reclassify all tasks in the system
+ ckrm_reclassify_all_tasks();
+ } else {
+ // reclassify particular pid
+ read_lock(&tasklist_lock);
+ if ((tsk = find_task_by_pid(pid)) != NULL) {
+ get_task_struct(tsk);
+ read_unlock(&tasklist_lock);
+ CE_CLASSIFY_TASK_PROTECT(CKRM_EVENT_RECLASSIFY, tsk);
+ put_task_struct(tsk);
+ } else {
+ read_unlock(&tasklist_lock);
+ rc = -EINVAL;
+ }
+ }
+ up(&async_serializer);
+ return rc;
}
/*
atomic_read(&cls->core.hnode.parent->refcnt));
// If no CE registered for this classtype, following will be needed
// repeatedly;
- ce_regd = atomic_read(&class_core(cls)->classtype->ce_regd);
+ ce_regd = class_core(cls)->classtype->ce_regd;
cnode = &(class_core(cls)->hnode);
parcls = class_type(ckrm_task_class_t, cnode->parent);
}
/*
- * Change the core class of the given task
+ * Change the core class of the given task.
*/
int ckrm_forced_reclassify_pid(pid_t pid, struct ckrm_task_class *cls)
{
struct task_struct *tsk;
- if (cls && !ckrm_validate_and_grab_core(class_core(cls)))
+ if (!ckrm_validate_and_grab_core(class_core(cls)))
return -EINVAL;
read_lock(&tasklist_lock);
if ((tsk = find_task_by_pid(pid)) == NULL) {
read_unlock(&tasklist_lock);
- if (cls)
- ckrm_core_drop(class_core(cls));
+ ckrm_core_drop(class_core(cls));
return -EINVAL;
}
get_task_struct(tsk);
/* Check permissions */
if ((!capable(CAP_SYS_NICE)) &&
(!capable(CAP_SYS_RESOURCE)) && (current->user != tsk->user)) {
- if (cls)
- ckrm_core_drop(class_core(cls));
+ ckrm_core_drop(class_core(cls));
put_task_struct(tsk);
return -EPERM;
}
- ce_protect(&CT_taskclass);
- if (cls == NULL)
- CE_CLASSIFY_TASK(CKRM_EVENT_RECLASSIFY,tsk);
- else
- ckrm_set_taskclass(tsk, cls, NULL, CKRM_EVENT_MANUAL);
+ down(&async_serializer); // protect again race condition
+ ce_protect(&CT_taskclass);
+ ckrm_set_taskclass(tsk, cls, NULL, CKRM_EVENT_MANUAL);
ce_release(&CT_taskclass);
put_task_struct(tsk);
+ up(&async_serializer);
return 0;
}
void __init ckrm_meta_init_taskclass(void)
{
- printk(KERN_DEBUG "...... Initializing ClassType<%s> ........\n",
+ printk("...... Initializing ClassType<%s> ........\n",
CT_taskclass.name);
// intialize the default class
ckrm_init_core_class(&CT_taskclass, class_core(&taskclass_dflt_class),
pid_t pid;
int rc = -EINVAL;
- pid = (pid_t) simple_strtol(obj, NULL, 0);
-
- down(&async_serializer); // protect again race condition with reclassify_class
- if (pid < 0) {
- // do we want to treat this as process group .. TBD
- rc = -EINVAL;
- } else if (pid == 0) {
- rc = (target == NULL) ? ckrm_reclassify_all_tasks() : -EINVAL;
- } else {
- struct ckrm_task_class *cls = NULL;
- if (target)
- cls = class_type(ckrm_task_class_t,target);
- rc = ckrm_forced_reclassify_pid(pid,cls);
+ pid = (pid_t) simple_strtoul(obj, NULL, 10);
+ if (pid > 0) {
+ rc = ckrm_forced_reclassify_pid(pid,
+ class_type(ckrm_task_class_t,
+ target));
}
- up(&async_serializer);
return rc;
}
-#if 0
+#if 1
/******************************************************************************
* Debugging Task Classes: Utility functions
class_lock(core);
if (list_empty(&core->objlist)) {
class_lock(core);
- printk(KERN_DEBUG "check_tasklist_sanity: class %s empty list\n",
+ printk("check_tasklist_sanity: class %s empty list\n",
core->name);
return;
}
container_of(lh1, struct task_struct,
taskclass_link);
if (count++ > 20000) {
- printk(KERN_WARNING "list is CORRUPTED\n");
+ printk("list is CORRUPTED\n");
break;
}
if (tsk->taskclass != cls) {
const char *tclsname;
tclsname = (tsk->taskclass) ?
class_core(tsk->taskclass)->name:"NULL";
- printk(KERN_WARNING "sanity: task %s:%d has ckrm_core "
+ printk("sanity: task %s:%d has ckrm_core "
"|%s| but in list |%s|\n", tsk->comm,
tsk->pid, tclsname, core->name);
}
struct task_struct *proc, *thread;
int count = 0;
- printk(KERN_DEBUG "Analyze Error <%s> %d\n",
+ printk("Analyze Error <%s> %d\n",
class_core(tskcls)->name,
atomic_read(&(class_core(tskcls)->refcnt)));
const char *tclsname;
tclsname = (thread->taskclass) ?
class_core(thread->taskclass)->name :"NULL";
- printk(KERN_DEBUG "%d thread=<%s:%d> -> <%s> <%lx>\n", count,
+ printk("%d thread=<%s:%d> -> <%s> <%lx>\n", count,
thread->comm, thread->pid, tclsname,
thread->flags & PF_EXITING);
}
class_unlock(class_core(tskcls));
read_unlock(&tasklist_lock);
- printk(KERN_DEBUG "End Analyze Error <%s> %d\n",
+ printk("End Analyze Error <%s> %d\n",
class_core(tskcls)->name,
atomic_read(&(class_core(tskcls)->refcnt)));
}
return;
}
if (vec == NULL) {
- printk(KERN_DEBUG "v<0>-NULL\n");
+ printk("v<0>-NULL\n");
return;
}
- printk(KERN_DEBUG "v<%d>-", sz = vec->size);
+ printk("v<%d>-", sz = vec->size);
for (i = 0; i < sz; i++) {
- printk(KERN_DEBUG "%c", test_bit(i, vec->bits) ? '1' : '0');
+ printk("%c", test_bit(i, vec->bits) ? '1' : '0');
}
return;
}
static char *info =
"1. Magic files\n"
"\t|--rbce_info - read only file detailing how to setup and use RBCE.\n\n"
+ "\t|--rbce_reclassify - contains nothing. Writing a pid to it"
+ "reclassifies\n"
+ "\tthe given task according to the current set of rules.\n"
+ "\tWriting 0 to it reclassifies all tasks in the system according to the \n"
+ "\tsurrent set of rules. This is typically done by the user/sysadmin \n"
+ "\tafter changing/creating rules. \n\n"
"\t|--rbce_state - determines whether RBCE is currently active"
" or inactive.\n"
"\tWriting 1 (0) activates (deactivates) the CE. Reading the file\n"
-/* RCFS API for Rule-based Classification Engine (RBCE) and
- * Consolidated RBCE module code (combined)
- *
- * Copyright (C) Hubertus Franke, IBM Corp. 2003
- * (C) Chandra Seetharaman, IBM Corp. 2003
- * (C) Vivek Kashyap, IBM Corp. 2004
- *
- * Module for loading of classification policies and providing
- * a user API for Class-based Kernel Resource Management (CKRM)
- *
- * Latest version, more details at http://ckrm.sf.net
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it would be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- *
+/*
+ * This file is released under the GPL.
*/
-
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/errno.h>
if (*ptr == '\n') {
*ptr = '\0';
}
+#if 0
+ if (!strcmp(file->f_dentry->d_name.name, "rbce_reclassify")) {
+ pid = simple_strtol(line, NULL, 0);
+ rc = reclassify_pid(pid);
+ } else
+#endif
if (!strcmp(file->f_dentry->d_name.name, "rbce_tag")) {
pid = simple_strtol(line, &ptr, 0);
rc = set_tasktag(pid, ptr + 1); // expected syntax "pid tag"
char result[256];
memset(result, 0, 256);
- if (!strcmp(file->f_dentry->d_name.name, "rbce_tag")) {
+ if (!strcmp(file->f_dentry->d_name.name, "rbce_reclassify") ||
+ !strcmp(file->f_dentry->d_name.name, "rbce_tag")) {
return -EPERM;
}
if (!strcmp(file->f_dentry->d_name.name, "rbce_state")) {
{
const char *name = file->f_dentry->d_name.name;
- if (strcmp(name, "rbce_state") &&
+ if (strcmp(name, "rbce_reclassify") &&
+ strcmp(name, "rbce_state") &&
strcmp(name, "rbce_tag") && strcmp(name, "rbce_info")) {
if (!rule_exists(name)) {
struct dentry *pd =
list_entry(dir->i_dentry.next, struct dentry, d_alias);
- // Under /ce only "rbce_state", "rbce_tag" and "rbce_info" are allowed
+ // Under /ce only "rbce_reclassify", "rbce_state", "rbce_tag" and
+ // "rbce_info" are allowed
if (!strcmp(pd->d_name.name, "ce")) {
- if (strcmp(dentry->d_name.name, "rbce_state") &&
+ if (strcmp(dentry->d_name.name, "rbce_reclassify") &&
+ strcmp(dentry->d_name.name, "rbce_state") &&
strcmp(dentry->d_name.name, "rbce_tag") &&
strcmp(dentry->d_name.name, "rbce_info")) {
return -EINVAL;
/******************************* Magic files ********************/
-#define RBCE_NR_MAGF 5
+#define RBCE_NR_MAGF 6
struct rcfs_magf rbce_magf_files[RBCE_NR_MAGF] = {
{
.name = "ce",
.mode = RCFS_DEFAULT_FILE_MODE,
.i_fop = &rbce_file_operations,
},
+ {
+ .name = "rbce_reclassify",
+ .mode = RCFS_DEFAULT_FILE_MODE,
+ .i_fop = &rbce_file_operations,
+ },
{
.name = "rules",
.mode = (RCFS_DEFAULT_DIR_MODE | S_IWUSR),
static void rbce_put_super(struct super_block *sb)
{
module_put(THIS_MODULE);
- printk(KERN_DEBUG "rbce_put_super called\n");
+ printk("rbce_put_super called\n");
}
static struct super_operations rbce_ops = {
-/* Rule-based Classification Engine (RBCE) and
- * Consolidated RBCE module code (combined)
+/* Rule-based Classification Engine (RBCE) module
*
* Copyright (C) Hubertus Franke, IBM Corp. 2003
* (C) Chandra Seetharaman, IBM Corp. 2003
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
- * This program is distributed in the hope that it would be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- *
*/
/* Changes
#include <linux/ckrm_ce.h>
#include <linux/ckrm_net.h>
#include "bitvector.h"
-#include <linux/rbce.h>
+#include "rbce.h"
#define DEBUG
RBCE_RULE_APP_TAG, // task's application tag
RBCE_RULE_IPV4, // IP address of listen(), ipv4 format
RBCE_RULE_IPV6, // IP address of listen(), ipv6 format
- RBCE_RULE_XID, // VSERVER
RBCE_RULE_DEP_RULE, // dependent rule; must be the first term
RBCE_RULE_INVALID, // invalid, for filler
RBCE_RULE_INVALID2, // invalid, for filler
#define RBCE_TERM_TAG (3)
#define RBCE_TERM_IPV4 (4)
#define RBCE_TERM_IPV6 (5)
-#define RBCE_TERM_XID (6)
-#define NUM_TERM_MASK_VECTOR (7) // must be one more the last RBCE_TERM_...
+#define NUM_TERM_MASK_VECTOR (6)
// Rule flags. 1 bit for each type of rule term
#define RBCE_TERMFLAG_CMD (1 << RBCE_TERM_CMD)
#define RBCE_TERMFLAG_TAG (1 << RBCE_TERM_TAG)
#define RBCE_TERMFLAG_IPV4 (1 << RBCE_TERM_IPV4)
#define RBCE_TERMFLAG_IPV6 (1 << RBCE_TERM_IPV6)
-#define RBCE_TERMFLAG_XID (1 << RBCE_TERM_XID)
-#define RBCE_TERMFLAG_ALL (RBCE_TERMFLAG_CMD | RBCE_TERMFLAG_UID | \
- RBCE_TERMFLAG_GID | RBCE_TERMFLAG_TAG | RBCE_TERMFLAG_XID | \
- RBCE_TERMFLAG_IPV4 | RBCE_TERMFLAG_IPV6)
+#define RBCE_TERMFLAG_ALL (RBCE_TERMFLAG_CMD | RBCE_TERMFLAG_UID | \
+ RBCE_TERMFLAG_GID | RBCE_TERMFLAG_TAG | \
+ RBCE_TERMFLAG_IPV4 | RBCE_TERMFLAG_IPV6)
int termop_2_vecidx[RBCE_RULE_INVALID] = {
[RBCE_RULE_CMD_PATH] = RBCE_TERM_CMD,
[RBCE_RULE_REAL_GID] = RBCE_TERM_GID,
[RBCE_RULE_EFFECTIVE_UID] = RBCE_TERM_UID,
[RBCE_RULE_EFFECTIVE_GID] = RBCE_TERM_GID,
- [RBCE_RULE_XID] = RBCE_TERM_XID,
[RBCE_RULE_APP_TAG] = RBCE_TERM_TAG,
[RBCE_RULE_IPV4] = RBCE_TERM_IPV4,
[RBCE_RULE_IPV6] = RBCE_TERM_IPV6,
#define POLICY_ACTION_REDO_ALL 0x02 // Recompute all rule flags
#define POLICY_ACTION_PACK_TERMS 0x04 // Time to pack the terms
-const int use_persistent_state = 1;
-
struct ckrm_eng_callback ckrm_ecbs;
// Term vector state
#define DBG_RULE ( 0x20 )
#define DBG_POLICY ( 0x40 )
-#define DPRINTK(x, y...) if (rbcedebug & (x)) printk(KERN_DEBUG y)
+#define DPRINTK(x, y...) if (rbcedebug & (x)) printk(y)
// debugging selectively enabled through /proc/sys/debug/rbce
static void print_context_vectors(void)
return;
}
for (i = 0; i < NUM_TERM_MASK_VECTOR; i++) {
- printk(KERN_DEBUG "%d: ", i);
+ printk("%d: ", i);
bitvector_print(DBG_OPTIMIZATION, gl_mask_vecs[i]);
- printk(KERN_DEBUG "\n");
+ printk("\n");
}
}
#else
#define print_context_vectors(x)
#endif
-/* ====================== VSERVER support ========================== */
-#define CONFIG_VSERVER
-#ifdef CONFIG_VSERVER
-#include <linux/vs_base.h>
-#else
-typedef unsigned int xid_t;
-#define vx_task_xid(t) (0)
-#endif
-
/* ======================= Helper Functions ========================= */
#include "token.c"
}
notify_class_action(cls, 0);
cls->classobj = NULL;
- list_for_each_entry(pos, &rules_list[classtype], link) {
+ list_for_each_entry(pos, &rules_list[cls->classtype], link) {
rule = (struct rbce_rule *)pos;
if (rule->target_class) {
if (!strcmp
}
}
}
+ put_class(cls);
if ((cls = find_class_name(classname)) != NULL) {
printk(KERN_ERR
"rbce ERROR: class %s exists in rbce after "
case RBCE_RULE_REAL_GID:
case RBCE_RULE_EFFECTIVE_UID:
case RBCE_RULE_EFFECTIVE_GID:
- case RBCE_RULE_XID:
term->u.id = terms[i].u.id;
break;
goto handleid;
case RBCE_RULE_EFFECTIVE_GID:
strcpy(idtype, "eg");
- goto handleid;
- case RBCE_RULE_XID:
- strcpy(idtype, "x");
handleid:
if (term->operator == RBCE_LESS_THAN) {
oper = '<';
static struct rbce_private_data *create_private_data(struct rbce_private_data *,
int);
-static inline
-void reset_evaluation(struct rbce_private_data *pdata,int termflag)
+int rbce_ckrm_reclassify(int pid)
{
- /* reset TAG ruleterm evaluation results to pick up
- * on next classification event
- */
- if (use_persistent_state && gl_mask_vecs[termflag]) {
- bitvector_and_not( pdata->eval, pdata->eval,
- gl_mask_vecs[termflag] );
- bitvector_and_not( pdata->true, pdata->true,
- gl_mask_vecs[termflag] );
- }
+ printk("ckrm_reclassify_pid ignored\n");
+ return -EINVAL;
+}
+
+int reclassify_pid(int pid)
+{
+ struct task_struct *tsk;
+
+ // FIXME: Need to treat -pid as process group
+ if (pid < 0) {
+ return -EINVAL;
+ }
+
+ if (pid == 0) {
+ rbce_ckrm_reclassify(0); // just reclassify all tasks.
+ }
+ // if pid is +ve take control of the task, start evaluating it
+ if ((tsk = find_task_by_pid(pid)) == NULL) {
+ return -EINVAL;
+ }
+
+ if (unlikely(!RBCE_DATA(tsk))) {
+ RBCE_DATAP(tsk) = create_private_data(NULL, 0);
+ if (!RBCE_DATA(tsk)) {
+ return -ENOMEM;
+ }
+ }
+ RBCE_DATA(tsk)->evaluate = 1;
+ rbce_ckrm_reclassify(pid);
+ return 0;
}
-
+
int set_tasktag(int pid, char *tag)
{
char *tp;
- int rc = 0;
struct task_struct *tsk;
struct rbce_private_data *pdata;
- int len;
if (!tag) {
return -EINVAL;
}
- len = strlen(tag) + 1;
- tp = kmalloc(len, GFP_ATOMIC);
- if (!tp) {
- return -ENOMEM;
- }
- strncpy(tp,tag,len);
- read_lock(&tasklist_lock);
if ((tsk = find_task_by_pid(pid)) == NULL) {
- rc = -EINVAL;
- goto out;
+ return -EINVAL;
+ }
+
+ tp = kmalloc(strlen(tag) + 1, GFP_ATOMIC);
+
+ if (!tp) {
+ return -ENOMEM;
}
if (unlikely(!RBCE_DATA(tsk))) {
RBCE_DATAP(tsk) = create_private_data(NULL, 0);
if (!RBCE_DATA(tsk)) {
- rc = -ENOMEM;
- goto out;
+ kfree(tp);
+ return -ENOMEM;
}
}
pdata = RBCE_DATA(tsk);
kfree(pdata->app_tag);
}
pdata->app_tag = tp;
- reset_evaluation(pdata,RBCE_TERMFLAG_TAG);
-
- out:
- read_unlock(&tasklist_lock);
- if (rc != 0)
- kfree(tp);
- return rc;
+ strcpy(pdata->app_tag, tag);
+ rbce_ckrm_reclassify(pid);
+
+ return 0;
}
/*====================== Classification Functions =======================*/
no_ip = 0;
break;
- case RBCE_RULE_XID:
- {
- xid_t xid = vx_task_xid(tsk);
-
- if (term->operator == RBCE_LESS_THAN) {
- rc = (xid < term->u.id);
- } else if (term->operator == RBCE_GREATER_THAN) {
- rc = (xid > term->u.id);
- } else if (term->operator == RBCE_NOT) {
- rc = (xid != term->u.id);
- } else {
- rc = (xid == term->u.id);
- }
- break;
- }
-
default:
rc = 0;
printk(KERN_ERR "Error evaluate term op=%d\n",
}
}
spin_unlock(&pdata_lock);
- printk(KERN_WARNING "INVALID/CORRUPT PDATA %p\n", pdata);
+ printk("INVALID/CORRUPT PDATA %p\n", pdata);
return 0;
}
while (i < MAX_PDATA) {
if (pdata_arr[pdata_next] == NULL) {
- printk(KERN_DEBUG "storing %p at %d, count %d\n", pdata,
+ printk("storing %p at %d, count %d\n", pdata,
pdata_next, pdata_count);
pdata_arr[pdata_next++] = pdata;
if (pdata_next == MAX_PDATA) {
spin_unlock(&pdata_lock);
}
if (i == MAX_PDATA) {
- printk(KERN_DEBUG "PDATA BUFFER FULL pdata_count %d pdata %p\n",
+ printk("PDATA BUFFER FULL pdata_count %d pdata %p\n",
pdata_count, pdata);
}
}
spin_lock(&pdata_lock);
for (i = 0; i < MAX_PDATA; i++) {
if (pdata_arr[i] == pdata) {
- printk(KERN_DEBUG "unstoring %p at %d, count %d\n", pdata,
+ printk("unstoring %p at %d, count %d\n", pdata,
i, pdata_count);
pdata_arr[i] = NULL;
pdata_count--;
}
spin_unlock(&pdata_lock);
if (i == MAX_PDATA) {
- printk(KERN_DEBUG "pdata %p not found in the stored array\n",
+ printk("pdata %p not found in the stored array\n",
pdata);
}
}
#endif // PDATA_DEBUG
+const int use_persistent_state = 1;
+
/*
* Allocate and initialize a rbce_private_data data structure.
*
// pdata->evaluate = src->evaluate;
// if(src->app_tag) {
// int len = strlen(src->app_tag)+1;
- // printk(KERN_DEBUG "CREATE_PRIVATE: apptag %s len %d\n",
+ // printk("CREATE_PRIVATE: apptag %s len %d\n",
// src->app_tag,len);
// pdata->app_tag = kmalloc(len, GFP_ATOMIC);
// if (pdata->app_tag) {
AENT(EXEC),
AENT(UID),
AENT(GID),
- AENT(XID),
AENT(LOGIN),
AENT(USERADD),
AENT(USERDEL),
va_list args;
void *cls = NULL;
struct task_struct *tsk;
- struct rbce_private_data *pdata;
va_start(args, event);
tsk = va_arg(args, struct task_struct *);
* [ CKRM_LATCHABLE_EVENTS .. CKRM_NONLATCHABLE_EVENTS )
*/
- // printk(KERN_DEBUG "tc_classify %p:%d:%s '%s'\n",tsk,tsk->pid,
+ // printk("tc_classify %p:%d:%s '%s'\n",tsk,tsk->pid,
// tsk->comm,event_names[event]);
switch (event) {
cls = rbce_classify(tsk, NULL, RBCE_TERMFLAG_GID, tc_classtype);
break;
- case CKRM_EVENT_XID:
- cls = rbce_classify(tsk, NULL, RBCE_TERMFLAG_XID, tc_classtype);
- break;
-
case CKRM_EVENT_LOGIN:
case CKRM_EVENT_USERADD:
case CKRM_EVENT_USERDEL:
break;
case CKRM_EVENT_RECLASSIFY:
- if ((pdata = (RBCE_DATA(tsk)))) {
- pdata->evaluate = 1;
- }
cls = rbce_classify(tsk, NULL, RBCE_TERMFLAG_ALL, tc_classtype);
break;
}
- // printk(KERN_DEBUG "tc_classify %p:%d:%s '%s' ==> %p\n",tsk,tsk->pid,
+ // printk("tc_classify %p:%d:%s '%s' ==> %p\n",tsk,tsk->pid,
// tsk->comm,event_names[event],cls);
return cls;
#ifndef RBCE_EXTENSION
static void rbce_tc_notify(int event, void *core, struct task_struct *tsk)
{
- printk(KERN_DEBUG "tc_manual %p:%d:%s '%s'\n", tsk, tsk->pid, tsk->comm,
+ printk("tc_manual %p:%d:%s '%s'\n", tsk, tsk->pid, tsk->comm,
event_names[event]);
if (event != CKRM_EVENT_MANUAL)
return;
{NULL}
};
-static void unregister_classtype_engines(void)
- {
+static int register_classtype_engines(void)
+{
int rc;
struct ce_regtable_struct *ceptr = ce_regtable;
while (ceptr->name) {
- if (*ceptr->clsvar >= 0) {
- printk(KERN_DEBUG "ce unregister with <%s>\n",ceptr->name);
- while ((rc = ckrm_unregister_engine(ceptr->name)) == -EAGAIN)
- ;
- printk(KERN_DEBUG "ce unregister with <%s> rc=%d\n",ceptr->name,rc);
- *ceptr->clsvar = -1;
- }
+ rc = ckrm_register_engine(ceptr->name, ceptr->cbs);
+ printk("ce register with <%s> typeId=%d\n", ceptr->name, rc);
+ if ((rc < 0) && (rc != -ENOENT))
+ return (rc);
+ if (rc != -ENOENT)
+ *ceptr->clsvar = rc;
ceptr++;
}
- }
+ return 0;
+}
-static int register_classtype_engines(void)
+static void unregister_classtype_engines(void)
{
int rc;
struct ce_regtable_struct *ceptr = ce_regtable;
while (ceptr->name) {
- rc = ckrm_register_engine(ceptr->name, ceptr->cbs);
- printk(KERN_DEBUG "ce register with <%s> typeId=%d\n",ceptr->name,rc);
- if ((rc < 0) && (rc != -ENOENT)) {
- unregister_classtype_engines();
- return (rc);
+ if (*ceptr->clsvar >= 0) {
+ printk("ce unregister with <%s>\n", ceptr->name);
+ rc = ckrm_unregister_engine(ceptr->name);
+ printk("ce unregister with <%s> rc=%d\n", ceptr->name,
+ rc);
+ *ceptr->clsvar = -1;
}
- if (rc != -ENOENT)
- *ceptr->clsvar = rc;
ceptr++;
}
- return 0;
}
// =========== /proc/sysctl/debug/rbce debug stuff =============
{
int rc, i, line;
- printk(KERN_DEBUG "<1>\nInstalling \'%s\' module\n", modname);
+ printk("<1>\nInstalling \'%s\' module\n", modname);
for (i = 0; i < CKRM_MAX_CLASSTYPES; i++) {
INIT_LIST_HEAD(&rules_list[i]);
exit_rbce_ext();
out:
- printk(KERN_DEBUG "<1>%s: error installing rc=%d line=%d\n", __FUNCTION__, rc,
+ printk("<1>%s: error installing rc=%d line=%d\n", __FUNCTION__, rc,
line);
return rc;
}
{
int i;
- printk(KERN_DEBUG "<1>Removing \'%s\' module\n", modname);
+ printk("<1>Removing \'%s\' module\n", modname);
stop_debug();
exit_rbce_ext();
// Print warnings if lists are not empty, which is a bug
if (!list_empty(&class_list)) {
- printk(KERN_DEBUG "exit_rbce: Class list is not empty\n");
+ printk("exit_rbce: Class list is not empty\n");
}
for (i = 0; i < CKRM_MAX_CLASSTYPES; i++) {
if (!list_empty(&rules_list[i])) {
- printk(KERN_DEBUG "exit_rbce: Rules list for classtype %d"
+ printk("exit_rbce: Rules list for classtype %d"
" is not empty\n", i);
}
}
EXPORT_SYMBOL(change_rule);
EXPORT_SYMBOL(delete_rule);
EXPORT_SYMBOL(rename_rule);
+EXPORT_SYMBOL(reclassify_pid);
EXPORT_SYMBOL(set_tasktag);
module_init(init_rbce);
module_exit(exit_rbce);
-
-
* Copyright (C) Hubertus Franke, IBM Corp. 2003
*
* Extension to be included into RBCE to collect delay and sample information
- * Requires user daemon e.g. crbcedmn to activate.
+ * requires user daemon <crbcedmn> to activate.
*
* Latest version, more details at http://ckrm.sf.net
*
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
- * This program is distributed in the hope that it would be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- *
*/
-
/*******************************************************************************
*
* User-Kernel Communication Channel (UKCC)
{
static int readers = 0;
if (fileop == RELAY_FILE_OPEN) {
- // printk(KERN_DEBUG "got fileop_notify RELAY_FILE_OPEN for file %p\n",
+ // printk("got fileop_notify RELAY_FILE_OPEN for file %p\n",
// filp);
if (readers) {
- printk(KERN_DEBUG "only one client allowed, backoff .... \n");
+ printk("only one client allowed, backoff .... \n");
return -EPERM;
}
if (!try_module_get(THIS_MODULE))
client_attached();
} else if (fileop == RELAY_FILE_CLOSE) {
- // printk(KERN_DEBUG "got fileop_notify RELAY_FILE_CLOSE for file %p\n",
+ // printk("got fileop_notify RELAY_FILE_CLOSE for file %p\n",
// filp);
client_detached();
readers--;
channel_flags,
&ukcc_callbacks, 0, 0, 0, 0, 0, 0, NULL, 0);
if (ukcc_channel < 0)
- printk(KERN_DEBUG "crbce: ukcc creation failed, errcode: %d\n",
+ printk("crbce: ukcc creation failed, errcode: %d\n",
ukcc_channel);
else
- printk(KERN_DEBUG "crbce: ukcc created (%u KB)\n",
+ printk("crbce: ukcc created (%u KB)\n",
UKCC_TOTAL_BUFFER_SIZE >> 10);
return ukcc_channel;
}
(r),(l),-1,NULL) > 0); \
chan_state = chan_isok ? UKCC_OK : UKCC_STANDBY; \
if (chan_wasok && !chan_isok) { \
- printk(KERN_DEBUG "Channel stalled\n"); \
+ printk("Channel stalled\n"); \
} else if (!chan_wasok && chan_isok) { \
- printk(KERN_DEBUG "Channel continues\n"); \
+ printk("Channel continues\n"); \
} \
} while (0)
return 0;
pdata = RBCE_DATA(tsk);
if (pdata == NULL) {
- // printk(KERN_DEBUG "send [%d]<%s>: no pdata\n",tsk->pid,tsk->comm);
+ // printk("send [%d]<%s>: no pdata\n",tsk->pid,tsk->comm);
return 0;
}
if (send_forced || (delta_mode == 0)
rec_set_timehdr(&limrec, CRBCE_REC_DATA_DELIMITER, 0, 0);
rec_send(&limrec);
- // printk(KERN_DEBUG "send_task_data mode=%d t#=%d s#=%d\n",
+ // printk("send_task_data mode=%d t#=%d s#=%d\n",
// delta_mode,taskcnt,sendcnt);
}
}
while_each_thread(proc, thread);
read_unlock(&tasklist_lock);
-// printk(KERN_DEBUG "sample_timer: run=%d wait=%d\n",run,wait);
+// printk("sample_timer: run=%d wait=%d\n",run,wait);
start_sample_timer();
}
struct crbce_cmd_done cmdret;
int rc = 0;
-// printk(KERN_DEBUG "ukcc_cmd_deliver: %d %d len=%d:%d\n",cmdrec->type,
+// printk("ukcc_cmd_deliver: %d %d len=%d:%d\n",cmdrec->type,
// cmdrec->cmd,cmdrec->len,len);
cmdrec->len = len; // add this to reflection so the user doesn't
cmdret.hdr.cmd = cmdrec->cmd;
cmdret.rc = rc;
rec_send(&cmdret);
-// printk(KERN_DEBUG "ukcc_cmd_deliver ACK: %d %d rc=%d %d\n",cmdret.hdr.type,
+// printk("ukcc_cmd_deliver ACK: %d %d rc=%d %d\n",cmdret.hdr.type,
// cmdret.hdr.cmd,rc,sizeof(cmdret));
}
static void client_attached(void)
{
- printk(KERN_DEBUG "client [%d]<%s> attached to UKCC\n", current->pid,
+ printk("client [%d]<%s> attached to UKCC\n", current->pid,
current->comm);
relay_reset(ukcc_channel);
}
static void client_detached(void)
{
- printk(KERN_DEBUG "client [%d]<%s> detached to UKCC\n", current->pid,
+ printk("client [%d]<%s> detached to UKCC\n", current->pid,
current->comm);
chan_state = UKCC_STANDBY;
stop_sample_timer();
-/* Tokens for Rule-based Classification Engine (RBCE) and
- * Consolidated RBCE module code (combined)
- *
- * Copyright (C) Hubertus Franke, IBM Corp. 2003
- * (C) Chandra Seetharaman, IBM Corp. 2003
- * (C) Vivek Kashyap, IBM Corp. 2004
- *
- * Latest version, more details at http://ckrm.sf.net
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it would be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- *
- *
- */
-
#include <linux/parser.h>
#include <linux/ctype.h>
TOKEN_EGID_LT,
TOKEN_EGID_GT,
TOKEN_EGID_NOT,
- TOKEN_XID_EQ,
- TOKEN_XID_LT,
- TOKEN_XID_GT,
- TOKEN_XID_NOT,
TOKEN_TAG,
TOKEN_IPV4,
TOKEN_IPV6,
[TOKEN_EGID_LT] = RBCE_RULE_EFFECTIVE_GID,
[TOKEN_EGID_GT] = RBCE_RULE_EFFECTIVE_GID,
[TOKEN_EGID_NOT] = RBCE_RULE_EFFECTIVE_GID,
- [TOKEN_XID_EQ] = RBCE_RULE_XID,
- [TOKEN_XID_LT] = RBCE_RULE_XID,
- [TOKEN_XID_GT] = RBCE_RULE_XID,
- [TOKEN_XID_NOT] = RBCE_RULE_XID,
[TOKEN_TAG] = RBCE_RULE_APP_TAG,
[TOKEN_IPV4] = RBCE_RULE_IPV4,
[TOKEN_IPV6] = RBCE_RULE_IPV6,
[TOKEN_EGID_LT] = TOKEN_OP_LESS_THAN,
[TOKEN_EGID_GT] = TOKEN_OP_GREATER_THAN,
[TOKEN_EGID_NOT] = TOKEN_OP_NOT,
- [TOKEN_XID_EQ] = TOKEN_OP_EQUAL,
- [TOKEN_XID_LT] = TOKEN_OP_LESS_THAN,
- [TOKEN_XID_GT] = TOKEN_OP_GREATER_THAN,
- [TOKEN_XID_NOT] = TOKEN_OP_NOT,
[TOKEN_TAG] = TOKEN_OP_EQUAL,
[TOKEN_IPV4] = TOKEN_OP_EQUAL,
[TOKEN_IPV6] = TOKEN_OP_EQUAL,
{TOKEN_EGID_LT, "egid<%d"},
{TOKEN_EGID_GT, "egid>%d"},
{TOKEN_EGID_NOT, "egid!%d"},
- {TOKEN_XID_EQ, "xid=%d"},
- {TOKEN_XID_LT, "xid<%d"},
- {TOKEN_XID_GT, "xid>%d"},
- {TOKEN_XID_NOT, "xid!%d"},
{TOKEN_TAG, "tag=%s"},
{TOKEN_IPV4, "ipv4=%s"},
{TOKEN_IPV6, "ipv6=%s"},
nterms = 0;
while (*rp++) {
- if (*rp == '>' || *rp == '<' || *rp == '=' || *rp == '!') {
+ if (*rp == '>' || *rp == '<' || *rp == '=') {
nterms++;
}
}
case TOKEN_EGID_LT:
case TOKEN_EGID_GT:
case TOKEN_EGID_NOT:
- case TOKEN_XID_EQ:
- case TOKEN_XID_LT:
- case TOKEN_XID_GT:
- case TOKEN_XID_NOT:
// all these tokens can be specified only once
if (*term_mask & (1 << terms[i].op)) {
nterms = -EINVAL;
*term_mask = 0;
} /* else {
for (i = 0; i < nterms; i++) {
- printk(KERN_DEBUG "token: i %d; op %d, operator %d, str %ld\n",
+ printk("token: i %d; op %d, operator %d, str %ld\n",
i, terms[i].op, terms[i].operator, terms[i].u.id);
}
} */
//add to new positon, round robin for classes with same priority
list_add_tail(&(node->list), &cq->array.queue[index]);
- __set_bit(index, cq->array.bitmap);
+ __set_bit(index, cq->array.bitmap);
+
node->index = index;
}
-/**
- *classqueue_get_min_prio: return the priority of the last node in queue
- *
- * this function can be called without runqueue lock held
- */
-static inline int classqueue_get_min_prio(struct classqueue_struct *cq)
-{
- cq_node_t *result = NULL;
- int pos;
-
- /*
- * search over the bitmap to get the first class in the queue
- */
- pos = find_next_bit(cq->array.bitmap, CLASSQUEUE_SIZE, cq->base_offset);
- //do circular search from the beginning
- if (pos >= CLASSQUEUE_SIZE)
- pos = find_first_bit(cq->array.bitmap, CLASSQUEUE_SIZE);
-
- if (pos < CLASSQUEUE_SIZE) {
- result = list_entry(cq->array.queue[pos].next, cq_node_t, list);
- if (list_empty(&cq->array.queue[pos]))
- result = NULL;
- }
- if (result)
- return result->prio;
- else
- return 0;
-}
-
-/**
- * this function must be called with runqueue lock held
- */
cq_node_t *classqueue_get_head(struct classqueue_struct *cq)
{
cq_node_t *result = NULL;
* search over the bitmap to get the first class in the queue
*/
pos = find_next_bit(cq->array.bitmap, CLASSQUEUE_SIZE, cq->base_offset);
- //do circular search from the beginning
- if (pos >= CLASSQUEUE_SIZE)
+ if (pos >= CLASSQUEUE_SIZE) { //do circular search from the beginning
pos = find_first_bit(cq->array.bitmap, CLASSQUEUE_SIZE);
+ }
if (pos < CLASSQUEUE_SIZE) {
BUG_ON(list_empty(&cq->array.queue[pos]));
* Moving the end of queue forward
* the new_base here is logical, we need to translate to the abosule position
*/
-void classqueue_update_base(struct classqueue_struct *cq)
+void classqueue_update_base(struct classqueue_struct *cq, int new_base)
{
- int new_base;
-
- if (! cq_nr_member(cq)) {
+ if (!cq_nr_member(cq)) {
cq->base_offset = -1; //not defined
return;
}
- new_base = classqueue_get_min_prio(cq);
-
+ // assert(new_base >= cq->base);
+
if (new_base > cq->base) {
cq->base_offset = get_index(cq, &new_base);
cq->base = new_base;
#include <linux/init.h>
#include <linux/ckrm_sched.h>
-rwlock_t class_list_lock = RW_LOCK_UNLOCKED;
-LIST_HEAD(active_cpu_classes); // list of active cpu classes; anchor
-
-struct ckrm_cpu_class default_cpu_class_obj;
-
-struct ckrm_cpu_class * get_default_cpu_class(void) {
- return (&default_cpu_class_obj);
-}
-
/*******************************************************/
/* CVT Management */
/*******************************************************/
+#define CVT_WINDOW_SIZE (CLASSQUEUE_SIZE << CLASS_BONUS_RATE)
+static CVT_t max_CVT = CVT_WINDOW_SIZE;
-static inline void check_inactive_class(ckrm_lrq_t * lrq,CVT_t cur_cvt)
+/*
+ * Also ensure that the classes global cvt is upgraded to the
+ * minimum CVT in the system, as a class might not have run for a while
+ */
+static void update_global_cvt(struct ckrm_cpu_class *cpu_class, int cpu)
{
+ struct ckrm_local_runqueue *class_queue =
+ get_ckrm_local_runqueue(cpu_class, cpu);
CVT_t min_cvt;
- CVT_t bonus;
-
- //just a safty measure
- if (unlikely(! cur_cvt))
- return;
+ CVT_t local_cvt_old = class_queue->local_cvt;
-#ifndef INTERACTIVE_BONUS_SUPPORT
-#warning "ACB taking out interactive bonus calculation"
- bonus = 0;
-#else
- /*
- * Always leaving a small bonus for inactive classes
- * allows them to compete for cycles immediately when the become
- * active. This should improve interactive behavior
- */
- bonus = INTERACTIVE_BONUS(lrq);
+ spin_lock(&cvt_lock);
+ if (class_queue->uncounted_cvt) {
+ cpu_class->global_cvt += class_queue->uncounted_cvt;
+ class_queue->uncounted_cvt = 0;
+ }
+ min_cvt = max_CVT - CVT_WINDOW_SIZE;
+ if (cpu_class->global_cvt < min_cvt)
+ cpu_class->global_cvt = min_cvt;
+ else if (cpu_class->global_cvt > max_CVT)
+ max_CVT = cpu_class->global_cvt;
+
+/* update local cvt from global cvt*/
+#if 0
+ class_queue->local_cvt = cpu_class->global_cvt;
#endif
+ spin_unlock(&cvt_lock);
- //cvt can't be negative
- if (cur_cvt > bonus)
- min_cvt = cur_cvt - bonus;
- else
- min_cvt = 0;
-
- if (lrq->local_cvt < min_cvt) {
- CVT_t lost_cvt;
-
- lost_cvt = scale_cvt(min_cvt - lrq->local_cvt,lrq);
- lrq->local_cvt = min_cvt;
-
- /* add what the class lost to its savings*/
- lrq->savings += lost_cvt;
- if (lrq->savings > MAX_SAVINGS)
- lrq->savings = MAX_SAVINGS;
- } else if (lrq->savings) {
- /*
- *if a class saving and falling behind
- * then start to use it saving in a leaking bucket way
- */
- CVT_t savings_used;
-
- savings_used = scale_cvt((lrq->local_cvt - min_cvt),lrq);
- if (savings_used > lrq->savings)
- savings_used = lrq->savings;
-
- if (savings_used > SAVINGS_LEAK_SPEED)
- savings_used = SAVINGS_LEAK_SPEED;
-
- BUG_ON(lrq->savings < savings_used);
- lrq->savings -= savings_used;
- unscale_cvt(savings_used,lrq);
- BUG_ON(lrq->local_cvt < savings_used);
-#ifndef CVT_SAVINGS_SUPPORT
-#warning "ACB taking out cvt saving"
-#else
- lrq->local_cvt -= savings_used;
-#endif
- }
+ if (class_queue->local_cvt != local_cvt_old)
+ update_class_priority(class_queue);
}
/*
- * return the max_cvt of all the classes
- */
-static inline CVT_t get_max_cvt(int this_cpu)
-{
- struct ckrm_cpu_class *clsptr;
- ckrm_lrq_t * lrq;
- CVT_t max_cvt;
-
- max_cvt = 0;
-
- /*update class time, at the same time get max_cvt */
- list_for_each_entry(clsptr, &active_cpu_classes, links) {
- lrq = get_ckrm_lrq(clsptr, this_cpu);
- if (lrq->local_cvt > max_cvt)
- max_cvt = lrq->local_cvt;
- }
-
- return max_cvt;
-}
-
-/**
- * update_class_cputime - updates cvt of inactive classes
- * -- an inactive class shouldn't starve others when it comes back
- * -- the cpu time it lost when it's inactive should be accumulated
- * -- its accumulated saving should be compensated (in a leaky bucket fashion)
- *
* class_list_lock must have been acquired
*/
-void update_class_cputime(int this_cpu)
+void update_global_cvts(int this_cpu)
{
struct ckrm_cpu_class *clsptr;
- ckrm_lrq_t * lrq;
- CVT_t cur_cvt;
-
- /*
- * a class's local_cvt must not be significantly smaller than min_cvt
- * of active classes otherwise, it will starve other classes when it
- * is reactivated.
- *
- * Hence we keep all local_cvt's within a range of the min_cvt off
- * all active classes (approximated by the local_cvt of the currently
- * running class) and account for how many cycles where thus taken
- * from an inactive class building a savings (not to exceed a few seconds)
- * for a class to gradually make up upon reactivation, without
- * starvation of other classes.
- *
- */
- cur_cvt = get_local_cur_cvt(this_cpu);
+ struct ckrm_local_runqueue *class_queue;
- /*
- * cur_cvt == 0 means the system is now idle
- * in this case, we use max_cvt as cur_cvt
- * max_cvt roughly represents the cvt of the class
- * that has just finished running
- *
- * fairness wouldn't be a problem since we account for whatever lost in savings
- * if the system is not busy, the system responsiveness is not a problem.
- * still fine if the sytem is busy, but happened to be idle at this certain point
- * since bias toward interactive classes (class priority) is a more important way to improve system responsiveness
- */
- if (unlikely(! cur_cvt)) {
- cur_cvt = get_max_cvt(this_cpu);
- //return;
- }
-
- /*
- * - check the local cvt of all the classes
- * - update total_ns received by the class
- * - do a usage sampling for the whole class
- */
+ /*for each class*/
list_for_each_entry(clsptr, &active_cpu_classes, links) {
- lrq = get_ckrm_lrq(clsptr, this_cpu);
-
- spin_lock(&clsptr->stat.stat_lock);
- clsptr->stat.total_ns += lrq->uncounted_ns;
- ckrm_sample_usage(clsptr);
- spin_unlock(&clsptr->stat.stat_lock);
- lrq->uncounted_ns = 0;
-
- check_inactive_class(lrq,cur_cvt);
+ update_global_cvt(clsptr, this_cpu);
+ class_queue = get_ckrm_local_runqueue(clsptr, this_cpu);
+ clsptr->stat.total_ns += class_queue->uncounted_ns;
+ class_queue->uncounted_ns = 0;
}
}
-
-/*******************************************************/
-/* PID load balancing stuff */
-/*******************************************************/
-#define PID_SAMPLE_T 32
-#define PID_KP 20
-#define PID_KI 60
-#define PID_KD 20
-
-/**
- * sample pid load periodically
- */
-void ckrm_load_sample(ckrm_load_t* pid,int cpu)
-{
- long load;
- long err;
-
- if (jiffies % PID_SAMPLE_T)
- return;
-
- adjust_local_weight();
-
- load = ckrm_cpu_load(cpu);
- err = load - pid->load_p;
- pid->load_d = err;
- pid->load_p = load;
- pid->load_i *= 9;
- pid->load_i += load;
- pid->load_i /= 10;
-}
-
-long pid_get_pressure(ckrm_load_t* ckrm_load, int local_group)
-{
- long pressure;
- pressure = ckrm_load->load_p * PID_KP;
- pressure += ckrm_load->load_i * PID_KI;
- pressure += ckrm_load->load_d * PID_KD;
- pressure /= 100;
- return pressure;
-}
err = sys_clock_getres(which_clock,
(struct timespec __user *) &ts);
set_fs(oldfs);
- if (!err && tp && put_compat_timespec(&ts, tp))
+ if (!err && put_compat_timespec(&ts, tp))
return -EFAULT;
return err;
}
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/mempolicy.h>
+#include <linux/vs_limit.h>
+
#include <linux/ckrm.h>
#include <linux/ckrm_tsk.h>
-#include <linux/vs_limit.h>
-#include <linux/ckrm_mem.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
/* tasklist_lock is held, is this sufficient? */
if (p->vx_info) {
atomic_dec(&p->vx_info->cacct.nr_threads);
- atomic_dec(&p->vx_info->limit.rcur[RLIMIT_NPROC]);
+ vx_nproc_dec(p->vx_info);
+ // atomic_dec(&p->vx_info->limit.res[RLIMIT_NPROC]);
}
detach_pid(p, PIDTYPE_PID);
detach_pid(p, PIDTYPE_TGID);
struct file * file = xchg(&files->fd[i], NULL);
if (file) {
filp_close(file, files);
+ vx_openfd_dec(fd);
cond_resched();
+ } else {
+ vx_openfd_dec(fd);
}
- // vx_openfd_dec(fd);
}
i++;
set >>= 1;
task_lock(tsk);
tsk->mm = NULL;
up_read(&mm->mmap_sem);
-#ifdef CONFIG_CKRM_RES_MEM
- spin_lock(&mm->peertask_lock);
- list_del_init(&tsk->mm_peers);
- ckrm_mem_evaluate_mm(mm);
- spin_unlock(&mm->peertask_lock);
-#endif
enter_lazy_tlb(mm, current);
task_unlock(tsk);
mmput(mm);
}
acct_process(code);
- if (current->tux_info) {
-#ifdef CONFIG_TUX_DEBUG
- printk("Possibly unexpected TUX-thread exit(%ld) at %p?\n",
- code, __builtin_return_address(0));
-#endif
- current->tux_exit();
- }
__exit_mm(tsk);
exit_sem(tsk);
module_put(tsk->binfmt->module);
tsk->exit_code = code;
+#ifdef CONFIG_CKRM_TYPE_TASKCLASS
+ numtasks_put_ref(tsk->taskclass);
+#endif
exit_notify(tsk);
#ifdef CONFIG_NUMA
mpol_free(tsk->mempolicy);
if (p->real_parent != p->parent) {
__ptrace_unlink(p);
p->state = TASK_ZOMBIE;
- /*
- * If this is not a detached task, notify the parent. If it's
- * still not detached after that, don't release it now.
- */
- if (p->exit_signal != -1) {
+ /* If this is a detached thread, this is where it goes away. */
+ if (p->exit_signal == -1) {
+ /* release_task takes the lock itself. */
+ write_unlock_irq(&tasklist_lock);
+ release_task (p);
+ }
+ else {
do_notify_parent(p, p->exit_signal);
- if (p->exit_signal != -1)
- p = NULL;
+ write_unlock_irq(&tasklist_lock);
}
+ p = NULL;
}
- write_unlock_irq(&tasklist_lock);
+ else
+ write_unlock_irq(&tasklist_lock);
}
if (p != NULL)
release_task(p);
#include <linux/fs.h>
#include <linux/cpu.h>
#include <linux/security.h>
-#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/jiffies.h>
#include <linux/futex.h>
#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/rmap.h>
+
#include <linux/vs_network.h>
-#include <linux/vs_limit.h>
#include <linux/vs_memory.h>
+#include <linux/vs_limit.h>
+#include <linux/vs_base.h>
+
#include <linux/ckrm.h>
#include <linux/ckrm_tsk.h>
-#include <linux/ckrm_mem_inline.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
static void free_task(struct task_struct *tsk)
{
free_thread_info(tsk->thread_info);
+ vxdprintk("freeing up task %p\n", tsk);
clr_vx_info(&tsk->vx_info);
clr_nx_info(&tsk->nx_info);
free_task_struct(tsk);
ckrm_cb_newtask(tsk);
/* One for us, one for whoever does the "release_task()" (usually parent) */
atomic_set(&tsk->usage,2);
-#ifdef CONFIG_CKRM_RES_MEM
- INIT_LIST_HEAD(&tsk->mm_peers);
-#endif
return tsk;
}
mm->locked_vm = 0;
mm->mmap = NULL;
mm->mmap_cache = NULL;
- mm->free_area_cache = oldmm->mmap_base;
+ mm->free_area_cache = TASK_UNMAPPED_BASE;
mm->map_count = 0;
mm->rss = 0;
cpus_clear(mm->cpu_vm_mask);
mm->ioctx_list = NULL;
mm->default_kioctx = (struct kioctx)INIT_KIOCTX(mm->default_kioctx, *mm);
mm->free_area_cache = TASK_UNMAPPED_BASE;
-#ifdef CONFIG_CKRM_RES_MEM
- INIT_LIST_HEAD(&mm->tasklist);
- mm->peertask_lock = SPIN_LOCK_UNLOCKED;
-#endif
if (likely(!mm_alloc_pgd(mm))) {
mm->def_flags = 0;
+#ifdef __HAVE_ARCH_MMAP_TOP
+ mm->mmap_top = mmap_top();
+#endif
set_vx_info(&mm->mm_vx_info, current->vx_info);
return mm;
}
if (mm) {
memset(mm, 0, sizeof(*mm));
mm = mm_init(mm);
-#ifdef CONFIG_CKRM_RES_MEM
- mm->memclass = GET_MEM_CLASS(current);
- mem_class_get(mm->memclass);
-#endif
}
return mm;
}
mm_free_pgd(mm);
destroy_context(mm);
clr_vx_info(&mm->mm_vx_info);
-#ifdef CONFIG_CKRM_RES_MEM
- /* class can be null and mm's tasklist can be empty here */
- if (mm->memclass) {
- mem_class_put(mm->memclass);
- mm->memclass = NULL;
- }
-#endif
free_mm(mm);
}
spin_unlock(&mmlist_lock);
exit_aio(mm);
exit_mmap(mm);
- put_swap_token(mm);
mmdrop(mm);
}
}
good_mm:
tsk->mm = mm;
tsk->active_mm = mm;
- ckrm_init_mm_to_task(mm, tsk);
return 0;
free_pt:
goto fork_out;
retval = -ENOMEM;
+
p = dup_task_struct(current);
if (!p)
goto fork_out;
- p->tux_info = NULL;
p->vx_info = NULL;
set_vx_info(&p->vx_info, current->vx_info);
}
if (p->mm && vx_flags(VXF_FORK_RSS, 0)) {
if (!vx_rsspages_avail(p->mm, p->mm->rss))
- goto bad_fork_cleanup_vm;
+ goto bad_fork_free;
}
retval = -EAGAIN;
if (!vx_nproc_avail(1))
- goto bad_fork_cleanup_vm;
+ goto bad_fork_free;
if (atomic_read(&p->user->processes) >=
p->rlim[RLIMIT_NPROC].rlim_cur) {
if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
p->user != &root_user)
- goto bad_fork_cleanup_vm;
+ goto bad_fork_free;
}
atomic_inc(&p->user->__count);
}
#endif
+ retval = -ENOMEM;
if ((retval = security_task_alloc(p)))
goto bad_fork_cleanup_policy;
if ((retval = audit_alloc(p)))
} else
link_pid(p, p->pids + PIDTYPE_TGID, &p->group_leader->pids[PIDTYPE_TGID].pid);
- p->ioprio = current->ioprio;
nr_threads++;
- /* p is copy of current */
- vxi = p->vx_info;
+ vxi = current->vx_info;
if (vxi) {
atomic_inc(&vxi->cacct.nr_threads);
- atomic_inc(&vxi->limit.rcur[RLIMIT_NPROC]);
+ // atomic_inc(&vxi->limit.res[RLIMIT_NPROC]);
}
+ vx_nproc_inc();
write_unlock_irq(&tasklist_lock);
retval = 0;
put_group_info(p->group_info);
atomic_dec(&p->user->processes);
free_uid(p->user);
-bad_fork_cleanup_vm:
- if (p->mm && !(clone_flags & CLONE_VM))
- vx_pages_sub(p->mm->mm_vx_info, RLIMIT_AS, p->mm->total_vm);
bad_fork_free:
free_task(p);
goto fork_out;
struct task_struct * p = (struct task_struct *) __data;
unsigned long interval;
- if (send_group_sig_info(SIGALRM, SEND_SIG_PRIV, p))
- printk("*warning*: failed to send SIGALRM to %u\n", p->pid);
-
+ send_group_sig_info(SIGALRM, SEND_SIG_PRIV, p);
interval = p->it_real_incr;
if (interval) {
if (interval > (unsigned long) LONG_MAX)
/* If this is set, the section belongs in the init part of the module */
#define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))
+#define symbol_is(literal, string) \
+ (strcmp(MODULE_SYMBOL_PREFIX literal, (string)) == 0)
+
/* Protects module list */
static spinlock_t modlist_lock = SPIN_LOCK_UNLOCKED;
#include <linux/syscalls.h>
#include <linux/interrupt.h>
#include <linux/nmi.h>
-#ifdef CONFIG_KEXEC
-#include <linux/kexec.h>
-#endif
-int panic_timeout = 900;
-int panic_on_oops = 1;
+int panic_timeout;
+int panic_on_oops;
int tainted;
-void (*dump_function_ptr)(const char *, const struct pt_regs *) = 0;
EXPORT_SYMBOL(panic_timeout);
-EXPORT_SYMBOL(dump_function_ptr);
struct notifier_block *panic_notifier_list;
va_start(args, fmt);
vsnprintf(buf, sizeof(buf), fmt, args);
va_end(args);
-
printk(KERN_EMERG "Kernel panic: %s\n",buf);
if (netdump_func)
BUG();
sys_sync();
bust_spinlocks(0);
- notifier_call_chain(&panic_notifier_list, 0, buf);
-
#ifdef CONFIG_SMP
smp_send_stop();
#endif
- if (panic_timeout > 0) {
+ notifier_call_chain(&panic_notifier_list, 0, buf);
+
+ if (panic_timeout > 0)
+ {
int i;
/*
* Delay timeout seconds before rebooting the machine.
* We can't use the "normal" timers since we just panicked..
*/
printk(KERN_EMERG "Rebooting in %d seconds..",panic_timeout);
-#ifdef CONFIG_KEXEC
-{
- struct kimage *image;
- image = xchg(&kexec_image, 0);
- if (image) {
- printk(KERN_EMERG "by starting a new kernel ..\n");
- mdelay(panic_timeout*1000);
- machine_kexec(image);
- }
- }
-#endif
for (i = 0; i < panic_timeout; i++) {
touch_nmi_watchdog();
mdelay(1000);
return "machine";
if(pmdisk_info.cpus != num_online_cpus())
return "number of cpus";
- return NULL;
+ return 0;
}
* locking requirements, the list it's pulling from has to belong to a cpu
* which is dead and hence not processing interrupts.
*/
-static void rcu_move_batch(struct rcu_head *list)
+static void rcu_move_batch(struct list_head *list)
{
- int cpu;
+ struct list_head *entry;
+ int cpu = smp_processor_id();
local_irq_disable();
-
- cpu = smp_processor_id();
-
- while (list != NULL) {
- *RCU_nxttail(cpu) = list;
- RCU_nxttail(cpu) = &list->next;
- list = list->next;
+ while (!list_empty(list)) {
+ entry = list->next;
+ list_del(entry);
+ list_add_tail(entry, &RCU_nxtlist(cpu));
}
local_irq_enable();
}
spin_lock_bh(&rcu_state.mutex);
if (rcu_ctrlblk.cur != rcu_ctrlblk.completed)
cpu_quiet(cpu);
+unlock:
spin_unlock_bh(&rcu_state.mutex);
- rcu_move_batch(RCU_curlist(cpu));
- rcu_move_batch(RCU_nxtlist(cpu));
+ rcu_move_batch(&RCU_curlist(cpu));
+ rcu_move_batch(&RCU_nxtlist(cpu));
tasklet_kill_immediate(&RCU_tasklet(cpu), cpu);
}
#include <linux/cpu.h>
#include <linux/percpu.h>
#include <linux/kthread.h>
+#include <asm/tlb.h>
#include <linux/vserver/sched.h>
#include <linux/vs_base.h>
-#include <asm/tlb.h>
#include <asm/unistd.h>
#define cpu_to_node_mask(cpu) (cpu_online_map)
#endif
-/* used to soft spin in sched while dump is in progress */
-unsigned long dump_oncpu;
-EXPORT_SYMBOL(dump_oncpu);
-
/*
* Convert user-nice values [ -20 ... 0 ... 19 ]
* to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
#define LOW_CREDIT(p) \
((p)->interactive_credit < -CREDIT_LIMIT)
-#ifdef CONFIG_CKRM_CPU_SCHEDULE
-/*
- * if belong to different class, compare class priority
- * otherwise compare task priority
- */
-#define TASK_PREEMPTS_CURR(p, rq) \
- ( ((p)->cpu_class != (rq)->curr->cpu_class) \
- && ((rq)->curr != (rq)->idle) && ((p) != (rq)->idle )) \
- ? class_preempts_curr((p),(rq)->curr) \
- : ((p)->prio < (rq)->curr->prio)
-#else
-#define TASK_PREEMPTS_CURR(p, rq) \
- ((p)->prio < (rq)->curr->prio)
-#endif
-
/*
* BASE_TIMESLICE scales user-nice values [ -20 ... 19 ]
* to time slice values.
((MAX_TIMESLICE - MIN_TIMESLICE) * \
(MAX_PRIO-1 - (p)->static_prio) / (MAX_USER_PRIO-1)))
-unsigned int task_timeslice(task_t *p)
+static unsigned int task_timeslice(task_t *p)
{
return BASE_TIMESLICE(p);
}
/*
* These are the runqueue data structures:
*/
-
typedef struct runqueue runqueue_t;
+
+#ifdef CONFIG_CKRM_CPU_SCHEDULE
#include <linux/ckrm_classqueue.h>
-#include <linux/ckrm_sched.h>
+#endif
+
+#ifdef CONFIG_CKRM_CPU_SCHEDULE
+
+/**
+ * if belong to different class, compare class priority
+ * otherwise compare task priority
+ */
+#define TASK_PREEMPTS_CURR(p, rq) \
+ (((p)->cpu_class != (rq)->curr->cpu_class) && ((rq)->curr != (rq)->idle))? class_preempts_curr((p),(rq)->curr) : ((p)->prio < (rq)->curr->prio)
+#else
+#define TASK_PREEMPTS_CURR(p, rq) \
+ ((p)->prio < (rq)->curr->prio)
+#endif
/*
* This is the main, per-CPU runqueue data structure.
task_t *curr, *idle;
struct mm_struct *prev_mm;
#ifdef CONFIG_CKRM_CPU_SCHEDULE
+ unsigned long ckrm_cpu_load;
struct classqueue_struct classqueue;
- ckrm_load_t ckrm_load;
#else
prio_array_t *active, *expired, arrays[2];
#endif
task_t *migration_thread;
struct list_head migration_queue;
#endif
-
-#ifdef CONFIG_VSERVER_HARDCPU
struct list_head hold_queue;
int idle_tokens;
-#endif
};
static DEFINE_PER_CPU(struct runqueue, runqueues);
# define task_running(rq, p) ((rq)->curr == (p))
#endif
+#ifdef CONFIG_CKRM_CPU_SCHEDULE
+#include <linux/ckrm_sched.h>
+spinlock_t cvt_lock = SPIN_LOCK_UNLOCKED;
+rwlock_t class_list_lock = RW_LOCK_UNLOCKED;
+LIST_HEAD(active_cpu_classes); // list of active cpu classes; anchor
+struct ckrm_cpu_class default_cpu_class_obj;
+
/*
- * task_rq_lock - lock the runqueue a given task resides on and disable
- * interrupts. Note the ordering: we can safely lookup the task_rq without
- * explicitly disabling preemption.
+ * the minimum CVT allowed is the base_cvt
+ * otherwise, it will starve others
*/
-static runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
+CVT_t get_min_cvt(int cpu)
{
- struct runqueue *rq;
-
-repeat_lock_task:
- local_irq_save(*flags);
- rq = task_rq(p);
- spin_lock(&rq->lock);
- if (unlikely(rq != task_rq(p))) {
- spin_unlock_irqrestore(&rq->lock, *flags);
- goto repeat_lock_task;
- }
- return rq;
-}
+ cq_node_t *node;
+ struct ckrm_local_runqueue * lrq;
+ CVT_t min_cvt;
-static inline void task_rq_unlock(runqueue_t *rq, unsigned long *flags)
-{
- spin_unlock_irqrestore(&rq->lock, *flags);
+ node = classqueue_get_head(bpt_queue(cpu));
+ lrq = (node) ? class_list_entry(node) : NULL;
+
+ if (lrq)
+ min_cvt = lrq->local_cvt;
+ else
+ min_cvt = 0;
+
+ return min_cvt;
}
/*
- * rq_lock - lock a given runqueue and disable interrupts.
+ * update the classueue base for all the runqueues
+ * TODO: we can only update half of the min_base to solve the movebackward issue
*/
-static runqueue_t *this_rq_lock(void)
-{
- runqueue_t *rq;
+static inline void check_update_class_base(int this_cpu) {
+ unsigned long min_base = 0xFFFFFFFF;
+ cq_node_t *node;
+ int i;
- local_irq_disable();
- rq = this_rq();
- spin_lock(&rq->lock);
+ if (! cpu_online(this_cpu)) return;
- return rq;
+ /*
+ * find the min_base across all the processors
+ */
+ for_each_online_cpu(i) {
+ /*
+ * I should change it to directly use bpt->base
+ */
+ node = classqueue_get_head(bpt_queue(i));
+ if (node && node->prio < min_base) {
+ min_base = node->prio;
+ }
+ }
+ if (min_base != 0xFFFFFFFF)
+ classqueue_update_base(bpt_queue(this_cpu),min_base);
}
-static inline void rq_unlock(runqueue_t *rq)
+static inline void ckrm_rebalance_tick(int j,int this_cpu)
{
- spin_unlock_irq(&rq->lock);
+#ifdef CONFIG_CKRM_CPU_SCHEDULE
+ read_lock(&class_list_lock);
+ if (!(j % CVT_UPDATE_TICK))
+ update_global_cvts(this_cpu);
+
+#define CKRM_BASE_UPDATE_RATE 400
+ if (! (jiffies % CKRM_BASE_UPDATE_RATE))
+ check_update_class_base(this_cpu);
+
+ read_unlock(&class_list_lock);
+#endif
}
-#ifdef CONFIG_CKRM_CPU_SCHEDULE
-static inline ckrm_lrq_t *rq_get_next_class(struct runqueue *rq)
+static inline struct ckrm_local_runqueue *rq_get_next_class(struct runqueue *rq)
{
cq_node_t *node = classqueue_get_head(&rq->classqueue);
return ((node) ? class_list_entry(node) : NULL);
}
-/*
- * return the cvt of the current running class
- * if no current running class, return 0
- * assume cpu is valid (cpu_online(cpu) == 1)
- */
-CVT_t get_local_cur_cvt(int cpu)
-{
- ckrm_lrq_t * lrq = rq_get_next_class(cpu_rq(cpu));
-
- if (lrq)
- return lrq->local_cvt;
- else
- return 0;
-}
-
static inline struct task_struct * rq_get_next_task(struct runqueue* rq)
{
prio_array_t *array;
struct task_struct *next;
- ckrm_lrq_t *queue;
- int idx;
+ struct ckrm_local_runqueue *queue;
int cpu = smp_processor_id();
-
- // it is guaranteed be the ( rq->nr_running > 0 ) check in
- // schedule that a task will be found.
-
+
+ next = rq->idle;
retry_next_class:
- queue = rq_get_next_class(rq);
- // BUG_ON( !queue );
-
- array = queue->active;
- if (unlikely(!array->nr_active)) {
- queue->active = queue->expired;
- queue->expired = array;
- queue->expired_timestamp = 0;
+ if ((queue = rq_get_next_class(rq))) {
+ array = queue->active;
+ //check switch active/expired queue
+ if (unlikely(!queue->active->nr_active)) {
+ queue->active = queue->expired;
+ queue->expired = array;
+ queue->expired_timestamp = 0;
+
+ if (queue->active->nr_active)
+ set_top_priority(queue,
+ find_first_bit(queue->active->bitmap, MAX_PRIO));
+ else {
+ classqueue_dequeue(queue->classqueue,
+ &queue->classqueue_linkobj);
+ cpu_demand_event(get_rq_local_stat(queue,cpu),CPU_DEMAND_DEQUEUE,0);
+ }
- if (queue->active->nr_active)
- set_top_priority(queue,
- find_first_bit(queue->active->bitmap, MAX_PRIO));
- else {
- classqueue_dequeue(queue->classqueue,
- &queue->classqueue_linkobj);
- cpu_demand_event(get_rq_local_stat(queue,cpu),CPU_DEMAND_DEQUEUE,0);
+ goto retry_next_class;
}
- goto retry_next_class;
+ BUG_ON(!queue->active->nr_active);
+ next = task_list_entry(array->queue[queue->top_priority].next);
}
- // BUG_ON(!array->nr_active);
-
- idx = queue->top_priority;
- // BUG_ON (idx == MAX_PRIO);
- next = task_list_entry(array->queue[idx].next);
return next;
}
-#else /*! CONFIG_CKRM_CPU_SCHEDULE*/
+
+static inline void rq_load_inc(runqueue_t *rq, struct task_struct *p) { rq->ckrm_cpu_load += cpu_class_weight(p->cpu_class); }
+static inline void rq_load_dec(runqueue_t *rq, struct task_struct *p) { rq->ckrm_cpu_load -= cpu_class_weight(p->cpu_class); }
+
+#else /*CONFIG_CKRM_CPU_SCHEDULE*/
+
static inline struct task_struct * rq_get_next_task(struct runqueue* rq)
{
prio_array_t *array;
static inline void class_enqueue_task(struct task_struct* p, prio_array_t *array) { }
static inline void class_dequeue_task(struct task_struct* p, prio_array_t *array) { }
static inline void init_cpu_classes(void) { }
-#define rq_ckrm_load(rq) NULL
-static inline void ckrm_sched_tick(int j,int this_cpu,void* name) {}
+static inline void rq_load_inc(runqueue_t *rq, struct task_struct *p) { }
+static inline void rq_load_dec(runqueue_t *rq, struct task_struct *p) { }
#endif /* CONFIG_CKRM_CPU_SCHEDULE */
+
+/*
+ * task_rq_lock - lock the runqueue a given task resides on and disable
+ * interrupts. Note the ordering: we can safely lookup the task_rq without
+ * explicitly disabling preemption.
+ */
+runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
+{
+ struct runqueue *rq;
+
+repeat_lock_task:
+ local_irq_save(*flags);
+ rq = task_rq(p);
+ spin_lock(&rq->lock);
+ if (unlikely(rq != task_rq(p))) {
+ spin_unlock_irqrestore(&rq->lock, *flags);
+ goto repeat_lock_task;
+ }
+ return rq;
+}
+
+void task_rq_unlock(runqueue_t *rq, unsigned long *flags)
+{
+ spin_unlock_irqrestore(&rq->lock, *flags);
+}
+
+/*
+ * rq_lock - lock a given runqueue and disable interrupts.
+ */
+static runqueue_t *this_rq_lock(void)
+{
+ runqueue_t *rq;
+
+ local_irq_disable();
+ rq = this_rq();
+ spin_lock(&rq->lock);
+
+ return rq;
+}
+
+static inline void rq_unlock(runqueue_t *rq)
+{
+ spin_unlock_irq(&rq->lock);
+}
+
/*
* Adding/removing a task to/from a priority array:
*/
-static void dequeue_task(struct task_struct *p, prio_array_t *array)
+void dequeue_task(struct task_struct *p, prio_array_t *array)
{
+ BUG_ON(! array);
array->nr_active--;
list_del(&p->run_list);
if (list_empty(array->queue + p->prio))
class_dequeue_task(p,array);
}
-static void enqueue_task(struct task_struct *p, prio_array_t *array)
+void enqueue_task(struct task_struct *p, prio_array_t *array)
{
list_add_tail(&p->run_list, array->queue + p->prio);
__set_bit(p->prio, array->bitmap);
{
enqueue_task(p, rq_active(p,rq));
rq->nr_running++;
+ rq_load_inc(rq,p);
}
/*
{
enqueue_task_head(p, rq_active(p,rq));
rq->nr_running++;
+ rq_load_inc(rq,p);
}
static void recalc_task_prio(task_t *p, unsigned long long now)
static void deactivate_task(struct task_struct *p, runqueue_t *rq)
{
rq->nr_running--;
+ rq_load_dec(rq,p);
if (p->state == TASK_UNINTERRUPTIBLE)
rq->nr_uninterruptible++;
dequeue_task(p, p->array);
INIT_LIST_HEAD(&p->run_list);
p->array = NULL;
spin_lock_init(&p->switch_lock);
-#ifdef CONFIG_CKRM_CPU_SCHEDULE
- cpu_demand_event(&p->demand_stat,CPU_DEMAND_INIT,0);
-#endif
-
#ifdef CONFIG_PREEMPT
/*
* During context-switch we hold precisely one spinlock, which
p->array = current->array;
p->array->nr_active++;
rq->nr_running++;
- class_enqueue_task(p,p->array);
+ rq_load_inc(rq,p);
}
task_rq_unlock(rq, &flags);
}
{
unsigned long i, sum = 0;
- for_each_cpu(i)
+ for_each_online_cpu(i)
sum += cpu_rq(i)->nr_uninterruptible;
return sum;
{
unsigned long long i, sum = 0;
- for_each_cpu(i)
+ for_each_online_cpu(i)
sum += cpu_rq(i)->nr_switches;
return sum;
{
unsigned long i, sum = 0;
- for_each_cpu(i)
+ for_each_online_cpu(i)
sum += atomic_read(&cpu_rq(i)->nr_iowait);
return sum;
p->array = current->array;
p->array->nr_active++;
rq->nr_running++;
- class_enqueue_task(p,p->array);
+ rq_load_inc(rq,p);
}
} else {
/* Not the local CPU - must adjust timestamp */
{
dequeue_task(p, src_array);
src_rq->nr_running--;
+ rq_load_dec(src_rq,p);
+
set_task_cpu(p, this_cpu);
this_rq->nr_running++;
+ rq_load_inc(this_rq,p);
enqueue_task(p, this_array);
+
p->timestamp = (p->timestamp - src_rq->timestamp_last_tick)
+ this_rq->timestamp_last_tick;
/*
}
#ifdef CONFIG_CKRM_CPU_SCHEDULE
-static inline int ckrm_preferred_task(task_t *tmp,long min, long max,
- int phase, enum idle_type idle)
+
+struct ckrm_cpu_class *find_unbalanced_class(int busiest_cpu, int this_cpu, unsigned long *cls_imbalance)
{
- long pressure = task_load(tmp);
-
- if (pressure > max)
- return 0;
+ struct ckrm_cpu_class *most_unbalanced_class = NULL;
+ struct ckrm_cpu_class *clsptr;
+ int max_unbalance = 0;
- if ((idle == NOT_IDLE) && ! phase && (pressure <= min))
- return 0;
- return 1;
+ list_for_each_entry(clsptr,&active_cpu_classes,links) {
+ struct ckrm_local_runqueue *this_lrq = get_ckrm_local_runqueue(clsptr,this_cpu);
+ struct ckrm_local_runqueue *busiest_lrq = get_ckrm_local_runqueue(clsptr,busiest_cpu);
+ int unbalance_degree;
+
+ unbalance_degree = (local_queue_nr_running(busiest_lrq) - local_queue_nr_running(this_lrq)) * cpu_class_weight(clsptr);
+ if (unbalance_degree >= *cls_imbalance)
+ continue; // already looked at this class
+
+ if (unbalance_degree > max_unbalance) {
+ max_unbalance = unbalance_degree;
+ most_unbalanced_class = clsptr;
+ }
+ }
+ *cls_imbalance = max_unbalance;
+ return most_unbalanced_class;
}
+
/*
- * move tasks for a specic local class
- * return number of tasks pulled
+ * find_busiest_queue - find the busiest runqueue among the cpus in cpumask.
*/
-static inline int ckrm_cls_move_tasks(ckrm_lrq_t* src_lrq,ckrm_lrq_t*dst_lrq,
- runqueue_t *this_rq,
- runqueue_t *busiest,
- struct sched_domain *sd,
- int this_cpu,
- enum idle_type idle,
- long* pressure_imbalance)
+static int find_busiest_cpu(runqueue_t *this_rq, int this_cpu, int idle,
+ int *imbalance)
{
- prio_array_t *array, *dst_array;
+ int cpu_load, load, max_load, i, busiest_cpu;
+ runqueue_t *busiest, *rq_src;
+
+
+ /*Hubertus ... the concept of nr_running is replace with cpu_load */
+ cpu_load = this_rq->ckrm_cpu_load;
+
+ busiest = NULL;
+ busiest_cpu = -1;
+
+ max_load = -1;
+ for_each_online_cpu(i) {
+ rq_src = cpu_rq(i);
+ load = rq_src->ckrm_cpu_load;
+
+ if ((load > max_load) && (rq_src != this_rq)) {
+ busiest = rq_src;
+ busiest_cpu = i;
+ max_load = load;
+ }
+ }
+
+ if (likely(!busiest))
+ goto out;
+
+ *imbalance = max_load - cpu_load;
+
+ /* It needs an at least ~25% imbalance to trigger balancing. */
+ if (!idle && ((*imbalance)*4 < max_load)) {
+ busiest = NULL;
+ goto out;
+ }
+
+ double_lock_balance(this_rq, busiest);
+ /*
+ * Make sure nothing changed since we checked the
+ * runqueue length.
+ */
+ if (busiest->ckrm_cpu_load <= cpu_load) {
+ spin_unlock(&busiest->lock);
+ busiest = NULL;
+ }
+out:
+ return (busiest ? busiest_cpu : -1);
+}
+
+static int load_balance(int this_cpu, runqueue_t *this_rq,
+ struct sched_domain *sd, enum idle_type idle)
+{
+ int imbalance, idx;
+ int busiest_cpu;
+ runqueue_t *busiest;
+ prio_array_t *array;
struct list_head *head, *curr;
task_t *tmp;
- int idx;
- int pulled = 0;
- int phase = -1;
- long pressure_min, pressure_max;
- /*hzheng: magic : 90% balance is enough*/
- long balance_min = *pressure_imbalance / 10;
-/*
- * we don't want to migrate tasks that will reverse the balance
- * or the tasks that make too small difference
- */
-#define CKRM_BALANCE_MAX_RATIO 100
-#define CKRM_BALANCE_MIN_RATIO 1
- start:
- phase ++;
+ struct ckrm_local_runqueue * busiest_local_queue;
+ struct ckrm_cpu_class *clsptr;
+ int weight;
+ unsigned long cls_imbalance; // so we can retry other classes
+
+ // need to update global CVT based on local accumulated CVTs
+ read_lock(&class_list_lock);
+ busiest_cpu = find_busiest_cpu(this_rq, this_cpu, idle, &imbalance);
+ if (busiest_cpu == -1)
+ goto out;
+
+ busiest = cpu_rq(busiest_cpu);
+
+ /*
+ * We only want to steal a number of tasks equal to 1/2 the imbalance,
+ * otherwise we'll just shift the imbalance to the new queue:
+ */
+ imbalance /= 2;
+
+ /* now find class on that runqueue with largest inbalance */
+ cls_imbalance = 0xFFFFFFFF;
+
+ retry_other_class:
+ clsptr = find_unbalanced_class(busiest_cpu, this_cpu, &cls_imbalance);
+ if (!clsptr)
+ goto out_unlock;
+
+ busiest_local_queue = get_ckrm_local_runqueue(clsptr,busiest_cpu);
+ weight = cpu_class_weight(clsptr);
+
/*
* We first consider expired tasks. Those will likely not be
* executed in the near future, and they are most likely to
* be cache-cold, thus switching CPUs has the least effect
* on them.
*/
- if (src_lrq->expired->nr_active) {
- array = src_lrq->expired;
- dst_array = dst_lrq->expired;
- } else {
- array = src_lrq->active;
- dst_array = dst_lrq->active;
- }
+ if (busiest_local_queue->expired->nr_active)
+ array = busiest_local_queue->expired;
+ else
+ array = busiest_local_queue->active;
new_array:
/* Start searching at priority 0: */
else
idx = find_next_bit(array->bitmap, MAX_PRIO, idx);
if (idx >= MAX_PRIO) {
- if (array == src_lrq->expired && src_lrq->active->nr_active) {
- array = src_lrq->active;
- dst_array = dst_lrq->active;
+ if (array == busiest_local_queue->expired && busiest_local_queue->active->nr_active) {
+ array = busiest_local_queue->active;
goto new_array;
}
- if ((! phase) && (! pulled) && (idle != IDLE))
- goto start; //try again
- else
- goto out; //finished search for this lrq
+ goto retry_other_class;
}
head = array->queue + idx;
curr = curr->prev;
- if (!can_migrate_task(tmp, busiest, this_cpu, sd, idle)) {
+ if (!can_migrate_task(tmp, busiest, this_cpu, sd,idle)) {
if (curr != head)
goto skip_queue;
idx++;
goto skip_bitmap;
}
-
- pressure_min = *pressure_imbalance * CKRM_BALANCE_MIN_RATIO/100;
- pressure_max = *pressure_imbalance * CKRM_BALANCE_MAX_RATIO/100;
+ pull_task(busiest, array, tmp, this_rq, rq_active(tmp,this_rq),this_cpu);
/*
- * skip the tasks that will reverse the balance too much
+ * tmp BUG FIX: hzheng
+ * load balancing can make the busiest local queue empty
+ * thus it should be removed from bpt
*/
- if (ckrm_preferred_task(tmp,pressure_min,pressure_max,phase,idle)) {
- *pressure_imbalance -= task_load(tmp);
- pull_task(busiest, array, tmp,
- this_rq, dst_array, this_cpu);
- pulled++;
-
- if (*pressure_imbalance <= balance_min)
- goto out;
+ if (! local_queue_nr_running(busiest_local_queue)) {
+ classqueue_dequeue(busiest_local_queue->classqueue,&busiest_local_queue->classqueue_linkobj);
+ cpu_demand_event(get_rq_local_stat(busiest_local_queue,busiest_cpu),CPU_DEMAND_DEQUEUE,0);
}
-
- if (curr != head)
- goto skip_queue;
- idx++;
- goto skip_bitmap;
- out:
- return pulled;
-}
-
-static inline long ckrm_rq_imbalance(runqueue_t *this_rq,runqueue_t *dst_rq)
-{
- long imbalance;
- /*
- * make sure after balance, imbalance' > - imbalance/2
- * we don't want the imbalance be reversed too much
- */
- imbalance = pid_get_pressure(rq_ckrm_load(dst_rq),0)
- - pid_get_pressure(rq_ckrm_load(this_rq),1);
- imbalance /= 2;
- return imbalance;
-}
-
-/*
- * try to balance the two runqueues
- *
- * Called with both runqueues locked.
- * if move_tasks is called, it will try to move at least one task over
- */
-static int move_tasks(runqueue_t *this_rq, int this_cpu, runqueue_t *busiest,
- unsigned long max_nr_move, struct sched_domain *sd,
- enum idle_type idle)
-{
- struct ckrm_cpu_class *clsptr,*vip_cls = NULL;
- ckrm_lrq_t* src_lrq,*dst_lrq;
- long pressure_imbalance, pressure_imbalance_old;
- int src_cpu = task_cpu(busiest->curr);
- struct list_head *list;
- int pulled = 0;
- long imbalance;
-
- imbalance = ckrm_rq_imbalance(this_rq,busiest);
-
- if ((idle == NOT_IDLE && imbalance <= 0) || busiest->nr_running <= 1)
- goto out;
-
- //try to find the vip class
- list_for_each_entry(clsptr,&active_cpu_classes,links) {
- src_lrq = get_ckrm_lrq(clsptr,src_cpu);
- if (! lrq_nr_running(src_lrq))
- continue;
-
- if (! vip_cls || cpu_class_weight(vip_cls) < cpu_class_weight(clsptr) )
- {
- vip_cls = clsptr;
- }
+ imbalance -= weight;
+ if (!idle && (imbalance>0)) {
+ if (curr != head)
+ goto skip_queue;
+ idx++;
+ goto skip_bitmap;
}
-
- /*
- * do search from the most significant class
- * hopefully, less tasks will be migrated this way
- */
- clsptr = vip_cls;
-
- move_class:
- if (! clsptr)
- goto out;
-
-
- src_lrq = get_ckrm_lrq(clsptr,src_cpu);
- if (! lrq_nr_running(src_lrq))
- goto other_class;
-
- dst_lrq = get_ckrm_lrq(clsptr,this_cpu);
-
- //how much pressure for this class should be transferred
- pressure_imbalance = src_lrq->lrq_load * imbalance/src_lrq->local_weight;
- if (pulled && ! pressure_imbalance)
- goto other_class;
-
- pressure_imbalance_old = pressure_imbalance;
-
- //move tasks
- pulled +=
- ckrm_cls_move_tasks(src_lrq,dst_lrq,
- this_rq,
- busiest,
- sd,this_cpu,idle,
- &pressure_imbalance);
-
- /*
- * hzheng: 2 is another magic number
- * stop balancing if the imbalance is less than 25% of the orig
- */
- if (pressure_imbalance <= (pressure_imbalance_old >> 2))
- goto out;
-
- //update imbalance
- imbalance *= pressure_imbalance / pressure_imbalance_old;
- other_class:
- //who is next?
- list = clsptr->links.next;
- if (list == &active_cpu_classes)
- list = list->next;
- clsptr = list_entry(list, typeof(*clsptr), links);
- if (clsptr != vip_cls)
- goto move_class;
+ out_unlock:
+ spin_unlock(&busiest->lock);
out:
- return pulled;
-}
-
-/**
- * ckrm_check_balance - is load balancing necessary?
- * return 0 if load balancing is not necessary
- * otherwise return the average load of the system
- * also, update nr_group
- *
- * heuristics:
- * no load balancing if it's load is over average
- * no load balancing if it's load is far more than the min
- * task:
- * read the status of all the runqueues
- */
-static unsigned long ckrm_check_balance(struct sched_domain *sd, int this_cpu,
- enum idle_type idle, int* nr_group)
-{
- struct sched_group *group = sd->groups;
- unsigned long min_load, max_load, avg_load;
- unsigned long total_load, this_load, total_pwr;
-
- max_load = this_load = total_load = total_pwr = 0;
- min_load = 0xFFFFFFFF;
- *nr_group = 0;
-
- do {
- cpumask_t tmp;
- unsigned long load;
- int local_group;
- int i, nr_cpus = 0;
-
- /* Tally up the load of all CPUs in the group */
- cpus_and(tmp, group->cpumask, cpu_online_map);
- if (unlikely(cpus_empty(tmp)))
- goto nextgroup;
-
- avg_load = 0;
- local_group = cpu_isset(this_cpu, group->cpumask);
-
- for_each_cpu_mask(i, tmp) {
- load = pid_get_pressure(rq_ckrm_load(cpu_rq(i)),local_group);
- nr_cpus++;
- avg_load += load;
- }
-
- if (!nr_cpus)
- goto nextgroup;
-
- total_load += avg_load;
- total_pwr += group->cpu_power;
-
- /* Adjust by relative CPU power of the group */
- avg_load = (avg_load * SCHED_LOAD_SCALE) / group->cpu_power;
-
- if (local_group) {
- this_load = avg_load;
- goto nextgroup;
- } else if (avg_load > max_load) {
- max_load = avg_load;
- }
- if (avg_load < min_load) {
- min_load = avg_load;
- }
-nextgroup:
- group = group->next;
- *nr_group = *nr_group + 1;
- } while (group != sd->groups);
-
- if (!max_load || this_load >= max_load)
- goto out_balanced;
-
- avg_load = (SCHED_LOAD_SCALE * total_load) / total_pwr;
-
- /* hzheng: debugging: 105 is a magic number
- * 100*max_load <= sd->imbalance_pct*this_load)
- * should use imbalance_pct instead
- */
- if (this_load > avg_load
- || 100*max_load < 105*this_load
- || 100*min_load < 70*this_load
- )
- goto out_balanced;
-
- return avg_load;
- out_balanced:
- return 0;
-}
-
-/**
- * any group that has above average load is considered busy
- * find the busiest queue from any of busy group
- */
-static runqueue_t *
-ckrm_find_busy_queue(struct sched_domain *sd, int this_cpu,
- unsigned long avg_load, enum idle_type idle,
- int nr_group)
-{
- struct sched_group *group;
- runqueue_t * busiest=NULL;
- unsigned long rand;
-
- group = sd->groups;
- rand = get_ckrm_rand(nr_group);
- nr_group = 0;
-
- do {
- unsigned long load,total_load,max_load;
- cpumask_t tmp;
- int i;
- runqueue_t * grp_busiest;
-
- cpus_and(tmp, group->cpumask, cpu_online_map);
- if (unlikely(cpus_empty(tmp)))
- goto find_nextgroup;
-
- total_load = 0;
- max_load = 0;
- grp_busiest = NULL;
- for_each_cpu_mask(i, tmp) {
- load = pid_get_pressure(rq_ckrm_load(cpu_rq(i)),0);
- total_load += load;
- if (load > max_load) {
- max_load = load;
- grp_busiest = cpu_rq(i);
- }
- }
-
- total_load = (total_load * SCHED_LOAD_SCALE) / group->cpu_power;
- if (total_load > avg_load) {
- busiest = grp_busiest;
- if (nr_group >= rand)
- break;
- }
- find_nextgroup:
- group = group->next;
- nr_group ++;
- } while (group != sd->groups);
-
- return busiest;
-}
-
-/**
- * load_balance - pressure based load balancing algorithm used by ckrm
- */
-static int ckrm_load_balance(int this_cpu, runqueue_t *this_rq,
- struct sched_domain *sd, enum idle_type idle)
-{
- runqueue_t *busiest;
- unsigned long avg_load;
- int nr_moved,nr_group;
-
- avg_load = ckrm_check_balance(sd, this_cpu, idle, &nr_group);
- if (! avg_load)
- goto out_balanced;
-
- busiest = ckrm_find_busy_queue(sd,this_cpu,avg_load,idle,nr_group);
- if (! busiest)
- goto out_balanced;
- /*
- * This should be "impossible", but since load
- * balancing is inherently racy and statistical,
- * it could happen in theory.
- */
- if (unlikely(busiest == this_rq)) {
- WARN_ON(1);
- goto out_balanced;
- }
-
- nr_moved = 0;
- if (busiest->nr_running > 1) {
- /*
- * Attempt to move tasks. If find_busiest_group has found
- * an imbalance but busiest->nr_running <= 1, the group is
- * still unbalanced. nr_moved simply stays zero, so it is
- * correctly treated as an imbalance.
- */
- double_lock_balance(this_rq, busiest);
- nr_moved = move_tasks(this_rq, this_cpu, busiest,
- 0,sd, idle);
- spin_unlock(&busiest->lock);
- if (nr_moved) {
- adjust_local_weight();
- }
- }
-
- if (!nr_moved)
- sd->nr_balance_failed ++;
- else
- sd->nr_balance_failed = 0;
-
- /* We were unbalanced, so reset the balancing interval */
- sd->balance_interval = sd->min_interval;
-
- return nr_moved;
-
-out_balanced:
- /* tune up the balancing interval */
- if (sd->balance_interval < sd->max_interval)
- sd->balance_interval *= 2;
-
+ read_unlock(&class_list_lock);
return 0;
}
-/*
- * this_rq->lock is already held
- */
-static inline int load_balance_newidle(int this_cpu, runqueue_t *this_rq,
- struct sched_domain *sd)
-{
- int ret;
- read_lock(&class_list_lock);
- ret = ckrm_load_balance(this_cpu,this_rq,sd,NEWLY_IDLE);
- read_unlock(&class_list_lock);
- return ret;
-}
-static inline int load_balance(int this_cpu, runqueue_t *this_rq,
- struct sched_domain *sd, enum idle_type idle)
+static inline void idle_balance(int this_cpu, runqueue_t *this_rq)
{
- int ret;
-
- spin_lock(&this_rq->lock);
- read_lock(&class_list_lock);
- ret= ckrm_load_balance(this_cpu,this_rq,sd,NEWLY_IDLE);
- read_unlock(&class_list_lock);
- spin_unlock(&this_rq->lock);
- return ret;
}
-#else /*! CONFIG_CKRM_CPU_SCHEDULE */
+#else /* CONFIG_CKRM_CPU_SCHEDULE */
/*
* move_tasks tries to move up to max_nr_move tasks from busiest to this_rq,
* as part of a balancing operation within "domain". Returns the number of
out:
return nr_moved;
}
-#endif /* CONFIG_CKRM_CPU_SCHEDULE*/
-
/*
* idle_balance is called by schedule() if this_cpu is about to become
next_group:
group = group->next;
} while (group != sd->groups);
+>>>>>>> 1.1.9.3
}
+#endif /* CONFIG_CKRM_CPU_SCHEDULE*/
/*
* rebalance_tick will get called every timer tick, on every CPU.
unsigned long j = jiffies + CPU_OFFSET(this_cpu);
struct sched_domain *sd;
+ ckrm_rebalance_tick(j,this_cpu);
+
/* Update our load */
old_load = this_rq->cpu_load;
this_load = this_rq->nr_running * SCHED_LOAD_SCALE;
*/
static inline void rebalance_tick(int cpu, runqueue_t *rq, enum idle_type idle)
{
+ ckrm_rebalance_tick(jiffies,cpu);
}
+
static inline void idle_balance(int cpu, runqueue_t *rq)
{
}
return 0;
}
-DEFINE_PER_CPU(struct kernel_stat, kstat);
+DEFINE_PER_CPU(struct kernel_stat, kstat) = { { 0 } };
+
EXPORT_PER_CPU_SYMBOL(kstat);
/*
#define EXPIRED_STARVING(rq) \
(STARVATION_LIMIT && ((rq)->expired_timestamp && \
(jiffies - (rq)->expired_timestamp >= \
- STARVATION_LIMIT * (lrq_nr_running(rq)) + 1)))
+ STARVATION_LIMIT * (local_queue_nr_running(rq)) + 1)))
#endif
/*
}
if (p == rq->idle) {
-#ifdef CONFIG_VSERVER_HARDCPU
if (!--rq->idle_tokens && !list_empty(&rq->hold_queue))
set_need_resched();
-#endif
if (atomic_read(&rq->nr_iowait) > 0)
cpustat->iowait += sys_ticks;
cpustat->idle += sys_ticks;
if (wake_priority_sleeper(rq))
goto out;
- ckrm_sched_tick(jiffies,cpu,rq_ckrm_load(rq));
rebalance_tick(cpu, rq, IDLE);
return;
}
}
goto out_unlock;
}
+#warning MEF PLANETLAB: "if (vx_need_resched(p)) was if (!--p->time_slice) */"
if (vx_need_resched(p)) {
#ifdef CONFIG_CKRM_CPU_SCHEDULE
/* Hubertus ... we can abstract this out */
- ckrm_lrq_t* rq = get_task_lrq(p);
+ struct ckrm_local_runqueue* rq = get_task_class_queue(p);
#endif
dequeue_task(p, rq->active);
set_tsk_need_resched(p);
out_unlock:
spin_unlock(&rq->lock);
out:
- ckrm_sched_tick(jiffies,cpu,rq_ckrm_load(rq));
rebalance_tick(cpu, rq, NOT_IDLE);
}
int maxidle = -HZ;
#endif
- /*
- * If crash dump is in progress, this other cpu's
- * need to wait until it completes.
- * NB: this code is optimized away for kernels without
- * dumping enabled.
- */
- if (unlikely(dump_oncpu))
- goto dump_scheduling_disabled;
-
//WARN_ON(system_state == SYSTEM_BOOTING);
/*
* Test if we are atomic. Since do_exit() needs to call into
spin_lock_irq(&rq->lock);
-#ifdef CONFIG_CKRM_CPU_SCHEDULE
- if (prev != rq->idle) {
- unsigned long long run = now - prev->timestamp;
- ckrm_lrq_t * lrq = get_task_lrq(prev);
-
- lrq->lrq_load -= task_load(prev);
- cpu_demand_event(&prev->demand_stat,CPU_DEMAND_DESCHEDULE,run);
- lrq->lrq_load += task_load(prev);
-
- cpu_demand_event(get_task_lrq_stat(prev),CPU_DEMAND_DESCHEDULE,run);
- update_local_cvt(prev, run);
- }
-#endif
/*
* if entering off of a kernel preemption go straight
* to picking the next task.
#endif
if (unlikely(!rq->nr_running)) {
idle_balance(cpu, rq);
- if (!rq->nr_running) {
- next = rq->idle;
-#ifdef CONFIG_CKRM_CPU_SCHEDULE
- rq->expired_timestamp = 0;
-#endif
- wake_sleeping_dependent(cpu, rq);
- goto switch_tasks;
- }
+ if (!rq->nr_running) {
+ next = rq->idle;
+ rq->expired_timestamp = 0;
+ wake_sleeping_dependent(cpu, rq);
+ goto switch_tasks;
+ }
}
next = rq_get_next_task(rq);
+ if (next == rq->idle)
+ goto switch_tasks;
if (dependent_sleeper(cpu, rq, next)) {
next = rq->idle;
rq->nr_preempt++;
RCU_qsctr(task_cpu(prev))++;
+#ifdef CONFIG_CKRM_CPU_SCHEDULE
+ if (prev != rq->idle) {
+ unsigned long long run = now - prev->timestamp;
+ cpu_demand_event(get_task_local_stat(prev),CPU_DEMAND_DESCHEDULE,run);
+ update_local_cvt(prev, run);
+ }
+#endif
+
prev->sleep_avg -= run_time;
if ((long)prev->sleep_avg <= 0) {
prev->sleep_avg = 0;
preempt_enable_no_resched();
if (test_thread_flag(TIF_NEED_RESCHED))
goto need_resched;
-
- return;
-
- dump_scheduling_disabled:
- /* allow scheduling only if this is the dumping cpu */
- if (dump_oncpu != smp_processor_id()+1) {
- while (dump_oncpu)
- cpu_relax();
- }
- return;
}
EXPORT_SYMBOL(schedule);
+
#ifdef CONFIG_PREEMPT
/*
* this is is the entry point to schedule() from in-kernel preemption
if (!cpu_isset(dest_cpu, p->cpus_allowed))
goto out;
+ set_task_cpu(p, dest_cpu);
if (p->array) {
/*
* Sync timestamp with rq_dest's before activating.
p->timestamp = p->timestamp - rq_src->timestamp_last_tick
+ rq_dest->timestamp_last_tick;
deactivate_task(p, rq_src);
- set_task_cpu(p, dest_cpu);
activate_task(p, rq_dest, 0);
if (TASK_PREEMPTS_CURR(p, rq_dest))
resched_task(rq_dest->curr);
- } else
- set_task_cpu(p, dest_cpu);
+ }
out:
double_rq_unlock(rq_src, rq_dest);
}
if (rq->active_balance) {
+#ifndef CONFIG_CKRM_CPU_SCHEDULE
active_load_balance(rq, cpu);
+#endif
rq->active_balance = 0;
}
{
runqueue_t *rq;
int i;
+#ifndef CONFIG_CKRM_CPU_SCHEDULE
+ int j, k;
+#endif
#ifdef CONFIG_SMP
/* Set up an initial dummy domain for early boot */
sched_domain_init.groups = &sched_group_init;
sched_domain_init.last_balance = jiffies;
sched_domain_init.balance_interval = INT_MAX; /* Don't balance */
- sched_domain_init.busy_factor = 1;
memset(&sched_group_init, 0, sizeof(struct sched_group));
sched_group_init.cpumask = CPU_MASK_ALL;
sched_group_init.next = &sched_group_init;
sched_group_init.cpu_power = SCHED_LOAD_SCALE;
#endif
+
init_cpu_classes();
for (i = 0; i < NR_CPUS; i++) {
#ifndef CONFIG_CKRM_CPU_SCHEDULE
- int j, k;
prio_array_t *array;
-
+#endif
rq = cpu_rq(i);
spin_lock_init(&rq->lock);
- for (j = 0; j < 2; j++) {
- array = rq->arrays + j;
- for (k = 0; k < MAX_PRIO; k++) {
- INIT_LIST_HEAD(array->queue + k);
- __clear_bit(k, array->bitmap);
- }
- // delimiter for bitsearch
- __set_bit(MAX_PRIO, array->bitmap);
- }
-
+#ifndef CONFIG_CKRM_CPU_SCHEDULE
rq->active = rq->arrays;
rq->expired = rq->arrays + 1;
#else
- rq = cpu_rq(i);
- spin_lock_init(&rq->lock);
+ rq->ckrm_cpu_load = 0;
#endif
-
rq->best_expired_prio = MAX_PRIO;
#ifdef CONFIG_SMP
rq->sd = &sched_domain_init;
rq->cpu_load = 0;
-#ifdef CONFIG_CKRM_CPU_SCHEDULE
- ckrm_load_init(rq_ckrm_load(rq));
-#endif
rq->active_balance = 0;
rq->push_cpu = 0;
rq->migration_thread = NULL;
INIT_LIST_HEAD(&rq->migration_queue);
#endif
-#ifdef CONFIG_VSERVER_HARDCPU
INIT_LIST_HEAD(&rq->hold_queue);
-#endif
atomic_set(&rq->nr_iowait, 0);
+
+#ifndef CONFIG_CKRM_CPU_SCHEDULE
+ for (j = 0; j < 2; j++) {
+ array = rq->arrays + j;
+ for (k = 0; k < MAX_PRIO; k++) {
+ INIT_LIST_HEAD(array->queue + k);
+ __clear_bit(k, array->bitmap);
+ }
+ // delimiter for bitsearch
+ __set_bit(MAX_PRIO, array->bitmap);
+ }
+#endif
}
/*
rq->idle = current;
set_task_cpu(current, smp_processor_id());
#ifdef CONFIG_CKRM_CPU_SCHEDULE
- cpu_demand_event(&(current)->demand_stat,CPU_DEMAND_INIT,0);
- current->cpu_class = get_default_cpu_class();
+ current->cpu_class = default_cpu_class;
current->array = NULL;
#endif
wake_up_forked_process(current);
#ifdef CONFIG_CKRM_CPU_SCHEDULE
/**
* return the classqueue object of a certain processor
+ * Note: not supposed to be used in performance sensitive functions
*/
struct classqueue_struct * get_cpu_classqueue(int cpu)
{
return (& (cpu_rq(cpu)->classqueue) );
}
-
-/**
- * _ckrm_cpu_change_class - change the class of a task
- */
-void _ckrm_cpu_change_class(task_t *tsk, struct ckrm_cpu_class *newcls)
-{
- prio_array_t *array;
- struct runqueue *rq;
- unsigned long flags;
-
- rq = task_rq_lock(tsk,&flags);
- array = tsk->array;
- if (array) {
- dequeue_task(tsk,array);
- tsk->cpu_class = newcls;
- enqueue_task(tsk,rq_active(tsk,rq));
- } else
- tsk->cpu_class = newcls;
-
- task_rq_unlock(rq,&flags);
-}
#endif
INIT_LIST_HEAD(&q->list);
q->flags = 0;
q->lock = NULL;
+#warning MEF PLANETLAB: q->user = get_uid(current->user); is something new in Fedora Core.
q->user = get_uid(current->user);
atomic_inc(&q->user->sigpending);
}
}
}
-EXPORT_SYMBOL_GPL(flush_signal_handlers);
/* Notify the system that a driver wants to block all signals for this
* process, and wants to be notified if any signals at all were to be
struct task_struct *t)
{
int error = -EINVAL;
- int user;
-
if (sig < 0 || sig > _NSIG)
return error;
-
- user = (!info ||
- (info != SEND_SIG_PRIV &&
- info != SEND_SIG_FORCED &&
- SI_FROMUSER(info)));
-
error = -EPERM;
- if (user && (sig != SIGCONT ||
- current->signal->session != t->signal->session)
+ if ((!info || ((unsigned long)info != 1 &&
+ (unsigned long)info != 2 && SI_FROMUSER(info)))
+ && ((sig != SIGCONT) ||
+ (current->signal->session != t->signal->session))
&& (current->euid ^ t->suid) && (current->euid ^ t->uid)
&& (current->uid ^ t->suid) && (current->uid ^ t->uid)
&& !capable(CAP_KILL))
return error;
-
- error = -ESRCH;
- if (user && !vx_check(vx_task_xid(t), VX_ADMIN|VX_IDENT))
- return error;
-
return security_task_kill(t, info, sig);
}
if (q) {
q->flags = 0;
+#warning MEF PLANETLAB: q->user = get_uid(t->user); is something new in Fedora Core.
q->user = get_uid(t->user);
atomic_inc(&q->user->sigpending);
list_add_tail(&q->list, &signals->list);
unsigned long flags;
int ret;
+ if (!vx_check(vx_task_xid(p), VX_ADMIN|VX_WATCH|VX_IDENT))
+ return -ESRCH;
+
ret = check_kill_permission(sig, info, p);
if (!ret && sig && p->sighand) {
spin_lock_irqsave(&p->sighand->siglock, flags);
#include <linux/security.h>
#include <linux/dcookies.h>
#include <linux/suspend.h>
-#include <linux/ckrm.h>
#include <linux/vs_base.h>
#include <linux/vs_cvirt.h>
+#include <linux/ckrm.h>
#include <asm/uaccess.h>
#include <asm/io.h>
old_rlim = current->rlim + resource;
if (((new_rlim.rlim_cur > old_rlim->rlim_max) ||
(new_rlim.rlim_max > old_rlim->rlim_max)) &&
- !capable(CAP_SYS_RESOURCE) && !vx_ccaps(VXC_SET_RLIMIT))
+ !capable(CAP_SYS_RESOURCE) && vx_ccaps(VXC_SET_RLIMIT))
return -EPERM;
if (resource == RLIMIT_NOFILE) {
if (new_rlim.rlim_cur > NR_OPEN || new_rlim.rlim_max > NR_OPEN)
#if defined(CONFIG_PPC32) && defined(CONFIG_6xx)
extern unsigned long powersave_nap;
int proc_dol2crvec(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos);
+ void *buffer, size_t *lenp);
#endif
#ifdef CONFIG_BSD_PROCESS_ACCT
static int parse_table(int __user *, int, void __user *, size_t __user *, void __user *, size_t,
ctl_table *, void **);
static int proc_doutsstring(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos);
+ void __user *buffer, size_t *lenp);
static ctl_table root_table[];
static struct ctl_table_header root_table_header =
extern ctl_table pty_table[];
#endif
-int sysctl_legacy_va_layout;
-
/* /proc declarations: */
#ifdef CONFIG_PROC_FS
.procname = "tainted",
.data = &tainted,
.maxlen = sizeof(int),
- .mode = 0444,
+ .mode = 0644,
.proc_handler = &proc_dointvec,
},
{
.strategy = &sysctl_intvec,
.extra1 = &zero,
},
- {
- .ctl_name = VM_LEGACY_VA_LAYOUT,
- .procname = "legacy_va_layout",
- .data = &sysctl_legacy_va_layout,
- .maxlen = sizeof(sysctl_legacy_va_layout),
- .mode = 0644,
- .proc_handler = &proc_dointvec,
- .strategy = &sysctl_intvec,
- .extra1 = &zero,
- },
{ .ctl_name = 0 }
};
res = count;
- error = (*table->proc_handler) (table, write, file, buf, &res, ppos);
+ /*
+ * FIXME: we need to pass on ppos to the handler.
+ */
+
+ error = (*table->proc_handler) (table, write, file, buf, &res);
if (error)
return error;
return res;
* Returns 0 on success.
*/
int proc_dostring(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
size_t len;
char __user *p;
char c;
if (!table->data || !table->maxlen || !*lenp ||
- (*ppos && !write)) {
+ (filp->f_pos && !write)) {
*lenp = 0;
return 0;
}
if(copy_from_user(table->data, buffer, len))
return -EFAULT;
((char *) table->data)[len] = 0;
- *ppos += *lenp;
+ filp->f_pos += *lenp;
} else {
len = strlen(table->data);
if (len > table->maxlen)
len++;
}
*lenp = len;
- *ppos += len;
+ filp->f_pos += len;
}
return 0;
}
*/
static int proc_doutsstring(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
int r;
if (!write) {
down_read(&uts_sem);
- r=proc_dostring(table,0,filp,buffer,lenp, ppos);
+ r=proc_dostring(table,0,filp,buffer,lenp);
up_read(&uts_sem);
} else {
down_write(&uts_sem);
- r=proc_dostring(table,1,filp,buffer,lenp, ppos);
+ r=proc_dostring(table,1,filp,buffer,lenp);
up_write(&uts_sem);
}
return r;
}
static int do_proc_dointvec(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos,
+ void __user *buffer, size_t *lenp,
int (*conv)(int *negp, unsigned long *lvalp, int *valp,
int write, void *data),
void *data)
{
-#define TMPBUFLEN 21
+#define TMPBUFLEN 20
int *i, vleft, first=1, neg, val;
unsigned long lval;
size_t left, len;
char __user *s = buffer;
if (!table->data || !table->maxlen || !*lenp ||
- (*ppos && !write)) {
+ (filp->f_pos && !write)) {
*lenp = 0;
return 0;
}
if (write && first)
return -EINVAL;
*lenp -= left;
- *ppos += *lenp;
+ filp->f_pos += *lenp;
return 0;
#undef TMPBUFLEN
}
* Returns 0 on success.
*/
int proc_dointvec(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
- return do_proc_dointvec(table,write,filp,buffer,lenp,ppos,
+ return do_proc_dointvec(table,write,filp,buffer,lenp,
NULL,NULL);
}
*/
int proc_dointvec_bset(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
int op;
}
op = (current->pid == 1) ? OP_SET : OP_AND;
- return do_proc_dointvec(table,write,filp,buffer,lenp,ppos,
+ return do_proc_dointvec(table,write,filp,buffer,lenp,
do_proc_dointvec_bset_conv,&op);
}
* Returns 0 on success.
*/
int proc_dointvec_minmax(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
struct do_proc_dointvec_minmax_conv_param param = {
.min = (int *) table->extra1,
.max = (int *) table->extra2,
};
- return do_proc_dointvec(table, write, filp, buffer, lenp, ppos,
+ return do_proc_dointvec(table, write, filp, buffer, lenp,
do_proc_dointvec_minmax_conv, ¶m);
}
static int do_proc_doulongvec_minmax(ctl_table *table, int write,
struct file *filp,
- void __user *buffer,
- size_t *lenp, loff_t *ppos,
+ void __user *buffer, size_t *lenp,
unsigned long convmul,
unsigned long convdiv)
{
-#define TMPBUFLEN 21
+#define TMPBUFLEN 20
unsigned long *i, *min, *max, val;
int vleft, first=1, neg;
size_t len, left;
char __user *s = buffer;
if (!table->data || !table->maxlen || !*lenp ||
- (*ppos && !write)) {
+ (filp->f_pos && !write)) {
*lenp = 0;
return 0;
}
if (write && first)
return -EINVAL;
*lenp -= left;
- *ppos += *lenp;
+ filp->f_pos += *lenp;
return 0;
#undef TMPBUFLEN
}
* Returns 0 on success.
*/
int proc_doulongvec_minmax(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
- return do_proc_doulongvec_minmax(table, write, filp, buffer, lenp, ppos, 1l, 1l);
+ return do_proc_doulongvec_minmax(table, write, filp, buffer, lenp, 1l, 1l);
}
/**
*/
int proc_doulongvec_ms_jiffies_minmax(ctl_table *table, int write,
struct file *filp,
- void __user *buffer,
- size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
return do_proc_doulongvec_minmax(table, write, filp, buffer,
- lenp, ppos, HZ, 1000l);
+ lenp, HZ, 1000l);
}
* Returns 0 on success.
*/
int proc_dointvec_jiffies(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
- return do_proc_dointvec(table,write,filp,buffer,lenp,ppos,
+ return do_proc_dointvec(table,write,filp,buffer,lenp,
do_proc_dointvec_jiffies_conv,NULL);
}
* Returns 0 on success.
*/
int proc_dointvec_userhz_jiffies(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
- return do_proc_dointvec(table,write,filp,buffer,lenp,ppos,
+ return do_proc_dointvec(table,write,filp,buffer,lenp,
do_proc_dointvec_userhz_jiffies_conv,NULL);
}
#else /* CONFIG_PROC_FS */
int proc_dostring(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
return -ENOSYS;
}
static int proc_doutsstring(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
return -ENOSYS;
}
int proc_dointvec(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
return -ENOSYS;
}
int proc_dointvec_bset(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
return -ENOSYS;
}
int proc_dointvec_minmax(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
return -ENOSYS;
}
int proc_dointvec_jiffies(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
return -ENOSYS;
}
int proc_dointvec_userhz_jiffies(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
return -ENOSYS;
}
int proc_doulongvec_minmax(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
return -ENOSYS;
}
int proc_doulongvec_ms_jiffies_minmax(ctl_table *table, int write,
struct file *filp,
- void __user *buffer,
- size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
return -ENOSYS;
}
}
int proc_dostring(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
return -ENOSYS;
}
int proc_dointvec(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
return -ENOSYS;
}
int proc_dointvec_bset(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
return -ENOSYS;
}
int proc_dointvec_minmax(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
return -ENOSYS;
}
int proc_dointvec_jiffies(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
return -ENOSYS;
}
int proc_dointvec_userhz_jiffies(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
return -ENOSYS;
}
int proc_doulongvec_minmax(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
return -ENOSYS;
}
int proc_doulongvec_ms_jiffies_minmax(ctl_table *table, int write,
struct file *filp,
- void __user *buffer,
- size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
return -ENOSYS;
}
psecs = (p->utime += user);
psecs += (p->stime += system);
- if (psecs / HZ >= p->rlim[RLIMIT_CPU].rlim_cur) {
+ if (psecs / HZ > p->rlim[RLIMIT_CPU].rlim_cur) {
/* Send SIGXCPU every second.. */
if (!(psecs % HZ))
send_sig(SIGXCPU, p, 1);
/* and SIGKILL when we go over max.. */
- if (psecs / HZ >= p->rlim[RLIMIT_CPU].rlim_max)
+ if (psecs / HZ > p->rlim[RLIMIT_CPU].rlim_max)
send_sig(SIGKILL, p, 1);
}
}
This enables the legacy API used in vs1.xx, which allows
to use older tools (for migration purposes).
-config VSERVER_PROC_SECURE
+config PROC_SECURE
bool "Enable Proc Security"
depends on PROC_FS
default y
choice
prompt "Persistent Inode Context Tagging"
- default INOXID_UGID24
+ default INOXID_GID24
help
This adds persistent context information to filesystems
mounted with the tagxid option. Tagging is a requirement
help
no context information is store for inodes
-config INOXID_UID16
- bool "UID16/GID32"
- help
- reduces UID to 16 bit, but leaves GID at 32 bit.
-
config INOXID_GID16
bool "UID32/GID16"
help
reduces GID to 16 bit, but leaves UID at 32 bit.
-config INOXID_UGID24
+config INOXID_GID24
bool "UID24/GID24"
help
uses the upper 8bit from UID and GID for XID tagging
which leaves 24bit for UID/GID each, which should be
more than sufficient for normal use.
-config INOXID_INTERN
+config INOXID_GID32
bool "UID32/GID32"
help
this uses otherwise reserved inode fields in the on
disk representation, which limits the use to a few
filesystems (currently ext2 and ext3)
-config INOXID_RUNTIME
+config INOXID_MAGIC
bool "Runtime"
depends on EXPERIMENTAL
help
endchoice
-config VSERVER_DEBUG
- bool "Compile Debugging Code"
- default n
- help
- Set this to yes if you want to be able to activate
- debugging output at runtime. It adds a probably small
- overhead (~ ??%) to all vserver related functions and
- increases the kernel size by about 20k.
-
endmenu
obj-y += vserver.o
vserver-y := switch.o context.o namespace.o sched.o network.o inode.o \
- limit.o cvirt.o signal.o proc.o helper.o init.o dlimit.o
+ limit.o cvirt.o signal.o proc.o sysctl.o helper.o init.o \
+ dlimit.o
-vserver-$(CONFIG_VSERVER_DEBUG) += sysctl.o
vserver-$(CONFIG_VSERVER_LEGACY) += legacy.o
#include <linux/namespace.h>
#include <linux/rcupdate.h>
-#define CKRM_VSERVER_INTEGRATION
-#ifdef CKRM_VSERVER_INTEGRATION
-#include <linux/ckrm.h>
-#endif //CKRM_VSERVER_INTEGRATION
-
#include <asm/errno.h>
{
struct vx_info *new = NULL;
- vxdprintk(VXD_CBIT(xid, 0), "alloc_vx_info(%d)*", xid);
+ vxdprintk("alloc_vx_info(%d)\n", xid);
/* would this benefit from a slab cache? */
new = kmalloc(sizeof(struct vx_info), GFP_KERNEL);
new->vx_bcaps = CAP_INIT_EFF_SET;
new->vx_ccaps = 0;
- vxdprintk(VXD_CBIT(xid, 0),
- "alloc_vx_info(%d) = %p", xid, new);
+ vxdprintk("alloc_vx_info(%d) = %p\n", xid, new);
return new;
}
static void __dealloc_vx_info(struct vx_info *vxi)
{
- vxdprintk(VXD_CBIT(xid, 0),
- "dealloc_vx_info(%p)", vxi);
+ vxdprintk("dealloc_vx_info(%p)\n", vxi);
vxi->vx_hlist.next = LIST_POISON1;
vxi->vx_id = -1;
{
struct hlist_head *head;
- vxdprintk(VXD_CBIT(xid, 4),
- "__hash_vx_info: %p[#%d]", vxi, vxi->vx_id);
+ vxdprintk("__hash_vx_info: %p[#%d]\n", vxi, vxi->vx_id);
get_vx_info(vxi);
head = &vx_info_hash[__hashval(vxi->vx_id)];
hlist_add_head_rcu(&vxi->vx_hlist, head);
static inline void __unhash_vx_info(struct vx_info *vxi)
{
- vxdprintk(VXD_CBIT(xid, 4),
- "__unhash_vx_info: %p[#%d]", vxi, vxi->vx_id);
+ vxdprintk("__unhash_vx_info: %p[#%d]\n", vxi, vxi->vx_id);
hlist_del_rcu(&vxi->vx_hlist);
put_vx_info(vxi);
}
do {
if (++seq > MAX_S_CONTEXT)
seq = MIN_D_CONTEXT;
- if (!__lookup_vx_info(seq)) {
- vxdprintk(VXD_CBIT(xid, 4),
- "__vx_dynamic_id: [#%d]", seq);
+ if (!__lookup_vx_info(seq))
return seq;
- }
} while (barrier != seq);
return 0;
}
{
struct vx_info *new, *vxi = NULL;
- vxdprintk(VXD_CBIT(xid, 1), "loc_vx_info(%d)*", id);
+ vxdprintk("loc_vx_info(%d)\n", id);
if (!(new = __alloc_vx_info(id))) {
*err = -ENOMEM;
else if ((vxi = __lookup_vx_info(id))) {
/* context in setup is not available */
if (vxi->vx_flags & VXF_STATE_SETUP) {
- vxdprintk(VXD_CBIT(xid, 0),
- "loc_vx_info(%d) = %p (not available)", id, vxi);
+ vxdprintk("loc_vx_info(%d) = %p (not available)\n", id, vxi);
vxi = NULL;
*err = -EBUSY;
} else {
- vxdprintk(VXD_CBIT(xid, 0),
- "loc_vx_info(%d) = %p (found)", id, vxi);
+ vxdprintk("loc_vx_info(%d) = %p (found)\n", id, vxi);
get_vx_info(vxi);
*err = 0;
}
}
/* new context requested */
- vxdprintk(VXD_CBIT(xid, 0),
- "loc_vx_info(%d) = %p (new)", id, new);
+ vxdprintk("loc_vx_info(%d) = %p (new)\n", id, new);
__hash_vx_info(get_vx_info(new));
vxi = new, new = NULL;
*err = 1;
void rcu_free_vx_info(struct rcu_head *head)
{
- struct vx_info *vxi = container_of(head, struct vx_info, vx_rcu);
+ struct vx_info *vxi = container_of(head, struct vx_info, vx_rcu);
int usecnt, refcnt;
BUG_ON(!vxi || !head);
refcnt = atomic_read(&vxi->vx_refcnt);
BUG_ON(refcnt < 0);
- vxdprintk(VXD_CBIT(xid, 3),
- "rcu_free_vx_info(%p): uc=%d", vxi, usecnt);
if (!usecnt)
__dealloc_vx_info(vxi);
else
static inline int vx_nofiles_task(struct task_struct *tsk)
{
struct files_struct *files = tsk->files;
- const unsigned long *obptr;
+ const unsigned long *obptr, *cbptr;
int count, total;
spin_lock(&files->file_lock);
obptr = files->open_fds->fds_bits;
+ cbptr = files->close_on_exec->fds_bits;
count = files->max_fds / (sizeof(unsigned long) * 8);
for (total = 0; count > 0; count--) {
if (*obptr)
total += hweight_long(*obptr);
obptr++;
+ /* if (*cbptr)
+ total += hweight_long(*cbptr);
+ cbptr++; */
}
spin_unlock(&files->file_lock);
return total;
}
-#if 0
-
static inline int vx_openfd_task(struct task_struct *tsk)
{
struct files_struct *files = tsk->files;
return total;
}
-#endif
-
/*
* migrate task to new context
* gets vxi, puts old_vxi on change
if (old_vxi == vxi)
goto out;
- vxdprintk(VXD_CBIT(xid, 5),
- "vx_migrate_task(%p,%p[#%d.%d])", p, vxi,
+ vxdprintk("vx_migrate_task(%p,%p[#%d.%d])\n", p, vxi,
vxi->vx_id, atomic_read(&vxi->vx_usecnt));
if (!(ret = vx_migrate_user(p, vxi))) {
- int nofiles;
+ int openfd, nofiles;
task_lock(p);
- // openfd = vx_openfd_task(p);
+ openfd = vx_openfd_task(p);
nofiles = vx_nofiles_task(p);
if (old_vxi) {
atomic_dec(&old_vxi->cacct.nr_threads);
atomic_dec(&old_vxi->limit.rcur[RLIMIT_NPROC]);
- atomic_sub(nofiles, &old_vxi->limit.rcur[RLIMIT_NOFILE]);
- // atomic_sub(openfd, &old_vxi->limit.rcur[RLIMIT_OPENFD]);
+ atomic_sub(nofiles, &vxi->limit.rcur[RLIMIT_NOFILE]);
+ atomic_sub(openfd, &vxi->limit.rcur[RLIMIT_OPENFD]);
}
atomic_inc(&vxi->cacct.nr_threads);
atomic_inc(&vxi->limit.rcur[RLIMIT_NPROC]);
atomic_add(nofiles, &vxi->limit.rcur[RLIMIT_NOFILE]);
- // atomic_add(openfd, &vxi->limit.rcur[RLIMIT_OPENFD]);
-
- vxdprintk(VXD_CBIT(xid, 5),
- "moved task %p into vxi:%p[#%d]",
- p, vxi, vxi->vx_id);
-
+ atomic_add(openfd, &vxi->limit.rcur[RLIMIT_OPENFD]);
/* should be handled in set_vx_info !! */
if (old_vxi)
clr_vx_info(&p->vx_info);
// put_vx_info(old_vxi);
}
out:
-
-
-#ifdef CKRM_VSERVER_INTEGRATION
- do {
- ckrm_cb_xid(p);
- } while (0);
-#endif //CKRM_VSERVER_INTEGRATION
-
-
put_vx_info(old_vxi);
return ret;
}
void vx_vsi_uptime(struct timespec *uptime, struct timespec *idle)
{
struct vx_info *vxi = current->vx_info;
- struct timeval bias;
-
- jiffies_to_timeval(vxi->cvirt.bias_jiffies - INITIAL_JIFFIES, &bias);
set_normalized_timespec(uptime,
- uptime->tv_sec - bias.tv_sec,
- uptime->tv_nsec - bias.tv_usec*1000);
+ uptime->tv_sec - vxi->cvirt.bias_tp.tv_sec,
+ uptime->tv_nsec - vxi->cvirt.bias_tp.tv_nsec);
if (!idle)
return;
set_normalized_timespec(idle,
{
struct dl_info *new = NULL;
- vxdprintk(VXD_CBIT(dlim, 5),
- "alloc_dl_info(%p,%d)*", sb, xid);
+ vxdprintk("alloc_dl_info(%p,%d)\n", sb, xid);
/* would this benefit from a slab cache? */
new = kmalloc(sizeof(struct dl_info), GFP_KERNEL);
/* rest of init goes here */
- vxdprintk(VXD_CBIT(dlim, 4),
- "alloc_dl_info(%p,%d) = %p", sb, xid, new);
+ vxdprintk("alloc_dl_info(%p,%d) = %p\n", sb, xid, new);
return new;
}
static void __dealloc_dl_info(struct dl_info *dli)
{
- vxdprintk(VXD_CBIT(dlim, 4),
- "dealloc_dl_info(%p)", dli);
+ vxdprintk("dealloc_dl_info(%p)\n", dli);
dli->dl_hlist.next = LIST_POISON1;
dli->dl_xid = -1;
static inline unsigned int __hashval(struct super_block *sb, xid_t xid)
{
- return ((xid ^ (unsigned long)sb) % DL_HASH_SIZE);
+ return ((xid ^ (unsigned int)sb) % DL_HASH_SIZE);
}
{
struct hlist_head *head;
- vxdprintk(VXD_CBIT(dlim, 6),
- "__hash_dl_info: %p[#%d]", dli, dli->dl_xid);
+ vxdprintk("__hash_dl_info: %p[#%d]\n", dli, dli->dl_xid);
get_dl_info(dli);
head = &dl_info_hash[__hashval(dli->dl_sb, dli->dl_xid)];
hlist_add_head_rcu(&dli->dl_hlist, head);
static inline void __unhash_dl_info(struct dl_info *dli)
{
- vxdprintk(VXD_CBIT(dlim, 6),
- "__unhash_dl_info: %p[#%d]", dli, dli->dl_xid);
+ vxdprintk("__unhash_dl_info: %p[#%d]\n", dli, dli->dl_xid);
hlist_del_rcu(&dli->dl_hlist);
put_dl_info(dli);
}
rcu_read_lock();
dli = get_dl_info(__lookup_dl_info(sb, xid));
- vxdprintk(VXD_CBIT(dlim, 7),
- "locate_dl_info(%p,#%d) = %p", sb, xid, dli);
rcu_read_unlock();
return dli;
}
void rcu_free_dl_info(struct rcu_head *head)
{
- struct dl_info *dli = container_of(head, struct dl_info, dl_rcu);
+ struct dl_info *dli = container_of(head, struct dl_info, dl_rcu);
int usecnt, refcnt;
BUG_ON(!dli || !head);
refcnt = atomic_read(&dli->dl_refcnt);
BUG_ON(refcnt < 0);
- vxdprintk(VXD_CBIT(dlim, 3),
- "rcu_free_dl_info(%p)", dli);
if (!usecnt)
__dealloc_dl_info(dli);
else
return;
}
-#include <linux/module.h>
-
-EXPORT_SYMBOL_GPL(locate_dl_info);
-EXPORT_SYMBOL_GPL(rcu_free_dl_info);
-// EXPORT_SYMBOL_GPL(dl_info_hash_lock);
-// EXPORT_SYMBOL_GPL(unhash_dl_info);
-
{
int ret = 0;
-#ifdef CONFIG_VSERVER_DEBUG
vserver_register_sysctl();
-#endif
return ret;
}
static void __exit exit_vserver(void)
{
-#ifdef CONFIG_VSERVER_DEBUG
vserver_unregister_sysctl();
-#endif
return;
}
return ret;
}
-int vc_iattr_ioctl(struct dentry *de, unsigned int cmd, unsigned long arg)
-{
- void __user *data = (void __user *)arg;
- struct vcmd_ctx_iattr_v1 vc_data;
- int ret;
-
- /*
- * I don't think we need any dget/dput pairs in here as long as
- * this function is always called from sys_ioctl i.e., de is
- * a field of a struct file that is guaranteed not to be freed.
- */
- if (cmd == FIOC_SETIATTR) {
- if (!capable(CAP_SYS_ADMIN) || !capable(CAP_LINUX_IMMUTABLE))
- return -EPERM;
- if (copy_from_user (&vc_data, data, sizeof(vc_data)))
- return -EFAULT;
- ret = __vc_set_iattr(de,
- &vc_data.xid, &vc_data.flags, &vc_data.mask);
- }
- else {
- if (!vx_check(0, VX_ADMIN))
- return -ENOSYS;
- ret = __vc_get_iattr(de->d_inode,
- &vc_data.xid, &vc_data.flags, &vc_data.mask);
- }
-
- if (!ret && copy_to_user (data, &vc_data, sizeof(vc_data)))
- ret = -EFAULT;
- return ret;
-}
-
#ifdef CONFIG_VSERVER_LEGACY
#include <linux/proc_fs.h>
#include <asm/uaccess.h>
-const char *vlimit_name[NUM_LIMITS] = {
- [RLIMIT_CPU] = "CPU",
- [RLIMIT_RSS] = "RSS",
- [RLIMIT_NPROC] = "NPROC",
- [RLIMIT_NOFILE] = "NOFILE",
- [RLIMIT_MEMLOCK] = "VML",
- [RLIMIT_AS] = "VM",
- [RLIMIT_LOCKS] = "LOCKS",
- [RLIMIT_MSGQUEUE] = "MSGQ",
- [VLIMIT_NSOCK] = "NSOCK",
-};
-
-
static int is_valid_rlimit(int id)
{
int valid = 0;
{
struct nx_info *new = NULL;
- vxdprintk(VXD_CBIT(nid, 1), "alloc_nx_info(%d)*", nid);
+ nxdprintk("alloc_nx_info()\n");
/* would this benefit from a slab cache? */
new = kmalloc(sizeof(struct nx_info), GFP_KERNEL);
/* rest of init goes here */
- vxdprintk(VXD_CBIT(nid, 0),
- "alloc_nx_info() = %p", new);
+ nxdprintk("alloc_nx_info() = %p\n", new);
return new;
}
static void __dealloc_nx_info(struct nx_info *nxi)
{
- vxdprintk(VXD_CBIT(nid, 0),
- "dealloc_nx_info(%p)", nxi);
+ nxdprintk("dealloc_nx_info(%p)\n", nxi);
nxi->nx_hlist.next = LIST_POISON1;
nxi->nx_id = -1;
{
struct hlist_head *head;
- vxdprintk(VXD_CBIT(nid, 4),
- "__hash_nx_info: %p[#%d]", nxi, nxi->nx_id);
+ nxdprintk("__hash_nx_info: %p[#%d]\n", nxi, nxi->nx_id);
get_nx_info(nxi);
head = &nx_info_hash[__hashval(nxi->nx_id)];
hlist_add_head_rcu(&nxi->nx_hlist, head);
static inline void __unhash_nx_info(struct nx_info *nxi)
{
- vxdprintk(VXD_CBIT(nid, 4),
- "__unhash_nx_info: %p[#%d]", nxi, nxi->nx_id);
+ nxdprintk("__unhash_nx_info: %p[#%d]\n", nxi, nxi->nx_id);
hlist_del_rcu(&nxi->nx_hlist);
put_nx_info(nxi);
}
do {
if (++seq > MAX_N_CONTEXT)
seq = MIN_D_CONTEXT;
- if (!__lookup_nx_info(seq)) {
- vxdprintk(VXD_CBIT(nid, 4),
- "__nx_dynamic_id: [#%d]", seq);
+ if (!__lookup_nx_info(seq))
return seq;
- }
} while (barrier != seq);
return 0;
}
{
struct nx_info *new, *nxi = NULL;
- vxdprintk(VXD_CBIT(nid, 1), "loc_nx_info(%d)*", id);
+ nxdprintk("loc_nx_info(%d)\n", id);
if (!(new = __alloc_nx_info(id))) {
*err = -ENOMEM;
else if ((nxi = __lookup_nx_info(id))) {
/* context in setup is not available */
if (nxi->nx_flags & VXF_STATE_SETUP) {
- vxdprintk(VXD_CBIT(nid, 0),
- "loc_nx_info(%d) = %p (not available)", id, nxi);
+ nxdprintk("loc_nx_info(%d) = %p (not available)\n", id, nxi);
nxi = NULL;
*err = -EBUSY;
} else {
- vxdprintk(VXD_CBIT(nid, 0),
- "loc_nx_info(%d) = %p (found)", id, nxi);
+ nxdprintk("loc_nx_info(%d) = %p (found)\n", id, nxi);
get_nx_info(nxi);
*err = 0;
}
}
/* new context requested */
- vxdprintk(VXD_CBIT(nid, 0),
- "loc_nx_info(%d) = %p (new)", id, new);
+ nxdprintk("loc_nx_info(%d) = %p (new)\n", id, new);
__hash_nx_info(get_nx_info(new));
nxi = new, new = NULL;
*err = 1;
void rcu_free_nx_info(struct rcu_head *head)
{
- struct nx_info *nxi = container_of(head, struct nx_info, nx_rcu);
+ struct nx_info *nxi = container_of(head, struct nx_info, nx_rcu);
int usecnt, refcnt;
BUG_ON(!nxi || !head);
refcnt = atomic_read(&nxi->nx_refcnt);
BUG_ON(refcnt < 0);
- vxdprintk(VXD_CBIT(nid, 3),
- "rcu_free_nx_info(%p): uc=%d", nxi, usecnt);
if (!usecnt)
__dealloc_nx_info(nxi);
else
struct nx_info *new;
int err;
- vxdprintk(VXD_CBIT(nid, 5), "create_nx_info(%s)", "void");
+ nxdprintk("create_nx_info()\n");
if (!(new = __loc_nx_info(NX_DYNAMIC_ID, &err)))
return NULL;
return new;
if (!p || !nxi)
BUG();
- vxdprintk(VXD_CBIT(nid, 5),
- "nx_migrate_task(%p,%p[#%d.%d.%d])",
+ nxdprintk("nx_migrate_task(%p,%p[#%d.%d.%d])\n",
p, nxi, nxi->nx_id,
atomic_read(&nxi->nx_usecnt),
atomic_read(&nxi->nx_refcnt));
#include <linux/vserver/dlimit.h>
+extern unsigned int vx_debug_switch;
+
+
extern asmlinkage long
sys_vserver(uint32_t cmd, uint32_t id, void __user *data)
{
- vxdprintk(VXD_CBIT(switch, 0),
- "vc: VCMD_%02d_%d[%d], %d",
- VC_CATEGORY(cmd), VC_COMMAND(cmd),
- VC_VERSION(cmd), id);
+ if (vx_debug_switch)
+ printk( "vc: VCMD_%02d_%d[%d], %d\n",
+ VC_CATEGORY(cmd), VC_COMMAND(cmd),
+ VC_VERSION(cmd), id);
switch (cmd) {
case VCMD_get_version:
enum {
CTL_DEBUG_SWITCH = 1,
- CTL_DEBUG_XID,
- CTL_DEBUG_NID,
- CTL_DEBUG_NET,
CTL_DEBUG_LIMIT,
- CTL_DEBUG_DLIM,
- CTL_DEBUG_CVIRT,
+ CTL_DEBUG_DLIMIT,
};
unsigned int vx_debug_switch = 0;
-unsigned int vx_debug_xid = 0;
-unsigned int vx_debug_nid = 0;
-unsigned int vx_debug_net = 0;
unsigned int vx_debug_limit = 0;
-unsigned int vx_debug_dlim = 0;
-unsigned int vx_debug_cvirt = 0;
+unsigned int vx_debug_dlimit = 0;
static struct ctl_table_header *vserver_table_header;
static int proc_dodebug(ctl_table *table, int write,
- struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos)
+ struct file *file, void *buffer, size_t *lenp)
{
char tmpbuf[20], *p, c;
unsigned int value;
size_t left, len;
- if ((*ppos && !write) || !*lenp) {
+ if ((file->f_pos && !write) || !*lenp) {
*lenp = 0;
return 0;
}
done:
*lenp -= left;
- *ppos += *lenp;
+ file->f_pos += *lenp;
return 0;
}
.mode = 0644,
.proc_handler = &proc_dodebug
},
- {
- .ctl_name = CTL_DEBUG_XID,
- .procname = "debug_xid",
- .data = &vx_debug_xid,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dodebug
- },
- {
- .ctl_name = CTL_DEBUG_NID,
- .procname = "debug_nid",
- .data = &vx_debug_nid,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dodebug
- },
- {
- .ctl_name = CTL_DEBUG_NET,
- .procname = "debug_net",
- .data = &vx_debug_net,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dodebug
- },
{
.ctl_name = CTL_DEBUG_LIMIT,
.procname = "debug_limit",
.proc_handler = &proc_dodebug
},
{
- .ctl_name = CTL_DEBUG_DLIM,
- .procname = "debug_dlim",
- .data = &vx_debug_dlim,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dodebug
- },
- {
- .ctl_name = CTL_DEBUG_CVIRT,
- .procname = "debug_cvirt",
- .data = &vx_debug_cvirt,
+ .ctl_name = CTL_DEBUG_DLIMIT,
+ .procname = "debug_dlimit",
+ .data = &vx_debug_dlimit,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &proc_dodebug
{ .ctl_name = 0 }
};
-
-EXPORT_SYMBOL_GPL(vx_debug_dlim);
-EXPORT_SYMBOL_GPL(vx_debug_nid);
-EXPORT_SYMBOL_GPL(vx_debug_xid);
-
/* nothing */;
return tmp;
}
-EXPORT_SYMBOL(strcpy);
#endif
#ifndef __HAVE_ARCH_STRNCPY
}
return dest;
}
-EXPORT_SYMBOL(strncpy);
#endif
#ifndef __HAVE_ARCH_STRLCPY
return tmp;
}
-EXPORT_SYMBOL(strcat);
#endif
#ifndef __HAVE_ARCH_STRNCAT
return tmp;
}
-EXPORT_SYMBOL(strncat);
#endif
#ifndef __HAVE_ARCH_STRLCAT
return __res;
}
-EXPORT_SYMBOL(strcmp);
#endif
#ifndef __HAVE_ARCH_STRNCMP
return __res;
}
-EXPORT_SYMBOL(strncmp);
#endif
#ifndef __HAVE_ARCH_STRCHR
return NULL;
return (char *) s;
}
-EXPORT_SYMBOL(strchr);
#endif
#ifndef __HAVE_ARCH_STRRCHR
} while (--p >= s);
return NULL;
}
-EXPORT_SYMBOL(strrchr);
#endif
#ifndef __HAVE_ARCH_STRNCHR
return (char *) s;
return NULL;
}
-EXPORT_SYMBOL(strnchr);
#endif
#ifndef __HAVE_ARCH_STRLEN
/* nothing */;
return sc - s;
}
-EXPORT_SYMBOL(strlen);
#endif
#ifndef __HAVE_ARCH_STRNLEN
/* nothing */;
return sc - s;
}
-EXPORT_SYMBOL(strnlen);
#endif
#ifndef __HAVE_ARCH_STRSPN
return count;
}
-EXPORT_SYMBOL(strcspn);
#ifndef __HAVE_ARCH_STRPBRK
/**
}
return NULL;
}
-EXPORT_SYMBOL(strpbrk);
#endif
#ifndef __HAVE_ARCH_STRSEP
return s;
}
-EXPORT_SYMBOL(memset);
#endif
#ifndef __HAVE_ARCH_BCOPY
while (count--)
*dest++ = *src++;
}
-EXPORT_SYMBOL(bcopy);
#endif
#ifndef __HAVE_ARCH_MEMCPY
return dest;
}
-EXPORT_SYMBOL(memcpy);
#endif
#ifndef __HAVE_ARCH_MEMMOVE
return dest;
}
-EXPORT_SYMBOL(memmove);
#endif
#ifndef __HAVE_ARCH_MEMCMP
break;
return res;
}
-EXPORT_SYMBOL(memcmp);
#endif
#ifndef __HAVE_ARCH_MEMSCAN
}
return (void *) p;
}
-EXPORT_SYMBOL(memscan);
#endif
#ifndef __HAVE_ARCH_STRSTR
}
return NULL;
}
-EXPORT_SYMBOL(strstr);
#endif
#ifndef __HAVE_ARCH_MEMCHR
}
return NULL;
}
-EXPORT_SYMBOL(memchr);
+
#endif
qualifier = -1;
if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' ||
*fmt == 'Z' || *fmt == 'z') {
- qualifier = *fmt++;
- if (unlikely(qualifier == *fmt)) {
- if (qualifier == 'h') {
- qualifier = 'H';
- fmt++;
- } else if (qualifier == 'l') {
- qualifier = 'L';
- fmt++;
- }
- }
+ qualifier = *fmt;
+ fmt++;
}
base = 10;
is_sign = 0;
break;
switch(qualifier) {
- case 'H': /* that's 'hh' in format */
- if (is_sign) {
- signed char *s = (signed char *) va_arg(args,signed char *);
- *s = (signed char) simple_strtol(str,&next,base);
- } else {
- unsigned char *s = (unsigned char *) va_arg(args, unsigned char *);
- *s = (unsigned char) simple_strtoul(str, &next, base);
- }
- break;
case 'h':
if (is_sign) {
short *s = (short *) va_arg(args,short *);
mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \
shmem.o vmalloc.o
-obj-y := bootmem.o filemap.o mempool.o fadvise.o \
+obj-y := bootmem.o filemap.o mempool.o oom_kill.o fadvise.o \
page_alloc.o page-writeback.o pdflush.o prio_tree.o \
readahead.o slab.o swap.o truncate.o vmscan.o \
$(mmu-y)
-obj-$(CONFIG_OOM_KILL) += oom_kill.o
-obj-$(CONFIG_OOM_PANIC) += oom_panic.o
-obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o thrash.o
+obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o
obj-$(CONFIG_X86_4G) += usercopy.o
obj-$(CONFIG_HUGETLBFS) += hugetlb.o
-obj-$(CONFIG_PROC_MM) += proc_mm.o
obj-$(CONFIG_NUMA) += mempolicy.o
+obj-$(CONFIG_PROC_MM) += proc_mm.o
*/
unsigned long max_low_pfn;
unsigned long min_low_pfn;
-EXPORT_SYMBOL(min_low_pfn);
unsigned long max_pfn;
EXPORT_SYMBOL(max_pfn); /* This is exported so
{
struct page *page;
+ /*
+ * We scan the hash list read-only. Addition to and removal from
+ * the hash-list needs a held write-lock.
+ */
spin_lock_irq(&mapping->tree_lock);
page = radix_tree_lookup(&mapping->page_tree, offset);
if (page)
struct file * filp,
loff_t *ppos,
read_descriptor_t * desc,
- read_actor_t actor,
- int nonblock)
+ read_actor_t actor)
{
struct inode *inode = mapping->host;
unsigned long index, end_index, offset;
find_page:
page = find_get_page(mapping, index);
if (unlikely(page == NULL)) {
- if (nonblock) {
- desc->error = -EWOULDBLOCKIO;
- break;
- }
handle_ra_miss(mapping, &ra, index);
goto no_cached_page;
}
- if (!PageUptodate(page)) {
- if (nonblock) {
- page_cache_release(page);
- desc->error = -EWOULDBLOCKIO;
- break;
- }
+ if (!PageUptodate(page))
goto page_not_up_to_date;
- }
page_ok:
/* nr is the maximum number of bytes to copy from this page */
nr = PAGE_CACHE_SIZE;
if (desc.count == 0)
continue;
desc.error = 0;
- do_generic_file_read(filp,ppos,&desc,file_read_actor,0);
+ do_generic_file_read(filp,ppos,&desc,file_read_actor);
retval += desc.written;
if (!retval) {
retval = desc.error;
desc.arg.data = target;
desc.error = 0;
- do_generic_file_read(in_file, ppos, &desc, actor, 0);
+ do_generic_file_read(in_file, ppos, &desc, actor);
if (desc.written)
return desc.written;
return desc.error;
* effect.
*/
error = page_cache_read(file, pgoff);
- grab_swap_token();
/*
* The page we want has now been added to the page cache.
return err;
}
} else {
- err = install_file_pte(mm, vma, addr, pgoff, prot);
- if (err)
- return err;
+ /*
+ * If a nonlinear mapping then store the file page offset
+ * in the pte.
+ */
+ if (pgoff != linear_page_index(vma, addr)) {
+ err = install_file_pte(mm, vma, addr, pgoff, prot);
+ if (err)
+ return err;
+ }
}
len -= PAGE_SIZE;
if (err)
goto out;
- inode_update_time(inode, file->f_vfsmnt, 1);
+ inode_update_time(inode, 1);
/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
if (unlikely(file->f_flags & O_DIRECT)) {
int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, struct page *page, pgprot_t prot)
{
- struct inode *inode;
- pgoff_t size;
int err = -ENOMEM;
pte_t *pte;
pgd_t *pgd;
if (!pte)
goto err_unlock;
- /*
- * This page may have been truncated. Tell the
- * caller about it.
- */
- err = -EINVAL;
- inode = vma->vm_file->f_mapping->host;
- size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
- if (!page->mapping || page->index >= size)
- goto err_unlock;
-
zap_pte(mm, vma, addr, pte);
// mm->rss++;
{
struct bio *bio_orig = bio->bi_private;
struct bio_vec *bvec, *org_vec;
- int i, err = 0;
+ int i;
if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
- err = -EIO;
+ goto out_eio;
+
+ set_bit(BIO_UPTODATE, &bio_orig->bi_flags);
/*
* free up bounce indirect pages used
mempool_free(bvec->bv_page, pool);
}
- bio_endio(bio_orig, bio_orig->bi_size, err);
+out_eio:
+ bio_endio(bio_orig, bio_orig->bi_size, 0);
bio_put(bio);
}
#ifdef CONFIG_SYSCTL
int hugetlb_sysctl_handler(struct ctl_table *table, int write,
struct file *file, void __user *buffer,
- size_t *length, loff_t *ppos)
+ size_t *length)
{
- proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
+ proc_doulongvec_minmax(table, write, file, buffer, length);
max_huge_pages = set_max_huge_pages(max_huge_pages);
return 0;
}
goto out;
if (pmd_huge(*pmd))
return follow_huge_pmd(mm, address, pmd, write);
- if (pmd_bad(*pmd))
+ if (unlikely(pmd_bad(*pmd)))
goto out;
ptep = pte_offset_map(pmd, address);
if (pte_present(pte)) {
if (write && !pte_write(pte))
goto out;
- if (write && !pte_dirty(pte)) {
- struct page *page = pte_page(pte);
- if (!PageDirty(page))
- set_page_dirty(page);
- }
pfn = pte_pfn(pte);
if (pfn_valid(pfn)) {
- struct page *page = pfn_to_page(pfn);
-
+ page = pfn_to_page(pfn);
+ if (write && !pte_dirty(pte) && !PageDirty(page))
+ set_page_dirty(page);
mark_page_accessed(page);
return page;
} else {
pte_t *pte;
if (write) /* user gate pages are read-only */
return i ? : -EFAULT;
- pgd = pgd_offset_gate(mm, pg);
+ pgd = pgd_offset(mm, pg);
if (!pgd)
return i ? : -EFAULT;
pmd = pmd_offset(pgd, pg);
/* Had to read the page from swap area: Major fault */
ret = VM_FAULT_MAJOR;
inc_page_state(pgmajfault);
- grab_swap_token();
}
if (!vx_rsspages_avail(mm, 1)) {
pte_t entry;
struct page * page = ZERO_PAGE(addr);
+ if (!vx_rsspages_avail(mm, 1)) {
+ spin_unlock(&mm->page_table_lock);
+ return VM_FAULT_OOM;
+ }
+
/* Read-only mapping of ZERO_PAGE. */
entry = pte_wrprotect(mk_pte(ZERO_PAGE(addr), vma->vm_page_prot));
if (unlikely(anon_vma_prepare(vma)))
goto no_mem;
- if (!vx_rsspages_avail(mm, 1))
- goto no_mem;
-
page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
if (!page)
goto no_mem;
*/
/* Only go through if we didn't race with anybody else... */
if (pte_none(*page_table)) {
- if (!PageReserved(new_page))
- //++mm->rss;
- vx_rsspages_inc(mm);
+ if (!PageReserved(new_page))
+ // ++mm->rss;
+ vx_rsspages_inc(mm);
flush_icache_page(vma, new_page);
entry = mk_pte(new_page, vma->vm_page_prot);
if (write_access)
ret = make_pages_present(start, end);
}
- // vma->vm_mm->locked_vm -= pages;
- vx_vmlocked_sub(vma->vm_mm, pages);
+ vma->vm_mm->locked_vm -= pages;
out:
return ret;
}
ret = -ENOMEM;
if (!vx_vmlocked_avail(current->mm, current->mm->total_vm))
goto out;
+ /* check vserver lock limits? */
if ((current->mm->total_vm <= lock_limit) || capable(CAP_IPC_LOCK))
ret = do_mlockall(flags);
out:
up_write(¤t->mm->mmap_sem);
return ret;
}
-
-/*
- * Objects with different lifetime than processes (SHM_LOCK and SHM_HUGETLB
- * shm segments) get accounted against the user_struct instead.
- */
-static spinlock_t shmlock_user_lock = SPIN_LOCK_UNLOCKED;
-
-int user_shm_lock(size_t size, struct user_struct *user)
-{
- unsigned long lock_limit, locked;
- int allowed = 0;
-
- spin_lock(&shmlock_user_lock);
- locked = size >> PAGE_SHIFT;
- lock_limit = current->rlim[RLIMIT_MEMLOCK].rlim_cur;
- lock_limit >>= PAGE_SHIFT;
- if (locked + user->locked_shm > lock_limit && !capable(CAP_IPC_LOCK))
- goto out;
- get_uid(user);
- user->locked_shm += locked;
- allowed = 1;
-out:
- spin_unlock(&shmlock_user_lock);
- return allowed;
-}
-
-void user_shm_unlock(size_t size, struct user_struct *user)
-{
- spin_lock(&shmlock_user_lock);
- user->locked_shm -= (size >> PAGE_SHIFT);
- spin_unlock(&shmlock_user_lock);
- free_uid(user);
-}
#include <linux/mount.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
-#include <linux/random.h>
#include <asm/uaccess.h>
#include <asm/cacheflush.h>
int accountable = 1;
unsigned long charged = 0;
- /*
- * Does the application expect PROT_READ to imply PROT_EXEC:
- */
- if (unlikely((prot & PROT_READ) &&
- (current->personality & READ_IMPLIES_EXEC)))
- prot |= PROT_EXEC;
-
if (file) {
if (is_file_hugepages(file))
accountable = 0;
/* Obtain the address to map to. we verify (or select) it and ensure
* that it represents a valid section of the address space.
*/
- addr = get_unmapped_area_prot(file, addr, len, pgoff, flags, prot & PROT_EXEC);
+ addr = get_unmapped_area(file, addr, len, pgoff, flags, prot & PROT_EXEC);
if (addr & ~PAGE_MASK)
return addr;
* This function "knows" that -ENOMEM has the bits set.
*/
#ifndef HAVE_ARCH_UNMAPPED_AREA
-unsigned long
+static inline unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
- unsigned long len, unsigned long pgoff, unsigned long flags)
+ unsigned long len, unsigned long pgoff, unsigned long flags, unsigned long exec)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
addr = vma->vm_end;
}
}
+#else
+extern unsigned long
+arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
+ unsigned long, unsigned long, unsigned long);
#endif
-void arch_unmap_area(struct vm_area_struct *area)
-{
- /*
- * Is this a new hole at the lowest possible address?
- */
- if (area->vm_start >= TASK_UNMAPPED_BASE &&
- area->vm_start < area->vm_mm->free_area_cache)
- area->vm_mm->free_area_cache = area->vm_start;
-}
-
-/*
- * This mmap-allocator allocates new areas top-down from below the
- * stack's low limit (the base):
- */
-unsigned long
-arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
- const unsigned long len, const unsigned long pgoff,
- const unsigned long flags)
-{
- struct vm_area_struct *vma, *prev_vma;
- struct mm_struct *mm = current->mm;
- unsigned long base = mm->mmap_base, addr = addr0;
- int first_time = 1;
-
- /* requested length too big for entire address space */
- if (len > TASK_SIZE)
- return -ENOMEM;
-
- /* dont allow allocations above current base */
- if (mm->free_area_cache > base)
- mm->free_area_cache = base;
-
- /* requesting a specific address */
- if (addr) {
- addr = PAGE_ALIGN(addr);
- vma = find_vma(mm, addr);
- if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
- return addr;
- }
-
-try_again:
- /* make sure it can fit in the remaining address space */
- if (mm->free_area_cache < len)
- goto fail;
-
- /* either no address requested or cant fit in requested address hole */
- addr = (mm->free_area_cache - len) & PAGE_MASK;
- do {
- /*
- * Lookup failure means no vma is above this address,
- * i.e. return with success:
- */
- if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
- return addr;
-
- /*
- * new region fits between prev_vma->vm_end and
- * vma->vm_start, use it:
- */
- if (addr+len <= vma->vm_start &&
- (!prev_vma || (addr >= prev_vma->vm_end)))
- /* remember the address as a hint for next time */
- return (mm->free_area_cache = addr);
- else
- /* pull free_area_cache down to the first hole */
- if (mm->free_area_cache == vma->vm_end)
- mm->free_area_cache = vma->vm_start;
-
- /* try just below the current vma->vm_start */
- addr = vma->vm_start-len;
- } while (len <= vma->vm_start);
-
-fail:
- /*
- * if hint left us with no space for the requested
- * mapping then try again:
- */
- if (first_time) {
- mm->free_area_cache = base;
- first_time = 0;
- goto try_again;
- }
- /*
- * A failed mmap() very likely causes application failure,
- * so fall back to the bottom-up function here. This scenario
- * can happen with large stack limits and large mmap()
- * allocations.
- */
- mm->free_area_cache = TASK_UNMAPPED_BASE;
- addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
- /*
- * Restore the topdown base:
- */
- mm->free_area_cache = base;
-
- return addr;
-}
-
-void arch_unmap_area_topdown(struct vm_area_struct *area)
-{
- /*
- * Is this a new hole at the highest possible address?
- */
- if (area->vm_end > area->vm_mm->free_area_cache)
- area->vm_mm->free_area_cache = area->vm_end;
-}
-
-
unsigned long
-get_unmapped_area_prot(struct file *file, unsigned long addr, unsigned long len,
- unsigned long pgoff, unsigned long flags, int exec)
+get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
+ unsigned long pgoff, unsigned long flags, unsigned long exec)
{
if (flags & MAP_FIXED) {
unsigned long ret;
return file->f_op->get_unmapped_area(file, addr, len,
pgoff, flags);
- if (exec && current->mm->get_unmapped_exec_area)
- return current->mm->get_unmapped_exec_area(file, addr, len, pgoff, flags);
- else
- return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
+ return arch_get_unmapped_area(file, addr, len, pgoff, flags, exec);
}
-EXPORT_SYMBOL(get_unmapped_area_prot);
-
-
-#define SHLIB_BASE 0x00111000
-
-unsigned long arch_get_unmapped_exec_area(struct file *filp, unsigned long addr0,
- unsigned long len0, unsigned long pgoff, unsigned long flags)
-{
- unsigned long addr = addr0, len = len0;
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
- unsigned long tmp;
-
- if (len > TASK_SIZE)
- return -ENOMEM;
-
- if (!addr && !(flags & MAP_FIXED))
- addr = randomize_range(SHLIB_BASE, 0x01000000, len);
-
- if (addr) {
- addr = PAGE_ALIGN(addr);
- vma = find_vma(mm, addr);
- if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start)) {
- return addr;
- }
- }
-
- addr = SHLIB_BASE;
-
- for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
- /* At this point: (!vma || addr < vma->vm_end). */
- if (TASK_SIZE - len < addr) {
- return -ENOMEM;
- }
- if (!vma || addr + len <= vma->vm_start) {
- /*
- * Must not let a PROT_EXEC mapping get into the
- * brk area:
- */
- if (addr + len > mm->brk)
- goto failed;
-
- /*
- * Up until the brk area we randomize addresses
- * as much as possible:
- */
- if (addr >= 0x01000000) {
- tmp = randomize_range(0x01000000, mm->brk, len);
- vma = find_vma(mm, tmp);
- if (TASK_SIZE - len >= tmp &&
- (!vma || tmp + len <= vma->vm_start))
- return tmp;
- }
- /*
- * Ok, randomization didnt work out - return
- * the result of the linear search:
- */
- return addr;
- }
- addr = vma->vm_end;
- }
-
-failed:
- return current->mm->get_unmapped_area(filp, addr0, len0, pgoff, flags);
-}
-
-
+EXPORT_SYMBOL(get_unmapped_area);
/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr)
address &= PAGE_MASK;
grow = (address - vma->vm_end) >> PAGE_SHIFT;
- /* Overcommit.. vx check first to avoid vm_unacct_memory() */
- if (!vx_vmpages_avail(vma->vm_mm, grow) ||
- security_vm_enough_memory(grow)) {
+ /* Overcommit.. */
+ if (security_vm_enough_memory(grow) ||
+ !vx_vmpages_avail(vma->vm_mm, grow)) {
anon_vma_unlock(vma);
return -ENOMEM;
}
vm_unacct_memory(grow);
return -ENOMEM;
}
+
vma->vm_end = address;
// vma->vm_mm->total_vm += grow;
vx_vmpages_add(vma->vm_mm, grow);
address &= PAGE_MASK;
grow = (vma->vm_start - address) >> PAGE_SHIFT;
- /* Overcommit.. vx check first to avoid vm_unacct_memory() */
- if (!vx_vmpages_avail(vma->vm_mm, grow) ||
- security_vm_enough_memory(grow)) {
+ /* Overcommit.. */
+ if (security_vm_enough_memory(grow) ||
+ !vx_vmpages_avail(vma->vm_mm, grow)) {
anon_vma_unlock(vma);
return -ENOMEM;
}
vm_unacct_memory(grow);
return -ENOMEM;
}
+
vma->vm_start = address;
vma->vm_pgoff -= grow;
// vma->vm_mm->total_vm += grow;
static void unmap_vma(struct mm_struct *mm, struct vm_area_struct *area)
{
size_t len = area->vm_end - area->vm_start;
+ unsigned long old_end = area->vm_end;
// area->vm_mm->total_vm -= len >> PAGE_SHIFT;
vx_vmpages_sub(area->vm_mm, len >> PAGE_SHIFT);
if (area->vm_flags & VM_LOCKED)
// area->vm_mm->locked_vm -= len >> PAGE_SHIFT;
vx_vmlocked_sub(area->vm_mm, len >> PAGE_SHIFT);
- area->vm_mm->unmap_area(area);
+ /*
+ * Is this a new hole at the lowest possible address?
+ */
+ if (area->vm_start >= TASK_UNMAPPED_BASE &&
+ area->vm_start < area->vm_mm->free_area_cache)
+ area->vm_mm->free_area_cache = area->vm_start;
+ /*
+ * Is this a new hole at the highest possible address?
+ */
+ if (area->vm_start > area->vm_mm->non_executable_cache)
+ area->vm_mm->non_executable_cache = area->vm_start;
remove_vm_struct(area);
+ if (unlikely(area->vm_flags & VM_EXEC))
+ arch_remove_exec_range(mm, old_end);
}
/*
locked += len;
if (locked > lock_limit && !capable(CAP_IPC_LOCK))
return -EAGAIN;
- if (!vx_vmlocked_avail(mm, len >> PAGE_SHIFT))
- return -ENOMEM;
+ /* vserver checks ? */
}
/*
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
-#include <linux/personality.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
return -EINVAL;
if (end == start)
return 0;
- /*
- * Does the application expect PROT_READ to imply PROT_EXEC:
- */
- if (unlikely((prot & PROT_READ) &&
- (current->personality & READ_IMPLIES_EXEC)))
- prot |= PROT_EXEC;
vm_flags = calc_vm_prot_bits(prot);
ret = -EAGAIN;
if (locked > lock_limit && !capable(CAP_IPC_LOCK))
goto out;
- ret = -ENOMEM;
- if (!vx_vmlocked_avail(current->mm,
- (new_len - old_len) >> PAGE_SHIFT))
- goto out;
}
ret = -ENOMEM;
if ((current->mm->total_vm << PAGE_SHIFT) + (new_len - old_len)
vx_vmpages_add(current->mm, pages);
if (vma->vm_flags & VM_LOCKED) {
// current->mm->locked_vm += pages;
- vx_vmlocked_add(vma->vm_mm, pages);
+ vx_vmlocked_add(current->mm, pages);
make_pages_present(addr + old_len,
addr + new_len);
}
if (vma->vm_flags & VM_MAYSHARE)
map_flags |= MAP_SHARED;
- new_addr = get_unmapped_area_prot(vma->vm_file, 0, new_len,
- vma->vm_pgoff, map_flags, vma->vm_flags & VM_EXEC);
+ new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
+ vma->vm_pgoff, map_flags,
+ vma->vm_flags & VM_EXEC);
ret = new_addr;
if (new_addr & ~PAGE_MASK)
goto out;
/**
* out_of_memory - is the system out of memory?
*/
-void out_of_memory(int gfp_mask)
+void out_of_memory(void)
{
/*
* oom_lock protects out_of_memory()'s static variables.
*/
lastkill = now;
- printk("oom-killer: gfp_mask=0x%x\n", gfp_mask);
- show_free_areas();
-
/* oom_kill() sleeps */
spin_unlock(&oom_lock);
oom_kill();
+++ /dev/null
-/*
- * Just panic() instead of the default behavior of selecting processes
- * for death.
- *
- * Based on
- * Modular OOM handlers for 2.6.4 (C) 2003,2004 Tvrtko A. Ursulin
- * and
- * linux/mm/oom_kill.c (C) 1998,2000 Rik van Riel.
- *
- * Mark Huang <mlhuang@cs.princeton.edu>
- *
- * $Id$
- */
-
-#include <linux/mm.h>
-#include <linux/sched.h>
-#include <linux/swap.h>
-
-/**
- * out_of_memory - is the system out of memory?
- */
-void out_of_memory(int gfp_mask)
-{
- /*
- * oom_lock protects out_of_memory()'s static variables.
- * It's a global lock; this is not performance-critical.
- */
- static spinlock_t oom_lock = SPIN_LOCK_UNLOCKED;
- static unsigned long count;
-
- spin_lock(&oom_lock);
-
- /*
- * If we have gotten only a few failures,
- * we're not really oom.
- */
- if (++count < 10)
- goto out_unlock;
-
- /*
- * Ok, really out of memory. Panic.
- */
-
- printk("oom-killer: gfp_mask=0x%x\n", gfp_mask);
- show_free_areas();
-
- panic("Out Of Memory");
-
-out_unlock:
- spin_unlock(&oom_lock);
-}
* sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
*/
int dirty_writeback_centisecs_handler(ctl_table *table, int write,
- struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
+ struct file *file, void __user *buffer, size_t *length)
{
- proc_dointvec(table, write, file, buffer, length, ppos);
+ proc_dointvec(table, write, file, buffer, length);
if (dirty_writeback_centisecs) {
mod_timer(&wb_timer,
jiffies + (dirty_writeback_centisecs * HZ) / 100);
#include <linux/cpu.h>
#include <linux/vs_base.h>
#include <linux/vs_limit.h>
-#include <linux/ckrm_mem_inline.h>
#include <asm/tlbflush.h>
EXPORT_SYMBOL(totalram_pages);
EXPORT_SYMBOL(nr_swap_pages);
-#ifdef CONFIG_CRASH_DUMP_MODULE
-/* This symbol has to be exported to use 'for_each_pgdat' macro by modules. */
-EXPORT_SYMBOL(pgdat_list);
-#endif
-
/*
* Used by page_zone() to look up the address of the struct zone whose
* id is encoded in the upper bits of page->flags
page->mapcount = 0;
}
-#if !defined(CONFIG_HUGETLB_PAGE) && !defined(CONFIG_CRASH_DUMP) \
- && !defined(CONFIG_CRASH_DUMP_MODULE)
+#ifndef CONFIG_HUGETLB_PAGE
#define prep_compound_page(page, order) do { } while (0)
#define destroy_compound_page(page, order) do { } while (0)
#else
/* have to delete it as __free_pages_bulk list manipulates */
list_del(&page->lru);
__free_pages_bulk(page, base, zone, area, order);
- ckrm_clear_page_class(page);
ret++;
}
spin_unlock_irqrestore(&zone->lock, flags);
might_sleep_if(wait);
- if (!ckrm_class_limit_ok((GET_MEM_CLASS(current)))) {
- return NULL;
- }
-
zones = zonelist->zones; /* the list of zones suitable for gfp_mask */
if (zones[0] == NULL) /* no zones in the zonelist */
return NULL;
return NULL;
got_pg:
kernel_map_pages(page, 1 << order, 1);
- ckrm_set_pages_class(page, 1 << order, GET_MEM_CLASS(current));
return page;
}
EXPORT_SYMBOL(nr_free_pages);
+unsigned int nr_used_zone_pages(void)
+{
+ unsigned int pages = 0;
+ struct zone *zone;
+
+ for_each_zone(zone)
+ pages += zone->nr_active + zone->nr_inactive;
+
+ return pages;
+}
+
#ifdef CONFIG_NUMA
unsigned int nr_free_pages_pgdat(pg_data_t *pgdat)
{
for (i = 0; i < MAX_NR_ZONES; i++)
realtotalpages -= zholes_size[i];
pgdat->node_present_pages = realtotalpages;
- printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages);
+ printk("On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages);
}
INIT_LIST_HEAD(&page->lru);
#ifdef WANT_PAGE_VIRTUAL
/* The shift won't overflow because ZONE_NORMAL is below 4G. */
- if (!is_highmem_idx(zone))
+ if (!is_highmem(zone))
set_page_address(page, __va(start_pfn << PAGE_SHIFT));
#endif
start_pfn++;
pcp->batch = 1 * batch;
INIT_LIST_HEAD(&pcp->list);
}
- printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%lu\n",
+ printk(" %s zone: %lu pages, LIFO batch:%lu\n",
zone_names[j], realsize, batch);
INIT_LIST_HEAD(&zone->active_list);
INIT_LIST_HEAD(&zone->inactive_list);
* changes.
*/
int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
- struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
+ struct file *file, void __user *buffer, size_t *length)
{
- proc_dointvec(table, write, file, buffer, length, ppos);
+ proc_dointvec(table, write, file, buffer, length);
setup_per_zone_pages_min();
setup_per_zone_protection();
return 0;
* whenever sysctl_lower_zone_protection changes.
*/
int lower_zone_protection_sysctl_handler(ctl_table *table, int write,
- struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
+ struct file *file, void __user *buffer, size_t *length)
{
- proc_dointvec_minmax(table, write, file, buffer, length, ppos);
+ proc_dointvec_minmax(table, write, file, buffer, length);
setup_per_zone_protection();
return 0;
}
struct file *filp, unsigned long offset)
{
unsigned max;
+ unsigned min;
unsigned orig_next_size;
unsigned actual;
int first_access=0;
if (max == 0)
goto out; /* No readahead */
+ min = get_min_readahead(ra);
orig_next_size = ra->next_size;
if (ra->next_size == 0) {
* pages shall be accessed in the next
* current window.
*/
- average = ra->average;
- if (ra->serial_cnt > average)
- average = (ra->serial_cnt + ra->average + 1) / 2;
-
- ra->next_size = min(average , (unsigned long)max);
+ ra->next_size = min(ra->average , (unsigned long)max);
}
ra->start = offset;
ra->size = ra->next_size;
ra->size = max;
ra->ahead_start = 0;
ra->ahead_size = 0;
- ra->average = max / 2;
}
}
ra->prev_page = offset;
if (ptep_clear_flush_young(vma, address, pte))
referenced++;
- if (mm != current->mm && has_swap_token(mm))
- referenced++;
-
(*mapcount)--;
out_unmap:
* an exclusive swap page, do_wp_page will replace it by a copy
* page, and the user never get to see the data GUP was holding
* the original page for.
- *
- * This test is also useful for when swapoff (unuse_process) has
- * to drop page lock: its reference to the page stops existing
- * ptes from being unmapped, so swapoff can make progress.
*/
if (PageSwapCache(page) &&
page_count(page) != page->mapcount + 2) {
page_remove_rmap(page);
page_cache_release(page);
- // mm->rss--;
- vx_rsspages_dec(mm);
+ mm->rss--;
(*mapcount)--;
}
#include <asm/pgtable.h>
/* This magic number is used in glibc for posix shared memory */
+#define TMPFS_MAGIC 0x01021994
#define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long))
#define ENTRIES_PER_PAGEPAGE (ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)
struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
struct page *page = NULL;
swp_entry_t *entry;
+ static const swp_entry_t unswapped = { 0 };
if (sgp != SGP_WRITE &&
((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode))
while (!(entry = shmem_swp_entry(info, index, &page))) {
if (sgp == SGP_READ)
- return shmem_swp_map(ZERO_PAGE(0));
+ return (swp_entry_t *) &unswapped;
/*
* Test free_blocks against 1 not 0, since we have 1 data
* page (and perhaps indirect index pages) yet to allocate:
return err;
}
} else if (nonblock) {
- err = install_file_pte(mm, vma, addr, pgoff, prot);
- if (err)
- return err;
+ /*
+ * If a nonlinear mapping then store the file page
+ * offset in the pte.
+ */
+ if (pgoff != linear_page_index(vma, addr)) {
+ err = install_file_pte(mm, vma, addr, pgoff, prot);
+ if (err)
+ return err;
+ }
}
len -= PAGE_SIZE;
}
#endif
-int shmem_lock(struct file *file, int lock, struct user_struct *user)
+/* Protects current->user->locked_shm from concurrent access */
+static spinlock_t shmem_lock_user = SPIN_LOCK_UNLOCKED;
+
+int shmem_lock(struct file *file, int lock, struct user_struct * user)
{
struct inode *inode = file->f_dentry->d_inode;
struct shmem_inode_info *info = SHMEM_I(inode);
+ unsigned long lock_limit, locked;
int retval = -ENOMEM;
spin_lock(&info->lock);
+ spin_lock(&shmem_lock_user);
if (lock && !(info->flags & VM_LOCKED)) {
- if (!user_shm_lock(inode->i_size, user))
+ locked = inode->i_size >> PAGE_SHIFT;
+ locked += user->locked_shm;
+ lock_limit = current->rlim[RLIMIT_MEMLOCK].rlim_cur;
+ lock_limit >>= PAGE_SHIFT;
+ if ((locked > lock_limit) && !capable(CAP_IPC_LOCK))
goto out_nomem;
- info->flags |= VM_LOCKED;
+ /* for this branch user == current->user so it won't go away under us */
+ atomic_inc(&user->__count);
+ user->locked_shm = locked;
}
if (!lock && (info->flags & VM_LOCKED) && user) {
- user_shm_unlock(inode->i_size, user);
- info->flags &= ~VM_LOCKED;
+ locked = inode->i_size >> PAGE_SHIFT;
+ user->locked_shm -= locked;
+ free_uid(user);
}
+ if (lock)
+ info->flags |= VM_LOCKED;
+ else
+ info->flags &= ~VM_LOCKED;
retval = 0;
out_nomem:
+ spin_unlock(&shmem_lock_user);
spin_unlock(&info->lock);
return retval;
}
{
struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
- buf->f_type = TMPFS_SUPER_MAGIC;
+ buf->f_type = TMPFS_MAGIC;
buf->f_bsize = PAGE_CACHE_SIZE;
spin_lock(&sbinfo->stat_lock);
buf->f_blocks = sbinfo->max_blocks;
sb->s_maxbytes = SHMEM_MAX_BYTES;
sb->s_blocksize = PAGE_CACHE_SIZE;
sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
- sb->s_magic = TMPFS_SUPER_MAGIC;
+ sb->s_magic = TMPFS_MAGIC;
sb->s_op = &shmem_ops;
inode = shmem_get_inode(sb, S_IFDIR | mode, 0);
if (!inode)
/*
* This file contains the default values for the opereation of the
* Linux VM subsystem. Fine-tuning documentation can be found in
- * Documentation/sysctl/vm.txt.
+ * linux/Documentation/sysctl/vm.txt.
* Started 18.12.91
* Swap aging added 23.2.95, Stephen Tweedie.
* Buffermem limits added 12.3.98, Rik van Riel.
check_next_cluster:
if (offset+SWAPFILE_CLUSTER-1 <= si->highest_bit)
{
- unsigned long nr;
+ int nr;
for (nr = offset; nr < offset+SWAPFILE_CLUSTER; nr++)
if (si->swap_map[nr])
{
/*
* Go through process' page directory.
*/
- if (!down_read_trylock(&mm->mmap_sem)) {
- /*
- * Our reference to the page stops try_to_unmap_one from
- * unmapping its ptes, so swapoff can make progress.
- */
- unlock_page(page);
- down_read(&mm->mmap_sem);
- lock_page(page);
- }
+ down_read(&mm->mmap_sem);
spin_lock(&mm->page_table_lock);
for (vma = mm->mmap; vma; vma = vma->vm_next) {
if (!is_vm_hugetlb_page(vma)) {
+++ /dev/null
-/*
- * mm/thrash.c
- *
- * Copyright (C) 2004, Red Hat, Inc.
- * Copyright (C) 2004, Rik van Riel <riel@redhat.com>
- * Released under the GPL, see the file COPYING for details.
- *
- * Simple token based thrashing protection, using the algorithm
- * described in: http://www.cs.wm.edu/~sjiang/token.pdf
- */
-#include <linux/jiffies.h>
-#include <linux/mm.h>
-#include <linux/sched.h>
-#include <linux/swap.h>
-
-static spinlock_t swap_token_lock = SPIN_LOCK_UNLOCKED;
-static unsigned long swap_token_timeout;
-unsigned long swap_token_check;
-struct mm_struct * swap_token_mm = &init_mm;
-
-#define SWAP_TOKEN_CHECK_INTERVAL (HZ * 2)
-#define SWAP_TOKEN_TIMEOUT (HZ * 300)
-
-/*
- * Take the token away if the process had no page faults
- * in the last interval, or if it has held the token for
- * too long.
- */
-#define SWAP_TOKEN_ENOUGH_RSS 1
-#define SWAP_TOKEN_TIMED_OUT 2
-static int should_release_swap_token(struct mm_struct *mm)
-{
- int ret = 0;
- if (!mm->recent_pagein)
- ret = SWAP_TOKEN_ENOUGH_RSS;
- else if (time_after(jiffies, swap_token_timeout))
- ret = SWAP_TOKEN_TIMED_OUT;
- mm->recent_pagein = 0;
- return ret;
-}
-
-/*
- * Try to grab the swapout protection token. We only try to
- * grab it once every TOKEN_CHECK_INTERVAL, both to prevent
- * SMP lock contention and to check that the process that held
- * the token before is no longer thrashing.
- */
-void grab_swap_token(void)
-{
- struct mm_struct *mm;
- int reason;
-
- /* We have the token. Let others know we still need it. */
- if (has_swap_token(current->mm)) {
- current->mm->recent_pagein = 1;
- return;
- }
-
- if (time_after(jiffies, swap_token_check)) {
-
- /* Can't get swapout protection if we exceed our RSS limit. */
- // if (current->mm->rss > current->mm->rlimit_rss)
- // return;
-
- /* ... or if we recently held the token. */
- if (time_before(jiffies, current->mm->swap_token_time))
- return;
-
- if (!spin_trylock(&swap_token_lock))
- return;
-
- swap_token_check = jiffies + SWAP_TOKEN_CHECK_INTERVAL;
-
- mm = swap_token_mm;
- if ((reason = should_release_swap_token(mm))) {
- unsigned long eligible = jiffies;
- if (reason == SWAP_TOKEN_TIMED_OUT) {
- eligible += SWAP_TOKEN_TIMEOUT;
- }
- mm->swap_token_time = eligible;
- swap_token_timeout = jiffies + SWAP_TOKEN_TIMEOUT;
- swap_token_mm = current->mm;
- }
- spin_unlock(&swap_token_lock);
- }
- return;
-}
-
-/* Called on process exit. */
-void __put_swap_token(struct mm_struct *mm)
-{
- spin_lock(&swap_token_lock);
- if (likely(mm == swap_token_mm)) {
- swap_token_mm = &init_mm;
- swap_token_check = jiffies;
- }
- spin_unlock(&swap_token_lock);
-}
#include <asm/div64.h>
#include <linux/swapops.h>
-#include <linux/ckrm_mem.h>
-
-#ifndef AT_LIMIT_SUPPORT
-#warning "ckrm_at_limit disabled due to problems with memory hog tests -- seting ckrm_shrink_list_empty to true"
-#undef ckrm_shrink_list_empty
-#define ckrm_shrink_list_empty() (1)
-#endif
/* possible outcome of pageout() */
typedef enum {
/* This context's GFP mask */
unsigned int gfp_mask;
- /* Flag used by CKRM */
- unsigned int ckrm_flags;
-
int may_writepage;
};
* slab to avoid swapping.
*
* We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
- *
- * `lru_pages' represents the number of on-LRU pages in all the zones which
- * are eligible for the caller's allocation attempt. It is used for balancing
- * slab reclaim versus page reclaim.
*/
-static int shrink_slab(unsigned long scanned, unsigned int gfp_mask,
- unsigned long lru_pages)
+static int shrink_slab(unsigned long scanned, unsigned int gfp_mask)
{
struct shrinker *shrinker;
+ long pages;
if (down_trylock(&shrinker_sem))
return 0;
+ pages = nr_used_zone_pages();
list_for_each_entry(shrinker, &shrinker_list, list) {
unsigned long long delta;
delta = (4 * scanned) / shrinker->seeks;
delta *= (*shrinker->shrinker)(0, gfp_mask);
- do_div(delta, lru_pages + 1);
+ do_div(delta, pages + 1);
shrinker->nr += delta;
if (shrinker->nr < 0)
shrinker->nr = LONG_MAX; /* It wrapped! */
{
LIST_HEAD(page_list);
struct pagevec pvec;
- int max_scan = sc->nr_to_scan, nr_pass;
- unsigned int ckrm_flags = sc->ckrm_flags, bit_flag;
+ int max_scan = sc->nr_to_scan;
pagevec_init(&pvec, 1);
lru_add_drain();
spin_lock_irq(&zone->lru_lock);
-redo:
- ckrm_get_reclaim_bits(&ckrm_flags, &bit_flag);
- nr_pass = zone->nr_inactive;
while (max_scan > 0) {
struct page *page;
int nr_taken = 0;
int nr_scan = 0;
int nr_freed;
- while (nr_pass-- && nr_scan++ < SWAP_CLUSTER_MAX &&
+ while (nr_scan++ < SWAP_CLUSTER_MAX &&
!list_empty(&zone->inactive_list)) {
page = lru_to_page(&zone->inactive_list);
SetPageLRU(page);
list_add(&page->lru, &zone->inactive_list);
continue;
- } else if (bit_flag && !ckrm_kick_page(page, bit_flag)) {
- __put_page(page);
- SetPageLRU(page);
-#ifdef CONFIG_CKRM_MEM_LRUORDER_CHANGE
- list_add_tail(&page->lru, &zone->inactive_list);
-#else
- list_add(&page->lru, &zone->inactive_list);
-#endif
- continue;
}
list_add(&page->lru, &page_list);
- ckrm_mem_dec_inactive(page);
nr_taken++;
}
zone->nr_inactive -= nr_taken;
zone->pages_scanned += nr_taken;
spin_unlock_irq(&zone->lru_lock);
- if ((bit_flag == 0) && (nr_taken == 0))
+ if (nr_taken == 0)
goto done;
max_scan -= nr_scan;
spin_lock_irq(&zone->lru_lock);
}
}
- if (ckrm_flags && (nr_pass <= 0)) {
- goto redo;
- }
}
spin_unlock_irq(&zone->lru_lock);
done:
long mapped_ratio;
long distress;
long swap_tendency;
- unsigned int ckrm_flags = sc->ckrm_flags, bit_flag;
- int nr_pass;
lru_add_drain();
pgmoved = 0;
spin_lock_irq(&zone->lru_lock);
-redo:
- ckrm_get_reclaim_bits(&ckrm_flags, &bit_flag);
- nr_pass = zone->nr_active;
- while (pgscanned < nr_pages && !list_empty(&zone->active_list) &&
- nr_pass) {
+ while (pgscanned < nr_pages && !list_empty(&zone->active_list)) {
page = lru_to_page(&zone->active_list);
prefetchw_prev_lru_page(page, &zone->active_list, flags);
if (!TestClearPageLRU(page))
__put_page(page);
SetPageLRU(page);
list_add(&page->lru, &zone->active_list);
- pgscanned++;
- } else if (bit_flag && !ckrm_kick_page(page, bit_flag)) {
- __put_page(page);
- SetPageLRU(page);
-#ifdef CONFIG_CKRM_MEM_LRUORDER_CHANGE
- list_add_tail(&page->lru, &zone->active_list);
-#else
- list_add(&page->lru, &zone->active_list);
-#endif
} else {
list_add(&page->lru, &l_hold);
- ckrm_mem_dec_active(page);
pgmoved++;
- pgscanned++;
- }
- if (!--nr_pass && ckrm_flags) {
- goto redo;
}
+ pgscanned++;
}
zone->nr_active -= pgmoved;
spin_unlock_irq(&zone->lru_lock);
if (!TestClearPageActive(page))
BUG();
list_move(&page->lru, &zone->inactive_list);
- ckrm_mem_inc_inactive(page);
pgmoved++;
if (!pagevec_add(&pvec, page)) {
zone->nr_inactive += pgmoved;
BUG();
BUG_ON(!PageActive(page));
list_move(&page->lru, &zone->active_list);
- ckrm_mem_inc_active(page);
pgmoved++;
if (!pagevec_add(&pvec, page)) {
zone->nr_active += pgmoved;
sc->nr_to_reclaim = SWAP_CLUSTER_MAX;
while (nr_active || nr_inactive) {
- sc->ckrm_flags = ckrm_setup_reclamation();
if (nr_active) {
sc->nr_to_scan = min(nr_active,
(unsigned long)SWAP_CLUSTER_MAX);
if (sc->nr_to_reclaim <= 0)
break;
}
- ckrm_teardown_reclamation();
- }
-}
-
-#if defined(CONFIG_CKRM_RES_MEM) && defined(AT_LIMIT_SUPPORT)
-// This function needs to be given more thought.
-// Shrink the class to be at 90% of its limit
-static void
-ckrm_shrink_class(ckrm_mem_res_t *cls)
-{
- struct scan_control sc;
- struct zone *zone;
- int zindex = 0, active_credit = 0, inactive_credit = 0;
-
- if (ckrm_test_set_shrink(cls)) { // set the SHRINK bit atomically
- // if it is already set somebody is working on it. so... leave
- return;
- }
- sc.nr_mapped = read_page_state(nr_mapped);
- sc.nr_scanned = 0;
- sc.ckrm_flags = ckrm_get_reclaim_flags(cls);
- sc.nr_reclaimed = 0;
- sc.priority = 0; // always very high priority
-
- for_each_zone(zone) {
- int zone_total, zone_limit, active_limit, inactive_limit;
- int active_over, inactive_over;
- unsigned long nr_active, nr_inactive;
- u64 temp;
-
- zone->temp_priority = zone->prev_priority;
- zone->prev_priority = sc.priority;
-
- zone_total = zone->nr_active + zone->nr_inactive + zone->free_pages;
-
- temp = (u64) cls->pg_limit * zone_total;
- do_div(temp, ckrm_tot_lru_pages);
- zone_limit = (int) temp;
- active_limit = (6 * zone_limit) / 10; // 2/3rd in active list
- inactive_limit = (3 * zone_limit) / 10; // 1/3rd in inactive list
-
- active_over = cls->nr_active[zindex] - active_limit + active_credit;
- inactive_over = active_over +
- (cls->nr_inactive[zindex] - inactive_limit) + inactive_credit;
-
- if (active_over > 0) {
- zone->nr_scan_active += active_over + 1;
- nr_active = zone->nr_scan_active;
- active_credit = 0;
- } else {
- active_credit += active_over;
- nr_active = 0;
- }
-
- if (inactive_over > 0) {
- zone->nr_scan_inactive += inactive_over;
- nr_inactive = zone->nr_scan_inactive;
- inactive_credit = 0;
- } else {
- inactive_credit += inactive_over;
- nr_inactive = 0;
- }
- while (nr_active || nr_inactive) {
- if (nr_active) {
- sc.nr_to_scan = min(nr_active,
- (unsigned long)SWAP_CLUSTER_MAX);
- nr_active -= sc.nr_to_scan;
- refill_inactive_zone(zone, &sc);
- }
-
- if (nr_inactive) {
- sc.nr_to_scan = min(nr_inactive,
- (unsigned long)SWAP_CLUSTER_MAX);
- nr_inactive -= sc.nr_to_scan;
- shrink_cache(zone, &sc);
- if (sc.nr_to_reclaim <= 0)
- break;
- }
- }
- zone->prev_priority = zone->temp_priority;
- zindex++;
}
- ckrm_clear_shrink(cls);
}
-static void
-ckrm_shrink_classes(void)
-{
- ckrm_mem_res_t *cls;
-
- spin_lock(&ckrm_mem_lock);
- while (!ckrm_shrink_list_empty()) {
- cls = list_entry(ckrm_shrink_list.next, ckrm_mem_res_t,
- shrink_list);
- spin_unlock(&ckrm_mem_lock);
- ckrm_shrink_class(cls);
- spin_lock(&ckrm_mem_lock);
- list_del(&cls->shrink_list);
- cls->flags &= ~MEM_AT_LIMIT;
- }
- spin_unlock(&ckrm_mem_lock);
-}
-
-#else
-
-#if defined(CONFIG_CKRM_RES_MEM) && !defined(AT_LIMIT_SUPPORT)
-#warning "disabling ckrm_at_limit -- setting ckrm_shrink_classes to noop "
-#endif
-
-#define ckrm_shrink_classes() do { } while(0)
-#endif
-
/*
* This is the direct reclaim path, for page-allocating processes. We only
* try to reclaim pages from zones which will satisfy the caller's allocation
int total_scanned = 0, total_reclaimed = 0;
struct reclaim_state *reclaim_state = current->reclaim_state;
struct scan_control sc;
- unsigned long lru_pages = 0;
int i;
sc.gfp_mask = gfp_mask;
inc_page_state(allocstall);
- for (i = 0; zones[i] != NULL; i++) {
- struct zone *zone = zones[i];
-
- zone->temp_priority = DEF_PRIORITY;
- lru_pages += zone->nr_active + zone->nr_inactive;
- }
+ for (i = 0; zones[i] != 0; i++)
+ zones[i]->temp_priority = DEF_PRIORITY;
for (priority = DEF_PRIORITY; priority >= 0; priority--) {
sc.nr_mapped = read_page_state(nr_mapped);
sc.nr_reclaimed = 0;
sc.priority = priority;
shrink_caches(zones, &sc);
- shrink_slab(sc.nr_scanned, gfp_mask, lru_pages);
+ shrink_slab(sc.nr_scanned, gfp_mask);
if (reclaim_state) {
sc.nr_reclaimed += reclaim_state->reclaimed_slab;
reclaim_state->reclaimed_slab = 0;
blk_congestion_wait(WRITE, HZ/10);
}
if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY))
- out_of_memory(gfp_mask);
+ out_of_memory();
out:
for (i = 0; zones[i] != 0; i++)
zones[i]->prev_priority = zones[i]->temp_priority;
for (priority = DEF_PRIORITY; priority >= 0; priority--) {
int all_zones_ok = 1;
int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */
- unsigned long lru_pages = 0;
+
if (nr_pages == 0) {
/*
end_zone = pgdat->nr_zones - 1;
}
scan:
- for (i = 0; i <= end_zone; i++) {
- struct zone *zone = pgdat->node_zones + i;
-
- lru_pages += zone->nr_active + zone->nr_inactive;
- }
-
/*
* Now scan the zone in the dma->highmem direction, stopping
* at the last zone which needs scanning.
sc.priority = priority;
shrink_zone(zone, &sc);
reclaim_state->reclaimed_slab = 0;
- shrink_slab(sc.nr_scanned, GFP_KERNEL, lru_pages);
+ shrink_slab(sc.nr_scanned, GFP_KERNEL);
sc.nr_reclaimed += reclaim_state->reclaimed_slab;
total_reclaimed += sc.nr_reclaimed;
if (zone->all_unreclaimable)
finish_wait(&pgdat->kswapd_wait, &wait);
try_to_clip_inodes();
- if (!ckrm_shrink_list_empty())
- ckrm_shrink_classes();
- else
balance_pgdat(pgdat, 0);
}
return 0;
*/
void wakeup_kswapd(struct zone *zone)
{
- if ((zone->free_pages > zone->pages_low) && ckrm_shrink_list_empty())
+ if (zone->free_pages > zone->pages_low)
return;
if (!waitqueue_active(&zone->zone_pgdat->kswapd_wait))
return;
/* Global VLAN variables */
/* Our listing of VLAN group(s) */
-struct hlist_head vlan_group_hash[VLAN_GRP_HASH_SIZE];
+struct vlan_group *vlan_group_hash[VLAN_GRP_HASH_SIZE];
+spinlock_t vlan_group_lock = SPIN_LOCK_UNLOCKED;
#define vlan_grp_hashfn(IDX) ((((IDX) >> VLAN_GRP_HASH_SHIFT) ^ (IDX)) & VLAN_GRP_HASH_MASK)
static char vlan_fullname[] = "802.1Q VLAN Support";
.func = vlan_skb_recv, /* VLAN receive method */
};
-/* Bits of netdev state that are propogated from real device to virtual */
-#define VLAN_LINK_STATE_MASK \
- ((1<<__LINK_STATE_PRESENT)|(1<<__LINK_STATE_NOCARRIER))
-
/* End of global variables definitions. */
/*
* references left.
*/
for (i = 0; i < VLAN_GRP_HASH_SIZE; i++) {
- BUG_ON(!hlist_empty(&vlan_group_hash[i]));
+ if (vlan_group_hash[i] != NULL)
+ BUG();
}
vlan_proc_cleanup();
module_init(vlan_proto_init);
module_exit(vlan_cleanup_module);
-/* Must be invoked with RCU read lock (no preempt) */
+/* Must be invoked with vlan_group_lock held. */
static struct vlan_group *__vlan_find_group(int real_dev_ifindex)
{
struct vlan_group *grp;
- struct hlist_node *n;
- int hash = vlan_grp_hashfn(real_dev_ifindex);
- hlist_for_each_entry_rcu(grp, n, &vlan_group_hash[hash], hlist) {
+ for (grp = vlan_group_hash[vlan_grp_hashfn(real_dev_ifindex)];
+ grp != NULL;
+ grp = grp->next) {
if (grp->real_dev_ifindex == real_dev_ifindex)
- return grp;
+ break;
}
- return NULL;
+ return grp;
+}
+
+/* Must hold vlan_group_lock. */
+static void __grp_hash(struct vlan_group *grp)
+{
+ struct vlan_group **head;
+
+ head = &vlan_group_hash[vlan_grp_hashfn(grp->real_dev_ifindex)];
+ grp->next = *head;
+ *head = grp;
+}
+
+/* Must hold vlan_group_lock. */
+static void __grp_unhash(struct vlan_group *grp)
+{
+ struct vlan_group *next, **pprev;
+
+ pprev = &vlan_group_hash[vlan_grp_hashfn(grp->real_dev_ifindex)];
+ next = *pprev;
+ while (next != grp) {
+ pprev = &next->next;
+ next = *pprev;
+ }
+ *pprev = grp->next;
}
/* Find the protocol handler. Assumes VID < VLAN_VID_MASK.
*
- * Must be invoked with RCU read lock (no preempt)
+ * Must be invoked with vlan_group_lock held.
*/
struct net_device *__find_vlan_dev(struct net_device *real_dev,
unsigned short VID)
return NULL;
}
-static void vlan_rcu_free(struct rcu_head *rcu)
-{
- kfree(container_of(rcu, struct vlan_group, rcu));
-}
-
-
/* This returns 0 if everything went fine.
* It will return 1 if the group was killed as a result.
* A negative return indicates failure.
if (vlan_id >= VLAN_VID_MASK)
return -EINVAL;
- ASSERT_RTNL();
+ spin_lock_bh(&vlan_group_lock);
grp = __vlan_find_group(real_dev_ifindex);
+ spin_unlock_bh(&vlan_group_lock);
ret = 0;
if (real_dev->features & NETIF_F_HW_VLAN_RX)
real_dev->vlan_rx_register(real_dev, NULL);
- hlist_del_rcu(&grp->hlist);
-
- /* Free the group, after all cpu's are done. */
- call_rcu(&grp->rcu, vlan_rcu_free);
+ spin_lock_bh(&vlan_group_lock);
+ __grp_unhash(grp);
+ spin_unlock_bh(&vlan_group_lock);
+ /* Free the group, after we have removed it
+ * from the hash.
+ */
+ kfree(grp);
grp = NULL;
+
ret = 1;
}
}
new_dev->set_mac_address = vlan_dev_set_mac_address;
new_dev->set_multicast_list = vlan_dev_set_multicast_list;
new_dev->destructor = free_netdev;
- new_dev->do_ioctl = vlan_dev_ioctl;
}
/* Attach a VLAN device to a mac address (ie Ethernet Card).
struct vlan_group *grp;
struct net_device *new_dev;
struct net_device *real_dev; /* the ethernet device */
+ int r;
char name[IFNAMSIZ];
#ifdef VLAN_DEBUG
if (!(real_dev->flags & IFF_UP))
goto out_unlock;
- if (__find_vlan_dev(real_dev, VLAN_ID) != NULL) {
+ spin_lock_bh(&vlan_group_lock);
+ r = (__find_vlan_dev(real_dev, VLAN_ID) != NULL);
+ spin_unlock_bh(&vlan_group_lock);
+
+ if (r) {
/* was already registered. */
printk(VLAN_DBG "%s: ALREADY had VLAN registered\n", __FUNCTION__);
goto out_unlock;
new_dev->flags = real_dev->flags;
new_dev->flags &= ~IFF_UP;
- new_dev->state = real_dev->state & VLAN_LINK_STATE_MASK;
-
/* need 4 bytes for extra VLAN header info,
* hope the underlying device can handle it.
*/
/* So, got the sucker initialized, now lets place
* it into our local structure.
*/
+ spin_lock_bh(&vlan_group_lock);
grp = __vlan_find_group(real_dev->ifindex);
+ spin_unlock_bh(&vlan_group_lock);
/* Note, we are running under the RTNL semaphore
* so it cannot "appear" on us.
memset(grp, 0, sizeof(struct vlan_group));
grp->real_dev_ifindex = real_dev->ifindex;
- hlist_add_head_rcu(&grp->hlist,
- &vlan_group_hash[vlan_grp_hashfn(real_dev->ifindex)]);
+ spin_lock_bh(&vlan_group_lock);
+ __grp_hash(grp);
+ spin_unlock_bh(&vlan_group_lock);
if (real_dev->features & NETIF_F_HW_VLAN_RX)
real_dev->vlan_rx_register(real_dev, grp);
static int vlan_device_event(struct notifier_block *unused, unsigned long event, void *ptr)
{
- struct net_device *dev = ptr;
- struct vlan_group *grp = __vlan_find_group(dev->ifindex);
+ struct net_device *dev = (struct net_device *)(ptr);
+ struct vlan_group *grp = NULL;
int i, flgs;
- struct net_device *vlandev;
+ struct net_device *vlandev = NULL;
+
+ spin_lock_bh(&vlan_group_lock);
+ grp = __vlan_find_group(dev->ifindex);
+ spin_unlock_bh(&vlan_group_lock);
if (!grp)
goto out;
*/
switch (event) {
- case NETDEV_CHANGE:
- /* Propogate real device state to vlan devices */
- flgs = dev->state & VLAN_LINK_STATE_MASK;
- for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
- vlandev = grp->vlan_devices[i];
- if (!vlandev)
- continue;
-
- if ((vlandev->state & VLAN_LINK_STATE_MASK) != flgs) {
- vlandev->state = (vlandev->state &~ VLAN_LINK_STATE_MASK)
- | flgs;
- netdev_state_change(vlandev);
- }
- }
+ case NETDEV_CHANGEADDR:
+ case NETDEV_GOING_DOWN:
+ /* Ignore for now */
break;
case NETDEV_DOWN:
ret = unregister_vlan_dev(dev,
VLAN_DEV_INFO(vlandev)->vlan_id);
+ dev_put(vlandev);
unregister_netdevice(vlandev);
/* Group was destroyed? */
#define VLAN_GRP_HASH_SHIFT 5
#define VLAN_GRP_HASH_SIZE (1 << VLAN_GRP_HASH_SHIFT)
#define VLAN_GRP_HASH_MASK (VLAN_GRP_HASH_SIZE - 1)
-extern struct hlist_head vlan_group_hash[VLAN_GRP_HASH_SIZE];
+extern struct vlan_group *vlan_group_hash[VLAN_GRP_HASH_SIZE];
+extern spinlock_t vlan_group_lock;
/* Find a VLAN device by the MAC address of its Ethernet device, and
* it's VLAN ID. The default configuration is to have VLAN's scope
* NOT follow the spec for VLANs, but may be useful for doing very
* large quantities of VLAN MUX/DEMUX onto FrameRelay or ATM PVCs.
*
- * Must be invoked with rcu_read_lock (ie preempt disabled)
- * or with RTNL.
+ * Must be invoked with vlan_group_lock held and that lock MUST NOT
+ * be dropped until a reference is obtained on the returned device.
+ * You may drop the lock earlier if you are running under the RTNL
+ * semaphore, however.
*/
struct net_device *__find_vlan_dev(struct net_device* real_dev,
unsigned short VID); /* vlan.c */
int vlan_dev_set_mac_address(struct net_device *dev, void* addr);
int vlan_dev_open(struct net_device* dev);
int vlan_dev_stop(struct net_device* dev);
-int vlan_dev_ioctl(struct net_device* dev, struct ifreq *ifr, int cmd);
int vlan_dev_set_ingress_priority(char* dev_name, __u32 skb_prio, short vlan_prio);
int vlan_dev_set_egress_priority(char* dev_name, __u32 skb_prio, short vlan_prio);
int vlan_dev_set_vlan_flag(char* dev_name, __u32 flag, short flag_val);
/* We have 12 bits of vlan ID.
*
- * We must not drop allow preempt until we hold a
+ * We must not drop the vlan_group_lock until we hold a
* reference to the device (netif_rx does that) or we
* fail.
*/
- rcu_read_lock();
+ spin_lock_bh(&vlan_group_lock);
skb->dev = __find_vlan_dev(dev, vid);
if (!skb->dev) {
- rcu_read_unlock();
+ spin_unlock_bh(&vlan_group_lock);
#ifdef VLAN_DEBUG
printk(VLAN_DBG "%s: ERROR: No net_device for VID: %i on dev: %s [%i]\n",
*/
if (dev != VLAN_DEV_INFO(skb->dev)->real_dev) {
- rcu_read_unlock();
+ spin_unlock_bh(&vlan_group_lock);
#ifdef VLAN_DEBUG
printk(VLAN_DBG "%s: dropping skb: %p because came in on wrong device, dev: %s real_dev: %s, skb_dev: %s\n",
/* TODO: Add a more specific counter here. */
stats->rx_errors++;
}
- rcu_read_lock();
+ spin_unlock_bh(&vlan_group_lock);
return 0;
}
/* TODO: Add a more specific counter here. */
stats->rx_errors++;
}
- rcu_read_unlock();
+ spin_unlock_bh(&vlan_group_lock);
return 0;
}
/* TODO: Add a more specific counter here. */
stats->rx_errors++;
}
- rcu_read_unlock();
+ spin_unlock_bh(&vlan_group_lock);
return 0;
}
vlan_flush_mc_list(dev);
return 0;
}
-
-int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
- struct net_device *real_dev = VLAN_DEV_INFO(dev)->real_dev;
- struct ifreq ifrr;
- int err = -EOPNOTSUPP;
-
- strncpy(ifrr.ifr_name, real_dev->name, IFNAMSIZ);
- ifrr.ifr_ifru = ifr->ifr_ifru;
-
- switch(cmd) {
- case SIOCGMIIPHY:
- case SIOCGMIIREG:
- case SIOCSMIIREG:
- if (real_dev->do_ioctl && netif_device_present(real_dev))
- err = real_dev->do_ioctl(dev, &ifrr, cmd);
- break;
-
- case SIOCETHTOOL:
- err = dev_ethtool(&ifrr);
- }
-
- if (!err)
- ifr->ifr_ifru = ifrr.ifr_ifru;
-
- return err;
-}
-
/** Taken from Gleb + Lennert's VLAN code, and modified... */
void vlan_dev_set_multicast_list(struct net_device *vlan_dev)
{
config IPV6
tristate "The IPv6 protocol (EXPERIMENTAL)"
depends on INET && EXPERIMENTAL
- select CRYPTO if IPV6_PRIVACY
- select CRYPTO_MD5 if IPV6_PRIVACY
---help---
This is experimental support for the IP version 6 (formerly called
IPng "IP next generation"). You will still be able to do
If unsure, say N.
+config NET_FASTROUTE
+ bool "Fast switching (read help!)"
+ depends on EXPERIMENTAL
+ ---help---
+ Saying Y here enables direct NIC-to-NIC (NIC = Network Interface
+ Card) data transfers on the local network, which is fast.
+
+ IMPORTANT NOTE: This option is NOT COMPATIBLE with "Network packet
+ filtering" (CONFIG_NETFILTER). Say N here if you say Y there.
+
+ However, it will work with all options in the "Advanced router"
+ section (except for "Use TOS value as routing key" and
+ "Use FWMARK value as routing key").
+
+ At the moment, few devices support fast switching (tulip is one of
+ them, a modified 8390 driver can be found at
+ <ftp://ftp.tux.org/pub/net/ip-routing/fastroute/fastroute-8390.tar.gz>).
+
+ If unsure, say N.
+
config NET_HW_FLOWCONTROL
bool "Forwarding between high speed interfaces"
depends on EXPERIMENTAL
source "net/bluetooth/Kconfig"
-source "net/tux/Kconfig"
-
source "drivers/net/Kconfig"
endmenu
ifneq ($(CONFIG_IPV6),)
obj-y += ipv6/
endif
-obj-$(CONFIG_TUX) += tux/
obj-$(CONFIG_PACKET) += packet/
obj-$(CONFIG_NET_KEY) += key/
obj-$(CONFIG_NET_SCHED) += sched/
case SIOCADDRT: {
struct net_device *dev = NULL;
+ /*
+ * FIXME: the name of the device is still in user
+ * space, isn't it?
+ */
if (rt.rt_dev) {
- char name[IFNAMSIZ];
- if (copy_from_user(name, rt.rt_dev, IFNAMSIZ-1))
- return -EFAULT;
- name[IFNAMSIZ-1] = '\0';
- dev = __dev_get_by_name(name);
+ dev = __dev_get_by_name(rt.rt_dev);
if (!dev)
return -ENODEV;
}
BRPRIV(skb->dev)->stats.rx_packets--;
br2684_push(atmvcc, skb);
}
- __module_get(THIS_MODULE);
+ (void) try_module_get(THIS_MODULE);
return 0;
error:
write_unlock_irq(&devs_lock);
/*
* lec.c: Lan Emulation driver
- * Marko Kiiskila mkiiskila@yahoo.com
+ * Marko Kiiskila carnil@cs.tut.fi
*
*/
static int lec_close(struct net_device *dev);
static struct net_device_stats *lec_get_stats(struct net_device *dev);
static void lec_init(struct net_device *dev);
-static struct lec_arp_table* lec_arp_find(struct lec_priv *priv,
+static inline struct lec_arp_table* lec_arp_find(struct lec_priv *priv,
unsigned char *mac_addr);
-static int lec_arp_remove(struct lec_priv *priv,
+static inline int lec_arp_remove(struct lec_priv *priv,
struct lec_arp_table *to_remove);
/* LANE2 functions */
static void lane2_associate_ind (struct net_device *dev, u8 *mac_address,
/*
* Remove entry from lec_arp_table
*/
-static int
+static inline int
lec_arp_remove(struct lec_priv *priv,
struct lec_arp_table *to_remove)
{
/*
* Find entry by mac_address
*/
-static struct lec_arp_table*
+static inline struct lec_arp_table*
lec_arp_find(struct lec_priv *priv,
unsigned char *mac_addr)
{
*
* Lan Emulation client header file
*
- * Marko Kiiskila mkiiskila@yahoo.com
+ * Marko Kiiskila carnil@cs.tut.fi
*
*/
/*
* Lec arp cache
- * Marko Kiiskila mkiiskila@yahoo.com
+ * Marko Kiiskila carnil@cs.tut.fi
*
*/
#ifndef _LEC_ARP_H
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/bitops.h>
-#include <linux/seq_file.h>
/* We are an ethernet device */
#include <linux/if_ether.h>
return 0;
}
-/* this is buggered - we need locking for qos_head */
-void atm_mpoa_disp_qos(struct seq_file *m)
+void atm_mpoa_disp_qos(char *page, ssize_t *len)
{
+
unsigned char *ip;
char ipaddr[16];
struct atm_mpoa_qos *qos;
qos = qos_head;
- seq_printf(m, "QoS entries for shortcuts:\n");
- seq_printf(m, "IP address\n TX:max_pcr pcr min_pcr max_cdv max_sdu\n RX:max_pcr pcr min_pcr max_cdv max_sdu\n");
+ *len += sprintf(page + *len, "QoS entries for shortcuts:\n");
+ *len += sprintf(page + *len, "IP address\n TX:max_pcr pcr min_pcr max_cdv max_sdu\n RX:max_pcr pcr min_pcr max_cdv max_sdu\n");
ipaddr[sizeof(ipaddr)-1] = '\0';
while (qos != NULL) {
ip = (unsigned char *)&qos->ipaddr;
sprintf(ipaddr, "%u.%u.%u.%u", NIPQUAD(ip));
- seq_printf(m, "%u.%u.%u.%u\n %-7d %-7d %-7d %-7d %-7d\n %-7d %-7d %-7d %-7d %-7d\n",
+ *len += sprintf(page + *len, "%u.%u.%u.%u\n %-7d %-7d %-7d %-7d %-7d\n %-7d %-7d %-7d %-7d %-7d\n",
NIPQUAD(ipaddr),
qos->qos.txtp.max_pcr, qos->qos.txtp.pcr, qos->qos.txtp.min_pcr, qos->qos.txtp.max_cdv, qos->qos.txtp.max_sdu,
qos->qos.rxtp.max_pcr, qos->qos.rxtp.pcr, qos->qos.rxtp.min_pcr, qos->qos.rxtp.max_cdv, qos->qos.rxtp.max_sdu);
qos = qos->next;
}
+
+ return;
}
static struct net_device *find_lec_by_itfnum(int itf)
int atm_mpoa_delete_qos(struct atm_mpoa_qos *qos);
/* Display QoS entries. This is for the procfs */
-struct seq_file;
-void atm_mpoa_disp_qos(struct seq_file *m);
+void atm_mpoa_disp_qos(char *page, ssize_t *len);
#endif /* _MPC_H_ */
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/time.h>
-#include <linux/seq_file.h>
#include <asm/uaccess.h>
#include <linux/atmmpc.h>
#include <linux/atm.h>
extern struct mpoa_client *mpcs;
extern struct proc_dir_entry *atm_proc_root; /* from proc.c. */
-static int proc_mpc_open(struct inode *inode, struct file *file);
+static ssize_t proc_mpc_read(struct file *file, char __user *buff,
+ size_t count, loff_t *pos);
+
static ssize_t proc_mpc_write(struct file *file, const char __user *buff,
size_t nbytes, loff_t *ppos);
-static int parse_qos(const char *buff);
+static int parse_qos(const char *buff, int len);
/*
* Define allowed FILE OPERATIONS
*/
static struct file_operations mpc_file_operations = {
.owner = THIS_MODULE,
- .open = proc_mpc_open,
- .read = seq_read,
- .llseek = seq_lseek,
+ .read = proc_mpc_read,
.write = proc_mpc_write,
- .release = seq_release,
};
+static int print_header(char *buff,struct mpoa_client *mpc){
+ if(mpc != NULL){
+ return sprintf(buff,"\nInterface %d:\n\n",mpc->dev_num);
+
+ }
+ return 0;
+}
+
/*
* Returns the state of an ingress cache entry as a string
*/
}
}
-/*
- * FIXME: mpcs (and per-mpc lists) have no locking whatsoever.
- */
-
-static void *mpc_start(struct seq_file *m, loff_t *pos)
-{
- loff_t l = *pos;
- struct mpoa_client *mpc;
-
- if (!l--)
- return SEQ_START_TOKEN;
- for (mpc = mpcs; mpc; mpc = mpc->next)
- if (!l--)
- return mpc;
- return NULL;
-}
-
-static void *mpc_next(struct seq_file *m, void *v, loff_t *pos)
-{
- struct mpoa_client *p = v;
- (*pos)++;
- return v == SEQ_START_TOKEN ? mpcs : p->next;
-}
-
-static void mpc_stop(struct seq_file *m, void *v)
-{
-}
-
/*
* READING function - called when the /proc/atm/mpoa file is read from.
*/
-static ssize_t mpc_show(struct seq_file *m, void *v)
-{
- struct mpoa_client *mpc = v;
+static ssize_t proc_mpc_read(struct file *file, char __user *buff,
+ size_t count, loff_t *pos){
+ unsigned long page = 0;
unsigned char *temp;
- int i;
+ ssize_t length = 0;
+ int i = 0;
+ struct mpoa_client *mpc = mpcs;
in_cache_entry *in_entry;
eg_cache_entry *eg_entry;
struct timeval now;
unsigned char ip_string[16];
-
- if (v == SEQ_START_TOKEN) {
- atm_mpoa_disp_qos(m);
- return 0;
- }
-
- seq_printf(m, "\nInterface %d:\n\n", mpc->dev_num);
- seq_printf(m, "Ingress Entries:\nIP address State Holding time Packets fwded VPI VCI\n");
- do_gettimeofday(&now);
-
- for (in_entry = mpc->in_cache; in_entry; in_entry = in_entry->next) {
- temp = (unsigned char *)&in_entry->ctrl_info.in_dst_ip;
- sprintf(ip_string,"%d.%d.%d.%d", temp[0], temp[1], temp[2], temp[3]);
- seq_printf(m, "%-16s%s%-14lu%-12u",
- ip_string,
- ingress_state_string(in_entry->entry_state),
- in_entry->ctrl_info.holding_time-(now.tv_sec-in_entry->tv.tv_sec),
- in_entry->packets_fwded);
- if (in_entry->shortcut)
- seq_printf(m, " %-3d %-3d",in_entry->shortcut->vpi,in_entry->shortcut->vci);
- seq_printf(m, "\n");
+ if(count == 0)
+ return 0;
+ page = get_zeroed_page(GFP_KERNEL);
+ if(!page)
+ return -ENOMEM;
+ atm_mpoa_disp_qos((char *)page, &length);
+ while(mpc != NULL){
+ length += print_header((char *)page + length, mpc);
+ length += sprintf((char *)page + length,"Ingress Entries:\nIP address State Holding time Packets fwded VPI VCI\n");
+ in_entry = mpc->in_cache;
+ do_gettimeofday(&now);
+ while(in_entry != NULL){
+ temp = (unsigned char *)&in_entry->ctrl_info.in_dst_ip; sprintf(ip_string,"%d.%d.%d.%d", temp[0], temp[1], temp[2], temp[3]);
+ length += sprintf((char *)page + length,"%-16s%s%-14lu%-12u", ip_string, ingress_state_string(in_entry->entry_state), (in_entry->ctrl_info.holding_time-(now.tv_sec-in_entry->tv.tv_sec)), in_entry->packets_fwded);
+ if(in_entry->shortcut)
+ length += sprintf((char *)page + length," %-3d %-3d",in_entry->shortcut->vpi,in_entry->shortcut->vci);
+ length += sprintf((char *)page + length,"\n");
+ in_entry = in_entry->next;
+ }
+ length += sprintf((char *)page + length,"\n");
+ eg_entry = mpc->eg_cache;
+ length += sprintf((char *)page + length,"Egress Entries:\nIngress MPC ATM addr\nCache-id State Holding time Packets recvd Latest IP addr VPI VCI\n");
+ while(eg_entry != NULL){
+ for(i=0;i<ATM_ESA_LEN;i++){
+ length += sprintf((char *)page + length,"%02x",eg_entry->ctrl_info.in_MPC_data_ATM_addr[i]);}
+ length += sprintf((char *)page + length,"\n%-16lu%s%-14lu%-15u",(unsigned long) ntohl(eg_entry->ctrl_info.cache_id), egress_state_string(eg_entry->entry_state), (eg_entry->ctrl_info.holding_time-(now.tv_sec-eg_entry->tv.tv_sec)), eg_entry->packets_rcvd);
+
+ /* latest IP address */
+ temp = (unsigned char *)&eg_entry->latest_ip_addr;
+ sprintf(ip_string, "%d.%d.%d.%d", temp[0], temp[1], temp[2], temp[3]);
+ length += sprintf((char *)page + length, "%-16s", ip_string);
+
+ if(eg_entry->shortcut)
+ length += sprintf((char *)page + length," %-3d %-3d",eg_entry->shortcut->vpi,eg_entry->shortcut->vci);
+ length += sprintf((char *)page + length,"\n");
+ eg_entry = eg_entry->next;
+ }
+ length += sprintf((char *)page + length,"\n");
+ mpc = mpc->next;
}
- seq_printf(m, "\n");
- seq_printf(m, "Egress Entries:\nIngress MPC ATM addr\nCache-id State Holding time Packets recvd Latest IP addr VPI VCI\n");
- for (eg_entry = mpc->eg_cache; eg_entry; eg_entry = eg_entry->next) {
- unsigned char *p = eg_entry->ctrl_info.in_MPC_data_ATM_addr;
- for(i = 0; i < ATM_ESA_LEN; i++)
- seq_printf(m, "%02x", p[i]);
- seq_printf(m, "\n%-16lu%s%-14lu%-15u",
- (unsigned long)ntohl(eg_entry->ctrl_info.cache_id),
- egress_state_string(eg_entry->entry_state),
- (eg_entry->ctrl_info.holding_time-(now.tv_sec-eg_entry->tv.tv_sec)),
- eg_entry->packets_rcvd);
-
- /* latest IP address */
- temp = (unsigned char *)&eg_entry->latest_ip_addr;
- sprintf(ip_string, "%d.%d.%d.%d", temp[0], temp[1], temp[2], temp[3]);
- seq_printf(m, "%-16s", ip_string);
-
- if (eg_entry->shortcut)
- seq_printf(m, " %-3d %-3d",eg_entry->shortcut->vpi,eg_entry->shortcut->vci);
- seq_printf(m, "\n");
+ if (*pos >= length) length = 0;
+ else {
+ if ((count + *pos) > length) count = length - *pos;
+ if (copy_to_user(buff, (char *)page , count)) {
+ free_page(page);
+ return -EFAULT;
+ }
+ *pos += count;
}
- seq_printf(m, "\n");
- return 0;
-}
-
-static struct seq_operations mpc_op = {
- .start = mpc_start,
- .next = mpc_next,
- .stop = mpc_stop,
- .show = mpc_show
-};
-static int proc_mpc_open(struct inode *inode, struct file *file)
-{
- return seq_open(file, &mpc_op);
+ free_page(page);
+ return length;
}
static ssize_t proc_mpc_write(struct file *file, const char __user *buff,
size_t nbytes, loff_t *ppos)
{
- char *page, *p;
- unsigned len;
+ int incoming, error, retval;
+ char *page, c;
+ const char __user *tmp;
- if (nbytes == 0)
- return 0;
+ if (nbytes == 0) return 0;
+ if (nbytes >= PAGE_SIZE) nbytes = PAGE_SIZE-1;
- if (nbytes >= PAGE_SIZE)
- nbytes = PAGE_SIZE-1;
+ error = verify_area(VERIFY_READ, buff, nbytes);
+ if (error) return error;
page = (char *)__get_free_page(GFP_KERNEL);
- if (!page)
- return -ENOMEM;
-
- for (p = page, len = 0; len < nbytes; p++, len++) {
- if (get_user(*p, buff++)) {
- free_page((unsigned long)page);
- return -EFAULT;
- }
- if (*p == '\0' || *p == '\n')
+ if (page == NULL) return -ENOMEM;
+
+ incoming = 0;
+ tmp = buff;
+ while(incoming < nbytes){
+ if (get_user(c, tmp++)) return -EFAULT;
+ incoming++;
+ if (c == '\0' || c == '\n')
break;
}
- *p = '\0';
+ retval = copy_from_user(page, buff, incoming);
+ if (retval != 0) {
+ printk("mpoa: proc_mpc_write: copy_from_user() failed\n");
+ return -EFAULT;
+ }
+
+ *ppos += incoming;
- if (!parse_qos(page))
+ page[incoming] = '\0';
+ retval = parse_qos(page, incoming);
+ if (retval == 0)
printk("mpoa: proc_mpc_write: could not parse '%s'\n", page);
free_page((unsigned long)page);
- return len;
+ return nbytes;
}
-static int parse_qos(const char *buff)
+static int parse_qos(const char *buff, int len)
{
/* possible lines look like this
* add 130.230.54.142 tx=max_pcr,max_sdu rx=max_pcr,max_sdu
*/
- unsigned char ip[4];
- int tx_pcr, tx_sdu, rx_pcr, rx_sdu;
+
+ int pos, i;
uint32_t ipaddr;
+ unsigned char ip[4];
+ char cmd[4], temp[256];
+ const char *tmp, *prev;
struct atm_qos qos;
+ int value[5];
memset(&qos, 0, sizeof(struct atm_qos));
-
- if (sscanf(buff, "del %hhu.%hhu.%hhu.%hhu",
- ip, ip+1, ip+2, ip+3) == 4) {
- ipaddr = *(uint32_t *)ip;
- return atm_mpoa_delete_qos(atm_mpoa_search_qos(ipaddr));
- }
-
- if (sscanf(buff, "add %hhu.%hhu.%hhu.%hhu tx=%d,%d rx=tx",
- ip, ip+1, ip+2, ip+3, &tx_pcr, &tx_sdu) == 6) {
- rx_pcr = tx_pcr;
- rx_sdu = tx_sdu;
- } else if (sscanf(buff, "add %hhu.%hhu.%hhu.%hhu tx=%d,%d rx=%d,%d",
- ip, ip+1, ip+2, ip+3, &tx_pcr, &tx_sdu, &rx_pcr, &rx_sdu) != 8)
- return 0;
-
+ strlcpy(cmd, buff, sizeof(cmd));
+ if( strncmp(cmd,"add", 3) && strncmp(cmd,"del", 3))
+ return 0; /* not add or del */
+
+ pos = 4;
+ /* next parse ip */
+ prev = buff + pos;
+ for (i = 0; i < 3; i++) {
+ tmp = strchr(prev, '.');
+ if (tmp == NULL) return 0;
+ memset(temp, '\0', 256);
+ memcpy(temp, prev, tmp-prev);
+ ip[i] = (char)simple_strtoul(temp, NULL, 0);
+ tmp ++;
+ prev = tmp;
+ }
+ tmp = strchr(prev, ' ');
+ if (tmp == NULL) return 0;
+ memset(temp, '\0', 256);
+ memcpy(temp, prev, tmp-prev);
+ ip[i] = (char)simple_strtoul(temp, NULL, 0);
ipaddr = *(uint32_t *)ip;
+
+ if(!strncmp(cmd, "del", 3))
+ return atm_mpoa_delete_qos(atm_mpoa_search_qos(ipaddr));
+
+ /* next transmit values */
+ tmp = strstr(buff, "tx=");
+ if(tmp == NULL) return 0;
+ tmp += 3;
+ prev = tmp;
+ for( i = 0; i < 1; i++){
+ tmp = strchr(prev, ',');
+ if (tmp == NULL) return 0;
+ memset(temp, '\0', 256);
+ memcpy(temp, prev, tmp-prev);
+ value[i] = (int)simple_strtoul(temp, NULL, 0);
+ tmp ++;
+ prev = tmp;
+ }
+ tmp = strchr(prev, ' ');
+ if (tmp == NULL) return 0;
+ memset(temp, '\0', 256);
+ memcpy(temp, prev, tmp-prev);
+ value[i] = (int)simple_strtoul(temp, NULL, 0);
qos.txtp.traffic_class = ATM_CBR;
- qos.txtp.max_pcr = tx_pcr;
- qos.txtp.max_sdu = tx_sdu;
- qos.rxtp.traffic_class = ATM_CBR;
- qos.rxtp.max_pcr = rx_pcr;
- qos.rxtp.max_sdu = rx_sdu;
+ qos.txtp.max_pcr = value[0];
+ qos.txtp.max_sdu = value[1];
+
+ /* next receive values */
+ tmp = strstr(buff, "rx=");
+ if(tmp == NULL) return 0;
+ if (strstr(buff, "rx=tx")) { /* rx == tx */
+ qos.rxtp.traffic_class = qos.txtp.traffic_class;
+ qos.rxtp.max_pcr = qos.txtp.max_pcr;
+ qos.rxtp.max_cdv = qos.txtp.max_cdv;
+ qos.rxtp.max_sdu = qos.txtp.max_sdu;
+ } else {
+ tmp += 3;
+ prev = tmp;
+ for( i = 0; i < 1; i++){
+ tmp = strchr(prev, ',');
+ if (tmp == NULL) return 0;
+ memset(temp, '\0', 256);
+ memcpy(temp, prev, tmp-prev);
+ value[i] = (int)simple_strtoul(temp, NULL, 0);
+ tmp ++;
+ prev = tmp;
+ }
+ tmp = strchr(prev, '\0');
+ if (tmp == NULL) return 0;
+ memset(temp, '\0', 256);
+ memcpy(temp, prev, tmp-prev);
+ value[i] = (int)simple_strtoul(temp, NULL, 0);
+ qos.rxtp.traffic_class = ATM_CBR;
+ qos.rxtp.max_pcr = value[0];
+ qos.rxtp.max_sdu = value[1];
+ }
qos.aal = ATM_AAL5;
dprintk("mpoa: mpoa_proc.c: parse_qos(): setting qos paramameters to tx=%d,%d rx=%d,%d\n",
qos.txtp.max_pcr,
atmvcc->user_back = pvcc;
atmvcc->push = pppoatm_push;
atmvcc->pop = pppoatm_pop;
- __module_get(THIS_MODULE);
+ (void) try_module_get(THIS_MODULE);
return 0;
}
? -EFAULT : 0;
goto done;
case ATM_SETLOOP:
- if (__ATM_LM_XTRMT((int) (unsigned long) buf) &&
- __ATM_LM_XTLOC((int) (unsigned long) buf) >
- __ATM_LM_XTRMT((int) (unsigned long) buf)) {
+ if (__ATM_LM_XTRMT((int) (long) buf) &&
+ __ATM_LM_XTLOC((int) (long) buf) >
+ __ATM_LM_XTRMT((int) (long) buf)) {
error = -EINVAL;
goto done;
}
RFCOMM Module (RFCOMM Protocol)
BNEP Module (Bluetooth Network Encapsulation Protocol)
CMTP Module (CAPI Message Transport Protocol)
- HIDP Module (Human Interface Device Protocol)
Say Y here to compile Bluetooth support into the kernel or say M to
compile it as module (bluetooth).
struct bnep_connlist_req {
__u32 cnum;
- struct bnep_conninfo __user *ci;
+ struct bnep_conninfo *ci;
};
struct bnep_proto_filter {
#define BT_DBG(D...)
#endif
-#define VERSION "1.2"
+#define VERSION "1.0"
static LIST_HEAD(bnep_session_list);
static DECLARE_RWSEM(bnep_session_sem);
static int bnep_send(struct bnep_session *s, void *data, size_t len)
{
struct socket *sock = s->sock;
- struct kvec iv = { data, len };
+ struct iovec iv = { data, len };
- return kernel_sendmsg(sock, &s->msg, &iv, 1, len);
+ s->msg.msg_iov = &iv;
+ s->msg.msg_iovlen = 1;
+ return sock_sendmsg(sock, &s->msg, len);
}
static int bnep_send_rsp(struct bnep_session *s, u8 ctrl, u16 resp)
return bnep_send(s, &rsp, sizeof(rsp));
}
-#ifdef CONFIG_BT_BNEP_PROTO_FILTER
-static inline void bnep_set_default_proto_filter(struct bnep_session *s)
-{
- /* (IPv4, ARP) */
- s->proto_filter[0].start = htons(0x0800);
- s->proto_filter[0].end = htons(0x0806);
- /* (RARP, AppleTalk) */
- s->proto_filter[1].start = htons(0x8035);
- s->proto_filter[1].end = htons(0x80F3);
- /* (IPX, IPv6) */
- s->proto_filter[2].start = htons(0x8137);
- s->proto_filter[2].end = htons(0x86DD);
-}
-#endif
-
static int bnep_ctrl_set_netfilter(struct bnep_session *s, u16 *data, int len)
{
int n;
BT_DBG("proto filter start %d end %d",
f[i].start, f[i].end);
}
-
if (i < BNEP_MAX_PROTO_FILTERS)
memset(f + i, 0, sizeof(*f));
- if (n == 0)
- bnep_set_default_proto_filter(s);
-
bnep_send_rsp(s, BNEP_FILTER_NET_TYPE_RSP, BNEP_SUCCESS);
} else {
bnep_send_rsp(s, BNEP_FILTER_NET_TYPE_RSP, BNEP_FILTER_LIMIT_REACHED);
{
struct ethhdr *eh = (void *) skb->data;
struct socket *sock = s->sock;
- struct kvec iv[3];
+ struct iovec iv[3];
int len = 0, il = 0;
u8 type = 0;
goto send;
}
- iv[il++] = (struct kvec) { &type, 1 };
+ iv[il++] = (struct iovec) { &type, 1 };
len++;
if (!memcmp(eh->h_dest, s->eh.h_source, ETH_ALEN))
type = __bnep_tx_types[type];
switch (type) {
case BNEP_COMPRESSED_SRC_ONLY:
- iv[il++] = (struct kvec) { eh->h_source, ETH_ALEN };
+ iv[il++] = (struct iovec) { eh->h_source, ETH_ALEN };
len += ETH_ALEN;
break;
case BNEP_COMPRESSED_DST_ONLY:
- iv[il++] = (struct kvec) { eh->h_dest, ETH_ALEN };
+ iv[il++] = (struct iovec) { eh->h_dest, ETH_ALEN };
len += ETH_ALEN;
break;
}
send:
- iv[il++] = (struct kvec) { skb->data, skb->len };
+ iv[il++] = (struct iovec) { skb->data, skb->len };
len += skb->len;
/* FIXME: linearize skb */
{
- len = kernel_sendmsg(sock, &s->msg, iv, il, len);
+ s->msg.msg_iov = iv;
+ s->msg.msg_iovlen = il;
+ len = sock_sendmsg(sock, &s->msg, len);
}
kfree_skb(skb);
set_user_nice(current, -15);
current->flags |= PF_NOFREEZE;
+ set_fs(KERNEL_DS);
+
init_waitqueue_entry(&wait, current);
add_wait_queue(sk->sk_sleep, &wait);
while (!atomic_read(&s->killed)) {
/* Set default mc filter */
set_bit(bnep_mc_hash(dev->broadcast), (ulong *) &s->mc_filter);
#endif
-
+
#ifdef CONFIG_BT_BNEP_PROTO_FILTER
/* Set default protocol filter */
- bnep_set_default_proto_filter(s);
-#endif
+ /* (IPv4, ARP) */
+ s->proto_filter[0].start = htons(0x0800);
+ s->proto_filter[0].end = htons(0x0806);
+ /* (RARP, AppleTalk) */
+ s->proto_filter[1].start = htons(0x8035);
+ s->proto_filter[1].end = htons(0x80F3);
+ /* (IPX, IPv6) */
+ s->proto_filter[2].start = htons(0x8137);
+ s->proto_filter[2].end = htons(0x86DD);
+#endif
+
err = register_netdev(dev);
if (err) {
goto failed;
static int cmtp_send_frame(struct cmtp_session *session, unsigned char *data, int len)
{
struct socket *sock = session->sock;
- struct kvec iv = { data, len };
+ struct iovec iv = { data, len };
struct msghdr msg;
BT_DBG("session %p data %p len %d", session, data, len);
return 0;
memset(&msg, 0, sizeof(msg));
+ msg.msg_iovlen = 1;
+ msg.msg_iov = &iv;
- return kernel_sendmsg(sock, &msg, &iv, 1, len);
+ return sock_sendmsg(sock, &msg, len);
}
static int cmtp_process_transmit(struct cmtp_session *session)
set_user_nice(current, -15);
current->flags |= PF_NOFREEZE;
+ set_fs(KERNEL_DS);
+
init_waitqueue_entry(&wait, current);
add_wait_queue(sk->sk_sleep, &wait);
while (!atomic_read(&session->terminate)) {
static int hidp_send_frame(struct socket *sock, unsigned char *data, int len)
{
- struct kvec iv = { data, len };
+ struct iovec iv = { data, len };
struct msghdr msg;
BT_DBG("sock %p data %p len %d", sock, data, len);
return 0;
memset(&msg, 0, sizeof(msg));
+ msg.msg_iovlen = 1;
+ msg.msg_iov = &iv;
- return kernel_sendmsg(sock, &msg, &iv, 1, len);
+ return sock_sendmsg(sock, &msg, len);
}
static int hidp_process_transmit(struct hidp_session *session)
set_user_nice(current, -15);
current->flags |= PF_NOFREEZE;
+ set_fs(KERNEL_DS);
+
init_waitqueue_entry(&ctrl_wait, current);
init_waitqueue_entry(&intr_wait, current);
add_wait_queue(ctrl_sk->sk_sleep, &ctrl_wait);
int rfcomm_dlc_open(struct rfcomm_dlc *d, bdaddr_t *src, bdaddr_t *dst, u8 channel)
{
+ mm_segment_t fs;
int r;
rfcomm_lock();
+ fs = get_fs(); set_fs(KERNEL_DS);
r = __rfcomm_dlc_open(d, src, dst, channel);
+ set_fs(fs);
rfcomm_unlock();
return r;
int rfcomm_dlc_close(struct rfcomm_dlc *d, int err)
{
+ mm_segment_t fs;
int r;
rfcomm_lock();
+ fs = get_fs(); set_fs(KERNEL_DS);
r = __rfcomm_dlc_close(d, err);
+ set_fs(fs);
rfcomm_unlock();
return r;
{
struct rfcomm_session *s = NULL;
struct sockaddr_l2 addr;
+ struct l2cap_options opts;
struct socket *sock;
- struct sock *sk;
+ int size;
BT_DBG("%s %s", batostr(src), batostr(dst));
goto failed;
/* Set L2CAP options */
- sk = sock->sk;
- lock_sock(sk);
- l2cap_pi(sk)->imtu = RFCOMM_MAX_L2CAP_MTU;
- release_sock(sk);
+ size = sizeof(opts);
+ sock->ops->getsockopt(sock, SOL_L2CAP, L2CAP_OPTIONS, (void *)&opts, &size);
+
+ opts.imtu = RFCOMM_MAX_L2CAP_MTU;
+ sock->ops->setsockopt(sock, SOL_L2CAP, L2CAP_OPTIONS, (void *)&opts, size);
s = rfcomm_session_add(sock, BT_BOUND);
if (!s) {
static int rfcomm_send_frame(struct rfcomm_session *s, u8 *data, int len)
{
struct socket *sock = s->sock;
- struct kvec iv = { data, len };
+ struct iovec iv = { data, len };
struct msghdr msg;
BT_DBG("session %p len %d", s, len);
memset(&msg, 0, sizeof(msg));
+ msg.msg_iovlen = 1;
+ msg.msg_iov = &iv;
- return kernel_sendmsg(sock, &msg, &iv, 1, len);
+ return sock_sendmsg(sock, &msg, len);
}
static int rfcomm_send_sabm(struct rfcomm_session *s, u8 dlci)
static int rfcomm_send_test(struct rfcomm_session *s, int cr, u8 *pattern, int len)
{
struct socket *sock = s->sock;
- struct kvec iv[3];
+ struct iovec iv[3];
struct msghdr msg;
unsigned char hdr[5], crc[1];
iv[2].iov_len = 1;
memset(&msg, 0, sizeof(msg));
+ msg.msg_iovlen = 3;
+ msg.msg_iov = iv;
- return kernel_sendmsg(sock, &msg, iv, 3, 6 + len);
+ return sock_sendmsg(sock, &msg, 6 + len);
}
static int rfcomm_send_credits(struct rfcomm_session *s, u8 addr, u8 credits)
static int rfcomm_add_listener(bdaddr_t *ba)
{
struct sockaddr_l2 addr;
+ struct l2cap_options opts;
struct socket *sock;
- struct sock *sk;
struct rfcomm_session *s;
- int err = 0;
+ int size, err = 0;
/* Create socket */
err = rfcomm_l2sock_create(&sock);
}
/* Set L2CAP options */
- sk = sock->sk;
- lock_sock(sk);
- l2cap_pi(sk)->imtu = RFCOMM_MAX_L2CAP_MTU;
- release_sock(sk);
+ size = sizeof(opts);
+ sock->ops->getsockopt(sock, SOL_L2CAP, L2CAP_OPTIONS, (void *)&opts, &size);
+
+ opts.imtu = RFCOMM_MAX_L2CAP_MTU;
+ sock->ops->setsockopt(sock, SOL_L2CAP, L2CAP_OPTIONS, (void *)&opts, size);
/* Start listening on the socket */
err = sock->ops->listen(sock, 10);
set_user_nice(current, -10);
current->flags |= PF_NOFREEZE;
+ set_fs(KERNEL_DS);
+
BT_DBG("");
rfcomm_add_listener(BDADDR_ANY);
return &br->statistics;
}
-int br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+static int __br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct net_bridge *br = netdev_priv(dev);
- const unsigned char *dest = skb->data;
+ struct net_bridge *br;
+ unsigned char *dest;
struct net_bridge_fdb_entry *dst;
+ br = dev->priv;
br->statistics.tx_packets++;
br->statistics.tx_bytes += skb->len;
- skb->mac.raw = skb->data;
+ dest = skb->mac.raw = skb->data;
skb_pull(skb, ETH_HLEN);
- rcu_read_lock();
- if (dest[0] & 1)
+ if (dest[0] & 1) {
br_flood_deliver(br, skb, 0);
- else if ((dst = __br_fdb_get(br, dest)) != NULL)
+ return 0;
+ }
+
+ if ((dst = br_fdb_get(br, dest)) != NULL) {
br_deliver(dst->dst, skb);
- else
- br_flood_deliver(br, skb, 0);
+ br_fdb_put(dst);
+ return 0;
+ }
- rcu_read_unlock();
+ br_flood_deliver(br, skb, 0);
return 0;
}
+int br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ int ret;
+
+ rcu_read_lock();
+ ret = __br_dev_xmit(skb, dev);
+ rcu_read_unlock();
+
+ return ret;
+}
+
static int br_dev_open(struct net_device *dev)
{
netif_start_queue(dev);
static __inline__ void fdb_delete(struct net_bridge_fdb_entry *f)
{
- hlist_del_rcu(&f->hlist);
+ hlist_del(&f->hlist);
if (!f->is_static)
- list_del(&f->u.age_list);
+ list_del(&f->age_list);
br_fdb_put(f);
}
struct net_bridge *br = p->br;
int i;
- spin_lock_bh(&br->hash_lock);
+ write_lock_bh(&br->hash_lock);
/* Search all chains since old address/hash is unknown */
for (i = 0; i < BR_HASH_SIZE; i++) {
fdb_insert(br, p, newaddr, 1);
- spin_unlock_bh(&br->hash_lock);
+ write_unlock_bh(&br->hash_lock);
}
void br_fdb_cleanup(unsigned long _data)
struct list_head *l, *n;
unsigned long delay;
- spin_lock_bh(&br->hash_lock);
+ write_lock_bh(&br->hash_lock);
delay = hold_time(br);
list_for_each_safe(l, n, &br->age_list) {
- struct net_bridge_fdb_entry *f;
- unsigned long expires;
-
- f = list_entry(l, struct net_bridge_fdb_entry, u.age_list);
- expires = f->ageing_timer + delay;
+ struct net_bridge_fdb_entry *f
+ = list_entry(l, struct net_bridge_fdb_entry, age_list);
+ unsigned long expires = f->ageing_timer + delay;
if (time_before_eq(expires, jiffies)) {
WARN_ON(f->is_static);
break;
}
}
- spin_unlock_bh(&br->hash_lock);
+ write_unlock_bh(&br->hash_lock);
}
void br_fdb_delete_by_port(struct net_bridge *br, struct net_bridge_port *p)
{
int i;
- spin_lock_bh(&br->hash_lock);
+ write_lock_bh(&br->hash_lock);
for (i = 0; i < BR_HASH_SIZE; i++) {
struct hlist_node *h, *g;
skip_delete: ;
}
}
- spin_unlock_bh(&br->hash_lock);
+ write_unlock_bh(&br->hash_lock);
}
-/* No locking or refcounting, assumes caller has no preempt (rcu_read_lock) */
-struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br,
- const unsigned char *addr)
+struct net_bridge_fdb_entry *br_fdb_get(struct net_bridge *br, unsigned char *addr)
{
struct hlist_node *h;
- struct net_bridge_fdb_entry *fdb;
- hlist_for_each_entry_rcu(fdb, h, &br->hash[br_mac_hash(addr)], hlist) {
+ read_lock_bh(&br->hash_lock);
+
+ hlist_for_each(h, &br->hash[br_mac_hash(addr)]) {
+ struct net_bridge_fdb_entry *fdb
+ = hlist_entry(h, struct net_bridge_fdb_entry, hlist);
+
if (!memcmp(fdb->addr.addr, addr, ETH_ALEN)) {
- if (unlikely(has_expired(br, fdb)))
- break;
+ if (has_expired(br, fdb))
+ goto ret_null;
+
+ atomic_inc(&fdb->use_count);
+ read_unlock_bh(&br->hash_lock);
return fdb;
}
}
-
+ ret_null:
+ read_unlock_bh(&br->hash_lock);
return NULL;
}
-/* Interface used by ATM hook that keeps a ref count */
-struct net_bridge_fdb_entry *br_fdb_get(struct net_bridge *br,
- unsigned char *addr)
-{
- struct net_bridge_fdb_entry *fdb;
-
- rcu_read_lock();
- fdb = __br_fdb_get(br, addr);
- if (fdb)
- atomic_inc(&fdb->use_count);
- rcu_read_unlock();
- return fdb;
-}
-
-static void fdb_rcu_free(struct rcu_head *head)
-{
- struct net_bridge_fdb_entry *ent
- = container_of(head, struct net_bridge_fdb_entry, u.rcu);
- kmem_cache_free(br_fdb_cache, ent);
-}
-
-/* Set entry up for deletion with RCU */
void br_fdb_put(struct net_bridge_fdb_entry *ent)
{
if (atomic_dec_and_test(&ent->use_count))
- call_rcu(&ent->u.rcu, fdb_rcu_free);
+ kmem_cache_free(br_fdb_cache, ent);
}
/*
memset(buf, 0, maxnum*sizeof(struct __fdb_entry));
- rcu_read_lock();
+ read_lock_bh(&br->hash_lock);
for (i = 0; i < BR_HASH_SIZE; i++) {
- hlist_for_each_entry_rcu(f, h, &br->hash[i], hlist) {
+ hlist_for_each_entry(f, h, &br->hash[i], hlist) {
if (num >= maxnum)
goto out;
}
out:
- rcu_read_unlock();
+ read_unlock_bh(&br->hash_lock);
return num;
}
return 0;
/* move to end of age list */
- list_del(&fdb->u.age_list);
+ list_del(&fdb->age_list);
goto update;
}
}
memcpy(fdb->addr.addr, addr, ETH_ALEN);
atomic_set(&fdb->use_count, 1);
- hlist_add_head_rcu(&fdb->hlist, &br->hash[hash]);
+ hlist_add_head(&fdb->hlist, &br->hash[hash]);
if (!timer_pending(&br->gc_timer)) {
br->gc_timer.expires = jiffies + hold_time(br);
fdb->is_static = is_local;
fdb->ageing_timer = jiffies;
if (!is_local)
- list_add_tail(&fdb->u.age_list, &br->age_list);
+ list_add_tail(&fdb->age_list, &br->age_list);
return 0;
}
{
int ret;
- spin_lock_bh(&br->hash_lock);
+ write_lock_bh(&br->hash_lock);
ret = fdb_insert(br, source, addr, is_local);
- spin_unlock_bh(&br->hash_lock);
+ write_unlock_bh(&br->hash_lock);
return ret;
}
const struct sk_buff *skb)
{
if (skb->dev == p->dev ||
+ skb->len > p->dev->mtu ||
p->state != BR_STATE_FORWARDING)
return 0;
int br_dev_queue_push_xmit(struct sk_buff *skb)
{
- if (skb->len > skb->dev->mtu)
- kfree_skb(skb);
- else {
#ifdef CONFIG_BRIDGE_NETFILTER
- /* ip_refrag calls ip_fragment, doesn't copy the MAC header. */
- nf_bridge_maybe_copy_header(skb);
+ /* ip_refrag calls ip_fragment, which doesn't copy the MAC header. */
+ nf_bridge_maybe_copy_header(skb);
#endif
- skb_push(skb, ETH_HLEN);
+ skb_push(skb, ETH_HLEN);
- dev_queue_xmit(skb);
- }
+ dev_queue_xmit(skb);
return 0;
}
br->lock = SPIN_LOCK_UNLOCKED;
INIT_LIST_HEAD(&br->port_list);
- br->hash_lock = SPIN_LOCK_UNLOCKED;
+ br->hash_lock = RW_LOCK_UNLOCKED;
br->bridge_id.prio[0] = 0x80;
br->bridge_id.prio[1] = 0x00;
return ret;
}
-/* Mtu of the bridge pseudo-device 1500 or the minimum of the ports */
int br_min_mtu(const struct net_bridge *br)
{
const struct net_bridge_port *p;
spin_lock_bh(&br->lock);
br_stp_recalculate_bridge_id(br);
- if ((br->dev->flags & IFF_UP)
- && (dev->flags & IFF_UP) && netif_carrier_ok(dev))
+ if ((br->dev->flags & IFF_UP) && (dev->flags & IFF_UP))
br_stp_enable_port(p);
spin_unlock_bh(&br->lock);
- dev_set_mtu(br->dev, br_min_mtu(br));
+ br->dev->mtu = br_min_mtu(br);
}
return err;
goto out;
}
- dst = __br_fdb_get(br, dest);
+ dst = br_fdb_get(br, dest);
if (dst != NULL && dst->is_local) {
if (!passedup)
br_pass_frame_up(br, skb);
else
kfree_skb(skb);
+ br_fdb_put(dst);
goto out;
}
if (dst != NULL) {
br_forward(dst->dst, skb);
+ br_fdb_put(dst);
goto out;
}
#ifdef CONFIG_SYSCTL
static
int brnf_sysctl_call_tables(ctl_table *ctl, int write, struct file * filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
int ret;
- ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
+ ret = proc_dointvec(ctl, write, filp, buffer, lenp);
if (write && *(int *)(ctl->data))
*(int *)(ctl->data) = 1;
static int br_device_event(struct notifier_block *unused, unsigned long event, void *ptr);
-struct notifier_block br_device_notifier = {
+struct notifier_block br_device_notifier =
+{
.notifier_call = br_device_event
};
-/*
- * Handle changes in state of network devices enslaved to a bridge.
- *
- * Note: don't care about up/down if bridge itself is down, because
- * port state is checked when bridge is brought up.
- */
static int br_device_event(struct notifier_block *unused, unsigned long event, void *ptr)
{
- struct net_device *dev = ptr;
- struct net_bridge_port *p = dev->br_port;
+ struct net_device *dev;
+ struct net_bridge_port *p;
struct net_bridge *br;
- /* not a port of a bridge */
+ dev = ptr;
+ p = dev->br_port;
+
if (p == NULL)
return NOTIFY_DONE;
br = p->br;
- spin_lock_bh(&br->lock);
switch (event) {
- case NETDEV_CHANGEMTU:
- dev_set_mtu(br->dev, br_min_mtu(br));
- break;
-
case NETDEV_CHANGEADDR:
+ spin_lock_bh(&br->lock);
br_fdb_changeaddr(p, dev->dev_addr);
- br_stp_recalculate_bridge_id(br);
+ if (br->dev->flags & IFF_UP)
+ br_stp_recalculate_bridge_id(br);
+ spin_unlock_bh(&br->lock);
break;
- case NETDEV_CHANGE: /* device is up but carrier changed */
- if (!(br->dev->flags & IFF_UP))
- break;
-
- if (netif_carrier_ok(dev)) {
- if (p->state == BR_STATE_DISABLED)
- br_stp_enable_port(p);
- } else {
- if (p->state != BR_STATE_DISABLED)
- br_stp_disable_port(p);
- }
+ case NETDEV_CHANGEMTU:
+ br->dev->mtu = br_min_mtu(br);
break;
case NETDEV_DOWN:
- if (br->dev->flags & IFF_UP)
+ if (br->dev->flags & IFF_UP) {
+ spin_lock_bh(&br->lock);
br_stp_disable_port(p);
+ spin_unlock_bh(&br->lock);
+ }
break;
case NETDEV_UP:
- if (netif_carrier_ok(dev) && (br->dev->flags & IFF_UP))
+ if (br->dev->flags & IFF_UP) {
+ spin_lock_bh(&br->lock);
br_stp_enable_port(p);
+ spin_unlock_bh(&br->lock);
+ }
break;
case NETDEV_UNREGISTER:
br_del_if(br, dev);
break;
- }
- spin_unlock_bh(&br->lock);
+ }
return NOTIFY_DONE;
}
{
struct hlist_node hlist;
struct net_bridge_port *dst;
- union {
- struct list_head age_list;
- struct rcu_head rcu;
- } u;
+ struct list_head age_list;
atomic_t use_count;
unsigned long ageing_timer;
mac_addr addr;
struct list_head port_list;
struct net_device *dev;
struct net_device_stats statistics;
- spinlock_t hash_lock;
+ rwlock_t hash_lock;
struct hlist_head hash[BR_HASH_SIZE];
struct list_head age_list;
extern void br_fdb_cleanup(unsigned long arg);
extern void br_fdb_delete_by_port(struct net_bridge *br,
struct net_bridge_port *p);
-extern struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br,
- const unsigned char *addr);
extern struct net_bridge_fdb_entry *br_fdb_get(struct net_bridge *br,
- unsigned char *addr);
+ unsigned char *addr);
extern void br_fdb_put(struct net_bridge_fdb_entry *ent);
extern int br_fdb_fillbuf(struct net_bridge *br, void *buf,
unsigned long count, unsigned long off);
int isroot = br_is_root_bridge(br);
pr_info("%s: topology change detected, %s\n", br->dev->name,
- isroot ? "propagating" : "sending tcn bpdu");
+ isroot ? "propgating" : "sending tcn bpdu");
if (isroot) {
br->topology_change = 1;
br_config_bpdu_generation(br);
list_for_each_entry(p, &br->port_list, list) {
- if ((p->dev->flags & IFF_UP) && netif_carrier_ok(p->dev))
+ if (p->dev->flags & IFF_UP)
br_stp_enable_port(p);
}
#include <net/iw_handler.h>
#endif /* CONFIG_NET_RADIO */
#include <asm/current.h>
-#include <linux/vs_network.h>
/* This define, if set, will randomly drop a packet when congestion
* is more than moderate. It helps fairness in the multi-interface
*/
DEFINE_PER_CPU(struct softnet_data, softnet_data) = { 0, };
+#ifdef CONFIG_NET_FASTROUTE
+int netdev_fastroute;
+int netdev_fastroute_obstacles;
+#endif
+
#ifdef CONFIG_SYSFS
extern int netdev_sysfs_init(void);
extern int netdev_register_sysfs(struct net_device *);
int hash;
spin_lock_bh(&ptype_lock);
+#ifdef CONFIG_NET_FASTROUTE
+ if (pt->af_packet_priv) {
+ netdev_fastroute_obstacles++;
+ dev_clear_fastroute(pt->dev);
+ }
+#endif
if (pt->type == htons(ETH_P_ALL)) {
netdev_nit++;
list_add_rcu(&pt->list, &ptype_all);
list_for_each_entry(pt1, head, list) {
if (pt == pt1) {
+#ifdef CONFIG_NET_FASTROUTE
+ if (pt->af_packet_priv)
+ netdev_fastroute_obstacles--;
+#endif
list_del_rcu(&pt->list);
goto out;
}
return ret;
}
+#ifdef CONFIG_NET_FASTROUTE
+
+static void dev_do_clear_fastroute(struct net_device *dev)
+{
+ if (dev->accept_fastpath) {
+ int i;
+
+ for (i = 0; i <= NETDEV_FASTROUTE_HMASK; i++) {
+ struct dst_entry *dst;
+
+ write_lock_irq(&dev->fastpath_lock);
+ dst = dev->fastpath[i];
+ dev->fastpath[i] = NULL;
+ write_unlock_irq(&dev->fastpath_lock);
+
+ dst_release(dst);
+ }
+ }
+}
+
+void dev_clear_fastroute(struct net_device *dev)
+{
+ if (dev) {
+ dev_do_clear_fastroute(dev);
+ } else {
+ read_lock(&dev_base_lock);
+ for (dev = dev_base; dev; dev = dev->next)
+ dev_do_clear_fastroute(dev);
+ read_unlock(&dev_base_lock);
+ }
+}
+#endif
+
/**
* dev_close - shutdown an interface.
* @dev: device to shutdown
*/
dev->flags &= ~IFF_UP;
+#ifdef CONFIG_NET_FASTROUTE
+ dev_clear_fastroute(dev);
+#endif
/*
* Tell people we are down
} \
}
-static inline void qdisc_run(struct net_device *dev)
-{
- while (!netif_queue_stopped(dev) &&
- qdisc_restart(dev)<0)
- /* NOTHING */;
-}
-
/**
* dev_queue_xmit - transmit a buffer
* @skb: buffer to transmit
__get_cpu_var(netdev_rx_stat).total++;
+#ifdef CONFIG_NET_FASTROUTE
+ if (skb->pkt_type == PACKET_FASTROUTE) {
+ __get_cpu_var(netdev_rx_stat).fastroute_deferred_out++;
+ return dev_queue_xmit(skb);
+ }
+#endif
+
skb->h.raw = skb->nh.raw = skb->data;
skb->mac_len = skb->nh.raw - skb->mac.raw;
total = 0;
for (dev = dev_base; dev; dev = dev->next) {
+ if (!dev_in_nx_info(dev, current->nx_info))
+ continue;
for (i = 0; i < NPROTO; i++) {
if (gifconf_list[i]) {
int done;
static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
{
+ struct nx_info *nxi = current->nx_info;
+
+ if (!dev_in_nx_info(dev, nxi))
+ return;
if (dev->get_stats) {
struct net_device_stats *stats = dev->get_stats(dev);
if ((dev->promiscuity += inc) == 0)
dev->flags &= ~IFF_PROMISC;
if (dev->flags ^ old_flags) {
+#ifdef CONFIG_NET_FASTROUTE
+ if (dev->flags & IFF_PROMISC) {
+ netdev_fastroute_obstacles++;
+ dev_clear_fastroute(dev);
+ } else
+ netdev_fastroute_obstacles--;
+#endif
dev_mc_upload(dev);
printk(KERN_INFO "device %s %s promiscuous mode\n",
dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
spin_lock_init(&dev->ingress_lock);
#endif
+#ifdef CONFIG_NET_FASTROUTE
+ dev->fastpath_lock = RW_LOCK_UNLOCKED;
+#endif
+
ret = alloc_divert_blk(dev);
if (ret)
goto out;
while (atomic_read(&dev->refcnt) != 0) {
if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
rtnl_shlock();
+ rtnl_exlock();
/* Rebroadcast unregister notification */
notifier_call_chain(&netdev_chain,
linkwatch_run_queue();
}
+ rtnl_exunlock();
rtnl_shunlock();
rebroadcast_time = jiffies;
synchronize_net();
+#ifdef CONFIG_NET_FASTROUTE
+ dev_clear_fastroute(dev);
+#endif
+
/* Shutdown queueing discipline. */
dev_shutdown(dev);
EXPORT_SYMBOL(dev_remove_pack);
EXPORT_SYMBOL(dev_set_allmulti);
EXPORT_SYMBOL(dev_set_promiscuity);
-EXPORT_SYMBOL(dev_change_flags);
-EXPORT_SYMBOL(dev_set_mtu);
EXPORT_SYMBOL(free_netdev);
EXPORT_SYMBOL(netdev_boot_setup_check);
EXPORT_SYMBOL(netdev_set_master);
#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
EXPORT_SYMBOL(br_handle_frame_hook);
#endif
-
+/* for 801q VLAN support */
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+EXPORT_SYMBOL(dev_change_flags);
+#endif
#ifdef CONFIG_KMOD
EXPORT_SYMBOL(dev_load);
#endif
EXPORT_SYMBOL(netdev_register_fc);
EXPORT_SYMBOL(netdev_unregister_fc);
#endif
+#ifdef CONFIG_NET_FASTROUTE
+EXPORT_SYMBOL(netdev_fastroute);
+EXPORT_SYMBOL(netdev_fastroute_obstacles);
+#endif
#ifdef CONFIG_NET_CLS_ACT
EXPORT_SYMBOL(ing_filter);
if (copy_to_user(useraddr, ®s, sizeof(regs)))
goto out;
useraddr += offsetof(struct ethtool_regs, data);
- if (copy_to_user(useraddr, regbuf, regs.len))
+ if (copy_to_user(useraddr, regbuf, reglen))
goto out;
ret = 0;
clear_bit(LW_RUNNING, &linkwatch_flags);
rtnl_shlock();
+ rtnl_exlock();
linkwatch_run_queue();
+ rtnl_exunlock();
rtnl_shunlock();
}
for (opti = 0; opti < (ip->ihl - sizeof(struct iphdr) / 4); opti++)
printk(" O=0x%8.8X", *opt++);
- printk(" MARK=%lu (0x%lu)",
- (long unsigned int)skb->nfmark,
- (long unsigned int)skb->nfmark);
printk("\n");
}
}
EXPORT_SYMBOL(nf_unregister_hook);
EXPORT_SYMBOL(nf_unregister_queue_handler);
EXPORT_SYMBOL(nf_unregister_sockopt);
-#ifdef CONFIG_NETFILTER_DEBUG
-EXPORT_SYMBOL(nf_dump_skb);
-#endif
*/
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/types.h>
MODULE_AUTHOR("Robert Olsson <robert.olsson@its.uu.se");
MODULE_DESCRIPTION("Packet Generator tool");
MODULE_LICENSE("GPL");
-module_param(count_d, int, 0);
-module_param(ipg_d, int, 0);
-module_param(cpu_speed, int, 0);
-module_param(clone_skb_d, int, 0);
+MODULE_PARM(count_d, "i");
+MODULE_PARM(ipg_d, "i");
+MODULE_PARM(cpu_speed, "i");
+MODULE_PARM(clone_skb_d, "i");
void rtnl_lock(void)
{
rtnl_shlock();
+ rtnl_exlock();
}
void rtnl_unlock(void)
{
+ rtnl_exunlock();
rtnl_shunlock();
netdev_run_todo();
for (dev=dev_base, idx=0; dev; dev = dev->next, idx++) {
if (idx < s_idx)
continue;
+ if (!dev_in_nx_info(dev, current->nx_info))
+ continue;
if (rtnetlink_fill_ifinfo(skb, dev, RTM_NEWLINK, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, 0) <= 0)
break;
}
struct sk_buff *skb;
int size = NLMSG_GOODSIZE;
+ if (!dev_in_nx_info(dev, current->nx_info))
+ return;
skb = alloc_skb(size, GFP_KERNEL);
if (!skb)
return;
struct rtnetlink_link *link_tab;
struct rtattr *rta[RTATTR_MAX];
+ int exclusive = 0;
int sz_idx, kind;
int min_len;
int family;
return -1;
}
+ if (kind != 2) {
+ if (rtnl_exlock_nowait()) {
+ *errp = 0;
+ return -1;
+ }
+ exclusive = 1;
+ }
+
memset(&rta, 0, sizeof(rta));
min_len = rtm_min[sz_idx];
goto err_inval;
err = link->doit(skb, nlh, (void *)&rta);
+ if (exclusive)
+ rtnl_exunlock();
*errp = err;
return err;
err_inval:
+ if (exclusive)
+ rtnl_exunlock();
*errp = -EINVAL;
return -1;
}
#endif
#endif
- C(xid);
C(truesize);
atomic_set(&n->users, 1);
C(head);
#endif
new->tc_index = old->tc_index;
#endif
- new->xid = old->xid;
atomic_set(&new->users, 1);
}
return -EFAULT;
}
-/* Keep iterating until skb_iter_next returns false. */
-void skb_iter_first(const struct sk_buff *skb, struct skb_iter *i)
-{
- i->len = skb_headlen(skb);
- i->data = (unsigned char *)skb->data;
- i->nextfrag = 0;
- i->fraglist = NULL;
-}
-
-int skb_iter_next(const struct sk_buff *skb, struct skb_iter *i)
-{
- /* Unmap previous, if not head fragment. */
- if (i->nextfrag)
- kunmap_skb_frag(i->data);
-
- if (i->fraglist) {
- fraglist:
- /* We're iterating through fraglist. */
- if (i->nextfrag < skb_shinfo(i->fraglist)->nr_frags) {
- i->data = kmap_skb_frag(&skb_shinfo(i->fraglist)
- ->frags[i->nextfrag]);
- i->len = skb_shinfo(i->fraglist)->frags[i->nextfrag]
- .size;
- i->nextfrag++;
- return 1;
- }
- /* Fragments with fragments? Too hard! */
- BUG_ON(skb_shinfo(i->fraglist)->frag_list);
- i->fraglist = i->fraglist->next;
- if (!i->fraglist)
- goto end;
-
- i->len = skb_headlen(i->fraglist);
- i->data = i->fraglist->data;
- i->nextfrag = 0;
- return 1;
- }
-
- if (i->nextfrag < skb_shinfo(skb)->nr_frags) {
- i->data = kmap_skb_frag(&skb_shinfo(skb)->frags[i->nextfrag]);
- i->len = skb_shinfo(skb)->frags[i->nextfrag].size;
- i->nextfrag++;
- return 1;
- }
-
- i->fraglist = skb_shinfo(skb)->frag_list;
- if (i->fraglist)
- goto fraglist;
-
-end:
- /* Bug trap for callers */
- i->data = NULL;
- return 0;
-}
-
-void skb_iter_abort(const struct sk_buff *skb, struct skb_iter *i)
-{
- /* Unmap previous, if not head fragment. */
- if (i->data && i->nextfrag)
- kunmap_skb_frag(i->data);
- /* Bug trap for callers */
- i->data = NULL;
-}
-
/* Checksum skb data. */
unsigned int skb_checksum(const struct sk_buff *skb, int offset,
EXPORT_SYMBOL(skb_unlink);
EXPORT_SYMBOL(skb_append);
EXPORT_SYMBOL(skb_split);
-EXPORT_SYMBOL(skb_iter_first);
-EXPORT_SYMBOL(skb_iter_next);
-EXPORT_SYMBOL(skb_iter_abort);
clear_bit(SOCK_PASS_CRED, &sock->flags);
break;
- case SO_SETXID:
- if (current->xid) {
- ret = -EPERM;
- break;
- }
- if (val < 0 || val > MAX_S_CONTEXT) {
- ret = -EINVAL;
- break;
- }
- sk->sk_xid = val;
- break;
-
case SO_TIMESTAMP:
sk->sk_rcvtstamp = valbool;
if (valbool)
printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n",
__FUNCTION__, atomic_read(&sk->sk_omem_alloc));
+ /*
+ * If sendmsg cached page exists, toss it.
+ */
+ if (sk->sk_sndmsg_page) {
+ __free_page(sk->sk_sndmsg_page);
+ sk->sk_sndmsg_page = NULL;
+ }
+
security_sk_free(sk);
BUG_ON(sk->sk_vx_info);
BUG_ON(sk->sk_nx_info);
+/* clr_vx_info(&sk->sk_vx_info);
+ clr_nx_info(&sk->sk_nx_info); */
kmem_cache_free(sk->sk_slab, sk);
module_put(owner);
}
ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
{
ssize_t res;
- struct msghdr msg = {.msg_flags = flags};
- struct kvec iov;
- char *kaddr = kmap(page);
- iov.iov_base = kaddr + offset;
+ struct msghdr msg;
+ struct iovec iov;
+ mm_segment_t old_fs;
+ char *kaddr;
+
+ kaddr = kmap(page);
+
+ msg.msg_name = NULL;
+ msg.msg_namelen = 0;
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+ msg.msg_control = NULL;
+ msg.msg_controllen = 0;
+ msg.msg_flags = flags;
+
+ /* This cast is ok because of the "set_fs(KERNEL_DS)" */
+ iov.iov_base = (void __user *) (kaddr + offset);
iov.iov_len = size;
- res = kernel_sendmsg(sock, &msg, &iov, 1, size);
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ res = sock_sendmsg(sock, &msg, size);
+ set_fs(old_fs);
+
kunmap(page);
return res;
}
.mode = 0644,
.proc_handler = &proc_dointvec
},
+#ifdef CONFIG_NET_FASTROUTE
+ {
+ .ctl_name = NET_CORE_FASTROUTE,
+ .procname = "netdev_fastroute",
+ .data = &netdev_fastroute,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec
+ },
+#endif
{
.ctl_name = NET_CORE_MSG_COST,
.procname = "message_cost",
static int max_priority[] = { 127 }; /* From DECnet spec */
static int dn_forwarding_proc(ctl_table *, int, struct file *,
- void __user *, size_t *, loff_t *);
+ void __user *, size_t *);
static int dn_forwarding_sysctl(ctl_table *table, int __user *name, int nlen,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen,
static int dn_forwarding_proc(ctl_table *table, int write,
struct file *filep,
- void __user *buffer,
- size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
#ifdef CONFIG_DECNET_ROUTER
struct net_device *dev = table->extra1;
dn_db = dev->dn_ptr;
old = dn_db->parms.forwarding;
- err = proc_dointvec(table, write, filep, buffer, lenp, ppos);
+ err = proc_dointvec(table, write, filep, buffer, lenp);
if ((err >= 0) && write) {
if (dn_db->parms.forwarding < 0)
static int dn_node_address_handler(ctl_table *table, int write,
struct file *filp,
- void __user *buffer,
- size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
char addr[DN_ASCBUF_LEN];
size_t len;
dn_address dnaddr;
- if (!*lenp || (*ppos && !write)) {
+ if (!*lenp || (filp->f_pos && !write)) {
*lenp = 0;
return 0;
}
dn_dev_devices_on();
- *ppos += len;
+ filp->f_pos += len;
return 0;
}
return -EFAULT;
*lenp = len;
- *ppos += len;
+ filp->f_pos += len;
return 0;
}
static int dn_def_dev_handler(ctl_table *table, int write,
struct file * filp,
- void __user *buffer,
- size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
size_t len;
struct net_device *dev;
char devname[17];
- if (!*lenp || (*ppos && !write)) {
+ if (!*lenp || (filp->f_pos && !write)) {
*lenp = 0;
return 0;
}
dev_put(dev);
return -ENODEV;
}
- *ppos += *lenp;
+ filp->f_pos += *lenp;
return 0;
}
return -EFAULT;
*lenp = len;
- *ppos += len;
+ filp->f_pos += len;
return 0;
}
static void aun_send_response(__u32 addr, unsigned long seq, int code, int cb)
{
- struct sockaddr_in sin = {
- .sin_family = AF_INET,
- .sin_port = htons(AUN_PORT),
- .sin_addr = {.s_addr = addr}
- };
- struct aunhdr ah = {.code = code, .cb = cb, .handle = seq};
- struct kvec iov = {.iov_base = (void *)&ah, .iov_len = sizeof(ah)};
+ struct sockaddr_in sin;
+ struct iovec iov;
+ struct aunhdr ah;
struct msghdr udpmsg;
+ int err;
+ mm_segment_t oldfs;
+ memset(&sin, 0, sizeof(sin));
+ sin.sin_family = AF_INET;
+ sin.sin_port = htons(AUN_PORT);
+ sin.sin_addr.s_addr = addr;
+
+ ah.code = code;
+ ah.pad = 0;
+ ah.port = 0;
+ ah.cb = cb;
+ ah.handle = seq;
+
+ iov.iov_base = (void *)&ah;
+ iov.iov_len = sizeof(ah);
+
udpmsg.msg_name = (void *)&sin;
udpmsg.msg_namelen = sizeof(sin);
+ udpmsg.msg_iov = &iov;
+ udpmsg.msg_iovlen = 1;
udpmsg.msg_control = NULL;
udpmsg.msg_controllen = 0;
udpmsg.msg_flags=0;
- kernel_sendmsg(udpsock, &udpmsg, &iov, 1, sizeof(ah));
+ oldfs = get_fs(); set_fs(KERNEL_DS);
+ err = sock_sendmsg(udpsock, &udpmsg, sizeof(ah));
+ set_fs(oldfs);
}
source "net/ipv4/ipvs/Kconfig"
-#
-# Emulab special
-#
-
-config ICMP_IPOD
- bool "ICMP: ICMP Ping-of-Death (Emulab)"
- depends on INET && SYSCTL
- ---help---
- Support immediately rebooting upon receiving a specially
- formed ICMP type 6 packet whose payload matches a string
- configured by the administrator.
ip_input.o ip_fragment.o ip_forward.o ip_options.o \
ip_output.o ip_sockglue.o \
tcp.o tcp_input.o tcp_output.o tcp_timer.o tcp_ipv4.o tcp_minisocks.o \
- tcp_diag.o datagram.o raw.o udp.o arp.o icmp.o devinet.o af_inet.o igmp.o \
+ tcp_diag.o raw.o udp.o arp.o icmp.o devinet.o af_inet.o igmp.o \
sysctl_net_ipv4.o fib_frontend.o fib_semantics.o fib_hash.o
obj-$(CONFIG_PROC_FS) += proc.o
#ifdef CONFIG_IP_MROUTE
#include <linux/mroute.h>
#endif
-#include <linux/vs_limit.h>
DEFINE_SNMP_STAT(struct linux_mib, net_statistics);
if (inet->opt)
kfree(inet->opt);
- vx_sock_dec(sk);
- clr_vx_info(&sk->sk_vx_info);
- sk->sk_xid = -1;
- clr_nx_info(&sk->sk_nx_info);
- sk->sk_nid = -1;
-
+ BUG_ON(sk->sk_nx_info);
+ BUG_ON(sk->sk_vx_info);
dst_release(sk->sk_dst_cache);
#ifdef INET_REFCNT_DEBUG
atomic_dec(&inet_sock_nr);
if (!answer)
goto out_sk_free;
err = -EPERM;
- if ((protocol == IPPROTO_ICMP) && vx_ccaps(VXC_RAW_ICMP))
- goto override;
if (answer->capability > 0 && !capable(answer->capability))
goto out_sk_free;
-override:
err = -EPROTONOSUPPORT;
if (!protocol)
goto out_sk_free;
set_vx_info(&sk->sk_vx_info, current->vx_info);
sk->sk_xid = vx_current_xid();
- vx_sock_inc(sk);
set_nx_info(&sk->sk_nx_info, current->nx_info);
sk->sk_nid = nx_current_nid();
!(current->flags & PF_EXITING))
timeout = sk->sk_lingertime;
sock->sk = NULL;
- vx_sock_dec(sk);
clr_vx_info(&sk->sk_vx_info);
- sk->sk_xid = -1;
clr_nx_info(&sk->sk_nx_info);
- sk->sk_nid = -1;
sk->sk_prot->close(sk, timeout);
}
return 0;
unsigned short snum;
int chk_addr_ret;
int err;
+ __u32 s_addr; /* Address used for validation */
+ __u32 s_addr1;
+ __u32 s_addr2 = 0xffffffffl; /* Optional address of the socket */
+ struct nx_info *nxi = sk->sk_nx_info;
/* If the socket has its own bind function then use it. (RAW) */
if (sk->sk_prot->bind) {
if (addr_len < sizeof(struct sockaddr_in))
goto out;
- chk_addr_ret = inet_addr_type(addr->sin_addr.s_addr);
+ s_addr = s_addr1 = addr->sin_addr.s_addr;
+ nxdprintk("inet_bind(%p) %p,%p;%lx\n",
+ sk, nx_info, sk->sk_socket,
+ (sk->sk_socket?sk->sk_socket->flags:0));
+ if (nxi) {
+ __u32 v4_bcast = nxi->v4_bcast;
+ __u32 ipv4root = nxi->ipv4[0];
+ int nbipv4 = nxi->nbipv4;
+ if (s_addr == 0) {
+ s_addr = ipv4root;
+ if (nbipv4 > 1)
+ s_addr1 = 0;
+ else {
+ s_addr1 = ipv4root;
+ }
+ s_addr2 = v4_bcast;
+ } else if (s_addr == 0x0100007f) {
+ s_addr = s_addr1 = ipv4root;
+ } else if (s_addr != v4_bcast) {
+ int i;
+ for (i=0; i<nbipv4; i++) {
+ if (s_addr == nxi->ipv4[i])
+ break;
+ }
+ if (i == nbipv4) {
+ return -EADDRNOTAVAIL;
+ }
+ }
+ }
+ chk_addr_ret = inet_addr_type(s_addr);
/* Not specified by any standard per-se, however it breaks too
* many applications when removed. It is unfortunate since
err = -EADDRNOTAVAIL;
if (!sysctl_ip_nonlocal_bind &&
!inet->freebind &&
- addr->sin_addr.s_addr != INADDR_ANY &&
+ s_addr != INADDR_ANY &&
chk_addr_ret != RTN_LOCAL &&
chk_addr_ret != RTN_MULTICAST &&
chk_addr_ret != RTN_BROADCAST)
if (sk->sk_state != TCP_CLOSE || inet->num)
goto out_release_sock;
- inet->rcv_saddr = inet->saddr = addr->sin_addr.s_addr;
+ inet->rcv_saddr = inet->saddr = s_addr1;
+ inet->rcv_saddr2 = s_addr2;
if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
inet->saddr = 0; /* Use device */
#include <linux/config.h>
#include <linux/module.h>
+#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/ah.h>
iph->tos = top_iph->tos;
iph->ttl = top_iph->ttl;
iph->frag_off = top_iph->frag_off;
+ iph->daddr = top_iph->daddr;
if (top_iph->ihl != 5) {
- iph->daddr = top_iph->daddr;
memcpy(iph+1, top_iph+1, top_iph->ihl*4 - sizeof(struct iphdr));
err = ip_clear_mutable_options(top_iph, &top_iph->daddr);
if (err)
top_iph->tos = iph->tos;
top_iph->ttl = iph->ttl;
top_iph->frag_off = iph->frag_off;
- if (top_iph->ihl != 5) {
- top_iph->daddr = iph->daddr;
+ top_iph->daddr = iph->daddr;
+ if (top_iph->ihl != 5)
memcpy(top_iph+1, iph+1, top_iph->ihl*4 - sizeof(struct iphdr));
- }
ip_send_check(top_iph);
if (ip_route_output_key(&rt, &fl) < 0)
return 1;
if (rt->u.dst.dev != dev) {
- NET_INC_STATS_BH(LINUX_MIB_ARPFILTER);
+ NET_INC_STATS_BH(ArpFilter);
flag = 1;
}
ip_rt_put(rt);
+++ /dev/null
-/*
- * common UDP/RAW code
- * Linux INET implementation
- *
- * Authors:
- * Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/config.h>
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/ip.h>
-#include <linux/in.h>
-#include <net/sock.h>
-#include <net/tcp.h>
-#include <net/route.h>
-
-int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
-{
- struct inet_opt *inet = inet_sk(sk);
- struct sockaddr_in *usin = (struct sockaddr_in *) uaddr;
- struct rtable *rt;
- u32 saddr;
- int oif;
- int err;
-
-
- if (addr_len < sizeof(*usin))
- return -EINVAL;
-
- if (usin->sin_family != AF_INET)
- return -EAFNOSUPPORT;
-
- sk_dst_reset(sk);
-
- oif = sk->sk_bound_dev_if;
- saddr = inet->saddr;
- if (MULTICAST(usin->sin_addr.s_addr)) {
- if (!oif)
- oif = inet->mc_index;
- if (!saddr)
- saddr = inet->mc_addr;
- }
- err = ip_route_connect(&rt, usin->sin_addr.s_addr, saddr,
- RT_CONN_FLAGS(sk), oif,
- sk->sk_protocol,
- inet->sport, usin->sin_port, sk);
- if (err)
- return err;
- if ((rt->rt_flags & RTCF_BROADCAST) && !sock_flag(sk, SOCK_BROADCAST)) {
- ip_rt_put(rt);
- return -EACCES;
- }
- if (!inet->saddr)
- inet->saddr = rt->rt_src; /* Update source address */
- if (!inet->rcv_saddr)
- inet->rcv_saddr = rt->rt_src;
- inet->daddr = rt->rt_dst;
- inet->dport = usin->sin_port;
- sk->sk_state = TCP_ESTABLISHED;
- inet->id = jiffies;
-
- sk_dst_set(sk, &rt->u.dst);
- return(0);
-}
-
-EXPORT_SYMBOL(ip4_datagram_connect);
-
return rc;
}
+/*
+ Check that a device is not member of the ipv4root assigned to the process
+ Return true if this is the case
+
+ If the process is not bound to specific IP, then it returns 0 (all
+ interface are fine).
+*/
+static inline int devinet_notiproot (struct in_ifaddr *ifa)
+{
+ int ret = 0;
+ struct nx_info *nxi;
+
+ if ((nxi = current->nx_info)) {
+ int i;
+ int nbip = nxi->nbipv4;
+ __u32 addr = ifa->ifa_local;
+ ret = 1;
+ for (i=0; i<nbip; i++) {
+ if(nxi->ipv4[i] == addr) {
+ ret = 0;
+ break;
+ }
+ }
+ }
+ return ret;
+}
+
int devinet_ioctl(unsigned int cmd, void __user *arg)
{
ret = -EADDRNOTAVAIL;
if (!ifa && cmd != SIOCSIFADDR && cmd != SIOCSIFFLAGS)
goto done;
+ if (!ifa_in_nx_info(ifa, current->nx_info))
+ goto done;
switch(cmd) {
case SIOCGIFADDR: /* Get interface address */
goto out;
for (; ifa; ifa = ifa->ifa_next) {
+ if (!ifa_in_nx_info(ifa, current->nx_info))
+ continue;
if (!buf) {
done += sizeof(ifr);
continue;
read_lock(&in_dev->lock);
for (ifa = in_dev->ifa_list, ip_idx = 0; ifa;
ifa = ifa->ifa_next, ip_idx++) {
+ if (!ifa_in_nx_info(ifa, current->nx_info))
+ continue;
if (ip_idx < s_ip_idx)
continue;
if (inet_fill_ifaddr(skb, ifa, NETLINK_CB(cb->skb).pid,
static int devinet_sysctl_forward(ctl_table *ctl, int write,
struct file* filp, void __user *buffer,
- size_t *lenp, loff_t *ppos)
+ size_t *lenp)
{
int *valp = ctl->data;
int val = *valp;
- int ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
+ int ret = proc_dointvec(ctl, write, filp, buffer, lenp);
if (write && *valp != val) {
if (valp == &ipv4_devconf.forwarding)
int ipv4_doint_and_flush(ctl_table *ctl, int write,
struct file* filp, void __user *buffer,
- size_t *lenp, loff_t *ppos)
+ size_t *lenp)
{
int *valp = ctl->data;
int val = *valp;
- int ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
+ int ret = proc_dointvec(ctl, write, filp, buffer, lenp);
if (write && *valp != val)
rt_cache_flush(0);
#include <linux/config.h>
#include <linux/module.h>
+#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
return flags;
}
+extern int dev_in_nx_info(struct net_device *, struct nx_info *);
+
/*
* This outputs /proc/net/route.
*
mask = FZ_MASK(iter->zone);
flags = fib_flag_trans(f->fn_type, f->fn_state & FN_S_ZOMBIE,
mask, fi);
- if (fi)
+ if (fi && dev_in_nx_info(fi->fib_dev, current->nx_info))
snprintf(bf, sizeof(bf),
"%s\t%08X\t%08X\t%04X\t%d\t%u\t%d\t%08X\t%d\t%u\t%u",
fi->fib_dev ? fi->fib_dev->name : "*", prefix,
*/
struct icmp_control {
- int output_entry; /* Field for increment on output */
- int input_entry; /* Field for increment on input */
+ int output_off; /* Field offset for increment on output */
+ int input_off; /* Field offset for increment on input */
void (*handler)(struct sk_buff *skb);
short error; /* This ICMP is classed as an error message */
};
static void icmp_out_count(int type)
{
if (type <= NR_ICMP_TYPES) {
- ICMP_INC_STATS(icmp_pointers[type].output_entry);
- ICMP_INC_STATS(ICMP_MIB_OUTMSGS);
+ ICMP_INC_STATS_FIELD(icmp_pointers[type].output_off);
+ ICMP_INC_STATS(IcmpOutMsgs);
}
}
out:
return;
out_err:
- ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
+ ICMP_INC_STATS_BH(IcmpInErrors);
goto out;
}
out:
return;
out_err:
- ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
+ ICMP_INC_STATS_BH(IcmpInErrors);
goto out;
}
out:
return;
out_err:
- ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
+ ICMP_INC_STATS_BH(IcmpInErrors);
goto out;
}
out:;
}
-#ifdef CONFIG_ICMP_IPOD
-#include <linux/reboot.h>
-
-int sysctl_icmp_ipod_version = 2;
-int sysctl_icmp_ipod_enabled = 0;
-u32 sysctl_icmp_ipod_host = 0xffffffff;
-u32 sysctl_icmp_ipod_mask = 0xffffffff;
-char sysctl_icmp_ipod_key[32+1] = { "SETMETOSOMETHINGTHIRTYTWOBYTES!!" };
-#define IPOD_CHECK_KEY \
- (sysctl_icmp_ipod_key[0] != 0)
-#define IPOD_VALID_KEY(d) \
- (strncmp(sysctl_icmp_ipod_key, (char *)(d), strlen(sysctl_icmp_ipod_key)) == 0)
-
-static void icmp_ping_of_death(struct sk_buff *skb)
-{
- struct icmphdr *icmph = skb->h.icmph;
- struct iphdr *iph = skb->nh.iph;
- int doit = 0;
-
-#if 0
- printk(KERN_INFO "IPOD: got type=6, code=%d, host=%u.%u.%u.%u\n", icmph->code, ntohs(iph->tot_len), NIPQUAD(iph->saddr));
-#endif
-
- /*
- * If IPOD not enabled or wrong ICMP code, ignore.
- */
- if (!sysctl_icmp_ipod_enabled || icmph->code != 6)
- return;
-
- /*
- * First check the source address info.
- * If host not set, ignore.
- */
- if (sysctl_icmp_ipod_host != 0xffffffff &&
- (ntohl(iph->saddr) & sysctl_icmp_ipod_mask) == sysctl_icmp_ipod_host) {
- /*
- * Now check the key if enabled.
- * If packet doesn't contain enough data or key
- * is otherwise invalid, ignore.
- */
- if (IPOD_CHECK_KEY) {
- if (pskb_may_pull(skb, sizeof(sysctl_icmp_ipod_key)-1) &&
- IPOD_VALID_KEY(skb->data))
- doit = 1;
- } else {
- doit = 1;
- }
- }
-
- if (doit) {
- sysctl_icmp_ipod_enabled = 0;
- printk(KERN_CRIT "IPOD: reboot forced by %u.%u.%u.%u...\n",
- NIPQUAD(iph->saddr));
- machine_restart(NULL);
- } else {
- printk(KERN_WARNING "IPOD: from %u.%u.%u.%u rejected\n",
- NIPQUAD(iph->saddr));
- }
-}
-#endif
-
static void icmp_discard(struct sk_buff *skb)
{
}
struct icmphdr *icmph;
struct rtable *rt = (struct rtable *)skb->dst;
- ICMP_INC_STATS_BH(ICMP_MIB_INMSGS);
+ ICMP_INC_STATS_BH(IcmpInMsgs);
switch (skb->ip_summed) {
case CHECKSUM_HW:
}
}
- ICMP_INC_STATS_BH(icmp_pointers[icmph->type].input_entry);
+ ICMP_INC_STATS_BH_FIELD(icmp_pointers[icmph->type].input_off);
icmp_pointers[icmph->type].handler(skb);
drop:
kfree_skb(skb);
return 0;
error:
- ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
+ ICMP_INC_STATS_BH(IcmpInErrors);
goto drop;
}
*/
static struct icmp_control icmp_pointers[NR_ICMP_TYPES + 1] = {
[ICMP_ECHOREPLY] = {
- .output_entry = ICMP_MIB_OUTECHOREPS,
- .input_entry = ICMP_MIB_INECHOREPS,
+ .output_off = offsetof(struct icmp_mib, IcmpOutEchoReps),
+ .input_off = offsetof(struct icmp_mib, IcmpInEchoReps),
.handler = icmp_discard,
},
[1] = {
- .output_entry = ICMP_MIB_DUMMY,
- .input_entry = ICMP_MIB_INERRORS,
+ .output_off = offsetof(struct icmp_mib, dummy),
+ .input_off = offsetof(struct icmp_mib,IcmpInErrors),
.handler = icmp_discard,
.error = 1,
},
[2] = {
- .output_entry = ICMP_MIB_DUMMY,
- .input_entry = ICMP_MIB_INERRORS,
+ .output_off = offsetof(struct icmp_mib, dummy),
+ .input_off = offsetof(struct icmp_mib,IcmpInErrors),
.handler = icmp_discard,
.error = 1,
},
[ICMP_DEST_UNREACH] = {
- .output_entry = ICMP_MIB_OUTDESTUNREACHS,
- .input_entry = ICMP_MIB_INDESTUNREACHS,
+ .output_off = offsetof(struct icmp_mib, IcmpOutDestUnreachs),
+ .input_off = offsetof(struct icmp_mib, IcmpInDestUnreachs),
.handler = icmp_unreach,
.error = 1,
},
[ICMP_SOURCE_QUENCH] = {
- .output_entry = ICMP_MIB_OUTSRCQUENCHS,
- .input_entry = ICMP_MIB_INSRCQUENCHS,
+ .output_off = offsetof(struct icmp_mib, IcmpOutSrcQuenchs),
+ .input_off = offsetof(struct icmp_mib, IcmpInSrcQuenchs),
.handler = icmp_unreach,
.error = 1,
},
[ICMP_REDIRECT] = {
- .output_entry = ICMP_MIB_OUTREDIRECTS,
- .input_entry = ICMP_MIB_INREDIRECTS,
+ .output_off = offsetof(struct icmp_mib, IcmpOutRedirects),
+ .input_off = offsetof(struct icmp_mib, IcmpInRedirects),
.handler = icmp_redirect,
.error = 1,
},
-#ifdef CONFIG_ICMP_IPOD
- [6] = {
- .output_entry = ICMP_MIB_DUMMY,
- .input_entry = ICMP_MIB_DUMMY,
- .handler = icmp_ping_of_death,
- .error = 1,
- },
-#else
[6] = {
- .output_entry = ICMP_MIB_DUMMY,
- .input_entry = ICMP_MIB_INERRORS,
+ .output_off = offsetof(struct icmp_mib, dummy),
+ .input_off = offsetof(struct icmp_mib, IcmpInErrors),
.handler = icmp_discard,
.error = 1,
},
-#endif
[7] = {
- .output_entry = ICMP_MIB_DUMMY,
- .input_entry = ICMP_MIB_INERRORS,
+ .output_off = offsetof(struct icmp_mib, dummy),
+ .input_off = offsetof(struct icmp_mib, IcmpInErrors),
.handler = icmp_discard,
.error = 1,
},
[ICMP_ECHO] = {
- .output_entry = ICMP_MIB_OUTECHOS,
- .input_entry = ICMP_MIB_INECHOS,
+ .output_off = offsetof(struct icmp_mib, IcmpOutEchos),
+ .input_off = offsetof(struct icmp_mib, IcmpInEchos),
.handler = icmp_echo,
},
[9] = {
- .output_entry = ICMP_MIB_DUMMY,
- .input_entry = ICMP_MIB_INERRORS,
+ .output_off = offsetof(struct icmp_mib, dummy),
+ .input_off = offsetof(struct icmp_mib, IcmpInErrors),
.handler = icmp_discard,
.error = 1,
},
[10] = {
- .output_entry = ICMP_MIB_DUMMY,
- .input_entry = ICMP_MIB_INERRORS,
+ .output_off = offsetof(struct icmp_mib, dummy),
+ .input_off = offsetof(struct icmp_mib, IcmpInErrors),
.handler = icmp_discard,
.error = 1,
},
[ICMP_TIME_EXCEEDED] = {
- .output_entry = ICMP_MIB_OUTTIMEEXCDS,
- .input_entry = ICMP_MIB_INTIMEEXCDS,
+ .output_off = offsetof(struct icmp_mib, IcmpOutTimeExcds),
+ .input_off = offsetof(struct icmp_mib,IcmpInTimeExcds),
.handler = icmp_unreach,
.error = 1,
},
[ICMP_PARAMETERPROB] = {
- .output_entry = ICMP_MIB_OUTPARMPROBS,
- .input_entry = ICMP_MIB_INPARMPROBS,
+ .output_off = offsetof(struct icmp_mib, IcmpOutParmProbs),
+ .input_off = offsetof(struct icmp_mib, IcmpInParmProbs),
.handler = icmp_unreach,
.error = 1,
},
[ICMP_TIMESTAMP] = {
- .output_entry = ICMP_MIB_OUTTIMESTAMPS,
- .input_entry = ICMP_MIB_INTIMESTAMPS,
+ .output_off = offsetof(struct icmp_mib, IcmpOutTimestamps),
+ .input_off = offsetof(struct icmp_mib, IcmpInTimestamps),
.handler = icmp_timestamp,
},
[ICMP_TIMESTAMPREPLY] = {
- .output_entry = ICMP_MIB_OUTTIMESTAMPREPS,
- .input_entry = ICMP_MIB_INTIMESTAMPREPS,
+ .output_off = offsetof(struct icmp_mib, IcmpOutTimestampReps),
+ .input_off = offsetof(struct icmp_mib, IcmpInTimestampReps),
.handler = icmp_discard,
},
[ICMP_INFO_REQUEST] = {
- .output_entry = ICMP_MIB_DUMMY,
- .input_entry = ICMP_MIB_DUMMY,
+ .output_off = offsetof(struct icmp_mib, dummy),
+ .input_off = offsetof(struct icmp_mib, dummy),
.handler = icmp_discard,
},
[ICMP_INFO_REPLY] = {
- .output_entry = ICMP_MIB_DUMMY,
- .input_entry = ICMP_MIB_DUMMY,
+ .output_off = offsetof(struct icmp_mib, dummy),
+ .input_off = offsetof(struct icmp_mib, dummy),
.handler = icmp_discard,
},
[ICMP_ADDRESS] = {
- .output_entry = ICMP_MIB_OUTADDRMASKS,
- .input_entry = ICMP_MIB_INADDRMASKS,
+ .output_off = offsetof(struct icmp_mib, IcmpOutAddrMasks),
+ .input_off = offsetof(struct icmp_mib, IcmpInAddrMasks),
.handler = icmp_address,
},
[ICMP_ADDRESSREPLY] = {
- .output_entry = ICMP_MIB_OUTADDRMASKREPS,
- .input_entry = ICMP_MIB_INADDRMASKREPS,
+ .output_off = offsetof(struct icmp_mib, IcmpOutAddrMaskReps),
+ .input_off = offsetof(struct icmp_mib, IcmpInAddrMaskReps),
.handler = icmp_address_reply,
},
};
static int igmp_mc_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN)
- seq_puts(seq,
- "Idx\tDevice : Count Querier\tGroup Users Timer\tReporter\n");
+ seq_printf(seq,
+ "Idx\tDevice : Count Querier\tGroup Users Timer\tReporter\n");
else {
struct ip_mc_list *im = (struct ip_mc_list *)v;
struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
{
struct ip_options * opt = &(IPCB(skb)->opt);
- IP_INC_STATS_BH(IPSTATS_MIB_OUTFORWDATAGRAMS);
+ IP_INC_STATS_BH(OutForwDatagrams);
if (unlikely(opt->optlen))
ip_forward_options(skb);
spin_unlock(&qp->lock);
ipq_put(qp);
- IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
+ IP_INC_STATS_BH(ReasmFails);
}
}
ipq_kill(qp);
- IP_INC_STATS_BH(IPSTATS_MIB_REASMTIMEOUT);
- IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
+ IP_INC_STATS_BH(ReasmTimeout);
+ IP_INC_STATS_BH(ReasmFails);
if ((qp->last_in&FIRST_IN) && qp->fragments != NULL) {
struct sk_buff *head = qp->fragments;
iph = head->nh.iph;
iph->frag_off = 0;
iph->tot_len = htons(len);
- IP_INC_STATS_BH(IPSTATS_MIB_REASMOKS);
+ IP_INC_STATS_BH(ReasmOKs);
qp->fragments = NULL;
return head;
"Oversized IP packet from %d.%d.%d.%d.\n",
NIPQUAD(qp->saddr));
out_fail:
- IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
+ IP_INC_STATS_BH(ReasmFails);
return NULL;
}
struct ipq *qp;
struct net_device *dev;
- IP_INC_STATS_BH(IPSTATS_MIB_REASMREQDS);
+ IP_INC_STATS_BH(ReasmReqds);
/* Start by cleaning up the memory. */
if (atomic_read(&ip_frag_mem) > sysctl_ipfrag_high_thresh)
return ret;
}
- IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
+ IP_INC_STATS_BH(ReasmFails);
kfree_skb(skb);
return NULL;
}
protocol = -ret;
goto resubmit;
}
- IP_INC_STATS_BH(IPSTATS_MIB_INDELIVERS);
+ IP_INC_STATS_BH(InDelivers);
} else {
if (!raw_sk) {
if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
- IP_INC_STATS_BH(IPSTATS_MIB_INUNKNOWNPROTOS);
+ IP_INC_STATS_BH(InUnknownProtos);
icmp_send(skb, ICMP_DEST_UNREACH,
ICMP_PROT_UNREACH, 0);
}
} else
- IP_INC_STATS_BH(IPSTATS_MIB_INDELIVERS);
+ IP_INC_STATS_BH(InDelivers);
kfree_skb(skb);
}
}
*/
if (skb_cow(skb, skb_headroom(skb))) {
- IP_INC_STATS_BH(IPSTATS_MIB_INDISCARDS);
+ IP_INC_STATS_BH(InDiscards);
goto drop;
}
iph = skb->nh.iph;
return dst_input(skb);
inhdr_error:
- IP_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
+ IP_INC_STATS_BH(InHdrErrors);
drop:
kfree_skb(skb);
return NET_RX_DROP;
if (skb->pkt_type == PACKET_OTHERHOST)
goto drop;
- IP_INC_STATS_BH(IPSTATS_MIB_INRECEIVES);
+ IP_INC_STATS_BH(InReceives);
if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
- IP_INC_STATS_BH(IPSTATS_MIB_INDISCARDS);
+ IP_INC_STATS_BH(InDiscards);
goto out;
}
ip_rcv_finish);
inhdr_error:
- IP_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
+ IP_INC_STATS_BH(InHdrErrors);
drop:
kfree_skb(skb);
out:
/*
* If the indicated interface is up and running, send the packet.
*/
- IP_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
+ IP_INC_STATS(OutRequests);
skb->dev = dev;
skb->protocol = htons(ETH_P_IP);
{
struct sk_buff *skb = *pskb;
- IP_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
+ IP_INC_STATS(OutRequests);
if ((skb->len > dst_pmtu(skb->dst) || skb_shinfo(skb)->frag_list) &&
!skb_shinfo(skb)->tso_size)
dst_output);
no_route:
- IP_INC_STATS(IPSTATS_MIB_OUTNOROUTES);
+ IP_INC_STATS(OutNoRoutes);
kfree_skb(skb);
return -EHOSTUNREACH;
}
}
if (err == 0) {
- IP_INC_STATS(IPSTATS_MIB_FRAGOKS);
+ IP_INC_STATS(FragOKs);
return 0;
}
kfree_skb(frag);
frag = skb;
}
- IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
+ IP_INC_STATS(FragFails);
return err;
}
* Put this fragment into the sending queue.
*/
- IP_INC_STATS(IPSTATS_MIB_FRAGCREATES);
+ IP_INC_STATS(FragCreates);
iph->tot_len = htons(len + hlen);
goto fail;
}
kfree_skb(skb);
- IP_INC_STATS(IPSTATS_MIB_FRAGOKS);
+ IP_INC_STATS(FragOKs);
return err;
fail:
kfree_skb(skb);
- IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
+ IP_INC_STATS(FragFails);
return err;
}
error:
inet->cork.length -= length;
- IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
+ IP_INC_STATS(OutDiscards);
return err;
}
error:
inet->cork.length -= size;
- IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
+ IP_INC_STATS(OutDiscards);
return err;
}
return err;
error:
- IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
+ IP_INC_STATS(OutDiscards);
goto out;
}
#include <asm/scatterlist.h>
#include <linux/crypto.h>
#include <linux/pfkeyv2.h>
+#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/icmp.h>
{
struct ip_options * opt = &(IPCB(skb)->opt);
- IP_INC_STATS_BH(IPSTATS_MIB_OUTFORWDATAGRAMS);
+ IP_INC_STATS_BH(OutForwDatagrams);
if (unlikely(opt->optlen))
ip_forward_options(skb);
to blackhole.
*/
- IP_INC_STATS_BH(IPSTATS_MIB_FRAGFAILS);
+ IP_INC_STATS_BH(FragFails);
ip_rt_put(rt);
goto out_free;
}
static int
proc_do_defense_mode(ctl_table *table, int write, struct file * filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
int *valp = table->data;
int val = *valp;
int rc;
- rc = proc_dointvec(table, write, filp, buffer, lenp, ppos);
+ rc = proc_dointvec(table, write, filp, buffer, lenp);
if (write && (*valp != val)) {
if ((*valp < 0) || (*valp > 3)) {
/* Restore the correct value */
static int
proc_do_sync_threshold(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
int *valp = table->data;
int val[2];
/* backup the value first */
memcpy(val, valp, sizeof(val));
- rc = proc_dointvec(table, write, filp, buffer, lenp, ppos);
+ rc = proc_dointvec(table, write, filp, buffer, lenp);
if (write && (valp[0] < 0 || valp[1] < 0 || valp[0] >= valp[1])) {
/* Restore the correct value */
memcpy(valp, val, sizeof(val));
*/
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/in.h>
* First port is set to the default port.
*/
static int ports[IP_VS_APP_MAX_PORTS] = {21, 0};
-static int ports_c;
-module_param_array(ports, int, ports_c, 0);
/*
* Debug level
*/
#ifdef CONFIG_IP_VS_DEBUG
static int debug=0;
-module_param(debug, int, 0);
+MODULE_PARM(debug, "i");
#endif
+MODULE_PARM(ports, "1-" __MODULE_STRING(IP_VS_APP_MAX_PORTS) "i");
/* Dummy variable */
static int ip_vs_ftp_pasv;
while (data <= data_limit - 6) {
if (strnicmp(data, "PASV\r\n", 6) == 0) {
/* Passive mode on */
- IP_VS_DBG(1-debug, "got PASV at %zd of %zd\n",
+ IP_VS_DBG(1-debug, "got PASV at %d of %d\n",
data - data_start,
data_limit - data_start);
cp->app_data = &ip_vs_ftp_pasv;
(*pskb)->len - tcphoff,
cp->protocol,
(*pskb)->csum);
- IP_VS_DBG(11, "O-pkt: %s O-csum=%d (+%zd)\n",
+ IP_VS_DBG(11, "O-pkt: %s O-csum=%d (+%d)\n",
pp->name, tcph->check,
(char*)&(tcph->check) - (char*)tcph);
}
(*pskb)->csum);
if (udph->check == 0)
udph->check = 0xFFFF;
- IP_VS_DBG(11, "O-pkt: %s O-csum=%d (+%zd)\n",
+ IP_VS_DBG(11, "O-pkt: %s O-csum=%d (+%d)\n",
pp->name, udph->check,
(char*)&(udph->check) - (char*)udph);
}
static int
ip_vs_send_async(struct socket *sock, const char *buffer, const size_t length)
{
- struct msghdr msg = {.msg_flags = MSG_DONTWAIT|MSG_NOSIGNAL};
- struct kvec iov;
+ struct msghdr msg;
+ mm_segment_t oldfs;
+ struct iovec iov;
int len;
EnterFunction(7);
iov.iov_base = (void *)buffer;
iov.iov_len = length;
-
- len = kernel_sendmsg(sock, &msg, &iov, 1, (size_t)(length));
+ msg.msg_name = 0;
+ msg.msg_namelen = 0;
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+ msg.msg_control = NULL;
+ msg.msg_controllen = 0;
+ msg.msg_flags = MSG_DONTWAIT|MSG_NOSIGNAL;
+
+ oldfs = get_fs(); set_fs(KERNEL_DS);
+ len = sock_sendmsg(sock, &msg, (size_t)(length));
+ set_fs(oldfs);
LeaveFunction(7);
return len;
static int
ip_vs_receive(struct socket *sock, char *buffer, const size_t buflen)
{
- struct msghdr msg = {NULL,};
- struct kvec iov;
+ struct msghdr msg;
+ struct iovec iov;
int len;
+ mm_segment_t oldfs;
EnterFunction(7);
/* Receive a packet */
iov.iov_base = buffer;
iov.iov_len = (size_t)buflen;
-
- len = kernel_recvmsg(sock, &msg, &iov, 1, buflen, 0);
+ msg.msg_name = NULL;
+ msg.msg_namelen = 0;
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+ msg.msg_control = NULL;
+ msg.msg_controllen = 0;
+ msg.msg_flags = 0;
+
+ oldfs = get_fs(); set_fs(KERNEL_DS);
+ len = sock_recvmsg(sock, &msg, buflen, 0);
+ set_fs(oldfs);
if (len < 0)
return -1;
conntrack->ct_general.destroy = destroy_conntrack;
conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *tuple;
conntrack->tuplehash[IP_CT_DIR_ORIGINAL].ctrack = conntrack;
- conntrack->xid[IP_CT_DIR_ORIGINAL] = -1;
conntrack->tuplehash[IP_CT_DIR_REPLY].tuple = repl_tuple;
conntrack->tuplehash[IP_CT_DIR_REPLY].ctrack = conntrack;
- conntrack->xid[IP_CT_DIR_REPLY] = -1;
for (i=0; i < IP_CT_NUMBER; i++)
conntrack->infos[i].master = &conntrack->ct_general;
#define MAX_PORTS 8
static int ports[MAX_PORTS];
static int ports_c;
+#ifdef MODULE_PARM
MODULE_PARM(ports, "1-" __MODULE_STRING(MAX_PORTS) "i");
+#endif
static int loose;
MODULE_PARM(loose, "i");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_DESCRIPTION("IRC (DCC) connection tracking helper");
MODULE_LICENSE("GPL");
+#ifdef MODULE_PARM
MODULE_PARM(ports, "1-" __MODULE_STRING(MAX_PORTS) "i");
MODULE_PARM_DESC(ports, "port numbers of IRC servers");
MODULE_PARM(max_dcc_channels, "i");
MODULE_PARM_DESC(max_dcc_channels, "max number of expected DCC channels per IRC session");
MODULE_PARM(dcc_timeout, "i");
MODULE_PARM_DESC(dcc_timeout, "timeout on for unestablished DCC channels");
+#endif
static char *dccprotos[] = { "SEND ", "CHAT ", "MOVE ", "TSEND ", "SCHAT " };
#define MINMATCHLEN 5
+++ /dev/null
-/*
- * ip_conntrack_pptp.c - Version 2.0
- *
- * Connection tracking support for PPTP (Point to Point Tunneling Protocol).
- * PPTP is a a protocol for creating virtual private networks.
- * It is a specification defined by Microsoft and some vendors
- * working with Microsoft. PPTP is built on top of a modified
- * version of the Internet Generic Routing Encapsulation Protocol.
- * GRE is defined in RFC 1701 and RFC 1702. Documentation of
- * PPTP can be found in RFC 2637
- *
- * (C) 2000-2003 by Harald Welte <laforge@gnumonks.org>
- *
- * Development of this code funded by Astaro AG (http://www.astaro.com/)
- *
- * Limitations:
- * - We blindly assume that control connections are always
- * established in PNS->PAC direction. This is a violation
- * of RFFC2673
- *
- * TODO: - finish support for multiple calls within one session
- * (needs expect reservations in newnat)
- * - testing of incoming PPTP calls
- *
- * Changes:
- * 2002-02-05 - Version 1.3
- * - Call ip_conntrack_unexpect_related() from
- * pptp_timeout_related() to destroy expectations in case
- * CALL_DISCONNECT_NOTIFY or tcp fin packet was seen
- * (Philip Craig <philipc@snapgear.com>)
- * - Add Version information at module loadtime
- * 2002-02-10 - Version 1.6
- * - move to C99 style initializers
- * - remove second expectation if first arrives
- * 2004-10-22 - Version 2.0
- * - merge Mandrake's 2.6.x port with recent 2.6.x API changes
- * - fix lots of linear skb assumptions from Mandrake's port
- *
- */
-
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/netfilter.h>
-#include <linux/ip.h>
-#include <net/checksum.h>
-#include <net/tcp.h>
-
-#include <linux/netfilter_ipv4/lockhelp.h>
-#include <linux/netfilter_ipv4/ip_conntrack_helper.h>
-#include <linux/netfilter_ipv4/ip_conntrack_proto_gre.h>
-#include <linux/netfilter_ipv4/ip_conntrack_pptp.h>
-
-#define IP_CT_PPTP_VERSION "2.0"
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>");
-MODULE_DESCRIPTION("Netfilter connection tracking helper module for PPTP");
-
-DECLARE_LOCK(ip_pptp_lock);
-
-#if 0
-#include "ip_conntrack_pptp_priv.h"
-#define DEBUGP(format, args...) printk(KERN_DEBUG "%s:%s: " format, __FILE__, __FUNCTION__, ## args)
-#else
-#define DEBUGP(format, args...)
-#endif
-
-#define SECS *HZ
-#define MINS * 60 SECS
-#define HOURS * 60 MINS
-#define DAYS * 24 HOURS
-
-#define PPTP_GRE_TIMEOUT (10 MINS)
-#define PPTP_GRE_STREAM_TIMEOUT (5 DAYS)
-
-static int pptp_expectfn(struct ip_conntrack *ct)
-{
- struct ip_conntrack *master;
- struct ip_conntrack_expect *exp;
-
- DEBUGP("increasing timeouts\n");
- /* increase timeout of GRE data channel conntrack entry */
- ct->proto.gre.timeout = PPTP_GRE_TIMEOUT;
- ct->proto.gre.stream_timeout = PPTP_GRE_STREAM_TIMEOUT;
-
- master = master_ct(ct);
- if (!master) {
- DEBUGP(" no master!!!\n");
- return 0;
- }
-
- exp = ct->master;
- if (!exp) {
- DEBUGP("no expectation!!\n");
- return 0;
- }
-
- DEBUGP("completing tuples with ct info\n");
- /* we can do this, since we're unconfirmed */
- if (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.gre.key ==
- htonl(master->help.ct_pptp_info.pac_call_id)) {
- /* assume PNS->PAC */
- ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.gre.key =
- htonl(master->help.ct_pptp_info.pns_call_id);
- ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u.gre.key =
- htonl(master->help.ct_pptp_info.pns_call_id);
- } else {
- /* assume PAC->PNS */
- ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.gre.key =
- htonl(master->help.ct_pptp_info.pac_call_id);
- ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u.gre.key =
- htonl(master->help.ct_pptp_info.pac_call_id);
- }
-
- /* delete other expectation */
- if (exp->expected_list.next != &exp->expected_list) {
- struct ip_conntrack_expect *other_exp;
- struct list_head *cur_item, *next;
-
- for (cur_item = master->sibling_list.next;
- cur_item != &master->sibling_list; cur_item = next) {
- next = cur_item->next;
- other_exp = list_entry(cur_item,
- struct ip_conntrack_expect,
- expected_list);
- /* remove only if occurred at same sequence number */
- if (other_exp != exp && other_exp->seq == exp->seq) {
- DEBUGP("unexpecting other direction\n");
- ip_ct_gre_keymap_destroy(other_exp);
- ip_conntrack_unexpect_related(other_exp);
- }
- }
- }
-
- return 0;
-}
-
-/* timeout GRE data connections */
-static int pptp_timeout_related(struct ip_conntrack *ct)
-{
- struct list_head *cur_item, *next;
- struct ip_conntrack_expect *exp;
-
- /* FIXME: do we have to lock something ? */
- for (cur_item = ct->sibling_list.next;
- cur_item != &ct->sibling_list; cur_item = next) {
- next = cur_item->next;
- exp = list_entry(cur_item, struct ip_conntrack_expect,
- expected_list);
-
- ip_ct_gre_keymap_destroy(exp);
- if (!exp->sibling) {
- ip_conntrack_unexpect_related(exp);
- continue;
- }
-
- DEBUGP("setting timeout of conntrack %p to 0\n",
- exp->sibling);
- exp->sibling->proto.gre.timeout = 0;
- exp->sibling->proto.gre.stream_timeout = 0;
- /* refresh_acct will not modify counters if skb == NULL */
- ip_ct_refresh_acct(exp->sibling, 0, NULL, 0);
- }
-
- return 0;
-}
-
-/* expect GRE connections (PNS->PAC and PAC->PNS direction) */
-static inline int
-exp_gre(struct ip_conntrack *master,
- u_int32_t seq,
- u_int16_t callid,
- u_int16_t peer_callid)
-{
- struct ip_conntrack_tuple inv_tuple;
- struct ip_conntrack_tuple exp_tuples[] = {
- /* tuple in original direction, PNS->PAC */
- { .src = { .ip = master->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip,
- .u = { .gre = { .key = htonl(ntohs(peer_callid)) } }
- },
- .dst = { .ip = master->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.ip,
- .u = { .gre = { .key = htonl(ntohs(callid)) } },
- .protonum = IPPROTO_GRE
- },
- },
- /* tuple in reply direction, PAC->PNS */
- { .src = { .ip = master->tuplehash[IP_CT_DIR_REPLY].tuple.src.ip,
- .u = { .gre = { .key = htonl(ntohs(callid)) } }
- },
- .dst = { .ip = master->tuplehash[IP_CT_DIR_REPLY].tuple.dst.ip,
- .u = { .gre = { .key = htonl(ntohs(peer_callid)) } },
- .protonum = IPPROTO_GRE
- },
- }
- }, *exp_tuple;
-
- for (exp_tuple = exp_tuples; exp_tuple < &exp_tuples[2]; exp_tuple++) {
- struct ip_conntrack_expect *exp;
-
- exp = ip_conntrack_expect_alloc();
- if (exp == NULL)
- return 1;
-
- memcpy(&exp->tuple, exp_tuple, sizeof(exp->tuple));
-
- exp->mask.src.ip = 0xffffffff;
- exp->mask.src.u.all = 0;
- exp->mask.dst.u.all = 0;
- exp->mask.dst.u.gre.key = 0xffffffff;
- exp->mask.dst.ip = 0xffffffff;
- exp->mask.dst.protonum = 0xffff;
-
- exp->seq = seq;
- exp->expectfn = pptp_expectfn;
-
- exp->help.exp_pptp_info.pac_call_id = ntohs(callid);
- exp->help.exp_pptp_info.pns_call_id = ntohs(peer_callid);
-
- DEBUGP("calling expect_related ");
- DUMP_TUPLE_RAW(&exp->tuple);
-
- /* Add GRE keymap entries */
- if (ip_ct_gre_keymap_add(exp, &exp->tuple, 0) != 0) {
- kfree(exp);
- return 1;
- }
-
- invert_tuplepr(&inv_tuple, &exp->tuple);
- if (ip_ct_gre_keymap_add(exp, &inv_tuple, 1) != 0) {
- ip_ct_gre_keymap_destroy(exp);
- kfree(exp);
- return 1;
- }
-
- if (ip_conntrack_expect_related(exp, master) != 0) {
- ip_ct_gre_keymap_destroy(exp);
- kfree(exp);
- DEBUGP("cannot expect_related()\n");
- return 1;
- }
- }
-
- return 0;
-}
-
-static inline int
-pptp_inbound_pkt(struct sk_buff *skb,
- struct tcphdr *tcph,
- unsigned int ctlhoff,
- size_t datalen,
- struct ip_conntrack *ct)
-{
- struct PptpControlHeader _ctlh, *ctlh;
- unsigned int reqlen;
- union pptp_ctrl_union _pptpReq, *pptpReq;
- struct ip_ct_pptp_master *info = &ct->help.ct_pptp_info;
- u_int16_t msg, *cid, *pcid;
- u_int32_t seq;
-
- ctlh = skb_header_pointer(skb, ctlhoff, sizeof(_ctlh), &_ctlh);
- if (unlikely(!ctlh)) {
- DEBUGP("error during skb_header_pointer\n");
- return NF_ACCEPT;
- }
-
- reqlen = datalen - sizeof(struct pptp_pkt_hdr) - sizeof(_ctlh);
- pptpReq = skb_header_pointer(skb, ctlhoff+sizeof(struct pptp_pkt_hdr),
- reqlen, &_pptpReq);
- if (unlikely(!pptpReq)) {
- DEBUGP("error during skb_header_pointer\n");
- return NF_ACCEPT;
- }
-
- msg = ntohs(ctlh->messageType);
- DEBUGP("inbound control message %s\n", strMName[msg]);
-
- switch (msg) {
- case PPTP_START_SESSION_REPLY:
- if (reqlen < sizeof(_pptpReq.srep)) {
- DEBUGP("%s: short packet\n", strMName[msg]);
- break;
- }
-
- /* server confirms new control session */
- if (info->sstate < PPTP_SESSION_REQUESTED) {
- DEBUGP("%s without START_SESS_REQUEST\n",
- strMName[msg]);
- break;
- }
- if (pptpReq->srep.resultCode == PPTP_START_OK)
- info->sstate = PPTP_SESSION_CONFIRMED;
- else
- info->sstate = PPTP_SESSION_ERROR;
- break;
-
- case PPTP_STOP_SESSION_REPLY:
- if (reqlen < sizeof(_pptpReq.strep)) {
- DEBUGP("%s: short packet\n", strMName[msg]);
- break;
- }
-
- /* server confirms end of control session */
- if (info->sstate > PPTP_SESSION_STOPREQ) {
- DEBUGP("%s without STOP_SESS_REQUEST\n",
- strMName[msg]);
- break;
- }
- if (pptpReq->strep.resultCode == PPTP_STOP_OK)
- info->sstate = PPTP_SESSION_NONE;
- else
- info->sstate = PPTP_SESSION_ERROR;
- break;
-
- case PPTP_OUT_CALL_REPLY:
- if (reqlen < sizeof(_pptpReq.ocack)) {
- DEBUGP("%s: short packet\n", strMName[msg]);
- break;
- }
-
- /* server accepted call, we now expect GRE frames */
- if (info->sstate != PPTP_SESSION_CONFIRMED) {
- DEBUGP("%s but no session\n", strMName[msg]);
- break;
- }
- if (info->cstate != PPTP_CALL_OUT_REQ &&
- info->cstate != PPTP_CALL_OUT_CONF) {
- DEBUGP("%s without OUTCALL_REQ\n", strMName[msg]);
- break;
- }
- if (pptpReq->ocack.resultCode != PPTP_OUTCALL_CONNECT) {
- info->cstate = PPTP_CALL_NONE;
- break;
- }
-
- cid = &pptpReq->ocack.callID;
- pcid = &pptpReq->ocack.peersCallID;
-
- info->pac_call_id = ntohs(*cid);
-
- if (htons(info->pns_call_id) != *pcid) {
- DEBUGP("%s for unknown callid %u\n",
- strMName[msg], ntohs(*pcid));
- break;
- }
-
- DEBUGP("%s, CID=%X, PCID=%X\n", strMName[msg],
- ntohs(*cid), ntohs(*pcid));
-
- info->cstate = PPTP_CALL_OUT_CONF;
-
- seq = ntohl(tcph->seq) + sizeof(struct pptp_pkt_hdr)
- + sizeof(struct PptpControlHeader)
- + ((void *)pcid - (void *)pptpReq);
-
- if (exp_gre(ct, seq, *cid, *pcid) != 0)
- printk("ip_conntrack_pptp: error during exp_gre\n");
- break;
-
- case PPTP_IN_CALL_REQUEST:
- if (reqlen < sizeof(_pptpReq.icack)) {
- DEBUGP("%s: short packet\n", strMName[msg]);
- break;
- }
-
- /* server tells us about incoming call request */
- if (info->sstate != PPTP_SESSION_CONFIRMED) {
- DEBUGP("%s but no session\n", strMName[msg]);
- break;
- }
- pcid = &pptpReq->icack.peersCallID;
- DEBUGP("%s, PCID=%X\n", strMName[msg], ntohs(*pcid));
- info->cstate = PPTP_CALL_IN_REQ;
- info->pac_call_id = ntohs(*pcid);
- break;
-
- case PPTP_IN_CALL_CONNECT:
- if (reqlen < sizeof(_pptpReq.iccon)) {
- DEBUGP("%s: short packet\n", strMName[msg]);
- break;
- }
-
- /* server tells us about incoming call established */
- if (info->sstate != PPTP_SESSION_CONFIRMED) {
- DEBUGP("%s but no session\n", strMName[msg]);
- break;
- }
- if (info->sstate != PPTP_CALL_IN_REP
- && info->sstate != PPTP_CALL_IN_CONF) {
- DEBUGP("%s but never sent IN_CALL_REPLY\n",
- strMName[msg]);
- break;
- }
-
- pcid = &pptpReq->iccon.peersCallID;
- cid = &info->pac_call_id;
-
- if (info->pns_call_id != ntohs(*pcid)) {
- DEBUGP("%s for unknown CallID %u\n",
- strMName[msg], ntohs(*cid));
- break;
- }
-
- DEBUGP("%s, PCID=%X\n", strMName[msg], ntohs(*pcid));
- info->cstate = PPTP_CALL_IN_CONF;
-
- /* we expect a GRE connection from PAC to PNS */
- seq = ntohl(tcph->seq) + sizeof(struct pptp_pkt_hdr)
- + sizeof(struct PptpControlHeader)
- + ((void *)pcid - (void *)pptpReq);
-
- if (exp_gre(ct, seq, *cid, *pcid) != 0)
- printk("ip_conntrack_pptp: error during exp_gre\n");
-
- break;
-
- case PPTP_CALL_DISCONNECT_NOTIFY:
- if (reqlen < sizeof(_pptpReq.disc)) {
- DEBUGP("%s: short packet\n", strMName[msg]);
- break;
- }
-
- /* server confirms disconnect */
- cid = &pptpReq->disc.callID;
- DEBUGP("%s, CID=%X\n", strMName[msg], ntohs(*cid));
- info->cstate = PPTP_CALL_NONE;
-
- /* untrack this call id, unexpect GRE packets */
- pptp_timeout_related(ct);
- break;
-
- case PPTP_WAN_ERROR_NOTIFY:
- break;
-
- case PPTP_ECHO_REQUEST:
- case PPTP_ECHO_REPLY:
- /* I don't have to explain these ;) */
- break;
- default:
- DEBUGP("invalid %s (TY=%d)\n", (msg <= PPTP_MSG_MAX)
- ? strMName[msg]:strMName[0], msg);
- break;
- }
-
- return NF_ACCEPT;
-
-}
-
-static inline int
-pptp_outbound_pkt(struct sk_buff *skb,
- struct tcphdr *tcph,
- unsigned int ctlhoff,
- size_t datalen,
- struct ip_conntrack *ct)
-{
- struct PptpControlHeader _ctlh, *ctlh;
- unsigned int reqlen;
- union pptp_ctrl_union _pptpReq, *pptpReq;
- struct ip_ct_pptp_master *info = &ct->help.ct_pptp_info;
- u_int16_t msg, *cid, *pcid;
-
- ctlh = skb_header_pointer(skb, ctlhoff, sizeof(_ctlh), &_ctlh);
- if (!ctlh)
- return NF_ACCEPT;
-
- reqlen = datalen - sizeof(struct pptp_pkt_hdr) - sizeof(_ctlh);
- pptpReq = skb_header_pointer(skb, ctlhoff+sizeof(_ctlh), reqlen,
- &_pptpReq);
- if (!pptpReq)
- return NF_ACCEPT;
-
- msg = ntohs(ctlh->messageType);
- DEBUGP("outbound control message %s\n", strMName[msg]);
-
- switch (msg) {
- case PPTP_START_SESSION_REQUEST:
- /* client requests for new control session */
- if (info->sstate != PPTP_SESSION_NONE) {
- DEBUGP("%s but we already have one",
- strMName[msg]);
- }
- info->sstate = PPTP_SESSION_REQUESTED;
- break;
- case PPTP_STOP_SESSION_REQUEST:
- /* client requests end of control session */
- info->sstate = PPTP_SESSION_STOPREQ;
- break;
-
- case PPTP_OUT_CALL_REQUEST:
- if (reqlen < sizeof(_pptpReq.ocreq)) {
- DEBUGP("%s: short packet\n", strMName[msg]);
- break;
- }
-
- /* client initiating connection to server */
- if (info->sstate != PPTP_SESSION_CONFIRMED) {
- DEBUGP("%s but no session\n",
- strMName[msg]);
- break;
- }
- info->cstate = PPTP_CALL_OUT_REQ;
- /* track PNS call id */
- cid = &pptpReq->ocreq.callID;
- DEBUGP("%s, CID=%X\n", strMName[msg], ntohs(*cid));
- info->pns_call_id = ntohs(*cid);
- break;
- case PPTP_IN_CALL_REPLY:
- if (reqlen < sizeof(_pptpReq.icack)) {
- DEBUGP("%s: short packet\n", strMName[msg]);
- break;
- }
-
- /* client answers incoming call */
- if (info->cstate != PPTP_CALL_IN_REQ
- && info->cstate != PPTP_CALL_IN_REP) {
- DEBUGP("%s without incall_req\n",
- strMName[msg]);
- break;
- }
- if (pptpReq->icack.resultCode != PPTP_INCALL_ACCEPT) {
- info->cstate = PPTP_CALL_NONE;
- break;
- }
- pcid = &pptpReq->icack.peersCallID;
- if (info->pac_call_id != ntohs(*pcid)) {
- DEBUGP("%s for unknown call %u\n",
- strMName[msg], ntohs(*pcid));
- break;
- }
- DEBUGP("%s, CID=%X\n", strMName[msg], ntohs(*pcid));
- /* part two of the three-way handshake */
- info->cstate = PPTP_CALL_IN_REP;
- info->pns_call_id = ntohs(pptpReq->icack.callID);
- break;
-
- case PPTP_CALL_CLEAR_REQUEST:
- /* client requests hangup of call */
- if (info->sstate != PPTP_SESSION_CONFIRMED) {
- DEBUGP("CLEAR_CALL but no session\n");
- break;
- }
- /* FUTURE: iterate over all calls and check if
- * call ID is valid. We don't do this without newnat,
- * because we only know about last call */
- info->cstate = PPTP_CALL_CLEAR_REQ;
- break;
- case PPTP_SET_LINK_INFO:
- break;
- case PPTP_ECHO_REQUEST:
- case PPTP_ECHO_REPLY:
- /* I don't have to explain these ;) */
- break;
- default:
- DEBUGP("invalid %s (TY=%d)\n", (msg <= PPTP_MSG_MAX)?
- strMName[msg]:strMName[0], msg);
- /* unknown: no need to create GRE masq table entry */
- break;
- }
-
- return NF_ACCEPT;
-}
-
-
-/* track caller id inside control connection, call expect_related */
-static int
-conntrack_pptp_help(struct sk_buff *skb,
- struct ip_conntrack *ct, enum ip_conntrack_info ctinfo)
-
-{
- struct pptp_pkt_hdr _pptph, *pptph;
-
- struct tcphdr _tcph, *tcph;
- u_int32_t tcplen = skb->len - skb->nh.iph->ihl * 4;
- u_int32_t datalen;
- void *datalimit;
- int dir = CTINFO2DIR(ctinfo);
- struct ip_ct_pptp_master *info = &ct->help.ct_pptp_info;
- unsigned int nexthdr_off;
-
- int oldsstate, oldcstate;
- int ret;
-
- /* don't do any tracking before tcp handshake complete */
- if (ctinfo != IP_CT_ESTABLISHED
- && ctinfo != IP_CT_ESTABLISHED+IP_CT_IS_REPLY) {
- DEBUGP("ctinfo = %u, skipping\n", ctinfo);
- return NF_ACCEPT;
- }
-
- nexthdr_off = skb->nh.iph->ihl*4;
- tcph = skb_header_pointer(skb, skb->nh.iph->ihl*4, sizeof(_tcph),
- &_tcph);
- if (!tcph)
- return NF_ACCEPT;
-
- /* not a complete TCP header? */
- if (tcplen < sizeof(struct tcphdr) || tcplen < tcph->doff * 4) {
- DEBUGP("tcplen = %u\n", tcplen);
- return NF_ACCEPT;
- }
-
-
- datalen = tcplen - tcph->doff * 4;
-
- /* checksum invalid? */
- if (tcp_v4_check(tcph, tcplen, skb->nh.iph->saddr, skb->nh.iph->daddr,
- csum_partial((char *) tcph, tcplen, 0))) {
- printk(KERN_NOTICE __FILE__ ": bad csum\n");
- /* W2K PPTP server sends TCP packets with wrong checksum :(( */
- //return NF_ACCEPT;
- }
-
- if (tcph->fin || tcph->rst) {
- DEBUGP("RST/FIN received, timeouting GRE\n");
- /* can't do this after real newnat */
- info->cstate = PPTP_CALL_NONE;
-
- /* untrack this call id, unexpect GRE packets */
- pptp_timeout_related(ct);
- }
-
- nexthdr_off += tcph->doff*4;
- pptph = skb_header_pointer(skb, skb->nh.iph->ihl*4 + tcph->doff*4,
- sizeof(_pptph), &_pptph);
- if (!pptph) {
- DEBUGP("no full PPTP header, can't track\n");
- return NF_ACCEPT;
- }
-
- datalimit = (void *) pptph + datalen;
-
- /* if it's not a control message we can't do anything with it */
- if (ntohs(pptph->packetType) != PPTP_PACKET_CONTROL ||
- ntohl(pptph->magicCookie) != PPTP_MAGIC_COOKIE) {
- DEBUGP("not a control packet\n");
- return NF_ACCEPT;
- }
-
- oldsstate = info->sstate;
- oldcstate = info->cstate;
-
- LOCK_BH(&ip_pptp_lock);
-
- nexthdr_off += sizeof(_pptph);
- /* FIXME: We just blindly assume that the control connection is always
- * established from PNS->PAC. However, RFC makes no guarantee */
- if (dir == IP_CT_DIR_ORIGINAL)
- /* client -> server (PNS -> PAC) */
- ret = pptp_outbound_pkt(skb, tcph, nexthdr_off, datalen, ct);
- else
- /* server -> client (PAC -> PNS) */
- ret = pptp_inbound_pkt(skb, tcph, nexthdr_off, datalen, ct);
- DEBUGP("sstate: %d->%d, cstate: %d->%d\n",
- oldsstate, info->sstate, oldcstate, info->cstate);
- UNLOCK_BH(&ip_pptp_lock);
-
- return ret;
-}
-
-/* control protocol helper */
-static struct ip_conntrack_helper pptp = {
- .list = { NULL, NULL },
- .name = "pptp",
- .flags = IP_CT_HELPER_F_REUSE_EXPECT,
- .me = THIS_MODULE,
- .max_expected = 2,
- .timeout = 0,
- .tuple = { .src = { .ip = 0,
- .u = { .tcp = { .port =
- __constant_htons(PPTP_CONTROL_PORT) } }
- },
- .dst = { .ip = 0,
- .u = { .all = 0 },
- .protonum = IPPROTO_TCP
- }
- },
- .mask = { .src = { .ip = 0,
- .u = { .tcp = { .port = 0xffff } }
- },
- .dst = { .ip = 0,
- .u = { .all = 0 },
- .protonum = 0xffff
- }
- },
- .help = conntrack_pptp_help
-};
-
-/* ip_conntrack_pptp initialization */
-static int __init init(void)
-{
- int retcode;
-
- DEBUGP(__FILE__ ": registering helper\n");
- if ((retcode = ip_conntrack_helper_register(&pptp))) {
- printk(KERN_ERR "Unable to register conntrack application "
- "helper for pptp: %d\n", retcode);
- return -EIO;
- }
-
- printk("ip_conntrack_pptp version %s loaded\n", IP_CT_PPTP_VERSION);
- return 0;
-}
-
-static void __exit fini(void)
-{
- ip_conntrack_helper_unregister(&pptp);
- printk("ip_conntrack_pptp version %s unloaded\n", IP_CT_PPTP_VERSION);
-}
-
-module_init(init);
-module_exit(fini);
-
-EXPORT_SYMBOL(ip_pptp_lock);
+++ /dev/null
-#ifndef _IP_CT_PPTP_PRIV_H
-#define _IP_CT_PPTP_PRIV_H
-
-/* PptpControlMessageType names */
-static const char *strMName[] = {
- "UNKNOWN_MESSAGE",
- "START_SESSION_REQUEST",
- "START_SESSION_REPLY",
- "STOP_SESSION_REQUEST",
- "STOP_SESSION_REPLY",
- "ECHO_REQUEST",
- "ECHO_REPLY",
- "OUT_CALL_REQUEST",
- "OUT_CALL_REPLY",
- "IN_CALL_REQUEST",
- "IN_CALL_REPLY",
- "IN_CALL_CONNECT",
- "CALL_CLEAR_REQUEST",
- "CALL_DISCONNECT_NOTIFY",
- "WAN_ERROR_NOTIFY",
- "SET_LINK_INFO"
-};
-
-#endif
+++ /dev/null
-/*
- * ip_conntrack_proto_gre.c - Version 2.0
- *
- * Connection tracking protocol helper module for GRE.
- *
- * GRE is a generic encapsulation protocol, which is generally not very
- * suited for NAT, as it has no protocol-specific part as port numbers.
- *
- * It has an optional key field, which may help us distinguishing two
- * connections between the same two hosts.
- *
- * GRE is defined in RFC 1701 and RFC 1702, as well as RFC 2784
- *
- * PPTP is built on top of a modified version of GRE, and has a mandatory
- * field called "CallID", which serves us for the same purpose as the key
- * field in plain GRE.
- *
- * Documentation about PPTP can be found in RFC 2637
- *
- * (C) 2000-2004 by Harald Welte <laforge@gnumonks.org>
- *
- * Development of this code funded by Astaro AG (http://www.astaro.com/)
- *
- */
-
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/timer.h>
-#include <linux/netfilter.h>
-#include <linux/ip.h>
-#include <linux/in.h>
-#include <linux/list.h>
-
-#include <linux/netfilter_ipv4/lockhelp.h>
-
-DECLARE_RWLOCK(ip_ct_gre_lock);
-#define ASSERT_READ_LOCK(x) MUST_BE_READ_LOCKED(&ip_ct_gre_lock)
-#define ASSERT_WRITE_LOCK(x) MUST_BE_WRITE_LOCKED(&ip_ct_gre_lock)
-
-#include <linux/netfilter_ipv4/listhelp.h>
-#include <linux/netfilter_ipv4/ip_conntrack_protocol.h>
-#include <linux/netfilter_ipv4/ip_conntrack_helper.h>
-#include <linux/netfilter_ipv4/ip_conntrack_core.h>
-
-#include <linux/netfilter_ipv4/ip_conntrack_proto_gre.h>
-#include <linux/netfilter_ipv4/ip_conntrack_pptp.h>
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>");
-MODULE_DESCRIPTION("netfilter connection tracking protocol helper for GRE");
-
-/* shamelessly stolen from ip_conntrack_proto_udp.c */
-#define GRE_TIMEOUT (30*HZ)
-#define GRE_STREAM_TIMEOUT (180*HZ)
-
-#if 0
-#define DEBUGP(format, args...) printk(KERN_DEBUG "%s:%s: " format, __FILE__, __FUNCTION__, ## args)
-#define DUMP_TUPLE_GRE(x) printk("%u.%u.%u.%u:0x%x -> %u.%u.%u.%u:0x%x\n", \
- NIPQUAD((x)->src.ip), ntohl((x)->src.u.gre.key), \
- NIPQUAD((x)->dst.ip), ntohl((x)->dst.u.gre.key))
-#else
-#define DEBUGP(x, args...)
-#define DUMP_TUPLE_GRE(x)
-#endif
-
-/* GRE KEYMAP HANDLING FUNCTIONS */
-static LIST_HEAD(gre_keymap_list);
-
-static inline int gre_key_cmpfn(const struct ip_ct_gre_keymap *km,
- const struct ip_conntrack_tuple *t)
-{
- return ((km->tuple.src.ip == t->src.ip) &&
- (km->tuple.dst.ip == t->dst.ip) &&
- (km->tuple.dst.protonum == t->dst.protonum) &&
- (km->tuple.dst.u.all == t->dst.u.all));
-}
-
-/* look up the source key for a given tuple */
-static u_int32_t gre_keymap_lookup(struct ip_conntrack_tuple *t)
-{
- struct ip_ct_gre_keymap *km;
- u_int32_t key;
-
- READ_LOCK(&ip_ct_gre_lock);
- km = LIST_FIND(&gre_keymap_list, gre_key_cmpfn,
- struct ip_ct_gre_keymap *, t);
- if (!km) {
- READ_UNLOCK(&ip_ct_gre_lock);
- return 0;
- }
-
- key = km->tuple.src.u.gre.key;
- READ_UNLOCK(&ip_ct_gre_lock);
-
- return key;
-}
-
-/* add a single keymap entry, associate with specified expect */
-int ip_ct_gre_keymap_add(struct ip_conntrack_expect *exp,
- struct ip_conntrack_tuple *t, int reply)
-{
- struct ip_ct_gre_keymap *km;
-
- km = kmalloc(sizeof(*km), GFP_ATOMIC);
- if (!km)
- return -1;
-
- /* initializing list head should be sufficient */
- memset(km, 0, sizeof(*km));
-
- memcpy(&km->tuple, t, sizeof(*t));
-
- if (!reply)
- exp->proto.gre.keymap_orig = km;
- else
- exp->proto.gre.keymap_reply = km;
-
- DEBUGP("adding new entry %p: ", km);
- DUMP_TUPLE_GRE(&km->tuple);
-
- WRITE_LOCK(&ip_ct_gre_lock);
- list_append(&gre_keymap_list, km);
- WRITE_UNLOCK(&ip_ct_gre_lock);
-
- return 0;
-}
-
-/* change the tuple of a keymap entry (used by nat helper) */
-void ip_ct_gre_keymap_change(struct ip_ct_gre_keymap *km,
- struct ip_conntrack_tuple *t)
-{
- if (!km)
- {
- printk(KERN_WARNING
- "NULL GRE conntrack keymap change requested\n");
- return;
- }
-
- DEBUGP("changing entry %p to: ", km);
- DUMP_TUPLE_GRE(t);
-
- WRITE_LOCK(&ip_ct_gre_lock);
- memcpy(&km->tuple, t, sizeof(km->tuple));
- WRITE_UNLOCK(&ip_ct_gre_lock);
-}
-
-/* destroy the keymap entries associated with specified expect */
-void ip_ct_gre_keymap_destroy(struct ip_conntrack_expect *exp)
-{
- DEBUGP("entering for exp %p\n", exp);
- WRITE_LOCK(&ip_ct_gre_lock);
- if (exp->proto.gre.keymap_orig) {
- DEBUGP("removing %p from list\n", exp->proto.gre.keymap_orig);
- list_del(&exp->proto.gre.keymap_orig->list);
- kfree(exp->proto.gre.keymap_orig);
- exp->proto.gre.keymap_orig = NULL;
- }
- if (exp->proto.gre.keymap_reply) {
- DEBUGP("removing %p from list\n", exp->proto.gre.keymap_reply);
- list_del(&exp->proto.gre.keymap_reply->list);
- kfree(exp->proto.gre.keymap_reply);
- exp->proto.gre.keymap_reply = NULL;
- }
- WRITE_UNLOCK(&ip_ct_gre_lock);
-}
-
-
-/* PUBLIC CONNTRACK PROTO HELPER FUNCTIONS */
-
-/* invert gre part of tuple */
-static int gre_invert_tuple(struct ip_conntrack_tuple *tuple,
- const struct ip_conntrack_tuple *orig)
-{
- tuple->dst.u.gre.key = orig->src.u.gre.key;
- tuple->src.u.gre.key = orig->dst.u.gre.key;
-
- return 1;
-}
-
-/* gre hdr info to tuple */
-static int gre_pkt_to_tuple(const struct sk_buff *skb,
- unsigned int dataoff,
- struct ip_conntrack_tuple *tuple)
-{
- struct gre_hdr _grehdr, *grehdr;
- struct gre_hdr_pptp _pgrehdr, *pgrehdr;
- u_int32_t srckey;
-
- grehdr = skb_header_pointer(skb, dataoff, sizeof(_grehdr), &_grehdr);
- /* PPTP header is variable length, only need up to the call_id field */
- pgrehdr = skb_header_pointer(skb, dataoff, 8, &_pgrehdr);
-
- if (!grehdr || !pgrehdr)
- return 0;
-
- switch (grehdr->version) {
- case GRE_VERSION_1701:
- if (!grehdr->key) {
- DEBUGP("Can't track GRE without key\n");
- return 0;
- }
- tuple->dst.u.gre.key = *(gre_key(grehdr));
- break;
-
- case GRE_VERSION_PPTP:
- if (ntohs(grehdr->protocol) != GRE_PROTOCOL_PPTP) {
- DEBUGP("GRE_VERSION_PPTP but unknown proto\n");
- return 0;
- }
- tuple->dst.u.gre.key = htonl(ntohs(pgrehdr->call_id));
- break;
-
- default:
- printk(KERN_WARNING "unknown GRE version %hu\n",
- grehdr->version);
- return 0;
- }
-
- srckey = gre_keymap_lookup(tuple);
-
- tuple->src.u.gre.key = srckey;
-#if 0
- DEBUGP("found src key %x for tuple ", ntohl(srckey));
- DUMP_TUPLE_GRE(tuple);
-#endif
-
- return 1;
-}
-
-/* print gre part of tuple */
-static unsigned int gre_print_tuple(char *buffer,
- const struct ip_conntrack_tuple *tuple)
-{
- return sprintf(buffer, "srckey=0x%x dstkey=0x%x ",
- ntohl(tuple->src.u.gre.key),
- ntohl(tuple->dst.u.gre.key));
-}
-
-/* print private data for conntrack */
-static unsigned int gre_print_conntrack(char *buffer,
- const struct ip_conntrack *ct)
-{
- return sprintf(buffer, "timeout=%u, stream_timeout=%u ",
- (ct->proto.gre.timeout / HZ),
- (ct->proto.gre.stream_timeout / HZ));
-}
-
-/* Returns verdict for packet, and may modify conntrack */
-static int gre_packet(struct ip_conntrack *ct,
- const struct sk_buff *skb,
- enum ip_conntrack_info conntrackinfo)
-{
- /* If we've seen traffic both ways, this is a GRE connection.
- * Extend timeout. */
- if (ct->status & IPS_SEEN_REPLY) {
- ip_ct_refresh_acct(ct, conntrackinfo, skb,
- ct->proto.gre.stream_timeout);
- /* Also, more likely to be important, and not a probe. */
- set_bit(IPS_ASSURED_BIT, &ct->status);
- } else
- ip_ct_refresh_acct(ct, conntrackinfo, skb,
- ct->proto.gre.timeout);
-
- return NF_ACCEPT;
-}
-
-/* Called when a new connection for this protocol found. */
-static int gre_new(struct ip_conntrack *ct,
- const struct sk_buff *skb)
-{
- DEBUGP(": ");
- DUMP_TUPLE_GRE(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
-
- /* initialize to sane value. Ideally a conntrack helper
- * (e.g. in case of pptp) is increasing them */
- ct->proto.gre.stream_timeout = GRE_STREAM_TIMEOUT;
- ct->proto.gre.timeout = GRE_TIMEOUT;
-
- return 1;
-}
-
-/* Called when a conntrack entry has already been removed from the hashes
- * and is about to be deleted from memory */
-static void gre_destroy(struct ip_conntrack *ct)
-{
- struct ip_conntrack_expect *master = ct->master;
-
- DEBUGP(" entering\n");
-
- if (!master) {
- DEBUGP("no master exp for ct %p\n", ct);
- return;
- }
-
- ip_ct_gre_keymap_destroy(master);
-}
-
-/* protocol helper struct */
-static struct ip_conntrack_protocol gre = {
- .proto = IPPROTO_GRE,
- .name = "gre",
- .pkt_to_tuple = gre_pkt_to_tuple,
- .invert_tuple = gre_invert_tuple,
- .print_tuple = gre_print_tuple,
- .print_conntrack = gre_print_conntrack,
- .packet = gre_packet,
- .new = gre_new,
- .destroy = gre_destroy,
- .exp_matches_pkt = NULL,
- .me = THIS_MODULE
-};
-
-/* ip_conntrack_proto_gre initialization */
-static int __init init(void)
-{
- int retcode;
-
- if ((retcode = ip_conntrack_protocol_register(&gre))) {
- printk(KERN_ERR "Unable to register conntrack protocol "
- "helper for gre: %d\n", retcode);
- return -EIO;
- }
-
- return 0;
-}
-
-static void __exit fini(void)
-{
- struct list_head *pos, *n;
-
- /* delete all keymap entries */
- WRITE_LOCK(&ip_ct_gre_lock);
- list_for_each_safe(pos, n, &gre_keymap_list) {
- DEBUGP("deleting keymap %p at module unload time\n", pos);
- list_del(pos);
- kfree(pos);
- }
- WRITE_UNLOCK(&ip_ct_gre_lock);
-
- ip_conntrack_protocol_unregister(&gre);
-}
-
-EXPORT_SYMBOL(ip_ct_gre_keymap_add);
-EXPORT_SYMBOL(ip_ct_gre_keymap_change);
-EXPORT_SYMBOL(ip_ct_gre_keymap_destroy);
-
-module_init(init);
-module_exit(fini);
len += print_tuple(buffer + len,
&conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
proto);
- len += sprintf(buffer + len, "xid=%d ", conntrack->xid[IP_CT_DIR_ORIGINAL]);
if (!(test_bit(IPS_SEEN_REPLY_BIT, &conntrack->status)))
len += sprintf(buffer + len, "[UNREPLIED] ");
len += print_tuple(buffer + len,
&conntrack->tuplehash[IP_CT_DIR_REPLY].tuple,
proto);
- len += sprintf(buffer + len, "xid=%d ", conntrack->xid[IP_CT_DIR_REPLY]);
if (test_bit(IPS_ASSURED_BIT, &conntrack->status))
len += sprintf(buffer + len, "[ASSURED] ");
len += sprintf(buffer + len, "use=%u ",
#define MAX_PORTS 8
static int ports[MAX_PORTS];
static int ports_c;
+#ifdef MODULE_PARM
MODULE_PARM(ports, "1-" __MODULE_STRING(MAX_PORTS) "i");
MODULE_PARM_DESC(ports, "port numbers of tftp servers");
+#endif
#if 0
#define DEBUGP(format, args...) printk("%s:%s:" format, \
static int ports[MAX_PORTS];
static int ports_c;
+#ifdef MODULE_PARM
MODULE_PARM(ports, "1-" __MODULE_STRING(MAX_PORTS) "i");
+#endif
DECLARE_LOCK_EXTERN(ip_ftp_lock);
MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>");
MODULE_DESCRIPTION("IRC (DCC) NAT helper");
MODULE_LICENSE("GPL");
+#ifdef MODULE_PARM
MODULE_PARM(ports, "1-" __MODULE_STRING(MAX_PORTS) "i");
MODULE_PARM_DESC(ports, "port numbers of IRC servers");
+#endif
/* protects irc part of conntracks */
DECLARE_LOCK_EXTERN(ip_irc_lock);
+++ /dev/null
-/*
- * ip_nat_pptp.c - Version 2.0
- *
- * NAT support for PPTP (Point to Point Tunneling Protocol).
- * PPTP is a a protocol for creating virtual private networks.
- * It is a specification defined by Microsoft and some vendors
- * working with Microsoft. PPTP is built on top of a modified
- * version of the Internet Generic Routing Encapsulation Protocol.
- * GRE is defined in RFC 1701 and RFC 1702. Documentation of
- * PPTP can be found in RFC 2637
- *
- * (C) 2000-2004 by Harald Welte <laforge@gnumonks.org>
- *
- * Development of this code funded by Astaro AG (http://www.astaro.com/)
- *
- * TODO: - Support for multiple calls within one session
- * (needs netfilter newnat code)
- * - NAT to a unique tuple, not to TCP source port
- * (needs netfilter tuple reservation)
- *
- * Changes:
- * 2002-02-10 - Version 1.3
- * - Use ip_nat_mangle_tcp_packet() because of cloned skb's
- * in local connections (Philip Craig <philipc@snapgear.com>)
- * - add checks for magicCookie and pptp version
- * - make argument list of pptp_{out,in}bound_packet() shorter
- * - move to C99 style initializers
- * - print version number at module loadtime
- * 2003-09-22 - Version 1.5
- * - use SNATed tcp sourceport as callid, since we get called before
- * TCP header is mangled (Philip Craig <philipc@snapgear.com>)
- * 2004-10-22 - Version 2.0
- * - kernel 2.6.x version
- *
- */
-
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/ip.h>
-#include <linux/tcp.h>
-#include <net/tcp.h>
-#include <linux/netfilter_ipv4/ip_nat.h>
-#include <linux/netfilter_ipv4/ip_nat_rule.h>
-#include <linux/netfilter_ipv4/ip_nat_helper.h>
-#include <linux/netfilter_ipv4/ip_nat_pptp.h>
-#include <linux/netfilter_ipv4/ip_conntrack_helper.h>
-#include <linux/netfilter_ipv4/ip_conntrack_proto_gre.h>
-#include <linux/netfilter_ipv4/ip_conntrack_pptp.h>
-
-#define IP_NAT_PPTP_VERSION "2.0"
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>");
-MODULE_DESCRIPTION("Netfilter NAT helper module for PPTP");
-
-
-#if 0
-#include "ip_conntrack_pptp_priv.h"
-#define DEBUGP(format, args...) printk(KERN_DEBUG __FILE__ ":" __FUNCTION__ \
- ": " format, ## args)
-#else
-#define DEBUGP(format, args...)
-#endif
-
-static unsigned int
-pptp_nat_expected(struct sk_buff **pskb,
- unsigned int hooknum,
- struct ip_conntrack *ct,
- struct ip_nat_info *info)
-{
- struct ip_conntrack *master = master_ct(ct);
- struct ip_nat_multi_range mr;
- struct ip_ct_pptp_master *ct_pptp_info;
- struct ip_nat_pptp *nat_pptp_info;
- u_int32_t newip, newcid;
- int ret;
-
- IP_NF_ASSERT(info);
- IP_NF_ASSERT(master);
- IP_NF_ASSERT(!(info->initialized & (1 << HOOK2MANIP(hooknum))));
-
- DEBUGP("we have a connection!\n");
-
- LOCK_BH(&ip_pptp_lock);
- ct_pptp_info = &master->help.ct_pptp_info;
- nat_pptp_info = &master->nat.help.nat_pptp_info;
-
- /* need to alter GRE tuple because conntrack expectfn() used 'wrong'
- * (unmanipulated) values */
- if (HOOK2MANIP(hooknum) == IP_NAT_MANIP_DST) {
- DEBUGP("completing tuples with NAT info \n");
- /* we can do this, since we're unconfirmed */
- if (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.gre.key ==
- htonl(ct_pptp_info->pac_call_id)) {
- /* assume PNS->PAC */
- ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.gre.key =
- htonl(nat_pptp_info->pns_call_id);
- ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u.gre.key =
- htonl(nat_pptp_info->pns_call_id);
- newip = master->tuplehash[IP_CT_DIR_REPLY].tuple.src.ip;
- newcid = htonl(nat_pptp_info->pac_call_id);
- } else {
- /* assume PAC->PNS */
- ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.gre.key =
- htonl(nat_pptp_info->pac_call_id);
- ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u.gre.key =
- htonl(nat_pptp_info->pac_call_id);
- newip = master->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip;
- newcid = htonl(nat_pptp_info->pns_call_id);
- }
- } else {
- if (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.gre.key ==
- htonl(ct_pptp_info->pac_call_id)) {
- /* assume PNS->PAC */
- newip = master->tuplehash[IP_CT_DIR_REPLY].tuple.dst.ip;
- newcid = htonl(ct_pptp_info->pns_call_id);
- }
- else {
- /* assume PAC->PNS */
- newip = master->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.ip;
- newcid = htonl(ct_pptp_info->pac_call_id);
- }
- }
-
- mr.rangesize = 1;
- mr.range[0].flags = IP_NAT_RANGE_MAP_IPS | IP_NAT_RANGE_PROTO_SPECIFIED;
- mr.range[0].min_ip = mr.range[0].max_ip = newip;
- mr.range[0].min = mr.range[0].max =
- ((union ip_conntrack_manip_proto ) { newcid });
- DEBUGP("change ip to %u.%u.%u.%u\n",
- NIPQUAD(newip));
- DEBUGP("change key to 0x%x\n", ntohl(newcid));
- ret = ip_nat_setup_info(ct, &mr, hooknum);
-
- UNLOCK_BH(&ip_pptp_lock);
-
- return ret;
-
-}
-
-/* outbound packets == from PNS to PAC */
-static inline unsigned int
-pptp_outbound_pkt(struct sk_buff **pskb,
- struct ip_conntrack *ct,
- enum ip_conntrack_info ctinfo,
- struct ip_conntrack_expect *exp)
-
-{
- struct iphdr *iph = (*pskb)->nh.iph;
- struct tcphdr *tcph = (void *) iph + iph->ihl*4;
- struct pptp_pkt_hdr *pptph = (struct pptp_pkt_hdr *)
- ((void *)tcph + tcph->doff*4);
-
- struct PptpControlHeader *ctlh;
- union pptp_ctrl_union *pptpReq;
- struct ip_ct_pptp_master *ct_pptp_info = &ct->help.ct_pptp_info;
- struct ip_nat_pptp *nat_pptp_info = &ct->nat.help.nat_pptp_info;
-
- u_int16_t msg, *cid = NULL, new_callid;
-
- /* FIXME: size checks !!! */
- ctlh = (struct PptpControlHeader *) ((void *) pptph + sizeof(*pptph));
- pptpReq = (void *) ((void *) ctlh + sizeof(*ctlh));
-
- new_callid = htons(ct_pptp_info->pns_call_id);
-
- switch (msg = ntohs(ctlh->messageType)) {
- case PPTP_OUT_CALL_REQUEST:
- cid = &pptpReq->ocreq.callID;
- /* FIXME: ideally we would want to reserve a call ID
- * here. current netfilter NAT core is not able to do
- * this :( For now we use TCP source port. This breaks
- * multiple calls within one control session */
-
- /* save original call ID in nat_info */
- nat_pptp_info->pns_call_id = ct_pptp_info->pns_call_id;
-
- /* don't use tcph->source since we are at a DSTmanip
- * hook (e.g. PREROUTING) and pkt is not mangled yet */
- new_callid = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u.tcp.port;
-
- /* save new call ID in ct info */
- ct_pptp_info->pns_call_id = ntohs(new_callid);
- break;
- case PPTP_IN_CALL_REPLY:
- cid = &pptpReq->icreq.callID;
- break;
- case PPTP_CALL_CLEAR_REQUEST:
- cid = &pptpReq->clrreq.callID;
- break;
- default:
- DEBUGP("unknown outbound packet 0x%04x:%s\n", msg,
- (msg <= PPTP_MSG_MAX)? strMName[msg]:strMName[0]);
- /* fall through */
-
- case PPTP_SET_LINK_INFO:
- /* only need to NAT in case PAC is behind NAT box */
- case PPTP_START_SESSION_REQUEST:
- case PPTP_START_SESSION_REPLY:
- case PPTP_STOP_SESSION_REQUEST:
- case PPTP_STOP_SESSION_REPLY:
- case PPTP_ECHO_REQUEST:
- case PPTP_ECHO_REPLY:
- /* no need to alter packet */
- return NF_ACCEPT;
- }
-
- IP_NF_ASSERT(cid);
-
- DEBUGP("altering call id from 0x%04x to 0x%04x\n",
- ntohs(*cid), ntohs(new_callid));
-
- /* mangle packet */
- ip_nat_mangle_tcp_packet(pskb, ct, ctinfo, (void *)cid - (void *)pptph,
- sizeof(new_callid), (char *)&new_callid,
- sizeof(new_callid));
-
- return NF_ACCEPT;
-}
-
-/* inbound packets == from PAC to PNS */
-static inline unsigned int
-pptp_inbound_pkt(struct sk_buff **pskb,
- struct ip_conntrack *ct,
- enum ip_conntrack_info ctinfo,
- struct ip_conntrack_expect *oldexp)
-{
- struct iphdr *iph = (*pskb)->nh.iph;
- struct tcphdr *tcph = (void *) iph + iph->ihl*4;
- struct pptp_pkt_hdr *pptph = (struct pptp_pkt_hdr *)
- ((void *)tcph + tcph->doff*4);
-
- struct PptpControlHeader *ctlh;
- union pptp_ctrl_union *pptpReq;
- struct ip_ct_pptp_master *ct_pptp_info = &ct->help.ct_pptp_info;
- struct ip_nat_pptp *nat_pptp_info = &ct->nat.help.nat_pptp_info;
-
- u_int16_t msg, new_cid = 0, new_pcid, *pcid = NULL, *cid = NULL;
- u_int32_t old_dst_ip;
-
- struct ip_conntrack_tuple t, inv_t;
- struct ip_conntrack_tuple *orig_t, *reply_t;
-
- /* FIXME: size checks !!! */
- ctlh = (struct PptpControlHeader *) ((void *) pptph + sizeof(*pptph));
- pptpReq = (void *) ((void *) ctlh + sizeof(*ctlh));
-
- new_pcid = htons(nat_pptp_info->pns_call_id);
-
- switch (msg = ntohs(ctlh->messageType)) {
- case PPTP_OUT_CALL_REPLY:
- pcid = &pptpReq->ocack.peersCallID;
- cid = &pptpReq->ocack.callID;
- if (!oldexp) {
- DEBUGP("outcall but no expectation\n");
- break;
- }
- old_dst_ip = oldexp->tuple.dst.ip;
- t = oldexp->tuple;
- invert_tuplepr(&inv_t, &t);
-
- /* save original PAC call ID in nat_info */
- nat_pptp_info->pac_call_id = ct_pptp_info->pac_call_id;
-
- /* alter expectation */
- orig_t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
- reply_t = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
- if (t.src.ip == orig_t->src.ip && t.dst.ip == orig_t->dst.ip) {
- /* expectation for PNS->PAC direction */
- t.src.u.gre.key = htonl(nat_pptp_info->pns_call_id);
- t.dst.u.gre.key = htonl(ct_pptp_info->pac_call_id);
- inv_t.src.ip = reply_t->src.ip;
- inv_t.dst.ip = reply_t->dst.ip;
- inv_t.src.u.gre.key = htonl(nat_pptp_info->pac_call_id);
- inv_t.dst.u.gre.key = htonl(ct_pptp_info->pns_call_id);
- } else {
- /* expectation for PAC->PNS direction */
- t.src.u.gre.key = htonl(nat_pptp_info->pac_call_id);
- t.dst.u.gre.key = htonl(ct_pptp_info->pns_call_id);
- inv_t.src.ip = orig_t->src.ip;
- inv_t.dst.ip = orig_t->dst.ip;
- inv_t.src.u.gre.key = htonl(nat_pptp_info->pns_call_id);
- inv_t.dst.u.gre.key = htonl(ct_pptp_info->pac_call_id);
- }
-
- if (!ip_conntrack_change_expect(oldexp, &t)) {
- DEBUGP("successfully changed expect\n");
- } else {
- DEBUGP("can't change expect\n");
- }
- ip_ct_gre_keymap_change(oldexp->proto.gre.keymap_orig, &t);
- ip_ct_gre_keymap_change(oldexp->proto.gre.keymap_reply, &inv_t);
- break;
- case PPTP_IN_CALL_CONNECT:
- pcid = &pptpReq->iccon.peersCallID;
- if (!oldexp)
- break;
- old_dst_ip = oldexp->tuple.dst.ip;
- t = oldexp->tuple;
-
- /* alter expectation, no need for callID */
- if (t.dst.ip == ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.ip) {
- /* expectation for PNS->PAC direction */
- t.src.ip = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.ip;
- } else {
- /* expectation for PAC->PNS direction */
- t.dst.ip = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.ip;
- }
-
- if (!ip_conntrack_change_expect(oldexp, &t)) {
- DEBUGP("successfully changed expect\n");
- } else {
- DEBUGP("can't change expect\n");
- }
- break;
- case PPTP_IN_CALL_REQUEST:
- /* only need to nat in case PAC is behind NAT box */
- break;
- case PPTP_WAN_ERROR_NOTIFY:
- pcid = &pptpReq->wanerr.peersCallID;
- break;
- case PPTP_CALL_DISCONNECT_NOTIFY:
- pcid = &pptpReq->disc.callID;
- break;
-
- default:
- DEBUGP("unknown inbound packet %s\n",
- (msg <= PPTP_MSG_MAX)? strMName[msg]:strMName[0]);
- /* fall through */
-
- case PPTP_START_SESSION_REQUEST:
- case PPTP_START_SESSION_REPLY:
- case PPTP_STOP_SESSION_REQUEST:
- case PPTP_STOP_SESSION_REPLY:
- case PPTP_ECHO_REQUEST:
- case PPTP_ECHO_REPLY:
- /* no need to alter packet */
- return NF_ACCEPT;
- }
-
- /* mangle packet */
- IP_NF_ASSERT(pcid);
- DEBUGP("altering peer call id from 0x%04x to 0x%04x\n",
- ntohs(*pcid), ntohs(new_pcid));
- ip_nat_mangle_tcp_packet(pskb, ct, ctinfo, (void *)pcid - (void *)pptph,
- sizeof(new_pcid), (char *)&new_pcid,
- sizeof(new_pcid));
-
- if (new_cid) {
- IP_NF_ASSERT(cid);
- DEBUGP("altering call id from 0x%04x to 0x%04x\n",
- ntohs(*cid), ntohs(new_cid));
- ip_nat_mangle_tcp_packet(pskb, ct, ctinfo,
- (void *)cid - (void *)pptph,
- sizeof(new_cid), (char *)&new_cid,
- sizeof(new_cid));
- }
-
- /* great, at least we don't need to resize packets */
- return NF_ACCEPT;
-}
-
-
-static unsigned int tcp_help(struct ip_conntrack *ct,
- struct ip_conntrack_expect *exp,
- struct ip_nat_info *info,
- enum ip_conntrack_info ctinfo,
- unsigned int hooknum, struct sk_buff **pskb)
-{
- struct iphdr *iph = (*pskb)->nh.iph;
- struct tcphdr *tcph = (void *) iph + iph->ihl*4;
- unsigned int datalen = (*pskb)->len - iph->ihl*4 - tcph->doff*4;
- struct pptp_pkt_hdr *pptph;
-
- int dir;
-
- DEBUGP("entering\n");
-
- /* Only mangle things once: DST for original direction
- and SRC for reply direction. */
- dir = CTINFO2DIR(ctinfo);
- if (!((HOOK2MANIP(hooknum) == IP_NAT_MANIP_SRC
- && dir == IP_CT_DIR_ORIGINAL)
- || (HOOK2MANIP(hooknum) == IP_NAT_MANIP_DST
- && dir == IP_CT_DIR_REPLY))) {
- DEBUGP("Not touching dir %s at hook %s\n",
- dir == IP_CT_DIR_ORIGINAL ? "ORIG" : "REPLY",
- hooknum == NF_IP_POST_ROUTING ? "POSTROUTING"
- : hooknum == NF_IP_PRE_ROUTING ? "PREROUTING"
- : hooknum == NF_IP_LOCAL_OUT ? "OUTPUT"
- : hooknum == NF_IP_LOCAL_IN ? "INPUT" : "???");
- return NF_ACCEPT;
- }
-
- /* if packet is too small, just skip it */
- if (datalen < sizeof(struct pptp_pkt_hdr)+
- sizeof(struct PptpControlHeader)) {
- DEBUGP("pptp packet too short\n");
- return NF_ACCEPT;
- }
-
- pptph = (struct pptp_pkt_hdr *) ((void *)tcph + tcph->doff*4);
-
- /* if it's not a control message, we can't handle it */
- if (ntohs(pptph->packetType) != PPTP_PACKET_CONTROL ||
- ntohl(pptph->magicCookie) != PPTP_MAGIC_COOKIE) {
- DEBUGP("not a pptp control packet\n");
- return NF_ACCEPT;
- }
-
- LOCK_BH(&ip_pptp_lock);
-
- if (dir == IP_CT_DIR_ORIGINAL) {
- /* reuqests sent by client to server (PNS->PAC) */
- pptp_outbound_pkt(pskb, ct, ctinfo, exp);
- } else {
- /* response from the server to the client (PAC->PNS) */
- pptp_inbound_pkt(pskb, ct, ctinfo, exp);
- }
-
- UNLOCK_BH(&ip_pptp_lock);
-
- return NF_ACCEPT;
-}
-
-/* nat helper struct for control connection */
-static struct ip_nat_helper pptp_tcp_helper = {
- .list = { NULL, NULL },
- .name = "pptp",
- .flags = IP_NAT_HELPER_F_ALWAYS,
- .me = THIS_MODULE,
- .tuple = { .src = { .ip = 0,
- .u = { .tcp = { .port =
- __constant_htons(PPTP_CONTROL_PORT) }
- }
- },
- .dst = { .ip = 0,
- .u = { .all = 0 },
- .protonum = IPPROTO_TCP
- }
- },
-
- .mask = { .src = { .ip = 0,
- .u = { .tcp = { .port = 0xFFFF } }
- },
- .dst = { .ip = 0,
- .u = { .all = 0 },
- .protonum = 0xFFFF
- }
- },
- .help = tcp_help,
- .expect = pptp_nat_expected
-};
-
-
-static int __init init(void)
-{
- DEBUGP("%s: registering NAT helper\n", __FILE__);
- if (ip_nat_helper_register(&pptp_tcp_helper)) {
- printk(KERN_ERR "Unable to register NAT application helper "
- "for pptp\n");
- return -EIO;
- }
-
- printk("ip_nat_pptp version %s loaded\n", IP_NAT_PPTP_VERSION);
- return 0;
-}
-
-static void __exit fini(void)
-{
- DEBUGP("cleanup_module\n" );
- ip_nat_helper_unregister(&pptp_tcp_helper);
- printk("ip_nat_pptp version %s unloaded\n", IP_NAT_PPTP_VERSION);
-}
-
-module_init(init);
-module_exit(fini);
+++ /dev/null
-/*
- * ip_nat_proto_gre.c - Version 2.0
- *
- * NAT protocol helper module for GRE.
- *
- * GRE is a generic encapsulation protocol, which is generally not very
- * suited for NAT, as it has no protocol-specific part as port numbers.
- *
- * It has an optional key field, which may help us distinguishing two
- * connections between the same two hosts.
- *
- * GRE is defined in RFC 1701 and RFC 1702, as well as RFC 2784
- *
- * PPTP is built on top of a modified version of GRE, and has a mandatory
- * field called "CallID", which serves us for the same purpose as the key
- * field in plain GRE.
- *
- * Documentation about PPTP can be found in RFC 2637
- *
- * (C) 2000-2004 by Harald Welte <laforge@gnumonks.org>
- *
- * Development of this code funded by Astaro AG (http://www.astaro.com/)
- *
- */
-
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/ip.h>
-#include <linux/netfilter_ipv4/ip_nat.h>
-#include <linux/netfilter_ipv4/ip_nat_rule.h>
-#include <linux/netfilter_ipv4/ip_nat_protocol.h>
-#include <linux/netfilter_ipv4/ip_conntrack_proto_gre.h>
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>");
-MODULE_DESCRIPTION("Netfilter NAT protocol helper module for GRE");
-
-#if 0
-#define DEBUGP(format, args...) printk(KERN_DEBUG __FILE__ ":" __FUNCTION__ \
- ": " format, ## args)
-#else
-#define DEBUGP(x, args...)
-#endif
-
-/* is key in given range between min and max */
-static int
-gre_in_range(const struct ip_conntrack_tuple *tuple,
- enum ip_nat_manip_type maniptype,
- const union ip_conntrack_manip_proto *min,
- const union ip_conntrack_manip_proto *max)
-{
- u_int32_t key;
-
- if (maniptype == IP_NAT_MANIP_SRC)
- key = tuple->src.u.gre.key;
- else
- key = tuple->dst.u.gre.key;
-
- return ntohl(key) >= ntohl(min->gre.key)
- && ntohl(key) <= ntohl(max->gre.key);
-}
-
-/* generate unique tuple ... */
-static int
-gre_unique_tuple(struct ip_conntrack_tuple *tuple,
- const struct ip_nat_range *range,
- enum ip_nat_manip_type maniptype,
- const struct ip_conntrack *conntrack)
-{
- u_int32_t min, i, range_size;
- u_int32_t key = 0, *keyptr;
-
- if (maniptype == IP_NAT_MANIP_SRC)
- keyptr = &tuple->src.u.gre.key;
- else
- keyptr = &tuple->dst.u.gre.key;
-
- if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)) {
- DEBUGP("%p: NATing GRE PPTP\n", conntrack);
- min = 1;
- range_size = 0xffff;
- } else {
- min = ntohl(range->min.gre.key);
- range_size = ntohl(range->max.gre.key) - min + 1;
- }
-
- DEBUGP("min = %u, range_size = %u\n", min, range_size);
-
- for (i = 0; i < range_size; i++, key++) {
- *keyptr = htonl(min + key % range_size);
- if (!ip_nat_used_tuple(tuple, conntrack))
- return 1;
- }
-
- DEBUGP("%p: no NAT mapping\n", conntrack);
-
- return 0;
-}
-
-/* manipulate a GRE packet according to maniptype */
-static int
-gre_manip_pkt(struct sk_buff **pskb,
- unsigned int hdroff,
- const struct ip_conntrack_manip *manip,
- enum ip_nat_manip_type maniptype)
-{
- struct gre_hdr *greh;
- struct gre_hdr_pptp *pgreh;
-
- if (!skb_ip_make_writable(pskb, hdroff + sizeof(*pgreh)))
- return 0;
-
- greh = (void *)(*pskb)->data + hdroff;
- pgreh = (struct gre_hdr_pptp *) greh;
-
- /* we only have destination manip of a packet, since 'source key'
- * is not present in the packet itself */
- if (maniptype == IP_NAT_MANIP_DST) {
- /* key manipulation is always dest */
- switch (greh->version) {
- case 0:
- if (!greh->key) {
- DEBUGP("can't nat GRE w/o key\n");
- break;
- }
- if (greh->csum) {
- /* FIXME: Never tested this code... */
- *(gre_csum(greh)) =
- ip_nat_cheat_check(~*(gre_key(greh)),
- manip->u.gre.key,
- *(gre_csum(greh)));
- }
- *(gre_key(greh)) = manip->u.gre.key;
- break;
- case GRE_VERSION_PPTP:
- DEBUGP("call_id -> 0x%04x\n",
- ntohl(manip->u.gre.key));
- pgreh->call_id = htons(ntohl(manip->u.gre.key));
- break;
- default:
- DEBUGP("can't nat unknown GRE version\n");
- return 0;
- break;
- }
- }
- return 1;
-}
-
-/* print out a nat tuple */
-static unsigned int
-gre_print(char *buffer,
- const struct ip_conntrack_tuple *match,
- const struct ip_conntrack_tuple *mask)
-{
- unsigned int len = 0;
-
- if (mask->src.u.gre.key)
- len += sprintf(buffer + len, "srckey=0x%x ",
- ntohl(match->src.u.gre.key));
-
- if (mask->dst.u.gre.key)
- len += sprintf(buffer + len, "dstkey=0x%x ",
- ntohl(match->src.u.gre.key));
-
- return len;
-}
-
-/* print a range of keys */
-static unsigned int
-gre_print_range(char *buffer, const struct ip_nat_range *range)
-{
- if (range->min.gre.key != 0
- || range->max.gre.key != 0xFFFF) {
- if (range->min.gre.key == range->max.gre.key)
- return sprintf(buffer, "key 0x%x ",
- ntohl(range->min.gre.key));
- else
- return sprintf(buffer, "keys 0x%u-0x%u ",
- ntohl(range->min.gre.key),
- ntohl(range->max.gre.key));
- } else
- return 0;
-}
-
-/* nat helper struct */
-static struct ip_nat_protocol gre = {
- .name = "GRE",
- .protonum = IPPROTO_GRE,
- .manip_pkt = gre_manip_pkt,
- .in_range = gre_in_range,
- .unique_tuple = gre_unique_tuple,
- .print = gre_print,
- .print_range = gre_print_range
-};
-
-static int __init init(void)
-{
- if (ip_nat_protocol_register(&gre))
- return -EIO;
-
- return 0;
-}
-
-static void __exit fini(void)
-{
- ip_nat_protocol_unregister(&gre);
-}
-
-module_init(init);
-module_exit(fini);
return 1;
}
-/*
- * Fast checksum update for possibly oddly-aligned UDP byte, from the
- * code example in the draft.
- */
-static void fast_csum(unsigned char *csum,
- const unsigned char *optr,
- const unsigned char *nptr,
- int odd)
-{
- long x, old, new;
-
- x = csum[0] * 256 + csum[1];
-
- x =~ x & 0xFFFF;
-
- if (odd) old = optr[0] * 256;
- else old = optr[0];
-
- x -= old & 0xFFFF;
- if (x <= 0) {
- x--;
- x &= 0xFFFF;
- }
-
- if (odd) new = nptr[0] * 256;
- else new = nptr[0];
-
- x += new & 0xFFFF;
- if (x & 0x10000) {
- x++;
- x &= 0xFFFF;
- }
-
- x =~ x & 0xFFFF;
- csum[0] = x / 256;
- csum[1] = x & 0xFF;
-}
-
-/*
- * Mangle IP address.
- * - begin points to the start of the snmp messgae
- * - addr points to the start of the address
- */
-static inline void mangle_address(unsigned char *begin,
- unsigned char *addr,
- const struct oct1_map *map,
- u_int16_t *check)
-{
- if (map->from == NOCT1(*addr)) {
- u_int32_t old;
-
- if (debug)
- memcpy(&old, (unsigned char *)addr, sizeof(old));
-
- *addr = map->to;
-
- /* Update UDP checksum if being used */
- if (*check) {
- unsigned char odd = !((addr - begin) % 2);
-
- fast_csum((unsigned char *)check,
- &map->from, &map->to, odd);
-
- }
-
- if (debug)
- printk(KERN_DEBUG "bsalg: mapped %u.%u.%u.%u to "
- "%u.%u.%u.%u\n", NIPQUAD(old), NIPQUAD(*addr));
- }
-}
-
static unsigned char snmp_trap_decode(struct asn1_ctx *ctx,
struct snmp_v1_trap *trap,
const struct oct1_map *map,
printk("\n");
}
+/*
+ * Fast checksum update for possibly oddly-aligned UDP byte, from the
+ * code example in the draft.
+ */
+static void fast_csum(unsigned char *csum,
+ const unsigned char *optr,
+ const unsigned char *nptr,
+ int odd)
+{
+ long x, old, new;
+
+ x = csum[0] * 256 + csum[1];
+
+ x =~ x & 0xFFFF;
+
+ if (odd) old = optr[0] * 256;
+ else old = optr[0];
+
+ x -= old & 0xFFFF;
+ if (x <= 0) {
+ x--;
+ x &= 0xFFFF;
+ }
+
+ if (odd) new = nptr[0] * 256;
+ else new = nptr[0];
+
+ x += new & 0xFFFF;
+ if (x & 0x10000) {
+ x++;
+ x &= 0xFFFF;
+ }
+
+ x =~ x & 0xFFFF;
+ csum[0] = x / 256;
+ csum[1] = x & 0xFF;
+}
+
+/*
+ * Mangle IP address.
+ * - begin points to the start of the snmp message
+ * - addr points to the start of the address
+ */
+static inline void mangle_address(unsigned char *begin,
+ unsigned char *addr,
+ const struct oct1_map *map,
+ u_int16_t *check)
+{
+ if (map->from == NOCT1(*addr)) {
+ u_int32_t old;
+
+ if (debug)
+ memcpy(&old, (unsigned char *)addr, sizeof(old));
+
+ *addr = map->to;
+
+ /* Update UDP checksum if being used */
+ if (*check) {
+ unsigned char odd = !((addr - begin) % 2);
+
+ fast_csum((unsigned char *)check,
+ &map->from, &map->to, odd);
+
+ }
+
+ if (debug)
+ printk(KERN_DEBUG "bsalg: mapped %u.%u.%u.%u to "
+ "%u.%u.%u.%u\n", NIPQUAD(old), NIPQUAD(*addr));
+ }
+}
+
/*
* Parse and mangle SNMP message according to mapping.
* (And this is the fucking 'basic' method).
static int ports[MAX_PORTS];
static int ports_c = 0;
+#ifdef MODULE_PARM
MODULE_PARM(ports,"1-" __MODULE_STRING(MAX_PORTS) "i");
MODULE_PARM_DESC(ports, "port numbers of tftp servers");
+#endif
#if 0
#define DEBUGP(format, args...) printk("%s:%s:" format, \
return 0;
}
-#if 0
if (strcmp(tablename, "mangle") != 0) {
printk(KERN_WARNING "MARK: can only be called from \"mangle\" table, not \"%s\"\n", tablename);
return 0;
}
-#endif
return 1;
}
};
static unsigned long
-fold_field(void *mib[], int offt)
+__fold_field(void *mib[], int offt)
{
unsigned long res = 0;
int i;
for (i = 0; i < NR_CPUS; i++) {
if (!cpu_possible(i))
continue;
- res += *(((unsigned long *) per_cpu_ptr(mib[0], i)) + offt);
- res += *(((unsigned long *) per_cpu_ptr(mib[1], i)) + offt);
+ res +=
+ *((unsigned long *) (((void *) per_cpu_ptr(mib[0], i)) +
+ offt));
+ res +=
+ *((unsigned long *) (((void *) per_cpu_ptr(mib[1], i)) +
+ offt));
}
return res;
}
-/* snmp items */
-static struct snmp_mib snmp4_ipstats_list[] = {
- SNMP_MIB_ITEM("InReceives", IPSTATS_MIB_INRECEIVES),
- SNMP_MIB_ITEM("InHdrErrors", IPSTATS_MIB_INHDRERRORS),
- SNMP_MIB_ITEM("InAddrErrors", IPSTATS_MIB_INADDRERRORS),
- SNMP_MIB_ITEM("ForwDatagrams", IPSTATS_MIB_OUTFORWDATAGRAMS),
- SNMP_MIB_ITEM("InUnknownProtos", IPSTATS_MIB_INUNKNOWNPROTOS),
- SNMP_MIB_ITEM("InDiscards", IPSTATS_MIB_INDISCARDS),
- SNMP_MIB_ITEM("InDelivers", IPSTATS_MIB_INDELIVERS),
- SNMP_MIB_ITEM("OutRequests", IPSTATS_MIB_OUTREQUESTS),
- SNMP_MIB_ITEM("OutDiscards", IPSTATS_MIB_OUTDISCARDS),
- SNMP_MIB_ITEM("OutNoRoutes", IPSTATS_MIB_OUTNOROUTES),
- SNMP_MIB_ITEM("ReasmTimeout", IPSTATS_MIB_REASMTIMEOUT),
- SNMP_MIB_ITEM("ReasmReqds", IPSTATS_MIB_REASMREQDS),
- SNMP_MIB_ITEM("ReasmOKs", IPSTATS_MIB_REASMOKS),
- SNMP_MIB_ITEM("ReasmFails", IPSTATS_MIB_REASMFAILS),
- SNMP_MIB_ITEM("FragOKs", IPSTATS_MIB_FRAGOKS),
- SNMP_MIB_ITEM("FragFails", IPSTATS_MIB_FRAGFAILS),
- SNMP_MIB_ITEM("FragCreates", IPSTATS_MIB_FRAGCREATES),
- SNMP_MIB_SENTINEL
-};
-
-static struct snmp_mib snmp4_icmp_list[] = {
- SNMP_MIB_ITEM("InMsgs", ICMP_MIB_INMSGS),
- SNMP_MIB_ITEM("InErrors", ICMP_MIB_INERRORS),
- SNMP_MIB_ITEM("InDestUnreachs", ICMP_MIB_INDESTUNREACHS),
- SNMP_MIB_ITEM("InTimeExcds", ICMP_MIB_INTIMEEXCDS),
- SNMP_MIB_ITEM("InParmProbs", ICMP_MIB_INPARMPROBS),
- SNMP_MIB_ITEM("InSrcQuenchs", ICMP_MIB_INSRCQUENCHS),
- SNMP_MIB_ITEM("InRedirects", ICMP_MIB_INREDIRECTS),
- SNMP_MIB_ITEM("InEchos", ICMP_MIB_INECHOS),
- SNMP_MIB_ITEM("InEchoReps", ICMP_MIB_INECHOREPS),
- SNMP_MIB_ITEM("InTimestamps", ICMP_MIB_INTIMESTAMPS),
- SNMP_MIB_ITEM("InTimestampReps", ICMP_MIB_INTIMESTAMPREPS),
- SNMP_MIB_ITEM("InAddrMasks", ICMP_MIB_INADDRMASKS),
- SNMP_MIB_ITEM("InAddrMaskReps", ICMP_MIB_INADDRMASKREPS),
- SNMP_MIB_ITEM("OutMsgs", ICMP_MIB_OUTMSGS),
- SNMP_MIB_ITEM("OutErrors", ICMP_MIB_OUTERRORS),
- SNMP_MIB_ITEM("OutDestUnreachs", ICMP_MIB_OUTDESTUNREACHS),
- SNMP_MIB_ITEM("OutTimeExcds", ICMP_MIB_OUTTIMEEXCDS),
- SNMP_MIB_ITEM("OutParmProbs", ICMP_MIB_OUTPARMPROBS),
- SNMP_MIB_ITEM("OutSrcQuenchs", ICMP_MIB_OUTSRCQUENCHS),
- SNMP_MIB_ITEM("OutRedirects", ICMP_MIB_OUTREDIRECTS),
- SNMP_MIB_ITEM("OutEchos", ICMP_MIB_OUTECHOS),
- SNMP_MIB_ITEM("OutEchoReps", ICMP_MIB_OUTECHOREPS),
- SNMP_MIB_ITEM("OutTimestamps", ICMP_MIB_OUTTIMESTAMPS),
- SNMP_MIB_ITEM("OutTimestampReps", ICMP_MIB_OUTTIMESTAMPREPS),
- SNMP_MIB_ITEM("OutAddrMasks", ICMP_MIB_OUTADDRMASKS),
- SNMP_MIB_ITEM("OutAddrMaskReps", ICMP_MIB_OUTADDRMASKREPS),
- SNMP_MIB_SENTINEL
-};
-
-static struct snmp_mib snmp4_tcp_list[] = {
- SNMP_MIB_ITEM("RtoAlgorithm", TCP_MIB_RTOALGORITHM),
- SNMP_MIB_ITEM("RtoMin", TCP_MIB_RTOMIN),
- SNMP_MIB_ITEM("RtoMax", TCP_MIB_RTOMAX),
- SNMP_MIB_ITEM("MaxConn", TCP_MIB_MAXCONN),
- SNMP_MIB_ITEM("ActiveOpens", TCP_MIB_ACTIVEOPENS),
- SNMP_MIB_ITEM("PassiveOpens", TCP_MIB_PASSIVEOPENS),
- SNMP_MIB_ITEM("AttemptFails", TCP_MIB_ATTEMPTFAILS),
- SNMP_MIB_ITEM("EstabResets", TCP_MIB_ESTABRESETS),
- SNMP_MIB_ITEM("CurrEstab", TCP_MIB_CURRESTAB),
- SNMP_MIB_ITEM("InSegs", TCP_MIB_INSEGS),
- SNMP_MIB_ITEM("OutSegs", TCP_MIB_OUTSEGS),
- SNMP_MIB_ITEM("RetransSegs", TCP_MIB_RETRANSSEGS),
- SNMP_MIB_ITEM("InErrs", TCP_MIB_INERRS),
- SNMP_MIB_ITEM("OutRsts", TCP_MIB_OUTRSTS),
- SNMP_MIB_SENTINEL
-};
-
-static struct snmp_mib snmp4_udp_list[] = {
- SNMP_MIB_ITEM("InDatagrams", UDP_MIB_INDATAGRAMS),
- SNMP_MIB_ITEM("NoPorts", UDP_MIB_NOPORTS),
- SNMP_MIB_ITEM("InErrors", UDP_MIB_INERRORS),
- SNMP_MIB_ITEM("OutDatagrams", UDP_MIB_OUTDATAGRAMS),
- SNMP_MIB_SENTINEL
-};
+#define fold_field(_mib, _nr) __fold_field(_mib, (sizeof(unsigned long) * (_nr)))
-static struct snmp_mib snmp4_net_list[] = {
- SNMP_MIB_ITEM("SyncookiesSent", LINUX_MIB_SYNCOOKIESSENT),
- SNMP_MIB_ITEM("SyncookiesRecv", LINUX_MIB_SYNCOOKIESRECV),
- SNMP_MIB_ITEM("SyncookiesFailed", LINUX_MIB_SYNCOOKIESFAILED),
- SNMP_MIB_ITEM("EmbryonicRsts", LINUX_MIB_EMBRYONICRSTS),
- SNMP_MIB_ITEM("PruneCalled", LINUX_MIB_PRUNECALLED),
- SNMP_MIB_ITEM("RcvPruned", LINUX_MIB_RCVPRUNED),
- SNMP_MIB_ITEM("OfoPruned", LINUX_MIB_OFOPRUNED),
- SNMP_MIB_ITEM("OutOfWindowIcmps", LINUX_MIB_OUTOFWINDOWICMPS),
- SNMP_MIB_ITEM("LockDroppedIcmps", LINUX_MIB_LOCKDROPPEDICMPS),
- SNMP_MIB_ITEM("ArpFilter", LINUX_MIB_ARPFILTER),
- SNMP_MIB_ITEM("TW", LINUX_MIB_TIMEWAITED),
- SNMP_MIB_ITEM("TWRecycled", LINUX_MIB_TIMEWAITRECYCLED),
- SNMP_MIB_ITEM("TWKilled", LINUX_MIB_TIMEWAITKILLED),
- SNMP_MIB_ITEM("PAWSPassive", LINUX_MIB_PAWSPASSIVEREJECTED),
- SNMP_MIB_ITEM("PAWSActive", LINUX_MIB_PAWSACTIVEREJECTED),
- SNMP_MIB_ITEM("PAWSEstab", LINUX_MIB_PAWSESTABREJECTED),
- SNMP_MIB_ITEM("DelayedACKs", LINUX_MIB_DELAYEDACKS),
- SNMP_MIB_ITEM("DelayedACKLocked", LINUX_MIB_DELAYEDACKLOCKED),
- SNMP_MIB_ITEM("DelayedACKLost", LINUX_MIB_DELAYEDACKLOST),
- SNMP_MIB_ITEM("ListenOverflows", LINUX_MIB_LISTENOVERFLOWS),
- SNMP_MIB_ITEM("ListenDrops", LINUX_MIB_LISTENDROPS),
- SNMP_MIB_ITEM("TCPPrequeued", LINUX_MIB_TCPPREQUEUED),
- SNMP_MIB_ITEM("TCPDirectCopyFromBacklog", LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG),
- SNMP_MIB_ITEM("TCPDirectCopyFromPrequeue", LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE),
- SNMP_MIB_ITEM("TCPPrequeueDropped", LINUX_MIB_TCPPREQUEUEDROPPED),
- SNMP_MIB_ITEM("TCPHPHits", LINUX_MIB_TCPHPHITS),
- SNMP_MIB_ITEM("TCPHPHitsToUser", LINUX_MIB_TCPHPHITSTOUSER),
- SNMP_MIB_ITEM("TCPPureAcks", LINUX_MIB_TCPPUREACKS),
- SNMP_MIB_ITEM("TCPHPAcks", LINUX_MIB_TCPHPACKS),
- SNMP_MIB_ITEM("TCPRenoRecovery", LINUX_MIB_TCPRENORECOVERY),
- SNMP_MIB_ITEM("TCPSackRecovery", LINUX_MIB_TCPSACKRECOVERY),
- SNMP_MIB_ITEM("TCPSACKReneging", LINUX_MIB_TCPSACKRENEGING),
- SNMP_MIB_ITEM("TCPFACKReorder", LINUX_MIB_TCPFACKREORDER),
- SNMP_MIB_ITEM("TCPSACKReorder", LINUX_MIB_TCPSACKREORDER),
- SNMP_MIB_ITEM("TCPRenoReorder", LINUX_MIB_TCPRENOREORDER),
- SNMP_MIB_ITEM("TCPTSReorder", LINUX_MIB_TCPTSREORDER),
- SNMP_MIB_ITEM("TCPFullUndo", LINUX_MIB_TCPFULLUNDO),
- SNMP_MIB_ITEM("TCPPartialUndo", LINUX_MIB_TCPPARTIALUNDO),
- SNMP_MIB_ITEM("TCPDSACKUndo", LINUX_MIB_TCPDSACKUNDO),
- SNMP_MIB_ITEM("TCPLossUndo", LINUX_MIB_TCPLOSSUNDO),
- SNMP_MIB_ITEM("TCPLoss", LINUX_MIB_TCPLOSS),
- SNMP_MIB_ITEM("TCPLostRetransmit", LINUX_MIB_TCPLOSTRETRANSMIT),
- SNMP_MIB_ITEM("TCPRenoFailures", LINUX_MIB_TCPRENOFAILURES),
- SNMP_MIB_ITEM("TCPSackFailures", LINUX_MIB_TCPSACKFAILURES),
- SNMP_MIB_ITEM("TCPLossFailures", LINUX_MIB_TCPLOSSFAILURES),
- SNMP_MIB_ITEM("TCPFastRetrans", LINUX_MIB_TCPFASTRETRANS),
- SNMP_MIB_ITEM("TCPForwardRetrans", LINUX_MIB_TCPFORWARDRETRANS),
- SNMP_MIB_ITEM("TCPSlowStartRetrans", LINUX_MIB_TCPSLOWSTARTRETRANS),
- SNMP_MIB_ITEM("TCPTimeouts", LINUX_MIB_TCPTIMEOUTS),
- SNMP_MIB_ITEM("TCPRenoRecoveryFail", LINUX_MIB_TCPRENORECOVERYFAIL),
- SNMP_MIB_ITEM("TCPSackRecoveryFail", LINUX_MIB_TCPSACKRECOVERYFAIL),
- SNMP_MIB_ITEM("TCPSchedulerFailed", LINUX_MIB_TCPSCHEDULERFAILED),
- SNMP_MIB_ITEM("TCPRcvCollapsed", LINUX_MIB_TCPRCVCOLLAPSED),
- SNMP_MIB_ITEM("TCPDSACKOldSent", LINUX_MIB_TCPDSACKOLDSENT),
- SNMP_MIB_ITEM("TCPDSACKOfoSent", LINUX_MIB_TCPDSACKOFOSENT),
- SNMP_MIB_ITEM("TCPDSACKRecv", LINUX_MIB_TCPDSACKRECV),
- SNMP_MIB_ITEM("TCPDSACKOfoRecv", LINUX_MIB_TCPDSACKOFORECV),
- SNMP_MIB_ITEM("TCPAbortOnSyn", LINUX_MIB_TCPABORTONSYN),
- SNMP_MIB_ITEM("TCPAbortOnData", LINUX_MIB_TCPABORTONDATA),
- SNMP_MIB_ITEM("TCPAbortOnClose", LINUX_MIB_TCPABORTONCLOSE),
- SNMP_MIB_ITEM("TCPAbortOnMemory", LINUX_MIB_TCPABORTONMEMORY),
- SNMP_MIB_ITEM("TCPAbortOnTimeout", LINUX_MIB_TCPABORTONTIMEOUT),
- SNMP_MIB_ITEM("TCPAbortOnLinger", LINUX_MIB_TCPABORTONLINGER),
- SNMP_MIB_ITEM("TCPAbortFailed", LINUX_MIB_TCPABORTFAILED),
- SNMP_MIB_ITEM("TCPMemoryPressures", LINUX_MIB_TCPMEMORYPRESSURES),
- SNMP_MIB_SENTINEL
+/* snmp items */
+static struct snmp_item snmp4_ipstats_list[] = {
+#define __SNMP_GEN(x,y) SNMP_ITEM(struct ipstats_mib, x, y)
+#define SNMP_GEN(x) __SNMP_GEN(x, #x)
+ SNMP_GEN(InReceives),
+ SNMP_GEN(InHdrErrors),
+ SNMP_GEN(InAddrErrors),
+ __SNMP_GEN(OutForwDatagrams,"ForwDatagrams"), /* for backward compatibility */
+ SNMP_GEN(InUnknownProtos),
+ SNMP_GEN(InDiscards),
+ SNMP_GEN(InDelivers),
+ SNMP_GEN(OutRequests),
+ SNMP_GEN(OutDiscards),
+ SNMP_GEN(OutNoRoutes),
+ SNMP_GEN(ReasmTimeout),
+ SNMP_GEN(ReasmReqds),
+ SNMP_GEN(ReasmOKs),
+ SNMP_GEN(ReasmFails),
+ SNMP_GEN(FragOKs),
+ SNMP_GEN(FragFails),
+ SNMP_GEN(FragCreates),
+ SNMP_ITEM_SENTINEL
+#undef SNMP_GEN
};
/*
{
int i;
- seq_puts(seq, "Ip: Forwarding DefaultTTL");
+ seq_printf(seq, "Ip: Forwarding DefaultTTL");
for (i = 0; snmp4_ipstats_list[i].name != NULL; i++)
seq_printf(seq, " %s", snmp4_ipstats_list[i].name);
for (i = 0; snmp4_ipstats_list[i].name != NULL; i++)
seq_printf(seq, " %lu",
- fold_field((void **) ip_statistics,
- snmp4_ipstats_list[i].entry));
-
- seq_puts(seq, "\nIcmp:");
- for (i = 0; snmp4_icmp_list[i].name != NULL; i++)
- seq_printf(seq, " %s", snmp4_icmp_list[i].name);
-
- seq_puts(seq, "\nIcmp:");
- for (i = 0; snmp4_icmp_list[i].name != NULL; i++)
+ __fold_field((void **) ip_statistics,
+ snmp4_ipstats_list[i].offset));
+
+ seq_printf(seq, "\nIcmp: InMsgs InErrors InDestUnreachs InTimeExcds "
+ "InParmProbs InSrcQuenchs InRedirects InEchos "
+ "InEchoReps InTimestamps InTimestampReps InAddrMasks "
+ "InAddrMaskReps OutMsgs OutErrors OutDestUnreachs "
+ "OutTimeExcds OutParmProbs OutSrcQuenchs OutRedirects "
+ "OutEchos OutEchoReps OutTimestamps OutTimestampReps "
+ "OutAddrMasks OutAddrMaskReps\nIcmp:");
+
+ for (i = 0;
+ i < offsetof(struct icmp_mib, dummy) / sizeof(unsigned long); i++)
seq_printf(seq, " %lu",
- fold_field((void **) icmp_statistics,
- snmp4_icmp_list[i].entry));
-
- seq_puts(seq, "\nTcp:");
- for (i = 0; snmp4_tcp_list[i].name != NULL; i++)
- seq_printf(seq, " %s", snmp4_tcp_list[i].name);
-
- seq_puts(seq, "\nTcp:");
- for (i = 0; snmp4_tcp_list[i].name != NULL; i++) {
- /* MaxConn field is signed, RFC 2012 */
- if (snmp4_tcp_list[i].entry == TCP_MIB_MAXCONN)
- seq_printf(seq, " %ld",
- fold_field((void **) tcp_statistics,
- snmp4_tcp_list[i].entry));
+ fold_field((void **) icmp_statistics, i));
+
+ seq_printf(seq, "\nTcp: RtoAlgorithm RtoMin RtoMax MaxConn ActiveOpens "
+ "PassiveOpens AttemptFails EstabResets CurrEstab "
+ "InSegs OutSegs RetransSegs InErrs OutRsts\nTcp:");
+
+ for (i = 0;
+ i < offsetof(struct tcp_mib, __pad) / sizeof(unsigned long); i++) {
+ if (i == (offsetof(struct tcp_mib, TcpMaxConn) / sizeof(unsigned long)))
+ /* MaxConn field is negative, RFC 2012 */
+ seq_printf(seq, " %ld",
+ fold_field((void **) tcp_statistics, i));
else
- seq_printf(seq, " %lu",
- fold_field((void **) tcp_statistics,
- snmp4_tcp_list[i].entry));
+ seq_printf(seq, " %lu",
+ fold_field((void **) tcp_statistics, i));
}
- seq_puts(seq, "\nUdp:");
- for (i = 0; snmp4_udp_list[i].name != NULL; i++)
- seq_printf(seq, " %s", snmp4_udp_list[i].name);
+ seq_printf(seq, "\nUdp: InDatagrams NoPorts InErrors OutDatagrams\n"
+ "Udp:");
- seq_puts(seq, "\nUdp:");
- for (i = 0; snmp4_udp_list[i].name != NULL; i++)
- seq_printf(seq, " %lu",
- fold_field((void **) udp_statistics,
- snmp4_udp_list[i].entry));
+ for (i = 0;
+ i < offsetof(struct udp_mib, __pad) / sizeof(unsigned long); i++)
+ seq_printf(seq, " %lu",
+ fold_field((void **) udp_statistics, i));
seq_putc(seq, '\n');
return 0;
{
int i;
- seq_puts(seq, "\nTcpExt:");
- for (i = 0; snmp4_net_list[i].name != NULL; i++)
- seq_printf(seq, " %s", snmp4_net_list[i].name);
-
- seq_puts(seq, "\nTcpExt:");
- for (i = 0; snmp4_net_list[i].name != NULL; i++)
- seq_printf(seq, " %lu",
- fold_field((void **) net_statistics,
- snmp4_net_list[i].entry));
-
+ seq_puts(seq, "TcpExt: SyncookiesSent SyncookiesRecv SyncookiesFailed"
+ " EmbryonicRsts PruneCalled RcvPruned OfoPruned"
+ " OutOfWindowIcmps LockDroppedIcmps ArpFilter"
+ " TW TWRecycled TWKilled"
+ " PAWSPassive PAWSActive PAWSEstab"
+ " DelayedACKs DelayedACKLocked DelayedACKLost"
+ " ListenOverflows ListenDrops"
+ " TCPPrequeued TCPDirectCopyFromBacklog"
+ " TCPDirectCopyFromPrequeue TCPPrequeueDropped"
+ " TCPHPHits TCPHPHitsToUser"
+ " TCPPureAcks TCPHPAcks"
+ " TCPRenoRecovery TCPSackRecovery"
+ " TCPSACKReneging"
+ " TCPFACKReorder TCPSACKReorder TCPRenoReorder"
+ " TCPTSReorder"
+ " TCPFullUndo TCPPartialUndo TCPDSACKUndo TCPLossUndo"
+ " TCPLoss TCPLostRetransmit"
+ " TCPRenoFailures TCPSackFailures TCPLossFailures"
+ " TCPFastRetrans TCPForwardRetrans TCPSlowStartRetrans"
+ " TCPTimeouts"
+ " TCPRenoRecoveryFail TCPSackRecoveryFail"
+ " TCPSchedulerFailed TCPRcvCollapsed"
+ " TCPDSACKOldSent TCPDSACKOfoSent TCPDSACKRecv"
+ " TCPDSACKOfoRecv"
+ " TCPAbortOnSyn TCPAbortOnData TCPAbortOnClose"
+ " TCPAbortOnMemory TCPAbortOnTimeout TCPAbortOnLinger"
+ " TCPAbortFailed TCPMemoryPressures\n"
+ "TcpExt:");
+ for (i = 0;
+ i < offsetof(struct linux_mib, __pad) / sizeof(unsigned long);
+ i++)
+ seq_printf(seq, " %lu",
+ fold_field((void **) net_statistics, i));
seq_putc(seq, '\n');
return 0;
}
#include <linux/seq_file.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
-#include <linux/vs_base.h>
struct hlist_head raw_v4_htable[RAWV4_HTABLE_SIZE];
rwlock_t raw_v4_lock = RW_LOCK_UNLOCKED;
write_unlock_bh(&raw_v4_lock);
}
+
+/*
+ Check if an address is in the list
+*/
+static inline int raw_addr_in_list (
+ u32 rcv_saddr1,
+ u32 rcv_saddr2,
+ u32 loc_addr,
+ struct nx_info *nx_info)
+{
+ int ret = 0;
+ if (loc_addr != 0 &&
+ (rcv_saddr1 == loc_addr || rcv_saddr2 == loc_addr))
+ ret = 1;
+ else if (rcv_saddr1 == 0) {
+ /* Accept any address or only the one in the list */
+ if (nx_info == NULL)
+ ret = 1;
+ else {
+ int n = nx_info->nbipv4;
+ int i;
+ for (i=0; i<n; i++) {
+ if (nx_info->ipv4[i] == loc_addr) {
+ ret = 1;
+ break;
+ }
+ }
+ }
+ }
+ return ret;
+}
+
struct sock *__raw_v4_lookup(struct sock *sk, unsigned short num,
unsigned long raddr, unsigned long laddr,
int dif)
if (inet->num == num &&
!(inet->daddr && inet->daddr != raddr) &&
- !(inet->rcv_saddr && inet->rcv_saddr != laddr) &&
+ raw_addr_in_list(inet->rcv_saddr, inet->rcv_saddr2,
+ laddr, sk->sk_nx_info) &&
!(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
goto found; /* gotcha */
}
err = -EFAULT;
kfree_skb(skb);
error:
- IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
+ IP_INC_STATS(OutDiscards);
return err;
}
}
if (inet->cmsg_flags)
ip_cmsg_recv(msg, skb);
- if (flags & MSG_TRUNC)
- copied = skb->len;
done:
skb_free_datagram(sk, skb);
-out: return err ? err : copied;
+out: return err ? : copied;
}
static int raw_init(struct sock *sk)
struct proto raw_prot = {
.name = "RAW",
.close = raw_close,
- .connect = ip4_datagram_connect,
+ .connect = udp_connect,
.disconnect = udp_disconnect,
.ioctl = raw_ioctl,
.init = raw_init,
rth->rt_flags = flags;
+#ifdef CONFIG_NET_FASTROUTE
+ if (netdev_fastroute && !(flags&(RTCF_NAT|RTCF_MASQ|RTCF_DOREDIRECT))) {
+ struct net_device *odev = rth->u.dst.dev;
+ if (odev != dev &&
+ dev->accept_fastpath &&
+ odev->mtu >= dev->mtu &&
+ dev->accept_fastpath(dev, &rth->u.dst) == 0)
+ rth->rt_flags |= RTCF_FAST;
+ }
+#endif
+
intern:
err = rt_intern_hash(hash, rth, (struct rtable**)&skb->dst);
done:
static int ipv4_sysctl_rtcache_flush(ctl_table *ctl, int write,
struct file *filp, void __user *buffer,
- size_t *lenp, loff_t *ppos)
+ size_t *lenp)
{
if (write) {
- proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
+ proc_dointvec(ctl, write, filp, buffer, lenp);
rt_cache_flush(flush_delay);
return 0;
}
;
*mssp = msstab[mssind] + 1;
- NET_INC_STATS_BH(LINUX_MIB_SYNCOOKIESSENT);
+ NET_INC_STATS_BH(SyncookiesSent);
return secure_tcp_syn_cookie(skb->nh.iph->saddr, skb->nh.iph->daddr,
skb->h.th->source, skb->h.th->dest,
if (time_after(jiffies, tp->last_synq_overflow + TCP_TIMEOUT_INIT) ||
(mss = cookie_check(skb, cookie)) == 0) {
- NET_INC_STATS_BH(LINUX_MIB_SYNCOOKIESFAILED);
+ NET_INC_STATS_BH(SyncookiesFailed);
goto out;
}
- NET_INC_STATS_BH(LINUX_MIB_SYNCOOKIESRECV);
+ NET_INC_STATS_BH(SyncookiesRecv);
req = tcp_openreq_alloc();
ret = NULL;
extern int sysctl_icmp_echo_ignore_all;
extern int sysctl_icmp_echo_ignore_broadcasts;
extern int sysctl_icmp_ignore_bogus_error_responses;
-#ifdef CONFIG_ICMP_IPOD
-extern int sysctl_icmp_ipod_version;
-extern int sysctl_icmp_ipod_enabled;
-extern u32 sysctl_icmp_ipod_host;
-extern u32 sysctl_icmp_ipod_mask;
-extern char sysctl_icmp_ipod_key[32+1];
-#endif
/* From ip_fragment.c */
extern int sysctl_ipfrag_low_thresh;
static
int ipv4_sysctl_forward(ctl_table *ctl, int write, struct file * filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
int val = ipv4_devconf.forwarding;
int ret;
- ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
+ ret = proc_dointvec(ctl, write, filp, buffer, lenp);
if (write && ipv4_devconf.forwarding != val)
inet_forward_change();
.mode = 0644,
.proc_handler = &proc_dointvec
},
-#ifdef CONFIG_ICMP_IPOD
- {
- .ctl_name = NET_IPV4_ICMP_IPOD_VERSION,
- .procname = "icmp_ipod_version",
- .data = &sysctl_icmp_ipod_version,
- .maxlen = sizeof(sysctl_icmp_ipod_version),
- .mode = 0444,
- .proc_handler = &proc_dointvec
- },
- {
- .ctl_name = NET_IPV4_ICMP_IPOD_ENABLED,
- .procname = "icmp_ipod_enabled",
- .data = &sysctl_icmp_ipod_enabled,
- .maxlen = sizeof(sysctl_icmp_ipod_enabled),
- .mode = 0644,
- .proc_handler = &proc_dointvec
- },
- {
- .ctl_name = NET_IPV4_ICMP_IPOD_HOST,
- .procname = "icmp_ipod_host",
- .data = &sysctl_icmp_ipod_host,
- .maxlen = sizeof(sysctl_icmp_ipod_host),
- .mode = 0644,
- .proc_handler = &proc_dointvec
- },
- {
- .ctl_name = NET_IPV4_ICMP_IPOD_MASK,
- .procname = "icmp_ipod_mask",
- .data = &sysctl_icmp_ipod_mask,
- .maxlen = sizeof(sysctl_icmp_ipod_mask),
- .mode = 0644,
- .proc_handler = &proc_dointvec
- },
- {
- .ctl_name = NET_IPV4_ICMP_IPOD_KEY,
- .procname = "icmp_ipod_key",
- .data = &sysctl_icmp_ipod_key,
- .maxlen = sizeof(sysctl_icmp_ipod_key),
- .mode = 0600,
- .proc_handler = &proc_dostring,
- .strategy = &sysctl_string
- },
-#endif
{
.ctl_name = NET_IPV4_ROUTE,
.procname = "route",
atomic_t tcp_orphan_count = ATOMIC_INIT(0);
int sysctl_tcp_default_win_scale = 7;
-
int sysctl_tcp_mem[3];
int sysctl_tcp_wmem[3] = { 4 * 1024, 16 * 1024, 128 * 1024 };
int sysctl_tcp_rmem[3] = { 4 * 1024, 87380, 87380 * 2 };
void tcp_enter_memory_pressure(void)
{
if (!tcp_memory_pressure) {
- NET_INC_STATS(LINUX_MIB_TCPMEMORYPRESSURES);
+ NET_INC_STATS(TCPMemoryPressures);
tcp_memory_pressure = 1;
}
}
* calculation of whether or not we must ACK for the sake of
* a window update.
*/
-void cleanup_rbuf(struct sock *sk, int copied)
+static void cleanup_rbuf(struct sock *sk, int copied)
{
struct tcp_opt *tp = tcp_sk(sk);
int time_to_ack = 0;
struct sk_buff *skb;
struct tcp_opt *tp = tcp_sk(sk);
- NET_ADD_STATS_USER(LINUX_MIB_TCPPREQUEUED, skb_queue_len(&tp->ucopy.prequeue));
+ NET_ADD_STATS_USER(TCPPrequeued, skb_queue_len(&tp->ucopy.prequeue));
/* RX process wants to run with disabled BHs, though it is not
* necessary */
/* __ Restore normal policy in scheduler __ */
if ((chunk = len - tp->ucopy.len) != 0) {
- NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
+ NET_ADD_STATS_USER(TCPDirectCopyFromBacklog, chunk);
len -= chunk;
copied += chunk;
}
tcp_prequeue_process(sk);
if ((chunk = len - tp->ucopy.len) != 0) {
- NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
+ NET_ADD_STATS_USER(TCPDirectCopyFromPrequeue, chunk);
len -= chunk;
copied += chunk;
}
tcp_prequeue_process(sk);
if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
- NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
+ NET_ADD_STATS_USER(TCPDirectCopyFromPrequeue, chunk);
len -= chunk;
copied += chunk;
}
*/
if (data_was_unread) {
/* Unread data was tossed, zap the connection. */
- NET_INC_STATS_USER(LINUX_MIB_TCPABORTONCLOSE);
+ NET_INC_STATS_USER(TCPAbortOnClose);
tcp_set_state(sk, TCP_CLOSE);
tcp_send_active_reset(sk, GFP_KERNEL);
} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
/* Check zero linger _after_ checking for unread data. */
sk->sk_prot->disconnect(sk, 0);
- NET_INC_STATS_USER(LINUX_MIB_TCPABORTONDATA);
+ NET_INC_STATS_USER(TCPAbortOnData);
} else if (tcp_close_state(sk)) {
/* We FIN if the application ate all the data before
* zapping the connection.
if (tp->linger2 < 0) {
tcp_set_state(sk, TCP_CLOSE);
tcp_send_active_reset(sk, GFP_ATOMIC);
- NET_INC_STATS_BH(LINUX_MIB_TCPABORTONLINGER);
+ NET_INC_STATS_BH(TCPAbortOnLinger);
} else {
int tmo = tcp_fin_time(tp);
"sockets\n");
tcp_set_state(sk, TCP_CLOSE);
tcp_send_active_reset(sk, GFP_ATOMIC);
- NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY);
+ NET_INC_STATS_BH(TCPAbortOnMemory);
}
}
atomic_inc(&tcp_orphan_count);
EXPORT_SYMBOL(tcp_shutdown);
EXPORT_SYMBOL(tcp_statistics);
EXPORT_SYMBOL(tcp_timewait_cachep);
-EXPORT_SYMBOL_GPL(cleanup_rbuf);
tp->snd_cwnd_stamp = tcp_time_stamp;
}
-static void init_bictcp(struct tcp_opt *tp)
-{
- tp->bictcp.cnt = 0;
-
- tp->bictcp.last_max_cwnd = 0;
- tp->bictcp.last_cwnd = 0;
- tp->bictcp.last_stamp = 0;
-}
-
/* 5. Recalculate window clamp after socket hit its memory bounds. */
static void tcp_clamp_window(struct sock *sk, struct tcp_opt *tp)
{
/* This exciting event is worth to be remembered. 8) */
if (ts)
- NET_INC_STATS_BH(LINUX_MIB_TCPTSREORDER);
+ NET_INC_STATS_BH(TCPTSReorder);
else if (IsReno(tp))
- NET_INC_STATS_BH(LINUX_MIB_TCPRENOREORDER);
+ NET_INC_STATS_BH(TCPRenoReorder);
else if (IsFack(tp))
- NET_INC_STATS_BH(LINUX_MIB_TCPFACKREORDER);
+ NET_INC_STATS_BH(TCPFACKReorder);
else
- NET_INC_STATS_BH(LINUX_MIB_TCPSACKREORDER);
+ NET_INC_STATS_BH(TCPSACKReorder);
#if FASTRETRANS_DEBUG > 1
printk(KERN_DEBUG "Disorder%d %d %u f%u s%u rr%d\n",
tp->sack_ok, tp->ca_state,
if (before(start_seq, ack)) {
dup_sack = 1;
tp->sack_ok |= 4;
- NET_INC_STATS_BH(LINUX_MIB_TCPDSACKRECV);
+ NET_INC_STATS_BH(TCPDSACKRecv);
} else if (num_sacks > 1 &&
!after(end_seq, ntohl(sp[1].end_seq)) &&
!before(start_seq, ntohl(sp[1].start_seq))) {
dup_sack = 1;
tp->sack_ok |= 4;
- NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFORECV);
+ NET_INC_STATS_BH(TCPDSACKOfoRecv);
}
/* D-SACK for already forgotten data...
tp->lost_out++;
TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
flag |= FLAG_DATA_SACKED;
- NET_INC_STATS_BH(LINUX_MIB_TCPLOSTRETRANSMIT);
+ NET_INC_STATS_BH(TCPLostRetransmit);
}
}
}
tcp_set_ca_state(tp, TCP_CA_Loss);
tp->high_seq = tp->frto_highmark;
TCP_ECN_queue_cwr(tp);
-
- init_bictcp(tp);
}
void tcp_clear_retrans(struct tcp_opt *tp)
*/
if ((skb = skb_peek(&sk->sk_write_queue)) != NULL &&
(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) {
- NET_INC_STATS_BH(LINUX_MIB_TCPSACKRENEGING);
+ NET_INC_STATS_BH(TCPSACKReneging);
tcp_enter_loss(sk, 1);
tp->retransmits++;
DBGUNDO(sk, tp, tp->ca_state == TCP_CA_Loss ? "loss" : "retrans");
tcp_undo_cwr(tp, 1);
if (tp->ca_state == TCP_CA_Loss)
- NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO);
+ NET_INC_STATS_BH(TCPLossUndo);
else
- NET_INC_STATS_BH(LINUX_MIB_TCPFULLUNDO);
+ NET_INC_STATS_BH(TCPFullUndo);
tp->undo_marker = 0;
}
if (tp->snd_una == tp->high_seq && IsReno(tp)) {
DBGUNDO(sk, tp, "D-SACK");
tcp_undo_cwr(tp, 1);
tp->undo_marker = 0;
- NET_INC_STATS_BH(LINUX_MIB_TCPDSACKUNDO);
+ NET_INC_STATS_BH(TCPDSACKUndo);
}
}
DBGUNDO(sk, tp, "Hoe");
tcp_undo_cwr(tp, 0);
- NET_INC_STATS_BH(LINUX_MIB_TCPPARTIALUNDO);
+ NET_INC_STATS_BH(TCPPartialUndo);
/* So... Do not make Hoe's retransmit yet.
* If the first packet was delayed, the rest
tp->lost_out = 0;
tp->left_out = tp->sacked_out;
tcp_undo_cwr(tp, 1);
- NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO);
+ NET_INC_STATS_BH(TCPLossUndo);
tp->retransmits = 0;
tp->undo_marker = 0;
if (!IsReno(tp))
tp->ca_state != TCP_CA_Open &&
tp->fackets_out > tp->reordering) {
tcp_mark_head_lost(sk, tp, tp->fackets_out-tp->reordering, tp->high_seq);
- NET_INC_STATS_BH(LINUX_MIB_TCPLOSS);
+ NET_INC_STATS_BH(TCPLoss);
}
/* D. Synchronize left_out to current state. */
/* Otherwise enter Recovery state */
if (IsReno(tp))
- NET_INC_STATS_BH(LINUX_MIB_TCPRENORECOVERY);
+ NET_INC_STATS_BH(TCPRenoRecovery);
else
- NET_INC_STATS_BH(LINUX_MIB_TCPSACKRECOVERY);
+ NET_INC_STATS_BH(TCPSackRecovery);
tp->high_seq = tp->snd_nxt;
tp->prior_ssthresh = 0;
if (!sysctl_tcp_bic)
return tp->snd_cwnd;
- if (tp->bictcp.last_cwnd == tp->snd_cwnd &&
- (s32)(tcp_time_stamp - tp->bictcp.last_stamp) <= (HZ>>5))
- return tp->bictcp.cnt;
-
+ if (tp->bictcp.last_cwnd == tp->snd_cwnd)
+ return tp->bictcp.cnt; /* same cwnd, no update */
+
tp->bictcp.last_cwnd = tp->snd_cwnd;
- tp->bictcp.last_stamp = tcp_time_stamp;
/* start off normal */
if (tp->snd_cwnd <= sysctl_tcp_bic_low_window)
tcp_westwood_fast_bw(sk, skb);
flag |= FLAG_WIN_UPDATE;
- NET_INC_STATS_BH(LINUX_MIB_TCPHPACKS);
+ NET_INC_STATS_BH(TCPHPAcks);
} else {
if (ack_seq != TCP_SKB_CB(skb)->end_seq)
flag |= FLAG_DATA;
else
- NET_INC_STATS_BH(LINUX_MIB_TCPPUREACKS);
+ NET_INC_STATS_BH(TCPPureAcks);
flag |= tcp_ack_update_window(sk, tp, skb, ack, ack_seq);
{
if (tp->sack_ok && sysctl_tcp_dsack) {
if (before(seq, tp->rcv_nxt))
- NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOLDSENT);
+ NET_INC_STATS_BH(TCPDSACKOldSent);
else
- NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFOSENT);
+ NET_INC_STATS_BH(TCPDSACKOfoSent);
tp->dsack = 1;
tp->duplicate_sack[0].start_seq = seq;
if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
- NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOST);
+ NET_INC_STATS_BH(DelayedACKLost);
tcp_enter_quickack_mode(tp);
if (tp->sack_ok && sysctl_tcp_dsack) {
if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
/* A retransmit, 2nd most common case. Force an immediate ack. */
- NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOST);
+ NET_INC_STATS_BH(DelayedACKLost);
tcp_dsack_set(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
out_of_window:
struct sk_buff *next = skb->next;
__skb_unlink(skb, skb->list);
__kfree_skb(skb);
- NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED);
+ NET_INC_STATS_BH(TCPRcvCollapsed);
skb = next;
continue;
}
struct sk_buff *next = skb->next;
__skb_unlink(skb, skb->list);
__kfree_skb(skb);
- NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED);
+ NET_INC_STATS_BH(TCPRcvCollapsed);
skb = next;
if (skb == tail || skb->h.th->syn || skb->h.th->fin)
return;
SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq);
- NET_INC_STATS_BH(LINUX_MIB_PRUNECALLED);
+ NET_INC_STATS_BH(PruneCalled);
if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
tcp_clamp_window(sk, tp);
/* First, purge the out_of_order queue. */
if (skb_queue_len(&tp->out_of_order_queue)) {
- NET_ADD_STATS_BH(LINUX_MIB_OFOPRUNED,
+ NET_ADD_STATS_BH(OfoPruned,
skb_queue_len(&tp->out_of_order_queue));
__skb_queue_purge(&tp->out_of_order_queue);
* drop receive data on the floor. It will get retransmitted
* and hopefully then we'll have sufficient space.
*/
- NET_INC_STATS_BH(LINUX_MIB_RCVPRUNED);
+ NET_INC_STATS_BH(RcvPruned);
/* Massive buffer overcommit. */
tp->pred_flags = 0;
tcp_data_snd_check(sk);
return 0;
} else { /* Header too small */
- TCP_INC_STATS_BH(TCP_MIB_INERRS);
+ TCP_INC_STATS_BH(TcpInErrs);
goto discard;
}
} else {
__skb_pull(skb, tcp_header_len);
tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
- NET_INC_STATS_BH(LINUX_MIB_TCPHPHITSTOUSER);
+ NET_INC_STATS_BH(TCPHPHitsToUser);
eaten = 1;
}
}
if ((int)skb->truesize > sk->sk_forward_alloc)
goto step5;
- NET_INC_STATS_BH(LINUX_MIB_TCPHPHITS);
+ NET_INC_STATS_BH(TCPHPHits);
/* Bulk data transfer: receiver */
__skb_pull(skb,tcp_header_len);
if (tcp_fast_parse_options(skb, th, tp) && tp->saw_tstamp &&
tcp_paws_discard(tp, skb)) {
if (!th->rst) {
- NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
+ NET_INC_STATS_BH(PAWSEstabRejected);
tcp_send_dupack(sk, skb);
goto discard;
}
tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
- TCP_INC_STATS_BH(TCP_MIB_INERRS);
- NET_INC_STATS_BH(LINUX_MIB_TCPABORTONSYN);
+ TCP_INC_STATS_BH(TcpInErrs);
+ NET_INC_STATS_BH(TCPAbortOnSyn);
tcp_reset(sk);
return 1;
}
return 0;
csum_error:
- TCP_INC_STATS_BH(TCP_MIB_INERRS);
+ TCP_INC_STATS_BH(TcpInErrs);
discard:
__kfree_skb(skb);
if (tp->saw_tstamp && tp->rcv_tsecr &&
!between(tp->rcv_tsecr, tp->retrans_stamp,
tcp_time_stamp)) {
- NET_INC_STATS_BH(LINUX_MIB_PAWSACTIVEREJECTED);
+ NET_INC_STATS_BH(PAWSActiveRejected);
goto reset_and_undo;
}
return 1;
init_westwood(sk);
- init_bictcp(tp);
/* Now we have several options: In theory there is
* nothing else in the frame. KA9Q has an option to
case TCP_SYN_SENT:
init_westwood(sk);
- init_bictcp(tp);
queued = tcp_rcv_synsent_state_process(sk, skb, th, len);
if (queued >= 0)
if (tcp_fast_parse_options(skb, th, tp) && tp->saw_tstamp &&
tcp_paws_discard(tp, skb)) {
if (!th->rst) {
- NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
+ NET_INC_STATS_BH(PAWSEstabRejected);
tcp_send_dupack(sk, skb);
goto discard;
}
* Check for a SYN in window.
*/
if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
- NET_INC_STATS_BH(LINUX_MIB_TCPABORTONSYN);
+ NET_INC_STATS_BH(TCPAbortOnSyn);
tcp_reset(sk);
return 1;
}
(TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) {
tcp_done(sk);
- NET_INC_STATS_BH(LINUX_MIB_TCPABORTONDATA);
+ NET_INC_STATS_BH(TCPAbortOnData);
return 1;
}
if (sk->sk_shutdown & RCV_SHUTDOWN) {
if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
- NET_INC_STATS_BH(LINUX_MIB_TCPABORTONDATA);
+ NET_INC_STATS_BH(TCPAbortOnData);
tcp_reset(sk);
return 1;
}
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
-#include <linux/vserver/debug.h>
extern int sysctl_ip_dynaddr;
int sysctl_tcp_tw_reuse;
tcp_sk(sk)->bind_hash = tb;
}
+/*
+ Return 1 if addr match the socket IP list
+ or the socket is INADDR_ANY
+*/
+static inline int tcp_in_list(struct sock *sk, u32 addr)
+{
+ struct nx_info *nxi = sk->sk_nx_info;
+
+ vxdprintk("tcp_in_list(%p) %p,%p;%lx\n",
+ sk, nxi, sk->sk_socket,
+ (sk->sk_socket?sk->sk_socket->flags:0));
+
+ if (nxi) {
+ int n = nxi->nbipv4;
+ int i;
+
+ for (i=0; i<n; i++)
+ if (nxi->ipv4[i] == addr)
+ return 1;
+ }
+ else if (!tcp_v4_rcv_saddr(sk) || tcp_v4_rcv_saddr(sk) == addr)
+ return 1;
+ return 0;
+}
+
+/*
+ Check if the addresses in sk1 conflict with those in sk2
+*/
+int tcp_ipv4_addr_conflict(struct sock *sk1, struct sock *sk2)
+{
+ if (sk1 && sk2)
+ nxdprintk("inet_bind(%p,%p) %p,%p;%lx %p,%p;%lx\n",
+ sk1, sk2,
+ sk1->sk_nx_info, sk1->sk_socket,
+ (sk1->sk_socket?sk1->sk_socket->flags:0),
+ sk2->sk_nx_info, sk2->sk_socket,
+ (sk2->sk_socket?sk2->sk_socket->flags:0));
+
+ if (tcp_v4_rcv_saddr(sk1)) {
+ /* Bind to one address only */
+ return tcp_in_list (sk2, tcp_v4_rcv_saddr(sk1));
+ } else if (sk1->sk_nx_info) {
+ /* A restricted bind(any) */
+ struct nx_info *nxi = sk1->sk_nx_info;
+ int n = nxi->nbipv4;
+ int i;
+
+ for (i=0; i<n; i++)
+ if (tcp_in_list (sk2, nxi->ipv4[i]))
+ return 1;
+ } else /* A bind(any) do not allow other bind on the same port */
+ return 1;
+ return 0;
+}
+
static inline int tcp_bind_conflict(struct sock *sk, struct tcp_bind_bucket *tb)
{
- const u32 sk_rcv_saddr = tcp_v4_rcv_saddr(sk);
struct sock *sk2;
struct hlist_node *node;
int reuse = sk->sk_reuse;
sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
if (!reuse || !sk2->sk_reuse ||
sk2->sk_state == TCP_LISTEN) {
- const u32 sk2_rcv_saddr = tcp_v4_rcv_saddr(sk2);
- if (!sk2_rcv_saddr || !sk_rcv_saddr ||
- sk2_rcv_saddr == sk_rcv_saddr)
+ if (tcp_ipv4_addr_conflict(sk, sk2))
break;
}
}
wake_up(&tcp_lhash_wait);
}
+/*
+ Check if an address is in the list
+*/
+static inline int tcp_addr_in_list(
+ u32 rcv_saddr,
+ u32 daddr,
+ struct nx_info *nx_info)
+{
+ if (rcv_saddr == daddr)
+ return 1;
+ else if (rcv_saddr == 0) {
+ /* Accept any address or check the list */
+ if (!nx_info)
+ return 1;
+ else {
+ int n = nx_info->nbipv4;
+ int i;
+
+ for (i=0; i<n; i++)
+ if (nx_info->ipv4[i] == daddr)
+ return 1;
+ }
+ }
+ return 0;
+}
+
+
+
/* Don't inline this cruft. Here are some nice properties to
* exploit here. The BSD API does not allow a listening TCP
* to specify the remote port nor the remote address for the
__u32 rcv_saddr = inet->rcv_saddr;
score = (sk->sk_family == PF_INET ? 1 : 0);
- if (rcv_saddr) {
- if (rcv_saddr != daddr)
- continue;
+ if (tcp_addr_in_list(rcv_saddr, daddr, sk->sk_nx_info))
score+=2;
- }
+ else
+ continue;
if (sk->sk_bound_dev_if) {
if (sk->sk_bound_dev_if != dif)
continue;
if (!hlist_empty(head)) {
struct inet_opt *inet = inet_sk((sk = __sk_head(head)));
if (inet->num == hnum && !sk->sk_node.next &&
- (!inet->rcv_saddr || inet->rcv_saddr == daddr) &&
(sk->sk_family == PF_INET || !ipv6_only_sock(sk)) &&
+ tcp_addr_in_list(inet->rcv_saddr, daddr, sk->sk_nx_info) &&
!sk->sk_bound_dev_if)
goto sherry_cache;
sk = __tcp_v4_lookup_listener(head, daddr, hnum, dif);
if (twp) {
*twp = tw;
- NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
+ NET_INC_STATS_BH(TimeWaitRecycled);
} else if (tw) {
/* Silly. Should hash-dance instead... */
tcp_tw_deschedule(tw);
- NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
+ NET_INC_STATS_BH(TimeWaitRecycled);
tcp_tw_put(tw);
}
int err;
if (skb->len < (iph->ihl << 2) + 8) {
- ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
+ ICMP_INC_STATS_BH(IcmpInErrors);
return;
}
sk = tcp_v4_lookup(iph->daddr, th->dest, iph->saddr,
th->source, tcp_v4_iif(skb));
if (!sk) {
- ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
+ ICMP_INC_STATS_BH(IcmpInErrors);
return;
}
if (sk->sk_state == TCP_TIME_WAIT) {
* servers this needs to be solved differently.
*/
if (sock_owned_by_user(sk))
- NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);
+ NET_INC_STATS_BH(LockDroppedIcmps);
if (sk->sk_state == TCP_CLOSE)
goto out;
seq = ntohl(th->seq);
if (sk->sk_state != TCP_LISTEN &&
!between(seq, tp->snd_una, tp->snd_nxt)) {
- NET_INC_STATS(LINUX_MIB_OUTOFWINDOWICMPS);
+ NET_INC_STATS(OutOfWindowIcmps);
goto out;
}
BUG_TRAP(!req->sk);
if (seq != req->snt_isn) {
- NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
+ NET_INC_STATS_BH(OutOfWindowIcmps);
goto out;
}
It can f.e. if SYNs crossed.
*/
if (!sock_owned_by_user(sk)) {
- TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
+ TCP_INC_STATS_BH(TcpAttemptFails);
sk->sk_err = err;
sk->sk_error_report(sk);
ip_send_reply(tcp_socket->sk, skb, &arg, sizeof rth);
- TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
- TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
+ TCP_INC_STATS_BH(TcpOutSegs);
+ TCP_INC_STATS_BH(TcpOutRsts);
}
/* The code following below sending ACKs in SYN-RECV and TIME-WAIT states
ip_send_reply(tcp_socket->sk, skb, &arg, arg.iov[0].iov_len);
- TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
+ TCP_INC_STATS_BH(TcpOutSegs);
}
static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
.dport = req->rmt_port } } };
if (ip_route_output_flow(&rt, &fl, sk, 0)) {
- IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
+ IP_INC_STATS_BH(OutNoRoutes);
return NULL;
}
if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway) {
ip_rt_put(rt);
- IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
+ IP_INC_STATS_BH(OutNoRoutes);
return NULL;
}
return &rt->u.dst;
if (xtime.tv_sec < peer->tcp_ts_stamp + TCP_PAWS_MSL &&
(s32)(peer->tcp_ts - req->ts_recent) >
TCP_PAWS_WINDOW) {
- NET_INC_STATS_BH(LINUX_MIB_PAWSPASSIVEREJECTED);
+ NET_INC_STATS_BH(PAWSPassiveRejected);
dst_release(dst);
goto drop_and_free;
}
drop_and_free:
tcp_openreq_free(req);
drop:
- TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
+ TCP_INC_STATS_BH(TcpAttemptFails);
return 0;
}
return newsk;
exit_overflow:
- NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
+ NET_INC_STATS_BH(ListenOverflows);
exit:
- NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
+ NET_INC_STATS_BH(ListenDrops);
dst_release(dst);
return NULL;
}
return 0;
csum_err:
- TCP_INC_STATS_BH(TCP_MIB_INERRS);
+ TCP_INC_STATS_BH(TcpInErrs);
goto discard;
}
-extern struct proto_ops inet_stream_ops;
-
-extern int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
-
/*
* From tcp_input.c
*/
goto discard_it;
/* Count it even if it's bad */
- TCP_INC_STATS_BH(TCP_MIB_INSEGS);
+ TCP_INC_STATS_BH(TcpInSegs);
if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
goto discard_it;
goto no_tcp_socket;
process:
- /* Silently drop if VNET is active (if INET bind() has been
- * overridden) and the context is not entitled to read the
- * packet.
- */
- if (inet_stream_ops.bind != inet_bind &&
- (int) sk->sk_xid > 0 && sk->sk_xid != skb->xid)
- goto discard_it;
-
if (sk->sk_state == TCP_TIME_WAIT)
goto do_time_wait;
if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
bad_packet:
- TCP_INC_STATS_BH(TCP_MIB_INERRS);
+ TCP_INC_STATS_BH(TcpInErrs);
} else {
tcp_v4_send_reset(skb);
}
}
if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
- TCP_INC_STATS_BH(TCP_MIB_INERRS);
+ TCP_INC_STATS_BH(TcpInErrs);
tcp_tw_put((struct tcp_tw_bucket *) sk);
goto discard_it;
}
if (tp->bind_hash)
tcp_put_port(sk);
- /*
- * If sendmsg cached page exists, toss it.
- */
- if (sk->sk_sndmsg_page) {
- __free_page(sk->sk_sndmsg_page);
- sk->sk_sndmsg_page = NULL;
- }
-
atomic_dec(&tcp_sockets_allocated);
return 0;
req = req->dl_next;
while (1) {
while (req) {
- vxdprintk(VXD_CBIT(net, 6),
- "sk,req: %p [#%d] (from %d)",
- req->sk, req->sk->sk_xid, current->xid);
if (!vx_check(req->sk->sk_xid, VX_IDENT|VX_WATCH))
continue;
if (req->class->family == st->family) {
sk = sk_next(sk);
get_sk:
sk_for_each_from(sk, node) {
- vxdprintk(VXD_CBIT(net, 6), "sk: %p [#%d] (from %d)",
- sk, sk->sk_xid, current->xid);
if (!vx_check(sk->sk_xid, VX_IDENT|VX_WATCH))
continue;
if (sk->sk_family == st->family) {
read_lock(&tcp_ehash[st->bucket].lock);
sk_for_each(sk, node, &tcp_ehash[st->bucket].chain) {
- vxdprintk(VXD_CBIT(net, 6),
- "sk,egf: %p [#%d] (from %d)",
- sk, sk->sk_xid, current->xid);
if (!vx_check(sk->sk_xid, VX_IDENT|VX_WATCH))
continue;
if (sk->sk_family != st->family)
st->state = TCP_SEQ_STATE_TIME_WAIT;
tw_for_each(tw, node,
&tcp_ehash[st->bucket + tcp_ehash_size].chain) {
- vxdprintk(VXD_CBIT(net, 6),
- "tw: %p [#%d] (from %d)",
- tw, tw->tw_xid, current->xid);
if (!vx_check(tw->tw_xid, VX_IDENT|VX_WATCH))
continue;
if (tw->tw_family != st->family)
tw = cur;
tw = tw_next(tw);
get_tw:
- while (tw && (tw->tw_family != st->family ||
- !vx_check(tw->tw_xid, VX_IDENT|VX_WATCH))) {
+ while (tw && tw->tw_family != st->family &&
+ !vx_check(tw->tw_xid, VX_IDENT|VX_WATCH)) {
tw = tw_next(tw);
}
if (tw) {
sk = sk_next(sk);
sk_for_each_from(sk, node) {
- vxdprintk(VXD_CBIT(net, 6),
- "sk,egn: %p [#%d] (from %d)",
- sk, sk->sk_xid, current->xid);
if (!vx_check(sk->sk_xid, VX_IDENT|VX_WATCH))
continue;
if (sk->sk_family == st->family)
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>
-#include <linux/vs_limit.h>
#include <linux/vs_socket.h>
#include <net/tcp.h>
#include <net/inet_common.h>
}
if (paws_reject)
- NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
+ NET_INC_STATS_BH(PAWSEstabRejected);
if(!th->rst) {
/* In this case we must reset the TIMEWAIT timer.
}
tcp_tw_count -= killed;
- NET_ADD_STATS_BH(LINUX_MIB_TIMEWAITED, killed);
+ NET_ADD_STATS_BH(TimeWaited, killed);
return ret;
}
out:
if ((tcp_tw_count -= killed) == 0)
del_timer(&tcp_tw_timer);
- NET_ADD_STATS_BH(LINUX_MIB_TIMEWAITKILLED, killed);
+ NET_ADD_STATS_BH(TimeWaitKilled, killed);
spin_unlock(&tw_death_lock);
}
if ((filter = newsk->sk_filter) != NULL)
sk_filter_charge(newsk, filter);
- if (sk->sk_create_child)
- sk->sk_create_child(sk, newsk);
-
if (unlikely(xfrm_sk_clone_policy(newsk))) {
/* It is still raw copy of parent, so invalidate
* destructor and make plain sk_free() */
newtp->snd_cwnd = 2;
newtp->snd_cwnd_cnt = 0;
+ newtp->bictcp.cnt = 0;
+ newtp->bictcp.last_max_cwnd = newtp->bictcp.last_cwnd = 0;
+
newtp->frto_counter = 0;
newtp->frto_highmark = 0;
newsk->sk_priority = 0;
atomic_set(&newsk->sk_refcnt, 2);
- set_vx_info(&newsk->sk_vx_info, sk->sk_vx_info);
- newsk->sk_xid = sk->sk_xid;
- vx_sock_inc(newsk);
- set_nx_info(&newsk->sk_nx_info, sk->sk_nx_info);
- newsk->sk_nid = sk->sk_nid;
+ /* hmm, maybe from socket? */
+ set_vx_info(&newsk->sk_vx_info, current->vx_info);
+ set_nx_info(&newsk->sk_nx_info, current->nx_info);
#ifdef INET_REFCNT_DEBUG
atomic_inc(&inet_sock_nr);
#endif
newsk->sk_no_largesend = 1;
tcp_vegas_init(newtp);
- TCP_INC_STATS_BH(TCP_MIB_PASSIVEOPENS);
+ TCP_INC_STATS_BH(TcpPassiveOpens);
}
return newsk;
}
if (!(flg & TCP_FLAG_RST))
req->class->send_ack(skb, req);
if (paws_reject)
- NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
+ NET_INC_STATS_BH(PAWSEstabRejected);
return NULL;
}
}
embryonic_reset:
- NET_INC_STATS_BH(LINUX_MIB_EMBRYONICRSTS);
+ NET_INC_STATS_BH(EmbryonicRsts);
if (!(flg & TCP_FLAG_RST))
req->class->send_reset(skb);
tp->rcv_wnd = new_win;
tp->rcv_wup = tp->rcv_nxt;
- /* Make sure we do not exceed the maximum possible
- * scaled window.
- */
- if (!tp->rcv_wscale)
- new_win = min(new_win, MAX_TCP_WINDOW);
- else
- new_win = min(new_win, (65535U << tp->rcv_wscale));
-
/* RFC1323 scaling applied */
new_win >>= tp->rcv_wscale;
if (skb->len != tcp_header_size)
tcp_event_data_sent(tp, skb, sk);
- TCP_INC_STATS(TCP_MIB_OUTSEGS);
+ TCP_INC_STATS(TcpOutSegs);
err = tp->af_specific->queue_xmit(skb, 0);
if (err <= 0)
if (err == 0) {
/* Update global TCP statistics. */
- TCP_INC_STATS(TCP_MIB_RETRANSSEGS);
+ TCP_INC_STATS(TcpRetransSegs);
#if FASTRETRANS_DEBUG > 0
if (TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS) {
if (tcp_retransmit_skb(sk, skb))
return;
if (tp->ca_state != TCP_CA_Loss)
- NET_INC_STATS_BH(LINUX_MIB_TCPFASTRETRANS);
+ NET_INC_STATS_BH(TCPFastRetrans);
else
- NET_INC_STATS_BH(LINUX_MIB_TCPSLOWSTARTRETRANS);
+ NET_INC_STATS_BH(TCPSlowStartRetrans);
if (skb ==
skb_peek(&sk->sk_write_queue))
if (skb == skb_peek(&sk->sk_write_queue))
tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto);
- NET_INC_STATS_BH(LINUX_MIB_TCPFORWARDRETRANS);
+ NET_INC_STATS_BH(TCPForwardRetrans);
}
}
/* NOTE: No TCP options attached and we never retransmit this. */
skb = alloc_skb(MAX_TCP_HEADER, priority);
if (!skb) {
- NET_INC_STATS(LINUX_MIB_TCPABORTFAILED);
+ NET_INC_STATS(TCPAbortFailed);
return;
}
TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq;
TCP_SKB_CB(skb)->when = tcp_time_stamp;
if (tcp_transmit_skb(sk, skb))
- NET_INC_STATS(LINUX_MIB_TCPABORTFAILED);
+ NET_INC_STATS(TCPAbortFailed);
}
/* WARNING: This routine must only be called when we have already sent
skb->csum = 0;
th->doff = (tcp_header_size >> 2);
- TCP_INC_STATS(TCP_MIB_OUTSEGS);
+ TCP_INC_STATS(TcpOutSegs);
return skb;
}
sk_charge_skb(sk, buff);
tp->packets_out++;
tcp_transmit_skb(sk, skb_clone(buff, GFP_KERNEL));
- TCP_INC_STATS(TCP_MIB_ACTIVEOPENS);
+ TCP_INC_STATS(TcpActiveOpens);
/* Timer for repeating the SYN until an answer. */
tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto);
sk->sk_error_report(sk);
tcp_done(sk);
- NET_INC_STATS_BH(LINUX_MIB_TCPABORTONTIMEOUT);
+ NET_INC_STATS_BH(TCPAbortOnTimeout);
}
/* Do not allow orphaned sockets to eat all our resources.
if (do_reset)
tcp_send_active_reset(sk, GFP_ATOMIC);
tcp_done(sk);
- NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY);
+ NET_INC_STATS_BH(TCPAbortOnMemory);
return 1;
}
return 0;
if (sock_owned_by_user(sk)) {
/* Try again later. */
tp->ack.blocked = 1;
- NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOCKED);
+ NET_INC_STATS_BH(DelayedACKLocked);
sk_reset_timer(sk, &tp->delack_timer, jiffies + TCP_DELACK_MIN);
goto out_unlock;
}
if (skb_queue_len(&tp->ucopy.prequeue)) {
struct sk_buff *skb;
- NET_ADD_STATS_BH(LINUX_MIB_TCPSCHEDULERFAILED,
- skb_queue_len(&tp->ucopy.prequeue));
+ NET_ADD_STATS_BH(TCPSchedulerFailed,
+ skb_queue_len(&tp->ucopy.prequeue));
while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
sk->sk_backlog_rcv(sk, skb);
tp->ack.ato = TCP_ATO_MIN;
}
tcp_send_ack(sk);
- NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKS);
+ NET_INC_STATS_BH(DelayedACKs);
}
TCP_CHECK_TIMER(sk);
if (tp->ca_state == TCP_CA_Disorder || tp->ca_state == TCP_CA_Recovery) {
if (tp->sack_ok) {
if (tp->ca_state == TCP_CA_Recovery)
- NET_INC_STATS_BH(LINUX_MIB_TCPSACKRECOVERYFAIL);
+ NET_INC_STATS_BH(TCPSackRecoveryFail);
else
- NET_INC_STATS_BH(LINUX_MIB_TCPSACKFAILURES);
+ NET_INC_STATS_BH(TCPSackFailures);
} else {
if (tp->ca_state == TCP_CA_Recovery)
- NET_INC_STATS_BH(LINUX_MIB_TCPRENORECOVERYFAIL);
+ NET_INC_STATS_BH(TCPRenoRecoveryFail);
else
- NET_INC_STATS_BH(LINUX_MIB_TCPRENOFAILURES);
+ NET_INC_STATS_BH(TCPRenoFailures);
}
} else if (tp->ca_state == TCP_CA_Loss) {
- NET_INC_STATS_BH(LINUX_MIB_TCPLOSSFAILURES);
+ NET_INC_STATS_BH(TCPLossFailures);
} else {
- NET_INC_STATS_BH(LINUX_MIB_TCPTIMEOUTS);
+ NET_INC_STATS_BH(TCPTimeouts);
}
}
/* Shared by v4/v6 udp. */
int udp_port_rover;
+int tcp_ipv4_addr_conflict(struct sock *sk1, struct sock *sk2);
+
static int udp_v4_get_port(struct sock *sk, unsigned short snum)
{
struct hlist_node *node;
(!sk2->sk_bound_dev_if ||
!sk->sk_bound_dev_if ||
sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
- (!inet2->rcv_saddr ||
- !inet->rcv_saddr ||
- inet2->rcv_saddr == inet->rcv_saddr) &&
+ tcp_ipv4_addr_conflict(sk2, sk) &&
(!sk2->sk_reuse || !sk->sk_reuse))
goto fail;
}
write_unlock_bh(&udp_hash_lock);
}
+static inline int udp_in_list(struct nx_info *nx_info, u32 addr)
+{
+ int n = nx_info->nbipv4;
+ int i;
+
+ for (i=0; i<n; i++)
+ if (nx_info->ipv4[i] == addr)
+ return 1;
+ return 0;
+}
+
/* UDP is nearly always wildcards out the wazoo, it makes no sense to try
* harder than this. -DaveM
*/
if (inet->rcv_saddr != daddr)
continue;
score+=2;
+ } else if (sk->sk_nx_info) {
+ if (udp_in_list(sk->sk_nx_info, daddr))
+ score+=2;
+ else
+ continue;
}
if (inet->daddr) {
if (inet->daddr != saddr)
if (inet->num != hnum ||
(inet->daddr && inet->daddr != rmt_addr) ||
(inet->dport != rmt_port && inet->dport) ||
- (inet->rcv_saddr && inet->rcv_saddr != loc_addr) ||
+ (inet->rcv_saddr && inet->rcv_saddr != loc_addr &&
+ inet->rcv_saddr2 && inet->rcv_saddr2 != loc_addr) ||
ipv6_only_sock(s) ||
(s->sk_bound_dev_if && s->sk_bound_dev_if != dif))
continue;
sk = udp_v4_lookup(iph->daddr, uh->dest, iph->saddr, uh->source, skb->dev->ifindex);
if (sk == NULL) {
- ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
+ ICMP_INC_STATS_BH(IcmpInErrors);
return; /* No socket for error */
}
.uli_u = { .ports =
{ .sport = inet->sport,
.dport = dport } } };
+ struct nx_info *nxi = sk->sk_nx_info;
+
+ if (nxi) {
+ err = ip_find_src(nxi, &rt, &fl);
+ if (err)
+ goto out;
+ if (daddr == IPI_LOOPBACK && !vx_check(0, VX_ADMIN))
+ daddr = fl.fl4_dst = nxi->ipv4[0];
+ }
err = ip_route_output_flow(&rt, &fl, sk, !(msg->msg_flags&MSG_DONTWAIT));
if (err)
goto out;
if (free)
kfree(ipc.opt);
if (!err) {
- UDP_INC_STATS_USER(UDP_MIB_OUTDATAGRAMS);
+ UDP_INC_STATS_USER(UdpOutDatagrams);
return len;
}
return err;
}
if (inet->cmsg_flags)
ip_cmsg_recv(msg, skb);
-
err = copied;
- if (flags & MSG_TRUNC)
- err = skb->len - sizeof(struct udphdr);
out_free:
skb_free_datagram(sk, skb);
return err;
csum_copy_err:
- UDP_INC_STATS_BH(UDP_MIB_INERRORS);
+ UDP_INC_STATS_BH(UdpInErrors);
/* Clear queue. */
if (flags&MSG_PEEK) {
goto try_again;
}
+int udp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+{
+ struct inet_opt *inet = inet_sk(sk);
+ struct sockaddr_in *usin = (struct sockaddr_in *) uaddr;
+ struct rtable *rt;
+ u32 saddr;
+ int oif;
+ int err;
+
+
+ if (addr_len < sizeof(*usin))
+ return -EINVAL;
+
+ if (usin->sin_family != AF_INET)
+ return -EAFNOSUPPORT;
+
+ sk_dst_reset(sk);
+
+ oif = sk->sk_bound_dev_if;
+ saddr = inet->saddr;
+ if (MULTICAST(usin->sin_addr.s_addr)) {
+ if (!oif)
+ oif = inet->mc_index;
+ if (!saddr)
+ saddr = inet->mc_addr;
+ }
+ err = ip_route_connect(&rt, usin->sin_addr.s_addr, saddr,
+ RT_CONN_FLAGS(sk), oif,
+ IPPROTO_UDP,
+ inet->sport, usin->sin_port, sk);
+ if (err)
+ return err;
+ if ((rt->rt_flags & RTCF_BROADCAST) && !sock_flag(sk, SOCK_BROADCAST)) {
+ ip_rt_put(rt);
+ return -EACCES;
+ }
+ if (!inet->saddr)
+ inet->saddr = rt->rt_src; /* Update source address */
+ if (!inet->rcv_saddr)
+ inet->rcv_saddr = rt->rt_src;
+ inet->daddr = rt->rt_dst;
+ inet->dport = usin->sin_port;
+ sk->sk_state = TCP_ESTABLISHED;
+ inet->id = jiffies;
+
+ sk_dst_set(sk, &rt->u.dst);
+ return(0);
+}
int udp_disconnect(struct sock *sk, int flags)
{
} else
/* Must be an IKE packet.. pass it through */
return 1;
- break;
+
case UDP_ENCAP_ESPINUDP_NON_IKE:
/* Check if this is a keepalive packet. If so, eat it. */
if (len == 1 && udpdata[0] == 0xff) {
} else
/* Must be an IKE packet.. pass it through */
return 1;
- break;
}
/* At this point we are sure that this is an ESPinUDP packet,
if (ret < 0) {
/* process the ESP packet */
ret = xfrm4_rcv_encap(skb, up->encap_type);
- UDP_INC_STATS_BH(UDP_MIB_INDATAGRAMS);
+ UDP_INC_STATS_BH(UdpInDatagrams);
return -ret;
}
/* FALLTHROUGH -- it's a UDP Packet */
if (sk->sk_filter && skb->ip_summed != CHECKSUM_UNNECESSARY) {
if (__udp_checksum_complete(skb)) {
- UDP_INC_STATS_BH(UDP_MIB_INERRORS);
+ UDP_INC_STATS_BH(UdpInErrors);
kfree_skb(skb);
return -1;
}
}
if (sock_queue_rcv_skb(sk,skb)<0) {
- UDP_INC_STATS_BH(UDP_MIB_INERRORS);
+ UDP_INC_STATS_BH(UdpInErrors);
kfree_skb(skb);
return -1;
}
- UDP_INC_STATS_BH(UDP_MIB_INDATAGRAMS);
+ UDP_INC_STATS_BH(UdpInDatagrams);
return 0;
}
if (udp_checksum_complete(skb))
goto csum_error;
- UDP_INC_STATS_BH(UDP_MIB_NOPORTS);
+ UDP_INC_STATS_BH(UdpNoPorts);
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
/*
NIPQUAD(daddr),
ntohs(uh->dest)));
no_header:
- UDP_INC_STATS_BH(UDP_MIB_INERRORS);
+ UDP_INC_STATS_BH(UdpInErrors);
kfree_skb(skb);
return(0);
ntohs(uh->dest),
ulen));
drop:
- UDP_INC_STATS_BH(UDP_MIB_INERRORS);
+ UDP_INC_STATS_BH(UdpInErrors);
kfree_skb(skb);
return(0);
}
struct proto udp_prot = {
.name = "UDP",
.close = udp_close,
- .connect = ip4_datagram_connect,
+ .connect = udp_connect,
.disconnect = udp_disconnect,
.ioctl = udp_ioctl,
.destroy = udp_destroy_sock,
}
#endif /* CONFIG_PROC_FS */
+EXPORT_SYMBOL(udp_connect);
EXPORT_SYMBOL(udp_disconnect);
EXPORT_SYMBOL(udp_hash);
EXPORT_SYMBOL(udp_hash_lock);
#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/xfrm.h>
-#include <net/icmp.h>
/* Add encapsulation header.
*
memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
}
-static int xfrm4_tunnel_check_size(struct sk_buff *skb)
-{
- int mtu, ret = 0;
- struct dst_entry *dst;
- struct iphdr *iph = skb->nh.iph;
-
- if (IPCB(skb)->flags & IPSKB_XFRM_TUNNEL_SIZE)
- goto out;
-
- IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE;
-
- if (!(iph->frag_off & htons(IP_DF)))
- goto out;
-
- dst = skb->dst;
- mtu = dst_pmtu(dst) - dst->header_len - dst->trailer_len;
- if (skb->len > mtu) {
- icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
- ret = -EMSGSIZE;
- }
-out:
- return ret;
-}
-
int xfrm4_output(struct sk_buff **pskb)
{
struct sk_buff *skb = *pskb;
proto == x->id.proto &&
saddr->a4 == x->props.saddr.a4 &&
reqid == x->props.reqid &&
- x->km.state == XFRM_STATE_ACQ &&
- !x->id.spi) {
+ x->km.state == XFRM_STATE_ACQ) {
+ if (!x0)
+ x0 = x;
+ if (x->id.spi)
+ continue;
x0 = x;
break;
}
#include <linux/skbuff.h>
#include <net/xfrm.h>
#include <net/ip.h>
-#include <net/protocol.h>
+#include <net/icmp.h>
+#include <net/inet_ecn.h>
+
+int xfrm4_tunnel_check_size(struct sk_buff *skb)
+{
+ int mtu, ret = 0;
+ struct dst_entry *dst;
+ struct iphdr *iph = skb->nh.iph;
+
+ if (IPCB(skb)->flags & IPSKB_XFRM_TUNNEL_SIZE)
+ goto out;
+
+ IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE;
+
+ if (!(iph->frag_off & htons(IP_DF)))
+ goto out;
+
+ dst = skb->dst;
+ mtu = dst_pmtu(dst) - dst->header_len - dst->trailer_len;
+ if (skb->len > mtu) {
+ icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
+ ret = -EMSGSIZE;
+ }
+out:
+ return ret;
+}
static int ipip_output(struct sk_buff **pskb)
{
config IPV6_PRIVACY
bool "IPv6: Privacy Extensions (RFC 3041) support"
depends on IPV6
+ select CRYPTO
+ select CRYPTO_MD5
---help---
Privacy Extensions for Stateless Address Autoconfiguration in IPv6
support. With this option, additional periodically-alter
ip6_flowlabel.o ipv6_syms.o
ipv6-$(CONFIG_XFRM) += xfrm6_policy.o xfrm6_state.o xfrm6_input.o \
- xfrm6_tunnel.o xfrm6_output.o
+ xfrm6_tunnel.o
ipv6-objs += $(ipv6-y)
obj-$(CONFIG_INET6_AH) += ah6.o
p.iph.ihl = 5;
p.iph.protocol = IPPROTO_IPV6;
p.iph.ttl = 64;
- ifr.ifr_ifru.ifru_data = (void __user *)&p;
+ ifr.ifr_ifru.ifru_data = (void*)&p;
oldfs = get_fs(); set_fs(KERNEL_DS);
err = dev->do_ioctl(dev, &ifr, SIOCADDTUNNEL);
static
int addrconf_sysctl_forward(ctl_table *ctl, int write, struct file * filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
int *valp = ctl->data;
int val = *valp;
int ret;
- ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
+ ret = proc_dointvec(ctl, write, filp, buffer, lenp);
if (write && *valp != val && valp != &ipv6_devconf_dflt.forwarding) {
struct inet6_dev *idev = NULL;
.flags = INET_PROTOSW_REUSE,
};
+#define INETSW6_ARRAY_LEN (sizeof(inetsw6_array) / sizeof(struct inet_protosw))
+
void
inet6_register_protosw(struct inet_protosw *p)
{
#include <linux/config.h>
#include <linux/module.h>
+#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/ah.h>
#include <linux/crypto.h>
#include <linux/pfkeyv2.h>
-#include <linux/string.h>
#include <net/icmp.h>
#include <net/ipv6.h>
#include <net/xfrm.h>
return 0;
}
-/**
- * ipv6_rearrange_rthdr - rearrange IPv6 routing header
- * @iph: IPv6 header
- * @rthdr: routing header
- *
- * Rearrange the destination address in @iph and the addresses in @rthdr
- * so that they appear in the order they will at the final destination.
- * See Appendix A2 of RFC 2402 for details.
- */
-static void ipv6_rearrange_rthdr(struct ipv6hdr *iph, struct ipv6_rt_hdr *rthdr)
+static int ipv6_clear_mutable_options(struct sk_buff *skb, u16 *nh_offset, int dir)
{
- int segments, segments_left;
- struct in6_addr *addrs;
- struct in6_addr final_addr;
-
- segments_left = rthdr->segments_left;
- if (segments_left == 0)
- return;
- rthdr->segments_left = 0;
-
- /* The value of rthdr->hdrlen has been verified either by the system
- * call if it is locally generated, or by ipv6_rthdr_rcv() for incoming
- * packets. So we can assume that it is even and that segments is
- * greater than or equal to segments_left.
- *
- * For the same reason we can assume that this option is of type 0.
- */
- segments = rthdr->hdrlen >> 1;
+ u16 offset = sizeof(struct ipv6hdr);
+ struct ipv6_opt_hdr *exthdr = (struct ipv6_opt_hdr*)(skb->nh.raw + offset);
+ unsigned int packet_len = skb->tail - skb->nh.raw;
+ u8 nexthdr = skb->nh.ipv6h->nexthdr;
+ u8 nextnexthdr = 0;
- addrs = ((struct rt0_hdr *)rthdr)->addr;
- ipv6_addr_copy(&final_addr, addrs + segments - 1);
+ *nh_offset = ((unsigned char *)&skb->nh.ipv6h->nexthdr) - skb->nh.raw;
- addrs += segments - segments_left;
- memmove(addrs + 1, addrs, (segments_left - 1) * sizeof(*addrs));
+ while (offset + 1 <= packet_len) {
- ipv6_addr_copy(addrs, &iph->daddr);
- ipv6_addr_copy(&iph->daddr, &final_addr);
-}
-
-static int ipv6_clear_mutable_options(struct ipv6hdr *iph, int len)
-{
- union {
- struct ipv6hdr *iph;
- struct ipv6_opt_hdr *opth;
- struct ipv6_rt_hdr *rth;
- char *raw;
- } exthdr = { .iph = iph };
- char *end = exthdr.raw + len;
- int nexthdr = iph->nexthdr;
-
- exthdr.iph++;
-
- while (exthdr.raw < end) {
switch (nexthdr) {
+
case NEXTHDR_HOP:
- case NEXTHDR_DEST:
- if (!zero_out_mutable_opts(exthdr.opth)) {
- LIMIT_NETDEBUG(printk(
- KERN_WARNING "overrun %sopts\n",
- nexthdr == NEXTHDR_HOP ?
- "hop" : "dest"));
- return -EINVAL;
+ *nh_offset = offset;
+ offset += ipv6_optlen(exthdr);
+ if (!zero_out_mutable_opts(exthdr)) {
+ LIMIT_NETDEBUG(
+ printk(KERN_WARNING "overrun hopopts\n"));
+ return 0;
}
+ nexthdr = exthdr->nexthdr;
+ exthdr = (struct ipv6_opt_hdr*)(skb->nh.raw + offset);
break;
case NEXTHDR_ROUTING:
- ipv6_rearrange_rthdr(iph, exthdr.rth);
+ *nh_offset = offset;
+ offset += ipv6_optlen(exthdr);
+ ((struct ipv6_rt_hdr*)exthdr)->segments_left = 0;
+ nexthdr = exthdr->nexthdr;
+ exthdr = (struct ipv6_opt_hdr*)(skb->nh.raw + offset);
+ break;
+
+ case NEXTHDR_DEST:
+ *nh_offset = offset;
+ offset += ipv6_optlen(exthdr);
+ if (!zero_out_mutable_opts(exthdr)) {
+ LIMIT_NETDEBUG(
+ printk(KERN_WARNING "overrun destopt\n"));
+ return 0;
+ }
+ nexthdr = exthdr->nexthdr;
+ exthdr = (struct ipv6_opt_hdr*)(skb->nh.raw + offset);
break;
+ case NEXTHDR_AUTH:
+ if (dir == XFRM_POLICY_OUT) {
+ memset(((struct ipv6_auth_hdr*)exthdr)->auth_data, 0,
+ (((struct ipv6_auth_hdr*)exthdr)->hdrlen - 1) << 2);
+ }
+ if (exthdr->nexthdr == NEXTHDR_DEST) {
+ offset += (((struct ipv6_auth_hdr*)exthdr)->hdrlen + 2) << 2;
+ exthdr = (struct ipv6_opt_hdr*)(skb->nh.raw + offset);
+ nextnexthdr = exthdr->nexthdr;
+ if (!zero_out_mutable_opts(exthdr)) {
+ LIMIT_NETDEBUG(
+ printk(KERN_WARNING "overrun destopt\n"));
+ return 0;
+ }
+ }
+ return nexthdr;
default :
- return 0;
+ return nexthdr;
}
-
- nexthdr = exthdr.opth->nexthdr;
- exthdr.raw += ipv6_optlen(exthdr.opth);
}
- return 0;
+ return nexthdr;
}
int ah6_output(struct sk_buff **pskb)
{
int err;
- int extlen;
+ int hdr_len = sizeof(struct ipv6hdr);
struct dst_entry *dst = (*pskb)->dst;
struct xfrm_state *x = dst->xfrm;
- struct ipv6hdr *top_iph;
+ struct ipv6hdr *iph = NULL;
struct ip_auth_hdr *ah;
struct ah_data *ahp;
+ u16 nh_offset = 0;
u8 nexthdr;
- char tmp_base[8];
- struct {
- struct in6_addr daddr;
- char hdrs[0];
- } *tmp_ext;
- top_iph = (struct ipv6hdr *)(*pskb)->data;
- top_iph->payload_len = htons((*pskb)->len - sizeof(*top_iph));
+ if ((*pskb)->ip_summed == CHECKSUM_HW) {
+ err = skb_checksum_help(pskb, 0);
+ if (err)
+ goto error_nolock;
+ }
- nexthdr = *(*pskb)->nh.raw;
- *(*pskb)->nh.raw = IPPROTO_AH;
+ spin_lock_bh(&x->lock);
+ err = xfrm_state_check(x, *pskb);
+ if (err)
+ goto error;
- /* When there are no extension headers, we only need to save the first
- * 8 bytes of the base IP header.
- */
- memcpy(tmp_base, top_iph, sizeof(tmp_base));
-
- tmp_ext = NULL;
- extlen = (*pskb)->h.raw - (unsigned char *)(top_iph + 1);
- if (extlen) {
- extlen += sizeof(*tmp_ext);
- tmp_ext = kmalloc(extlen, GFP_ATOMIC);
- if (!tmp_ext) {
+ if (x->props.mode) {
+ err = xfrm6_tunnel_check_size(*pskb);
+ if (err)
+ goto error;
+
+ iph = (*pskb)->nh.ipv6h;
+ (*pskb)->nh.ipv6h = (struct ipv6hdr*)skb_push(*pskb, x->props.header_len);
+ (*pskb)->nh.ipv6h->version = 6;
+ (*pskb)->nh.ipv6h->payload_len = htons((*pskb)->len - sizeof(struct ipv6hdr));
+ (*pskb)->nh.ipv6h->nexthdr = IPPROTO_AH;
+ ipv6_addr_copy(&(*pskb)->nh.ipv6h->saddr,
+ (struct in6_addr *) &x->props.saddr);
+ ipv6_addr_copy(&(*pskb)->nh.ipv6h->daddr,
+ (struct in6_addr *) &x->id.daddr);
+ ah = (struct ip_auth_hdr*)((*pskb)->nh.ipv6h+1);
+ ah->nexthdr = IPPROTO_IPV6;
+ } else {
+ hdr_len = (*pskb)->h.raw - (*pskb)->nh.raw;
+ iph = kmalloc(hdr_len, GFP_ATOMIC);
+ if (!iph) {
err = -ENOMEM;
goto error;
}
- memcpy(tmp_ext, &top_iph->daddr, extlen);
- err = ipv6_clear_mutable_options(top_iph,
- extlen - sizeof(*tmp_ext) +
- sizeof(*top_iph));
- if (err)
+ memcpy(iph, (*pskb)->data, hdr_len);
+ (*pskb)->nh.ipv6h = (struct ipv6hdr*)skb_push(*pskb, x->props.header_len);
+ memcpy((*pskb)->nh.ipv6h, iph, hdr_len);
+ nexthdr = ipv6_clear_mutable_options(*pskb, &nh_offset, XFRM_POLICY_OUT);
+ if (nexthdr == 0)
goto error_free_iph;
- }
- ah = (struct ip_auth_hdr *)(*pskb)->h.raw;
- ah->nexthdr = nexthdr;
+ (*pskb)->nh.raw[nh_offset] = IPPROTO_AH;
+ (*pskb)->nh.ipv6h->payload_len = htons((*pskb)->len - sizeof(struct ipv6hdr));
+ ah = (struct ip_auth_hdr*)((*pskb)->nh.raw+hdr_len);
+ (*pskb)->h.raw = (unsigned char*) ah;
+ ah->nexthdr = nexthdr;
+ }
- top_iph->priority = 0;
- top_iph->flow_lbl[0] = 0;
- top_iph->flow_lbl[1] = 0;
- top_iph->flow_lbl[2] = 0;
- top_iph->hop_limit = 0;
+ (*pskb)->nh.ipv6h->priority = 0;
+ (*pskb)->nh.ipv6h->flow_lbl[0] = 0;
+ (*pskb)->nh.ipv6h->flow_lbl[1] = 0;
+ (*pskb)->nh.ipv6h->flow_lbl[2] = 0;
+ (*pskb)->nh.ipv6h->hop_limit = 0;
ahp = x->data;
ah->hdrlen = (XFRM_ALIGN8(sizeof(struct ipv6_auth_hdr) +
ah->seq_no = htonl(++x->replay.oseq);
ahp->icv(ahp, *pskb, ah->auth_data);
- err = 0;
-
- memcpy(top_iph, tmp_base, sizeof(tmp_base));
- if (tmp_ext) {
- memcpy(&top_iph->daddr, tmp_ext, extlen);
-error_free_iph:
- kfree(tmp_ext);
+ if (x->props.mode) {
+ (*pskb)->nh.ipv6h->hop_limit = iph->hop_limit;
+ (*pskb)->nh.ipv6h->priority = iph->priority;
+ (*pskb)->nh.ipv6h->flow_lbl[0] = iph->flow_lbl[0];
+ (*pskb)->nh.ipv6h->flow_lbl[1] = iph->flow_lbl[1];
+ (*pskb)->nh.ipv6h->flow_lbl[2] = iph->flow_lbl[2];
+ if (x->props.flags & XFRM_STATE_NOECN)
+ IP6_ECN_clear((*pskb)->nh.ipv6h);
+ } else {
+ memcpy((*pskb)->nh.ipv6h, iph, hdr_len);
+ (*pskb)->nh.raw[nh_offset] = IPPROTO_AH;
+ (*pskb)->nh.ipv6h->payload_len = htons((*pskb)->len - sizeof(struct ipv6hdr));
+ kfree (iph);
}
+ (*pskb)->nh.raw = (*pskb)->data;
+
+ x->curlft.bytes += (*pskb)->len;
+ x->curlft.packets++;
+ spin_unlock_bh(&x->lock);
+ if (((*pskb)->dst = dst_pop(dst)) == NULL) {
+ err = -EHOSTUNREACH;
+ goto error_nolock;
+ }
+ return NET_XMIT_BYPASS;
+error_free_iph:
+ kfree(iph);
error:
+ spin_unlock_bh(&x->lock);
+error_nolock:
+ kfree_skb(*pskb);
return err;
}
* Before process AH
* [IPv6][Ext1][Ext2][AH][Dest][Payload]
* |<-------------->| hdr_len
+ * |<------------------------>| cleared_hlen
*
* To erase AH:
* Keeping copy of cleared headers. After AH processing,
unsigned char *tmp_hdr = NULL;
u16 hdr_len;
u16 ah_hlen;
- int nexthdr;
+ u16 cleared_hlen;
+ u16 nh_offset = 0;
+ u8 nexthdr = 0;
+ u8 *prevhdr;
if (!pskb_may_pull(skb, sizeof(struct ip_auth_hdr)))
goto out;
goto out;
hdr_len = skb->data - skb->nh.raw;
+ cleared_hlen = hdr_len;
ah = (struct ipv6_auth_hdr*)skb->data;
ahp = x->data;
nexthdr = ah->nexthdr;
ah_hlen = (ah->hdrlen + 2) << 2;
+ cleared_hlen += ah_hlen;
+
+ if (nexthdr == NEXTHDR_DEST) {
+ struct ipv6_opt_hdr *dsthdr = (struct ipv6_opt_hdr*)(skb->data + ah_hlen);
+ cleared_hlen += ipv6_optlen(dsthdr);
+ }
if (ah_hlen != XFRM_ALIGN8(sizeof(struct ipv6_auth_hdr) + ahp->icv_full_len) &&
ah_hlen != XFRM_ALIGN8(sizeof(struct ipv6_auth_hdr) + ahp->icv_trunc_len))
if (!pskb_may_pull(skb, ah_hlen))
goto out;
- tmp_hdr = kmalloc(hdr_len, GFP_ATOMIC);
+ tmp_hdr = kmalloc(cleared_hlen, GFP_ATOMIC);
if (!tmp_hdr)
goto out;
- memcpy(tmp_hdr, skb->nh.raw, hdr_len);
- if (ipv6_clear_mutable_options(skb->nh.ipv6h, hdr_len))
- goto out;
+ memcpy(tmp_hdr, skb->nh.raw, cleared_hlen);
+ ipv6_clear_mutable_options(skb, &nh_offset, XFRM_POLICY_IN);
skb->nh.ipv6h->priority = 0;
skb->nh.ipv6h->flow_lbl[0] = 0;
skb->nh.ipv6h->flow_lbl[1] = 0;
skb->nh.raw = skb_pull(skb, ah_hlen);
memcpy(skb->nh.raw, tmp_hdr, hdr_len);
+ if (nexthdr == NEXTHDR_DEST) {
+ memcpy(skb->nh.raw + hdr_len,
+ tmp_hdr + hdr_len + ah_hlen,
+ cleared_hlen - hdr_len - ah_hlen);
+ }
+ prevhdr = (u8*)(skb->nh.raw + nh_offset);
+ *prevhdr = nexthdr;
skb->nh.ipv6h->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
skb_pull(skb, hdr_len);
skb->h.raw = skb->data;
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/transp_v6.h>
-#include <net/ip6_route.h>
#include <linux/errqueue.h>
#include <asm/uaccess.h>
-int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
-{
- struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
- struct inet_opt *inet = inet_sk(sk);
- struct ipv6_pinfo *np = inet6_sk(sk);
- struct in6_addr *daddr;
- struct dst_entry *dst;
- struct flowi fl;
- struct ip6_flowlabel *flowlabel = NULL;
- int addr_type;
- int err;
-
- if (usin->sin6_family == AF_INET) {
- if (__ipv6_only_sock(sk))
- return -EAFNOSUPPORT;
- err = ip4_datagram_connect(sk, uaddr, addr_len);
- goto ipv4_connected;
- }
-
- if (addr_len < SIN6_LEN_RFC2133)
- return -EINVAL;
-
- if (usin->sin6_family != AF_INET6)
- return -EAFNOSUPPORT;
-
- memset(&fl, 0, sizeof(fl));
- if (np->sndflow) {
- fl.fl6_flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
- if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
- flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
- if (flowlabel == NULL)
- return -EINVAL;
- ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
- }
- }
-
- addr_type = ipv6_addr_type(&usin->sin6_addr);
-
- if (addr_type == IPV6_ADDR_ANY) {
- /*
- * connect to self
- */
- usin->sin6_addr.s6_addr[15] = 0x01;
- }
-
- daddr = &usin->sin6_addr;
-
- if (addr_type == IPV6_ADDR_MAPPED) {
- struct sockaddr_in sin;
-
- if (__ipv6_only_sock(sk)) {
- err = -ENETUNREACH;
- goto out;
- }
- sin.sin_family = AF_INET;
- sin.sin_addr.s_addr = daddr->s6_addr32[3];
- sin.sin_port = usin->sin6_port;
-
- err = ip4_datagram_connect(sk,
- (struct sockaddr*) &sin,
- sizeof(sin));
-
-ipv4_connected:
- if (err)
- goto out;
-
- ipv6_addr_set(&np->daddr, 0, 0, htonl(0x0000ffff), inet->daddr);
-
- if (ipv6_addr_any(&np->saddr)) {
- ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000ffff),
- inet->saddr);
- }
-
- if (ipv6_addr_any(&np->rcv_saddr)) {
- ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000ffff),
- inet->rcv_saddr);
- }
- goto out;
- }
-
- if (addr_type&IPV6_ADDR_LINKLOCAL) {
- if (addr_len >= sizeof(struct sockaddr_in6) &&
- usin->sin6_scope_id) {
- if (sk->sk_bound_dev_if &&
- sk->sk_bound_dev_if != usin->sin6_scope_id) {
- err = -EINVAL;
- goto out;
- }
- sk->sk_bound_dev_if = usin->sin6_scope_id;
- if (!sk->sk_bound_dev_if &&
- (addr_type & IPV6_ADDR_MULTICAST))
- fl.oif = np->mcast_oif;
- }
-
- /* Connect to link-local address requires an interface */
- if (!sk->sk_bound_dev_if) {
- err = -EINVAL;
- goto out;
- }
- }
-
- ipv6_addr_copy(&np->daddr, daddr);
- np->flow_label = fl.fl6_flowlabel;
-
- inet->dport = usin->sin6_port;
-
- /*
- * Check for a route to destination an obtain the
- * destination cache for it.
- */
-
- fl.proto = sk->sk_protocol;
- ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
- ipv6_addr_copy(&fl.fl6_src, &np->saddr);
- fl.oif = sk->sk_bound_dev_if;
- fl.fl_ip_dport = inet->dport;
- fl.fl_ip_sport = inet->sport;
-
- if (!fl.oif && (addr_type&IPV6_ADDR_MULTICAST))
- fl.oif = np->mcast_oif;
-
- if (flowlabel) {
- if (flowlabel->opt && flowlabel->opt->srcrt) {
- struct rt0_hdr *rt0 = (struct rt0_hdr *) flowlabel->opt->srcrt;
- ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
- }
- } else if (np->opt && np->opt->srcrt) {
- struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
- ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
- }
-
- err = ip6_dst_lookup(sk, &dst, &fl);
- if (err)
- goto out;
-
- /* source address lookup done in ip6_dst_lookup */
-
- if (ipv6_addr_any(&np->saddr))
- ipv6_addr_copy(&np->saddr, &fl.fl6_src);
-
- if (ipv6_addr_any(&np->rcv_saddr)) {
- ipv6_addr_copy(&np->rcv_saddr, &fl.fl6_src);
- inet->rcv_saddr = LOOPBACK4_IPV6;
- }
-
- ip6_dst_store(sk, dst,
- !ipv6_addr_cmp(&fl.fl6_dst, &np->daddr) ?
- &np->daddr : NULL);
-
- sk->sk_state = TCP_ESTABLISHED;
-out:
- fl6_sock_release(flowlabel);
- return err;
-}
-
void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
u16 port, u32 info, u8 *payload)
{
#include <linux/config.h>
#include <linux/module.h>
+#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
int esp6_output(struct sk_buff **pskb)
{
int err;
- int hdr_len;
+ int hdr_len = 0;
struct dst_entry *dst = (*pskb)->dst;
struct xfrm_state *x = dst->xfrm;
- struct ipv6hdr *top_iph;
+ struct ipv6hdr *iph = NULL, *top_iph;
struct ipv6_esp_hdr *esph;
struct crypto_tfm *tfm;
struct esp_data *esp;
int clen;
int alen;
int nfrags;
+ u8 *prevhdr;
+ u8 nexthdr = 0;
- esp = x->data;
- hdr_len = (*pskb)->h.raw - (*pskb)->data +
- sizeof(*esph) + esp->conf.ivlen;
+ if ((*pskb)->ip_summed == CHECKSUM_HW) {
+ err = skb_checksum_help(pskb, 0);
+ if (err)
+ goto error_nolock;
+ }
- /* Strip IP+ESP header. */
- __skb_pull(*pskb, hdr_len);
+ spin_lock_bh(&x->lock);
+ err = xfrm_state_check(x, *pskb);
+ if (err)
+ goto error;
+
+ if (x->props.mode) {
+ err = xfrm6_tunnel_check_size(*pskb);
+ if (err)
+ goto error;
+ } else {
+ /* Strip IP header in transport mode. Save it. */
+ hdr_len = ip6_find_1stfragopt(*pskb, &prevhdr);
+ nexthdr = *prevhdr;
+ *prevhdr = IPPROTO_ESP;
+ iph = kmalloc(hdr_len, GFP_ATOMIC);
+ if (!iph) {
+ err = -ENOMEM;
+ goto error;
+ }
+ memcpy(iph, (*pskb)->nh.raw, hdr_len);
+ __skb_pull(*pskb, hdr_len);
+ }
/* Now skb is pure payload to encrypt */
err = -ENOMEM;
/* Round to block size */
clen = (*pskb)->len;
+ esp = x->data;
alen = esp->auth.icv_trunc_len;
tfm = esp->conf.tfm;
blksize = (crypto_tfm_alg_blocksize(tfm) + 3) & ~3;
clen = (clen + esp->conf.padlen-1)&~(esp->conf.padlen-1);
if ((nfrags = skb_cow_data(*pskb, clen-(*pskb)->len+alen, &trailer)) < 0) {
+ if (!x->props.mode && iph) kfree(iph);
goto error;
}
*(u8*)(trailer->tail + clen-(*pskb)->len - 2) = (clen - (*pskb)->len)-2;
pskb_put(*pskb, trailer, clen - (*pskb)->len);
- top_iph = (struct ipv6hdr *)__skb_push(*pskb, hdr_len);
- esph = (struct ipv6_esp_hdr *)(*pskb)->h.raw;
- top_iph->payload_len = htons((*pskb)->len + alen - sizeof(*top_iph));
- *(u8*)(trailer->tail - 1) = *(*pskb)->nh.raw;
- *(*pskb)->nh.raw = IPPROTO_ESP;
+ if (x->props.mode) {
+ iph = (*pskb)->nh.ipv6h;
+ top_iph = (struct ipv6hdr*)skb_push(*pskb, x->props.header_len);
+ esph = (struct ipv6_esp_hdr*)(top_iph+1);
+ *(u8*)(trailer->tail - 1) = IPPROTO_IPV6;
+ top_iph->version = 6;
+ top_iph->priority = iph->priority;
+ top_iph->flow_lbl[0] = iph->flow_lbl[0];
+ top_iph->flow_lbl[1] = iph->flow_lbl[1];
+ top_iph->flow_lbl[2] = iph->flow_lbl[2];
+ if (x->props.flags & XFRM_STATE_NOECN)
+ IP6_ECN_clear(top_iph);
+ top_iph->nexthdr = IPPROTO_ESP;
+ top_iph->payload_len = htons((*pskb)->len + alen - sizeof(struct ipv6hdr));
+ top_iph->hop_limit = iph->hop_limit;
+ ipv6_addr_copy(&top_iph->saddr,
+ (struct in6_addr *)&x->props.saddr);
+ ipv6_addr_copy(&top_iph->daddr,
+ (struct in6_addr *)&x->id.daddr);
+ } else {
+ esph = (struct ipv6_esp_hdr*)skb_push(*pskb, x->props.header_len);
+ (*pskb)->h.raw = (unsigned char*)esph;
+ top_iph = (struct ipv6hdr*)skb_push(*pskb, hdr_len);
+ memcpy(top_iph, iph, hdr_len);
+ kfree(iph);
+ top_iph->payload_len = htons((*pskb)->len + alen - sizeof(struct ipv6hdr));
+ *(u8*)(trailer->tail - 1) = nexthdr;
+ }
esph->spi = x->id.spi;
esph->seq_no = htonl(++x->replay.oseq);
pskb_put(*pskb, trailer, alen);
}
- err = 0;
+ (*pskb)->nh.raw = (*pskb)->data;
+
+ x->curlft.bytes += (*pskb)->len;
+ x->curlft.packets++;
+ spin_unlock_bh(&x->lock);
+ if (((*pskb)->dst = dst_pop(dst)) == NULL) {
+ err = -EHOSTUNREACH;
+ goto error_nolock;
+ }
+ return NET_XMIT_BYPASS;
error:
+ spin_unlock_bh(&x->lock);
+error_nolock:
+ kfree_skb(*pskb);
return err;
}
u8 nexthdr[2];
struct scatterlist *sg = &esp->sgbuf[0];
u8 padlen;
+ u8 *prevhdr;
if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC);
skb->nh.raw += sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen;
memcpy(skb->nh.raw, tmp_hdr, hdr_len);
skb->nh.ipv6h->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
- ret = nexthdr[1];
+ ip6_find_1stfragopt(skb, &prevhdr);
+ ret = *prevhdr = nexthdr[1];
}
out:
if (!pskb_may_pull(skb, (skb->h.raw-skb->data)+8) ||
!pskb_may_pull(skb, (skb->h.raw-skb->data)+((skb->h.raw[1]+1)<<3))) {
- IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
+ IP6_INC_STATS_BH(InHdrErrors);
kfree_skb(skb);
return -1;
}
return 1;
}
- IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
+ IP6_INC_STATS_BH(InHdrErrors);
return -1;
}
if (!pskb_may_pull(skb, (skb->h.raw-skb->data)+8) ||
!pskb_may_pull(skb, (skb->h.raw-skb->data)+((skb->h.raw[1]+1)<<3))) {
- IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
+ IP6_INC_STATS_BH(InHdrErrors);
kfree_skb(skb);
return -1;
}
if (ipv6_addr_is_multicast(&skb->nh.ipv6h->daddr) ||
skb->pkt_type != PACKET_HOST) {
- IP6_INC_STATS_BH(IPSTATS_MIB_INADDRERRORS);
+ IP6_INC_STATS_BH(InAddrErrors);
kfree_skb(skb);
return -1;
}
}
if (hdr->type != IPV6_SRCRT_TYPE_0) {
- IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
+ IP6_INC_STATS_BH(InHdrErrors);
icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, (&hdr->type) - skb->nh.raw);
return -1;
}
if (hdr->hdrlen & 0x01) {
- IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
+ IP6_INC_STATS_BH(InHdrErrors);
icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, (&hdr->hdrlen) - skb->nh.raw);
return -1;
}
n = hdr->hdrlen >> 1;
if (hdr->segments_left > n) {
- IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
+ IP6_INC_STATS_BH(InHdrErrors);
icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, (&hdr->segments_left) - skb->nh.raw);
return -1;
}
kfree_skb(skb);
/* the copy is a forwarded packet */
if (skb2 == NULL) {
- IP6_INC_STATS_BH(IPSTATS_MIB_OUTDISCARDS);
+ IP6_INC_STATS_BH(OutDiscards);
return -1;
}
*skbp = skb = skb2;
addr += i - 1;
if (ipv6_addr_is_multicast(addr)) {
- IP6_INC_STATS_BH(IPSTATS_MIB_INADDRERRORS);
+ IP6_INC_STATS_BH(InAddrErrors);
kfree_skb(skb);
return -1;
}
}
if (skb->dst->dev->flags&IFF_LOOPBACK) {
if (skb->nh.ipv6h->hop_limit <= 1) {
- IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
+ IP6_INC_STATS_BH(InHdrErrors);
icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
0, skb->dev);
kfree_skb(skb);
if (skb->nh.raw[optoff+1] != 4 || (optoff&3) != 2) {
LIMIT_NETDEBUG(
printk(KERN_DEBUG "ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n", skb->nh.raw[optoff+1]));
- IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
+ IP6_INC_STATS_BH(InHdrErrors);
goto drop;
}
pkt_len = ntohl(*(u32*)(skb->nh.raw+optoff+2));
if (pkt_len <= IPV6_MAXPLEN) {
- IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
+ IP6_INC_STATS_BH(InHdrErrors);
icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff+2);
return 0;
}
if (skb->nh.ipv6h->payload_len) {
- IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
+ IP6_INC_STATS_BH(InHdrErrors);
icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff);
return 0;
}
if (pkt_len > skb->len - sizeof(struct ipv6hdr)) {
- IP6_INC_STATS_BH(IPSTATS_MIB_INTRUNCATEDPKTS);
+ IP6_INC_STATS_BH(InTruncatedPkts);
goto drop;
}
if (pkt_len + sizeof(struct ipv6hdr) < skb->len) {
*/
dst = ip6_route_output(sk, fl);
if (dst->error) {
- IP6_INC_STATS(IPSTATS_MIB_OUTNOROUTES);
+ IP6_INC_STATS(OutNoRoutes);
} else if (dst->dev && (dst->dev->flags&IFF_LOOPBACK)) {
res = 1;
} else {
err = icmpv6_push_pending_frames(sk, &fl, &tmp_hdr, len + sizeof(struct icmp6hdr));
if (type >= ICMPV6_DEST_UNREACH && type <= ICMPV6_PARAMPROB)
- ICMP6_INC_STATS_OFFSET_BH(idev, ICMP6_MIB_OUTDESTUNREACHS, type - ICMPV6_DEST_UNREACH);
- ICMP6_INC_STATS_BH(idev, ICMP6_MIB_OUTMSGS);
+ ICMP6_INC_STATS_OFFSET_BH(idev, Icmp6OutDestUnreachs, type - ICMPV6_DEST_UNREACH);
+ ICMP6_INC_STATS_BH(idev, Icmp6OutMsgs);
out_put:
if (likely(idev != NULL))
}
err = icmpv6_push_pending_frames(sk, &fl, &tmp_hdr, skb->len + sizeof(struct icmp6hdr));
- ICMP6_INC_STATS_BH(idev, ICMP6_MIB_OUTECHOREPLIES);
- ICMP6_INC_STATS_BH(idev, ICMP6_MIB_OUTMSGS);
+ ICMP6_INC_STATS_BH(idev, Icmp6OutEchoReplies);
+ ICMP6_INC_STATS_BH(idev, Icmp6OutMsgs);
out_put:
if (likely(idev != NULL))
struct icmp6hdr *hdr;
int type;
- ICMP6_INC_STATS_BH(idev, ICMP6_MIB_INMSGS);
+ ICMP6_INC_STATS_BH(idev, Icmp6InMsgs);
saddr = &skb->nh.ipv6h->saddr;
daddr = &skb->nh.ipv6h->daddr;
type = hdr->icmp6_type;
if (type >= ICMPV6_DEST_UNREACH && type <= ICMPV6_PARAMPROB)
- ICMP6_INC_STATS_OFFSET_BH(idev, ICMP6_MIB_INDESTUNREACHS, type - ICMPV6_DEST_UNREACH);
+ ICMP6_INC_STATS_OFFSET_BH(idev, Icmp6InDestUnreachs, type - ICMPV6_DEST_UNREACH);
else if (type >= ICMPV6_ECHO_REQUEST && type <= NDISC_REDIRECT)
- ICMP6_INC_STATS_OFFSET_BH(idev, ICMP6_MIB_INECHOS, type - ICMPV6_ECHO_REQUEST);
+ ICMP6_INC_STATS_OFFSET_BH(idev, Icmp6InEchos, type - ICMPV6_ECHO_REQUEST);
switch (type) {
case ICMPV6_ECHO_REQUEST:
break;
case ICMPV6_MGM_REDUCTION:
- case ICMPV6_NI_QUERY:
- case ICMPV6_NI_REPLY:
case ICMPV6_MLD2_REPORT:
- case ICMPV6_DHAAD_REQUEST:
- case ICMPV6_DHAAD_REPLY:
- case ICMPV6_MOBILE_PREFIX_SOL:
- case ICMPV6_MOBILE_PREFIX_ADV:
break;
default:
return 0;
discard_it:
- ICMP6_INC_STATS_BH(idev, ICMP6_MIB_INERRORS);
+ ICMP6_INC_STATS_BH(idev, Icmp6InErrors);
kfree_skb(skb);
return 0;
}
static struct timer_list ip6_fib_timer = TIMER_INITIALIZER(fib6_run_gc, 0, 0);
-struct fib6_walker_t fib6_walker_list = {
+static struct fib6_walker_t fib6_walker_list = {
.prev = &fib6_walker_list,
.next = &fib6_walker_list,
};
static int ip6fl_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN)
- seq_puts(seq, "Label S Owner Users Linger Expires "
- "Dst Opt\n");
+ seq_printf(seq, "Label S Owner Users Linger Expires "
+ "Dst Opt\n");
else
ip6fl_fl_seq_show(seq, v);
return 0;
if (skb->pkt_type == PACKET_OTHERHOST)
goto drop;
- IP6_INC_STATS_BH(IPSTATS_MIB_INRECEIVES);
+ IP6_INC_STATS_BH(InReceives);
if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
- IP6_INC_STATS_BH(IPSTATS_MIB_INDISCARDS);
+ IP6_INC_STATS_BH(InDiscards);
goto out;
}
goto err;
if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) {
- IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
+ IP6_INC_STATS_BH(InHdrErrors);
goto drop;
}
goto truncated;
if (pkt_len + sizeof(struct ipv6hdr) < skb->len) {
if (__pskb_trim(skb, pkt_len + sizeof(struct ipv6hdr))){
- IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
+ IP6_INC_STATS_BH(InHdrErrors);
goto drop;
}
hdr = skb->nh.ipv6h;
if (hdr->nexthdr == NEXTHDR_HOP) {
skb->h.raw = (u8*)(hdr+1);
if (ipv6_parse_hopopts(skb, offsetof(struct ipv6hdr, nexthdr)) < 0) {
- IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
+ IP6_INC_STATS_BH(InHdrErrors);
return 0;
}
hdr = skb->nh.ipv6h;
return NF_HOOK(PF_INET6,NF_IP6_PRE_ROUTING, skb, dev, NULL, ip6_rcv_finish);
truncated:
- IP6_INC_STATS_BH(IPSTATS_MIB_INTRUNCATEDPKTS);
+ IP6_INC_STATS_BH(InTruncatedPkts);
err:
- IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
+ IP6_INC_STATS_BH(InHdrErrors);
drop:
kfree_skb(skb);
out:
if (ret > 0)
goto resubmit;
else if (ret == 0)
- IP6_INC_STATS_BH(IPSTATS_MIB_INDELIVERS);
+ IP6_INC_STATS_BH(InDelivers);
} else {
if (!raw_sk) {
if (xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
- IP6_INC_STATS_BH(IPSTATS_MIB_INUNKNOWNPROTOS);
+ IP6_INC_STATS_BH(InUnknownProtos);
icmpv6_param_prob(skb, ICMPV6_UNK_NEXTHDR, nhoff);
}
} else {
- IP6_INC_STATS_BH(IPSTATS_MIB_INDELIVERS);
+ IP6_INC_STATS_BH(InDelivers);
kfree_skb(skb);
}
}
return 0;
discard:
- IP6_INC_STATS_BH(IPSTATS_MIB_INDISCARDS);
+ IP6_INC_STATS_BH(InDiscards);
rcu_read_unlock();
kfree_skb(skb);
return 0;
struct ipv6hdr *hdr;
int deliver;
- IP6_INC_STATS_BH(IPSTATS_MIB_INMCASTPKTS);
+ IP6_INC_STATS_BH(InMcastPkts);
hdr = skb->nh.ipv6h;
deliver = likely(!(skb->dev->flags & (IFF_PROMISC|IFF_ALLMULTI))) ||
} else if (dst->neighbour)
return dst->neighbour->output(skb);
- IP6_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
+ IP6_INC_STATS_BH(OutNoRoutes);
kfree_skb(skb);
return -EINVAL;
ip6_dev_loopback_xmit);
if (skb->nh.ipv6h->hop_limit == 0) {
- IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
+ IP6_INC_STATS(OutDiscards);
kfree_skb(skb);
return 0;
}
}
- IP6_INC_STATS(IPSTATS_MIB_OUTMCASTPKTS);
+ IP6_INC_STATS(OutMcastPkts);
}
return NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, skb,NULL, skb->dev,ip6_output_finish);
dst = ip6_route_output(skb->sk, &fl);
if (dst->error) {
- IP6_INC_STATS(IPSTATS_MIB_OUTNOROUTES);
+ IP6_INC_STATS(OutNoRoutes);
LIMIT_NETDEBUG(
printk(KERN_DEBUG "ip6_route_me_harder: No more route.\n"));
dst_release(dst);
kfree_skb(skb);
skb = skb2;
if (skb == NULL) {
- IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
+ IP6_INC_STATS(OutDiscards);
return -ENOBUFS;
}
if (sk)
mtu = dst_pmtu(dst);
if ((skb->len <= mtu) || ipfragok) {
- IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
+ IP6_INC_STATS(OutRequests);
return NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev, ip6_maybe_reroute);
}
printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n");
skb->dev = dst->dev;
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
- IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS);
+ IP6_INC_STATS(FragFails);
kfree_skb(skb);
return -EMSGSIZE;
}
goto error;
if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
- IP6_INC_STATS(IPSTATS_MIB_INDISCARDS);
+ IP6_INC_STATS(InDiscards);
goto drop;
}
}
if (!xfrm6_route_forward(skb)) {
- IP6_INC_STATS(IPSTATS_MIB_INDISCARDS);
+ IP6_INC_STATS(InDiscards);
goto drop;
}
/* Again, force OUTPUT device used as source address */
skb->dev = dst->dev;
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, dst_pmtu(dst), skb->dev);
- IP6_INC_STATS_BH(IPSTATS_MIB_INTOOBIGERRORS);
- IP6_INC_STATS_BH(IPSTATS_MIB_FRAGFAILS);
+ IP6_INC_STATS_BH(InTooBigErrors);
+ IP6_INC_STATS_BH(FragFails);
kfree_skb(skb);
return -EMSGSIZE;
}
if (skb_cow(skb, dst->dev->hard_header_len)) {
- IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
+ IP6_INC_STATS(OutDiscards);
goto drop;
}
hdr->hop_limit--;
- IP6_INC_STATS_BH(IPSTATS_MIB_OUTFORWDATAGRAMS);
+ IP6_INC_STATS_BH(OutForwDatagrams);
return NF_HOOK(PF_INET6,NF_IP6_FORWARD, skb, skb->dev, dst->dev, ip6_forward_finish);
error:
- IP6_INC_STATS_BH(IPSTATS_MIB_INADDRERRORS);
+ IP6_INC_STATS_BH(InAddrErrors);
drop:
kfree_skb(skb);
return -EINVAL;
tmp_hdr = kmalloc(hlen, GFP_ATOMIC);
if (!tmp_hdr) {
- IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS);
+ IP6_INC_STATS(FragFails);
return -ENOMEM;
}
kfree(tmp_hdr);
if (err == 0) {
- IP6_INC_STATS(IPSTATS_MIB_FRAGOKS);
+ IP6_INC_STATS(FragOKs);
return 0;
}
frag = skb;
}
- IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS);
+ IP6_INC_STATS(FragFails);
return err;
}
if ((frag = alloc_skb(len+hlen+sizeof(struct frag_hdr)+LL_RESERVED_SPACE(rt->u.dst.dev), GFP_ATOMIC)) == NULL) {
NETDEBUG(printk(KERN_INFO "IPv6: frag: no memory for new fragment!\n"));
- IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS);
+ IP6_INC_STATS(FragFails);
err = -ENOMEM;
goto fail;
}
* Put this fragment into the sending queue.
*/
- IP6_INC_STATS(IPSTATS_MIB_FRAGCREATES);
+ IP6_INC_STATS(FragCreates);
err = output(&frag);
if (err)
goto fail;
}
kfree_skb(skb);
- IP6_INC_STATS(IPSTATS_MIB_FRAGOKS);
+ IP6_INC_STATS(FragOKs);
return err;
fail:
kfree_skb(skb);
- IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS);
+ IP6_INC_STATS(FragFails);
return err;
}
return 0;
error:
inet->cork.length -= length;
- IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
+ IP6_INC_STATS(OutDiscards);
return err;
}
ipv6_addr_copy(&hdr->daddr, final_dst);
skb->dst = dst_clone(&rt->u.dst);
- IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
+ IP6_INC_STATS(OutRequests);
err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, skb->dst->dev, dst_output);
if (err) {
if (err > 0)
struct sk_buff *skb;
while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) {
- IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
+ IP6_INC_STATS(OutDiscards);
kfree_skb(skb);
}
*/
#include <linux/config.h>
#include <linux/module.h>
+#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/ipcomp.h>
{
int err = 0;
u8 nexthdr = 0;
+ u8 *prevhdr;
int hdr_len = skb->h.raw - skb->nh.raw;
unsigned char *tmp_hdr = NULL;
struct ipv6hdr *iph;
iph = skb->nh.ipv6h;
iph->payload_len = htons(skb->len);
+ ip6_find_1stfragopt(skb, &prevhdr);
+ *prevhdr = nexthdr;
out:
if (tmp_hdr)
kfree(tmp_hdr);
int err;
struct dst_entry *dst = (*pskb)->dst;
struct xfrm_state *x = dst->xfrm;
- struct ipv6hdr *top_iph;
- int hdr_len;
+ struct ipv6hdr *iph, *top_iph;
+ int hdr_len = 0;
struct ipv6_comp_hdr *ipch;
struct ipcomp_data *ipcd = x->data;
+ u8 *prevhdr;
+ u8 nexthdr = 0;
int plen, dlen;
u8 *start, *scratch = ipcd->scratch;
- hdr_len = (*pskb)->h.raw - (*pskb)->data;
+ if ((*pskb)->ip_summed == CHECKSUM_HW) {
+ err = skb_checksum_help(pskb, 0);
+ if (err)
+ goto error_nolock;
+ }
+
+ spin_lock_bh(&x->lock);
+
+ err = xfrm_state_check(x, *pskb);
+ if (err)
+ goto error;
+
+ if (x->props.mode) {
+ err = xfrm6_tunnel_check_size(*pskb);
+ if (err)
+ goto error;
+
+ hdr_len = sizeof(struct ipv6hdr);
+ nexthdr = IPPROTO_IPV6;
+ iph = (*pskb)->nh.ipv6h;
+ top_iph = (struct ipv6hdr *)skb_push(*pskb, sizeof(struct ipv6hdr));
+ top_iph->version = 6;
+ top_iph->priority = iph->priority;
+ top_iph->flow_lbl[0] = iph->flow_lbl[0];
+ top_iph->flow_lbl[1] = iph->flow_lbl[1];
+ top_iph->flow_lbl[2] = iph->flow_lbl[2];
+ top_iph->nexthdr = IPPROTO_IPV6; /* initial */
+ top_iph->payload_len = htons((*pskb)->len - sizeof(struct ipv6hdr));
+ top_iph->hop_limit = iph->hop_limit;
+ memcpy(&top_iph->saddr, (struct in6_addr *)&x->props.saddr, sizeof(struct in6_addr));
+ memcpy(&top_iph->daddr, (struct in6_addr *)&x->id.daddr, sizeof(struct in6_addr));
+ (*pskb)->nh.raw = (*pskb)->data; /* == top_iph */
+ (*pskb)->h.raw = (*pskb)->nh.raw + hdr_len;
+ } else {
+ hdr_len = ip6_find_1stfragopt(*pskb, &prevhdr);
+ nexthdr = *prevhdr;
+ }
/* check whether datagram len is larger than threshold */
if (((*pskb)->len - hdr_len) < ipcd->threshold) {
/* compression */
plen = (*pskb)->len - hdr_len;
dlen = IPCOMP_SCRATCH_SIZE;
- start = (*pskb)->h.raw;
+ start = (*pskb)->data + hdr_len;
err = crypto_comp_compress(ipcd->tfm, start, plen, scratch, &dlen);
if (err) {
pskb_trim(*pskb, hdr_len + dlen + sizeof(struct ip_comp_hdr));
/* insert ipcomp header and replace datagram */
- top_iph = (struct ipv6hdr *)(*pskb)->data;
+ top_iph = (*pskb)->nh.ipv6h;
+ if (x->props.mode && (x->props.flags & XFRM_STATE_NOECN))
+ IP6_ECN_clear(top_iph);
top_iph->payload_len = htons((*pskb)->len - sizeof(struct ipv6hdr));
+ (*pskb)->nh.raw = (*pskb)->data; /* top_iph */
+ ip6_find_1stfragopt(*pskb, &prevhdr);
+ *prevhdr = IPPROTO_COMP;
- ipch = (struct ipv6_comp_hdr *)start;
- ipch->nexthdr = *(*pskb)->nh.raw;
+ ipch = (struct ipv6_comp_hdr *)((unsigned char *)top_iph + hdr_len);
+ ipch->nexthdr = nexthdr;
ipch->flags = 0;
ipch->cpi = htons((u16 )ntohl(x->id.spi));
- *(*pskb)->nh.raw = IPPROTO_COMP;
+ (*pskb)->h.raw = (unsigned char*)ipch;
out_ok:
- err = 0;
+ x->curlft.bytes += (*pskb)->len;
+ x->curlft.packets++;
+ spin_unlock_bh(&x->lock);
-error:
+ if (((*pskb)->dst = dst_pop(dst)) == NULL) {
+ err = -EHOSTUNREACH;
+ goto error_nolock;
+ }
+ err = NET_XMIT_BYPASS;
+
+out_exit:
return err;
+error:
+ spin_unlock_bh(&x->lock);
+error_nolock:
+ kfree_skb(*pskb);
+ goto out_exit;
}
static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
struct ipv6_comp_hdr *ipcomph = (struct ipv6_comp_hdr*)(skb->data+offset);
struct xfrm_state *x;
- if (type != ICMPV6_DEST_UNREACH && type != ICMPV6_PKT_TOOBIG)
+ if (type != ICMPV6_DEST_UNREACH && type != ICMPV6_PKT_TOOBIG)
return;
spi = ntohl(ntohs(ipcomph->cpi));
struct inet6_dev *idev = in6_dev_get(skb->dev);
int err;
- IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
+ IP6_INC_STATS(OutRequests);
payload_len = skb->tail - (unsigned char *)skb->nh.ipv6h -
sizeof(struct ipv6hdr);
mldlen = skb->tail - skb->h.raw;
err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, skb->dev,
dev_queue_xmit);
if (!err) {
- ICMP6_INC_STATS(idev,ICMP6_MIB_OUTMSGS);
- IP6_INC_STATS(IPSTATS_MIB_OUTMCASTPKTS);
+ ICMP6_INC_STATS(idev,Icmp6OutMsgs);
+ IP6_INC_STATS(OutMcastPkts);
} else
- IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
+ IP6_INC_STATS(OutDiscards);
if (likely(idev != NULL))
in6_dev_put(idev);
IPV6_TLV_ROUTERALERT, 2, 0, 0,
IPV6_TLV_PADN, 0 };
- IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
+ IP6_INC_STATS(OutRequests);
snd_addr = addr;
if (type == ICMPV6_MGM_REDUCTION) {
snd_addr = &all_routers;
skb = sock_alloc_send_skb(sk, LL_RESERVED_SPACE(dev) + full_len, 1, &err);
if (skb == NULL) {
- IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
+ IP6_INC_STATS(OutDiscards);
return;
}
dev_queue_xmit);
if (!err) {
if (type == ICMPV6_MGM_REDUCTION)
- ICMP6_INC_STATS(idev, ICMP6_MIB_OUTGROUPMEMBREDUCTIONS);
+ ICMP6_INC_STATS(idev, Icmp6OutGroupMembReductions);
else
- ICMP6_INC_STATS(idev, ICMP6_MIB_OUTGROUPMEMBRESPONSES);
- ICMP6_INC_STATS(idev, ICMP6_MIB_OUTMSGS);
- IP6_INC_STATS(IPSTATS_MIB_OUTMCASTPKTS);
+ ICMP6_INC_STATS(idev, Icmp6OutGroupMembResponses);
+ ICMP6_INC_STATS(idev, Icmp6OutMsgs);
+ IP6_INC_STATS(OutMcastPkts);
} else
- IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
+ IP6_INC_STATS(OutDiscards);
if (likely(idev != NULL))
in6_dev_put(idev);
return;
out:
- IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
+ IP6_INC_STATS(OutDiscards);
kfree_skb(skb);
}
skb->dst = dst;
idev = in6_dev_get(dst->dev);
- IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
+ IP6_INC_STATS(OutRequests);
err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev, dst_output);
if (!err) {
- ICMP6_INC_STATS(idev, ICMP6_MIB_OUTNEIGHBORADVERTISEMENTS);
- ICMP6_INC_STATS(idev, ICMP6_MIB_OUTMSGS);
+ ICMP6_INC_STATS(idev, Icmp6OutNeighborAdvertisements);
+ ICMP6_INC_STATS(idev, Icmp6OutMsgs);
}
if (likely(idev != NULL))
/* send it! */
skb->dst = dst;
idev = in6_dev_get(dst->dev);
- IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
+ IP6_INC_STATS(OutRequests);
err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev, dst_output);
if (!err) {
- ICMP6_INC_STATS(idev, ICMP6_MIB_OUTNEIGHBORSOLICITS);
- ICMP6_INC_STATS(idev, ICMP6_MIB_OUTMSGS);
+ ICMP6_INC_STATS(idev, Icmp6OutNeighborSolicits);
+ ICMP6_INC_STATS(idev, Icmp6OutMsgs);
}
if (likely(idev != NULL))
/* send it! */
skb->dst = dst;
idev = in6_dev_get(dst->dev);
- IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
+ IP6_INC_STATS(OutRequests);
err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev, dst_output);
if (!err) {
- ICMP6_INC_STATS(idev, ICMP6_MIB_OUTROUTERSOLICITS);
- ICMP6_INC_STATS(idev, ICMP6_MIB_OUTMSGS);
+ ICMP6_INC_STATS(idev, Icmp6OutRouterSolicits);
+ ICMP6_INC_STATS(idev, Icmp6OutMsgs);
}
if (likely(idev != NULL))
buff->dst = dst;
idev = in6_dev_get(dst->dev);
- IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
+ IP6_INC_STATS(OutRequests);
err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, buff, NULL, dst->dev, dst_output);
if (!err) {
- ICMP6_INC_STATS(idev, ICMP6_MIB_OUTREDIRECTS);
- ICMP6_INC_STATS(idev, ICMP6_MIB_OUTMSGS);
+ ICMP6_INC_STATS(idev, Icmp6OutRedirects);
+ ICMP6_INC_STATS(idev, Icmp6OutMsgs);
}
if (likely(idev != NULL))
};
#ifdef CONFIG_SYSCTL
-int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, int write, struct file * filp, void __user *buffer, size_t *lenp, loff_t *ppos)
+int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, int write, struct file * filp, void __user *buffer, size_t *lenp)
{
struct net_device *dev = ctl->extra1;
struct inet6_dev *idev;
inet6_ifinfo_notify(RTM_NEWLINK, idev);
in6_dev_put(idev);
}
- return proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
+ return proc_dointvec(ctl, write, filp, buffer, lenp);
}
#endif
return 0;
}
-static struct snmp_mib snmp6_ipstats_list[] = {
+static struct snmp_item snmp6_ipstats_list[] = {
/* ipv6 mib according to RFC 2465 */
- SNMP_MIB_ITEM("Ip6InReceives", IPSTATS_MIB_INRECEIVES),
- SNMP_MIB_ITEM("Ip6InHdrErrors", IPSTATS_MIB_INHDRERRORS),
- SNMP_MIB_ITEM("Ip6InTooBigErrors", IPSTATS_MIB_INTOOBIGERRORS),
- SNMP_MIB_ITEM("Ip6InNoRoutes", IPSTATS_MIB_INNOROUTES),
- SNMP_MIB_ITEM("Ip6InAddrErrors", IPSTATS_MIB_INADDRERRORS),
- SNMP_MIB_ITEM("Ip6InUnknownProtos", IPSTATS_MIB_INUNKNOWNPROTOS),
- SNMP_MIB_ITEM("Ip6InTruncatedPkts", IPSTATS_MIB_INTRUNCATEDPKTS),
- SNMP_MIB_ITEM("Ip6InDiscards", IPSTATS_MIB_INDISCARDS),
- SNMP_MIB_ITEM("Ip6InDelivers", IPSTATS_MIB_INDELIVERS),
- SNMP_MIB_ITEM("Ip6OutForwDatagrams", IPSTATS_MIB_OUTFORWDATAGRAMS),
- SNMP_MIB_ITEM("Ip6OutRequests", IPSTATS_MIB_OUTREQUESTS),
- SNMP_MIB_ITEM("Ip6OutDiscards", IPSTATS_MIB_OUTDISCARDS),
- SNMP_MIB_ITEM("Ip6OutNoRoutes", IPSTATS_MIB_OUTNOROUTES),
- SNMP_MIB_ITEM("Ip6ReasmTimeout", IPSTATS_MIB_REASMTIMEOUT),
- SNMP_MIB_ITEM("Ip6ReasmReqds", IPSTATS_MIB_REASMREQDS),
- SNMP_MIB_ITEM("Ip6ReasmOKs", IPSTATS_MIB_REASMOKS),
- SNMP_MIB_ITEM("Ip6ReasmFails", IPSTATS_MIB_REASMFAILS),
- SNMP_MIB_ITEM("Ip6FragOKs", IPSTATS_MIB_FRAGOKS),
- SNMP_MIB_ITEM("Ip6FragFails", IPSTATS_MIB_FRAGFAILS),
- SNMP_MIB_ITEM("Ip6FragCreates", IPSTATS_MIB_FRAGCREATES),
- SNMP_MIB_ITEM("Ip6InMcastPkts", IPSTATS_MIB_INMCASTPKTS),
- SNMP_MIB_ITEM("Ip6OutMcastPkts", IPSTATS_MIB_OUTMCASTPKTS),
- SNMP_MIB_SENTINEL
+#define SNMP6_GEN(x) SNMP_ITEM(struct ipstats_mib, x, "Ip6" #x)
+ SNMP6_GEN(InReceives),
+ SNMP6_GEN(InHdrErrors),
+ SNMP6_GEN(InTooBigErrors),
+ SNMP6_GEN(InNoRoutes),
+ SNMP6_GEN(InAddrErrors),
+ SNMP6_GEN(InUnknownProtos),
+ SNMP6_GEN(InTruncatedPkts),
+ SNMP6_GEN(InDiscards),
+ SNMP6_GEN(InDelivers),
+ SNMP6_GEN(OutForwDatagrams),
+ SNMP6_GEN(OutRequests),
+ SNMP6_GEN(OutDiscards),
+ SNMP6_GEN(OutNoRoutes),
+ SNMP6_GEN(ReasmTimeout),
+ SNMP6_GEN(ReasmReqds),
+ SNMP6_GEN(ReasmOKs),
+ SNMP6_GEN(ReasmFails),
+ SNMP6_GEN(FragOKs),
+ SNMP6_GEN(FragFails),
+ SNMP6_GEN(FragCreates),
+ SNMP6_GEN(InMcastPkts),
+ SNMP6_GEN(OutMcastPkts),
+#undef SNMP6_GEN
+ SNMP_ITEM_SENTINEL
};
-static struct snmp_mib snmp6_icmp6_list[] = {
+static struct snmp_item snmp6_icmp6_list[] = {
/* icmpv6 mib according to RFC 2466
Exceptions: {In|Out}AdminProhibs are removed, because I see
OutRouterAdvertisements too.
OutGroupMembQueries too.
*/
- SNMP_MIB_ITEM("Icmp6InMsgs", ICMP6_MIB_INMSGS),
- SNMP_MIB_ITEM("Icmp6InErrors", ICMP6_MIB_INERRORS),
- SNMP_MIB_ITEM("Icmp6InDestUnreachs", ICMP6_MIB_INDESTUNREACHS),
- SNMP_MIB_ITEM("Icmp6InPktTooBigs", ICMP6_MIB_INPKTTOOBIGS),
- SNMP_MIB_ITEM("Icmp6InTimeExcds", ICMP6_MIB_INTIMEEXCDS),
- SNMP_MIB_ITEM("Icmp6InParmProblems", ICMP6_MIB_INPARMPROBLEMS),
- SNMP_MIB_ITEM("Icmp6InEchos", ICMP6_MIB_INECHOS),
- SNMP_MIB_ITEM("Icmp6InEchoReplies", ICMP6_MIB_INECHOREPLIES),
- SNMP_MIB_ITEM("Icmp6InGroupMembQueries", ICMP6_MIB_INGROUPMEMBQUERIES),
- SNMP_MIB_ITEM("Icmp6InGroupMembResponses", ICMP6_MIB_INGROUPMEMBRESPONSES),
- SNMP_MIB_ITEM("Icmp6InGroupMembReductions", ICMP6_MIB_INGROUPMEMBREDUCTIONS),
- SNMP_MIB_ITEM("Icmp6InRouterSolicits", ICMP6_MIB_INROUTERSOLICITS),
- SNMP_MIB_ITEM("Icmp6InRouterAdvertisements", ICMP6_MIB_INROUTERADVERTISEMENTS),
- SNMP_MIB_ITEM("Icmp6InNeighborSolicits", ICMP6_MIB_INNEIGHBORSOLICITS),
- SNMP_MIB_ITEM("Icmp6InNeighborAdvertisements", ICMP6_MIB_INNEIGHBORADVERTISEMENTS),
- SNMP_MIB_ITEM("Icmp6InRedirects", ICMP6_MIB_INREDIRECTS),
- SNMP_MIB_ITEM("Icmp6OutMsgs", ICMP6_MIB_OUTMSGS),
- SNMP_MIB_ITEM("Icmp6OutDestUnreachs", ICMP6_MIB_OUTDESTUNREACHS),
- SNMP_MIB_ITEM("Icmp6OutPktTooBigs", ICMP6_MIB_OUTPKTTOOBIGS),
- SNMP_MIB_ITEM("Icmp6OutTimeExcds", ICMP6_MIB_OUTTIMEEXCDS),
- SNMP_MIB_ITEM("Icmp6OutParmProblems", ICMP6_MIB_OUTPARMPROBLEMS),
- SNMP_MIB_ITEM("Icmp6OutEchoReplies", ICMP6_MIB_OUTECHOREPLIES),
- SNMP_MIB_ITEM("Icmp6OutRouterSolicits", ICMP6_MIB_OUTROUTERSOLICITS),
- SNMP_MIB_ITEM("Icmp6OutNeighborSolicits", ICMP6_MIB_OUTNEIGHBORSOLICITS),
- SNMP_MIB_ITEM("Icmp6OutNeighborAdvertisements", ICMP6_MIB_OUTNEIGHBORADVERTISEMENTS),
- SNMP_MIB_ITEM("Icmp6OutRedirects", ICMP6_MIB_OUTREDIRECTS),
- SNMP_MIB_ITEM("Icmp6OutGroupMembResponses", ICMP6_MIB_OUTGROUPMEMBRESPONSES),
- SNMP_MIB_ITEM("Icmp6OutGroupMembReductions", ICMP6_MIB_OUTGROUPMEMBREDUCTIONS),
- SNMP_MIB_SENTINEL
+#define SNMP6_GEN(x) SNMP_ITEM(struct icmpv6_mib, x, #x)
+ SNMP6_GEN(Icmp6InMsgs),
+ SNMP6_GEN(Icmp6InErrors),
+ SNMP6_GEN(Icmp6InDestUnreachs),
+ SNMP6_GEN(Icmp6InPktTooBigs),
+ SNMP6_GEN(Icmp6InTimeExcds),
+ SNMP6_GEN(Icmp6InParmProblems),
+ SNMP6_GEN(Icmp6InEchos),
+ SNMP6_GEN(Icmp6InEchoReplies),
+ SNMP6_GEN(Icmp6InGroupMembQueries),
+ SNMP6_GEN(Icmp6InGroupMembResponses),
+ SNMP6_GEN(Icmp6InGroupMembReductions),
+ SNMP6_GEN(Icmp6InRouterSolicits),
+ SNMP6_GEN(Icmp6InRouterAdvertisements),
+ SNMP6_GEN(Icmp6InNeighborSolicits),
+ SNMP6_GEN(Icmp6InNeighborAdvertisements),
+ SNMP6_GEN(Icmp6InRedirects),
+ SNMP6_GEN(Icmp6OutMsgs),
+ SNMP6_GEN(Icmp6OutDestUnreachs),
+ SNMP6_GEN(Icmp6OutPktTooBigs),
+ SNMP6_GEN(Icmp6OutTimeExcds),
+ SNMP6_GEN(Icmp6OutParmProblems),
+ SNMP6_GEN(Icmp6OutEchoReplies),
+ SNMP6_GEN(Icmp6OutRouterSolicits),
+ SNMP6_GEN(Icmp6OutNeighborSolicits),
+ SNMP6_GEN(Icmp6OutNeighborAdvertisements),
+ SNMP6_GEN(Icmp6OutRedirects),
+ SNMP6_GEN(Icmp6OutGroupMembResponses),
+ SNMP6_GEN(Icmp6OutGroupMembReductions),
+#undef SNMP6_GEN
+ SNMP_ITEM_SENTINEL
};
-static struct snmp_mib snmp6_udp6_list[] = {
- SNMP_MIB_ITEM("Udp6InDatagrams", UDP_MIB_INDATAGRAMS),
- SNMP_MIB_ITEM("Udp6NoPorts", UDP_MIB_NOPORTS),
- SNMP_MIB_ITEM("Udp6InErrors", UDP_MIB_INERRORS),
- SNMP_MIB_ITEM("Udp6OutDatagrams", UDP_MIB_OUTDATAGRAMS),
- SNMP_MIB_SENTINEL
+static struct snmp_item snmp6_udp6_list[] = {
+#define SNMP6_GEN(x) SNMP_ITEM(struct udp_mib, Udp##x, "Udp6" #x)
+ SNMP6_GEN(InDatagrams),
+ SNMP6_GEN(NoPorts),
+ SNMP6_GEN(InErrors),
+ SNMP6_GEN(OutDatagrams),
+#undef SNMP6_GEN
+ SNMP_ITEM_SENTINEL
};
static unsigned long
for (i = 0; i < NR_CPUS; i++) {
if (!cpu_possible(i))
continue;
- res += *(((unsigned long *)per_cpu_ptr(mib[0], i)) + offt);
- res += *(((unsigned long *)per_cpu_ptr(mib[1], i)) + offt);
+ res +=
+ *((unsigned long *) (((void *)per_cpu_ptr(mib[0], i)) +
+ offt));
+ res +=
+ *((unsigned long *) (((void *)per_cpu_ptr(mib[1], i)) +
+ offt));
}
return res;
}
static inline void
-snmp6_seq_show_item(struct seq_file *seq, void **mib, struct snmp_mib *itemlist)
+snmp6_seq_show_item(struct seq_file *seq, void **mib, struct snmp_item *itemlist)
{
int i;
for (i=0; itemlist[i].name; i++)
seq_printf(seq, "%-32s\t%lu\n", itemlist[i].name,
- fold_field(mib, itemlist[i].entry));
+ fold_field(mib, itemlist[i].offset));
}
static int snmp6_seq_show(struct seq_file *seq, void *v)
if (np->rxopt.all)
datagram_recv_ctl(sk, msg, skb);
-
err = copied;
- if (flags & MSG_TRUNC)
- err = skb->len;
out_free:
skb_free_datagram(sk, skb);
if (err)
goto error_fault;
- IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
+ IP6_INC_STATS(OutRequests);
err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
dst_output);
if (err > 0)
err = -EFAULT;
kfree_skb(skb);
error:
- IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
+ IP6_INC_STATS(OutDiscards);
return err;
}
static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
struct proto rawv6_prot = {
.name = "RAW",
.close = rawv6_close,
- .connect = ip6_datagram_connect,
+ .connect = udpv6_connect,
.disconnect = udp_disconnect,
.ioctl = rawv6_ioctl,
.init = rawv6_init_sk,
spin_unlock(&fq->lock);
fq_put(fq);
- IP6_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
+ IP6_INC_STATS_BH(ReasmFails);
}
}
fq_kill(fq);
- IP6_INC_STATS_BH(IPSTATS_MIB_REASMTIMEOUT);
- IP6_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
+ IP6_INC_STATS_BH(ReasmTimeout);
+ IP6_INC_STATS_BH(ReasmFails);
/* Send error only if the first segment arrived. */
if (fq->last_in&FIRST_IN && fq->fragments) {
return ip6_frag_intern(hash, fq);
oom:
- IP6_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
+ IP6_INC_STATS_BH(ReasmFails);
return NULL;
}
((u8 *) (fhdr + 1) - (u8 *) (skb->nh.ipv6h + 1)));
if ((unsigned int)end > IPV6_MAXPLEN) {
- IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
+ IP6_INC_STATS_BH(InHdrErrors);
icmpv6_param_prob(skb,ICMPV6_HDR_FIELD, (u8*)&fhdr->frag_off - skb->nh.raw);
return;
}
/* RFC2460 says always send parameter problem in
* this case. -DaveM
*/
- IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
+ IP6_INC_STATS_BH(InHdrErrors);
icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
offsetof(struct ipv6hdr, payload_len));
return;
return;
err:
- IP6_INC_STATS(IPSTATS_MIB_REASMFAILS);
+ IP6_INC_STATS(ReasmFails);
kfree_skb(skb);
}
if (head->ip_summed == CHECKSUM_HW)
head->csum = csum_partial(head->nh.raw, head->h.raw-head->nh.raw, head->csum);
- IP6_INC_STATS_BH(IPSTATS_MIB_REASMOKS);
+ IP6_INC_STATS_BH(ReasmOKs);
fq->fragments = NULL;
*nhoffp = nhoff;
return 1;
if (net_ratelimit())
printk(KERN_DEBUG "ip6_frag_reasm: no memory for reassembly\n");
out_fail:
- IP6_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
+ IP6_INC_STATS_BH(ReasmFails);
return -1;
}
hdr = skb->nh.ipv6h;
- IP6_INC_STATS_BH(IPSTATS_MIB_REASMREQDS);
+ IP6_INC_STATS_BH(ReasmReqds);
/* Jumbo payload inhibits frag. header */
if (hdr->payload_len==0) {
- IP6_INC_STATS(IPSTATS_MIB_INHDRERRORS);
+ IP6_INC_STATS(InHdrErrors);
icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb->h.raw-skb->nh.raw);
return -1;
}
if (!pskb_may_pull(skb, (skb->h.raw-skb->data)+sizeof(struct frag_hdr))) {
- IP6_INC_STATS(IPSTATS_MIB_INHDRERRORS);
+ IP6_INC_STATS(InHdrErrors);
icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb->h.raw-skb->nh.raw);
return -1;
}
if (!(fhdr->frag_off & htons(0xFFF9))) {
/* It is not a fragmented frame */
skb->h.raw += sizeof(struct frag_hdr);
- IP6_INC_STATS_BH(IPSTATS_MIB_REASMOKS);
+ IP6_INC_STATS_BH(ReasmOKs);
*nhoffp = (u8*)fhdr - skb->nh.raw;
return 1;
return ret;
}
- IP6_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
+ IP6_INC_STATS_BH(ReasmFails);
kfree_skb(skb);
return -1;
}
/* Protected by rt6_lock. */
static struct dst_entry *ndisc_dst_gc_list;
static int ipv6_get_mtu(struct net_device *dev);
-
-static inline unsigned int ipv6_advmss(unsigned int mtu)
-{
- mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
-
- if (mtu < ip6_rt_min_advmss)
- mtu = ip6_rt_min_advmss;
-
- /*
- * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
- * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
- * IPV6_MAXPLEN is also valid and means: "any MSS,
- * rely only on pmtu discovery"
- */
- if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
- mtu = IPV6_MAXPLEN;
- return mtu;
-}
+static inline unsigned int ipv6_advmss(unsigned int mtu);
struct dst_entry *ndisc_dst_alloc(struct net_device *dev,
struct neighbour *neigh,
return mtu;
}
+static inline unsigned int ipv6_advmss(unsigned int mtu)
+{
+ mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
+
+ if (mtu < ip6_rt_min_advmss)
+ mtu = ip6_rt_min_advmss;
+
+ /*
+ * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
+ * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
+ * IPV6_MAXPLEN is also valid and means: "any MSS,
+ * rely only on pmtu discovery"
+ */
+ if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
+ mtu = IPV6_MAXPLEN;
+ return mtu;
+}
+
static int ipv6_get_hoplimit(struct net_device *dev)
{
int hoplimit = ipv6_devconf.hop_limit;
int ip6_pkt_discard(struct sk_buff *skb)
{
- IP6_INC_STATS(IPSTATS_MIB_OUTNOROUTES);
+ IP6_INC_STATS(OutNoRoutes);
icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_NOROUTE, 0, skb->dev);
kfree_skb(skb);
return 0;
static
int ipv6_sysctl_rtcache_flush(ctl_table *ctl, int write, struct file * filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
if (write) {
- proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
+ proc_dointvec(ctl, write, filp, buffer, lenp);
if (flush_delay < 0)
flush_delay = 0;
fib6_run_gc((unsigned long)flush_delay);
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
-#include <net/addrconf.h>
-#include <net/snmp.h>
#include <asm/uaccess.h>
/* Silly. Should hash-dance instead... */
local_bh_disable();
tcp_tw_deschedule(tw);
- NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
+ NET_INC_STATS_BH(TimeWaitRecycled);
local_bh_enable();
tcp_tw_put(tw);
sk = tcp_v6_lookup(&hdr->daddr, th->dest, &hdr->saddr, th->source, skb->dev->ifindex);
if (sk == NULL) {
- ICMP6_INC_STATS_BH(__in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
+ ICMP6_INC_STATS_BH(__in6_dev_get(skb->dev), Icmp6InErrors);
return;
}
bh_lock_sock(sk);
if (sock_owned_by_user(sk))
- NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);
+ NET_INC_STATS_BH(LockDroppedIcmps);
if (sk->sk_state == TCP_CLOSE)
goto out;
seq = ntohl(th->seq);
if (sk->sk_state != TCP_LISTEN &&
!between(seq, tp->snd_una, tp->snd_nxt)) {
- NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
+ NET_INC_STATS_BH(OutOfWindowIcmps);
goto out;
}
BUG_TRAP(req->sk == NULL);
if (seq != req->snt_isn) {
- NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
+ NET_INC_STATS_BH(OutOfWindowIcmps);
goto out;
}
case TCP_SYN_RECV: /* Cannot happen.
It can, it SYNs are crossed. --ANK */
if (!sock_owned_by_user(sk)) {
- TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
+ TCP_INC_STATS_BH(TcpAttemptFails);
sk->sk_err = err;
sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
/* sk = NULL, but it is safe for now. RST socket required. */
if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {
ip6_xmit(NULL, buff, &fl, NULL, 0);
- TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
- TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
+ TCP_INC_STATS_BH(TcpOutSegs);
+ TCP_INC_STATS_BH(TcpOutRsts);
return;
}
if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {
ip6_xmit(NULL, buff, &fl, NULL, 0);
- TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
+ TCP_INC_STATS_BH(TcpOutSegs);
return;
}
if (req)
tcp_openreq_free(req);
- TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
+ TCP_INC_STATS_BH(TcpAttemptFails);
return 0; /* don't send reset */
}
return newsk;
out_overflow:
- NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
+ NET_INC_STATS_BH(ListenOverflows);
out:
- NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
+ NET_INC_STATS_BH(ListenDrops);
if (opt && opt != np->opt)
sock_kfree_s(sk, opt, opt->tot_len);
dst_release(dst);
kfree_skb(skb);
return 0;
csum_err:
- TCP_INC_STATS_BH(TCP_MIB_INERRS);
+ TCP_INC_STATS_BH(TcpInErrs);
goto discard;
/*
* Count it even if it's bad.
*/
- TCP_INC_STATS_BH(TCP_MIB_INSEGS);
+ TCP_INC_STATS_BH(TcpInSegs);
if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
goto discard_it;
if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
bad_packet:
- TCP_INC_STATS_BH(TCP_MIB_INERRS);
+ TCP_INC_STATS_BH(TcpInErrs);
} else {
tcp_v6_send_reset(skb);
}
}
if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
- TCP_INC_STATS_BH(TCP_MIB_INERRS);
+ TCP_INC_STATS_BH(TcpInErrs);
tcp_tw_put((struct tcp_tw_bucket *) sk);
goto discard_it;
}
struct tcp_iter_state *st;
if (v == SEQ_START_TOKEN) {
- seq_puts(seq,
- " sl "
- "local_address "
- "remote_address "
- "st tx_queue rx_queue tr tm->when retrnsmt"
- " uid timeout inode\n");
+ seq_printf(seq,
+ " sl "
+ "local_address "
+ "remote_address "
+ "st tx_queue rx_queue tr tm->when retrnsmt"
+ " uid timeout inode\n");
goto out;
}
st = seq->private;
#include <net/addrconf.h>
#include <net/ip.h>
#include <net/udp.h>
-#include <net/raw.h>
#include <net/inet_common.h>
#include <net/ip6_checksum.h>
*
*/
+int udpv6_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+{
+ struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
+ struct inet_opt *inet = inet_sk(sk);
+ struct ipv6_pinfo *np = inet6_sk(sk);
+ struct in6_addr *daddr;
+ struct dst_entry *dst;
+ struct flowi fl;
+ struct ip6_flowlabel *flowlabel = NULL;
+ int addr_type;
+ int err;
+
+ if (usin->sin6_family == AF_INET) {
+ if (__ipv6_only_sock(sk))
+ return -EAFNOSUPPORT;
+ err = udp_connect(sk, uaddr, addr_len);
+ goto ipv4_connected;
+ }
+
+ if (addr_len < SIN6_LEN_RFC2133)
+ return -EINVAL;
+
+ if (usin->sin6_family != AF_INET6)
+ return -EAFNOSUPPORT;
+
+ memset(&fl, 0, sizeof(fl));
+ if (np->sndflow) {
+ fl.fl6_flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
+ if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
+ flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
+ if (flowlabel == NULL)
+ return -EINVAL;
+ ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
+ }
+ }
+
+ addr_type = ipv6_addr_type(&usin->sin6_addr);
+
+ if (addr_type == IPV6_ADDR_ANY) {
+ /*
+ * connect to self
+ */
+ usin->sin6_addr.s6_addr[15] = 0x01;
+ }
+
+ daddr = &usin->sin6_addr;
+
+ if (addr_type == IPV6_ADDR_MAPPED) {
+ struct sockaddr_in sin;
+
+ if (__ipv6_only_sock(sk)) {
+ err = -ENETUNREACH;
+ goto out;
+ }
+ sin.sin_family = AF_INET;
+ sin.sin_addr.s_addr = daddr->s6_addr32[3];
+ sin.sin_port = usin->sin6_port;
+
+ err = udp_connect(sk, (struct sockaddr*) &sin, sizeof(sin));
+
+ipv4_connected:
+ if (err)
+ goto out;
+
+ ipv6_addr_set(&np->daddr, 0, 0, htonl(0x0000ffff), inet->daddr);
+
+ if (ipv6_addr_any(&np->saddr)) {
+ ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000ffff),
+ inet->saddr);
+ }
+
+ if (ipv6_addr_any(&np->rcv_saddr)) {
+ ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000ffff),
+ inet->rcv_saddr);
+ }
+ goto out;
+ }
+
+ if (addr_type&IPV6_ADDR_LINKLOCAL) {
+ if (addr_len >= sizeof(struct sockaddr_in6) &&
+ usin->sin6_scope_id) {
+ if (sk->sk_bound_dev_if &&
+ sk->sk_bound_dev_if != usin->sin6_scope_id) {
+ err = -EINVAL;
+ goto out;
+ }
+ sk->sk_bound_dev_if = usin->sin6_scope_id;
+ if (!sk->sk_bound_dev_if &&
+ (addr_type & IPV6_ADDR_MULTICAST))
+ fl.oif = np->mcast_oif;
+ }
+
+ /* Connect to link-local address requires an interface */
+ if (!sk->sk_bound_dev_if) {
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+ ipv6_addr_copy(&np->daddr, daddr);
+ np->flow_label = fl.fl6_flowlabel;
+
+ inet->dport = usin->sin6_port;
+
+ /*
+ * Check for a route to destination an obtain the
+ * destination cache for it.
+ */
+
+ fl.proto = IPPROTO_UDP;
+ ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
+ ipv6_addr_copy(&fl.fl6_src, &np->saddr);
+ fl.oif = sk->sk_bound_dev_if;
+ fl.fl_ip_dport = inet->dport;
+ fl.fl_ip_sport = inet->sport;
+
+ if (!fl.oif && (addr_type&IPV6_ADDR_MULTICAST))
+ fl.oif = np->mcast_oif;
+
+ if (flowlabel) {
+ if (flowlabel->opt && flowlabel->opt->srcrt) {
+ struct rt0_hdr *rt0 = (struct rt0_hdr *) flowlabel->opt->srcrt;
+ ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
+ }
+ } else if (np->opt && np->opt->srcrt) {
+ struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
+ ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
+ }
+
+ err = ip6_dst_lookup(sk, &dst, &fl);
+ if (err)
+ goto out;
+
+ /* source address lookup done in ip6_dst_lookup */
+
+ if (ipv6_addr_any(&np->saddr))
+ ipv6_addr_copy(&np->saddr, &fl.fl6_src);
+
+ if (ipv6_addr_any(&np->rcv_saddr)) {
+ ipv6_addr_copy(&np->rcv_saddr, &fl.fl6_src);
+ inet->rcv_saddr = LOOPBACK4_IPV6;
+ }
+
+ ip6_dst_store(sk, dst,
+ !ipv6_addr_cmp(&fl.fl6_dst, &np->daddr) ?
+ &np->daddr : NULL);
+
+ sk->sk_state = TCP_ESTABLISHED;
+out:
+ fl6_sock_release(flowlabel);
+ return err;
+}
+
static void udpv6_close(struct sock *sk, long timeout)
{
sk_common_release(sk);
sin6->sin6_scope_id = IP6CB(skb)->iif;
}
}
-
err = copied;
- if (flags & MSG_TRUNC)
- err = skb->len - sizeof(struct udphdr);
out_free:
skb_free_datagram(sk, skb);
skb_free_datagram(sk, skb);
if (flags & MSG_DONTWAIT) {
- UDP6_INC_STATS_USER(UDP_MIB_INERRORS);
+ UDP6_INC_STATS_USER(UdpInErrors);
return -EAGAIN;
}
goto try_again;
if (skb->ip_summed != CHECKSUM_UNNECESSARY) {
if ((unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum))) {
- UDP6_INC_STATS_BH(UDP_MIB_INERRORS);
+ UDP6_INC_STATS_BH(UdpInErrors);
kfree_skb(skb);
return 0;
}
}
if (sock_queue_rcv_skb(sk,skb)<0) {
- UDP6_INC_STATS_BH(UDP_MIB_INERRORS);
+ UDP6_INC_STATS_BH(UdpInErrors);
kfree_skb(skb);
return 0;
}
- UDP6_INC_STATS_BH(UDP_MIB_INDATAGRAMS);
+ UDP6_INC_STATS_BH(UdpInDatagrams);
return 0;
}
if (skb->ip_summed != CHECKSUM_UNNECESSARY &&
(unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum)))
goto discard;
- UDP6_INC_STATS_BH(UDP_MIB_NOPORTS);
+ UDP6_INC_STATS_BH(UdpNoPorts);
icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);
printk(KERN_DEBUG "UDP: short packet: %d/%u\n", ulen, skb->len);
discard:
- UDP6_INC_STATS_BH(UDP_MIB_INERRORS);
+ UDP6_INC_STATS_BH(UdpInErrors);
kfree_skb(skb);
return(0);
}
out:
fl6_sock_release(flowlabel);
if (!err) {
- UDP6_INC_STATS_USER(UDP_MIB_OUTDATAGRAMS);
+ UDP6_INC_STATS_USER(UdpOutDatagrams);
return len;
}
return err;
struct proto udpv6_prot = {
.name = "UDP",
.close = udpv6_close,
- .connect = ip6_datagram_connect,
+ .connect = udpv6_connect,
.disconnect = udp_disconnect,
.ioctl = udp_ioctl,
.destroy = udpv6_destroy_sock,
struct xfrm_state *x;
int xfrm_nr = 0;
int decaps = 0;
- int nexthdr;
- unsigned int nhoff;
+ int nexthdr = 0;
+ u8 *prevhdr = NULL;
- nhoff = *nhoffp;
- nexthdr = skb->nh.raw[nhoff];
+ ip6_find_1stfragopt(skb, &prevhdr);
+ nexthdr = *prevhdr;
+ *nhoffp = prevhdr - skb->nh.raw;
if ((err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) != 0)
goto drop;
if (nexthdr <= 0)
goto drop_unlock;
- skb->nh.raw[nhoff] = nexthdr;
-
if (x->props.replay_window)
xfrm_replay_advance(x, seq);
+++ /dev/null
-/*
- * xfrm6_output.c - Common IPsec encapsulation code for IPv6.
- * Copyright (C) 2002 USAGI/WIDE Project
- * Copyright (c) 2004 Herbert Xu <herbert@gondor.apana.org.au>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/skbuff.h>
-#include <linux/spinlock.h>
-#include <linux/icmpv6.h>
-#include <net/inet_ecn.h>
-#include <net/ipv6.h>
-#include <net/xfrm.h>
-
-/* Add encapsulation header.
- *
- * In transport mode, the IP header and mutable extension headers will be moved
- * forward to make space for the encapsulation header.
- *
- * In tunnel mode, the top IP header will be constructed per RFC 2401.
- * The following fields in it shall be filled in by x->type->output:
- * payload_len
- *
- * On exit, skb->h will be set to the start of the encapsulation header to be
- * filled in by x->type->output and skb->nh will be set to the nextheader field
- * of the extension header directly preceding the encapsulation header, or in
- * its absence, that of the top IP header. The value of skb->data will always
- * point to the top IP header.
- */
-static void xfrm6_encap(struct sk_buff *skb)
-{
- struct dst_entry *dst = skb->dst;
- struct xfrm_state *x = dst->xfrm;
- struct ipv6hdr *iph, *top_iph;
-
- skb_push(skb, x->props.header_len);
- iph = skb->nh.ipv6h;
-
- if (!x->props.mode) {
- u8 *prevhdr;
- int hdr_len;
-
- hdr_len = ip6_find_1stfragopt(skb, &prevhdr);
- skb->nh.raw = prevhdr - x->props.header_len;
- skb->h.raw = skb->data + hdr_len;
- memmove(skb->data, iph, hdr_len);
- return;
- }
-
- skb->nh.raw = skb->data;
- top_iph = skb->nh.ipv6h;
- skb->nh.raw = &top_iph->nexthdr;
- skb->h.ipv6h = top_iph + 1;
-
- top_iph->version = 6;
- top_iph->priority = iph->priority;
- if (x->props.flags & XFRM_STATE_NOECN)
- IP6_ECN_clear(top_iph);
- top_iph->flow_lbl[0] = iph->flow_lbl[0];
- top_iph->flow_lbl[1] = iph->flow_lbl[1];
- top_iph->flow_lbl[2] = iph->flow_lbl[2];
- top_iph->nexthdr = IPPROTO_IPV6;
- top_iph->hop_limit = iph->hop_limit;
- ipv6_addr_copy(&top_iph->saddr, (struct in6_addr *)&x->props.saddr);
- ipv6_addr_copy(&top_iph->daddr, (struct in6_addr *)&x->id.daddr);
-}
-
-static int xfrm6_tunnel_check_size(struct sk_buff *skb)
-{
- int mtu, ret = 0;
- struct dst_entry *dst = skb->dst;
-
- mtu = dst_pmtu(dst) - sizeof(struct ipv6hdr);
- if (mtu < IPV6_MIN_MTU)
- mtu = IPV6_MIN_MTU;
-
- if (skb->len > mtu) {
- icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
- ret = -EMSGSIZE;
- }
-
- return ret;
-}
-
-int xfrm6_output(struct sk_buff **pskb)
-{
- struct sk_buff *skb = *pskb;
- struct dst_entry *dst = skb->dst;
- struct xfrm_state *x = dst->xfrm;
- int err;
-
- if (skb->ip_summed == CHECKSUM_HW) {
- err = skb_checksum_help(pskb, 0);
- skb = *pskb;
- if (err)
- goto error_nolock;
- }
-
- spin_lock_bh(&x->lock);
- err = xfrm_state_check(x, skb);
- if (err)
- goto error;
-
- if (x->props.mode) {
- err = xfrm6_tunnel_check_size(skb);
- if (err)
- goto error;
- }
-
- xfrm6_encap(skb);
-
- err = x->type->output(pskb);
- skb = *pskb;
- if (err)
- goto error;
-
- x->curlft.bytes += skb->len;
- x->curlft.packets++;
-
- spin_unlock_bh(&x->lock);
-
- skb->nh.raw = skb->data;
-
- if (!(skb->dst = dst_pop(dst))) {
- err = -EHOSTUNREACH;
- goto error_nolock;
- }
- err = NET_XMIT_BYPASS;
-
-out_exit:
- return err;
-error:
- spin_unlock_bh(&x->lock);
-error_nolock:
- kfree_skb(skb);
- goto out_exit;
-}
/* Copy neighbour for reachability confirmation */
dst_prev->neighbour = neigh_clone(rt->u.dst.neighbour);
dst_prev->input = rt->u.dst.input;
- dst_prev->output = xfrm6_output;
+ dst_prev->output = dst_prev->xfrm->type->output;
/* Sheit... I remember I did this right. Apparently,
* it was magically lost, so this code needs audit */
x->u.rt6.rt6i_flags = rt0->rt6i_flags&(RTCF_BROADCAST|RTCF_MULTICAST|RTCF_LOCAL);
#include <linux/ipsec.h>
#include <net/ipv6.h>
-static struct xfrm_state_afinfo xfrm6_state_afinfo;
+extern struct xfrm_state_afinfo xfrm6_state_afinfo;
static void
__xfrm6_init_tempsel(struct xfrm_state *x, struct flowi *fl,
proto == x->id.proto &&
!ipv6_addr_cmp((struct in6_addr *)saddr, (struct in6_addr *)x->props.saddr.a6) &&
reqid == x->props.reqid &&
- x->km.state == XFRM_STATE_ACQ &&
- !x->id.spi) {
+ x->km.state == XFRM_STATE_ACQ) {
+ if (!x0)
+ x0 = x;
+ if (x->id.spi)
+ continue;
x0 = x;
break;
}
#include <linux/list.h>
#include <net/ip.h>
#include <net/xfrm.h>
+#include <net/icmp.h>
#include <net/ipv6.h>
-#include <net/protocol.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
EXPORT_SYMBOL(xfrm6_tunnel_free_spi);
+int xfrm6_tunnel_check_size(struct sk_buff *skb)
+{
+ int mtu, ret = 0;
+ struct dst_entry *dst = skb->dst;
+
+ mtu = dst_pmtu(dst) - sizeof(struct ipv6hdr);
+ if (mtu < IPV6_MIN_MTU)
+ mtu = IPV6_MIN_MTU;
+
+ if (skb->len > mtu) {
+ icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
+ ret = -EMSGSIZE;
+ }
+
+ return ret;
+}
+
+EXPORT_SYMBOL(xfrm6_tunnel_check_size);
+
static int xfrm6_tunnel_output(struct sk_buff **pskb)
{
struct sk_buff *skb = *pskb;
- struct ipv6hdr *top_iph;
-
- top_iph = (struct ipv6hdr *)skb->data;
+ struct dst_entry *dst = skb->dst;
+ struct xfrm_state *x = dst->xfrm;
+ struct ipv6hdr *iph, *top_iph;
+ int err;
+
+ if ((err = xfrm6_tunnel_check_size(skb)) != 0)
+ goto error_nolock;
+
+ iph = skb->nh.ipv6h;
+
+ top_iph = (struct ipv6hdr *)skb_push(skb, x->props.header_len);
+ top_iph->version = 6;
+ top_iph->priority = iph->priority;
+ top_iph->flow_lbl[0] = iph->flow_lbl[0];
+ top_iph->flow_lbl[1] = iph->flow_lbl[1];
+ top_iph->flow_lbl[2] = iph->flow_lbl[2];
+ top_iph->nexthdr = IPPROTO_IPV6;
top_iph->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
+ top_iph->hop_limit = iph->hop_limit;
+ memcpy(&top_iph->saddr, (struct in6_addr *)&x->props.saddr, sizeof(struct in6_addr));
+ memcpy(&top_iph->daddr, (struct in6_addr *)&x->id.daddr, sizeof(struct in6_addr));
+ skb->nh.raw = skb->data;
+ skb->h.raw = skb->nh.raw + sizeof(struct ipv6hdr);
- return 0;
+ x->curlft.bytes += skb->len;
+ x->curlft.packets++;
+
+ spin_unlock_bh(&x->lock);
+
+ if ((skb->dst = dst_pop(dst)) == NULL) {
+ kfree_skb(skb);
+ err = -EHOSTUNREACH;
+ goto error_nolock;
+ }
+
+ return NET_XMIT_BYPASS;
+
+error_nolock:
+ kfree_skb(skb);
+ return err;
}
static int xfrm6_tunnel_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struct sk_buff *skb)
unsigned char *asmptr;
int err;
- IRDA_DEBUG(4, "%s(), len=%zd\n", __FUNCTION__, len);
+ IRDA_DEBUG(4, "%s(), len=%d\n", __FUNCTION__, len);
/* Note : socket.c set MSG_EOR on SEQPACKET sockets */
if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT))
/* Check that we don't send out to big frames */
if (len > self->max_data_size) {
- IRDA_DEBUG(2, "%s(), Chopping frame from %zd to %d bytes!\n",
+ IRDA_DEBUG(2, "%s(), Chopping frame from %d to %d bytes!\n",
__FUNCTION__, len, self->max_data_size);
len = self->max_data_size;
}
copied = skb->len;
if (copied > size) {
- IRDA_DEBUG(2, "%s(), Received truncated frame (%zd < %zd)!\n",
+ IRDA_DEBUG(2, "%s(), Received truncated frame (%d < %d)!\n",
__FUNCTION__, copied, size);
copied = size;
msg->msg_flags |= MSG_TRUNC;
unsigned char *asmptr;
int err;
- IRDA_DEBUG(4, "%s(), len=%zd\n", __FUNCTION__, len);
+ IRDA_DEBUG(4, "%s(), len=%d\n", __FUNCTION__, len);
if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT))
return -EINVAL;
*/
if (len > self->max_data_size) {
IRDA_DEBUG(0, "%s(), Warning to much data! "
- "Chopping frame from %zd to %d bytes!\n",
+ "Chopping frame from %d to %d bytes!\n",
__FUNCTION__, len, self->max_data_size);
len = self->max_data_size;
}
unsigned char *asmptr;
int err;
- IRDA_DEBUG(4, "%s(), len=%zd\n", __FUNCTION__, len);
+ IRDA_DEBUG(4, "%s(), len=%d\n", __FUNCTION__, len);
if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT))
return -EINVAL;
*/
if (len > self->max_data_size) {
IRDA_DEBUG(0, "%s(), Warning to much data! "
- "Chopping frame from %zd to %d bytes!\n",
+ "Chopping frame from %d to %d bytes!\n",
__FUNCTION__, len, self->max_data_size);
len = self->max_data_size;
}
* us on that - Jean II */
static int do_devname(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
int ret;
- ret = proc_dostring(table, write, filp, buffer, lenp, ppos);
+ ret = proc_dostring(table, write, filp, buffer, lenp);
if (ret == 0 && write) {
struct ias_value *val;
min_spi = range->sadb_spirange_min;
max_spi = range->sadb_spirange_max;
} else {
- min_spi = 0x100;
- max_spi = 0x0fffffff;
+ min_spi = htonl(0x100);
+ max_spi = htonl(0x0fffffff);
}
- xfrm_alloc_spi(x, htonl(min_spi), htonl(max_spi));
+ xfrm_alloc_spi(x, min_spi, max_spi);
if (x->id.spi)
resp_skb = pfkey_xfrm_state2msg(x, 0, 3);
}
*/
#include <linux/config.h>
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
module_init(nr_proto_init);
-module_param(nr_ndevs, int, 0);
+
+MODULE_PARM(nr_ndevs, "i");
MODULE_PARM_DESC(nr_ndevs, "number of NET/ROM devices");
MODULE_AUTHOR("Jonathan Naylor G4KLX <g4klx@g4klx.demon.co.uk>");
{
BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc));
BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
- BUG_ON(sk->sk_nx_info);
- BUG_ON(sk->sk_vx_info);
if (!sock_flag(sk, SOCK_DEAD)) {
printk("Attempt to release alive packet socket: %p\n", sk);
sk = pt->af_packet_priv;
po = pkt_sk(sk);
- if ((int) sk->sk_xid > 0 && sk->sk_xid != skb->xid)
- goto drop;
-
skb->dev = dev;
if (dev->hard_header) {
}
#endif
- clr_vx_info(&sk->sk_vx_info);
- clr_nx_info(&sk->sk_nx_info);
-
/*
* Now the socket is dead. No more input will appear.
*/
sk->sk_destruct = packet_sock_destruct;
atomic_inc(&packet_socks_nr);
- set_vx_info(&sk->sk_vx_info, current->vx_info);
- sk->sk_xid = vx_current_xid();
- set_nx_info(&sk->sk_nx_info, current->nx_info);
- sk->sk_nid = nx_current_nid();
-
/*
* Attach a protocol block
*/
.mmap = packet_mmap,
.sendpage = sock_no_sendpage,
};
-EXPORT_SYMBOL(packet_ops);
-struct net_proto_family packet_family_ops = {
+static struct net_proto_family packet_family_ops = {
.family = PF_PACKET,
.create = packet_create,
.owner = THIS_MODULE,
};
-EXPORT_SYMBOL(packet_family_ops);
static struct notifier_block packet_netdev_notifier = {
.notifier_call =packet_notifier,
*/
#include <linux/config.h>
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/types.h>
int sysctl_rose_maximum_vcs = ROSE_DEFAULT_MAXVC;
int sysctl_rose_window_size = ROSE_DEFAULT_WINDOW_SIZE;
-static HLIST_HEAD(rose_list);
+HLIST_HEAD(rose_list);
spinlock_t rose_list_lock = SPIN_LOCK_UNLOCKED;
static struct proto_ops rose_proto_ops;
}
module_init(rose_proto_init);
-module_param(rose_ndevs, int, 0);
+MODULE_PARM(rose_ndevs, "i");
MODULE_PARM_DESC(rose_ndevs, "number of ROSE devices");
MODULE_AUTHOR("Jonathan Naylor G4KLX <g4klx@g4klx.demon.co.uk>");
struct rxrpc_message *msg)
{
struct msghdr msghdr;
+ mm_segment_t oldfs;
int ret;
_enter("%p{%d}", conn, ntohs(conn->addr.sin_port));
/* set up the message to be transmitted */
msghdr.msg_name = &conn->addr;
msghdr.msg_namelen = sizeof(conn->addr);
+ /*
+ * the following is safe, since for compiler definitions of kvec and
+ * iovec are identical, yielding the same in-core layout and alignment
+ */
+ msghdr.msg_iov = (struct iovec *)msg->data;
+ msghdr.msg_iovlen = msg->dcount;
msghdr.msg_control = NULL;
msghdr.msg_controllen = 0;
msghdr.msg_flags = MSG_CONFIRM | MSG_DONTWAIT;
htons(conn->addr.sin_port));
/* send the message */
- ret = kernel_sendmsg(conn->trans->socket, &msghdr,
- msg->data, msg->dcount, msg->dsize);
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ ret = sock_sendmsg(conn->trans->socket, &msghdr, msg->dsize);
+ set_fs(oldfs);
+
if (ret < 0) {
msg->state = RXRPC_MSG_ERROR;
- } else {
+ }
+ else {
msg->state = RXRPC_MSG_SENT;
ret = 0;
struct sockaddr_in sin;
struct msghdr msghdr;
struct kvec iov[2];
+ mm_segment_t oldfs;
uint32_t _error;
int len, ret;
msghdr.msg_name = &sin;
msghdr.msg_namelen = sizeof(sin);
+ /*
+ * the following is safe, since for compiler definitions of kvec and
+ * iovec are identical, yielding the same in-core layout and alignment
+ */
+ msghdr.msg_iov = (struct iovec *)iov;
+ msghdr.msg_iovlen = 2;
msghdr.msg_control = NULL;
msghdr.msg_controllen = 0;
msghdr.msg_flags = MSG_DONTWAIT;
htons(sin.sin_port));
/* send the message */
- ret = kernel_sendmsg(trans->socket, &msghdr, iov, 2, len);
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ ret = sock_sendmsg(trans->socket, &msghdr, len);
+ set_fs(oldfs);
_leave(" = %d", ret);
return ret;
struct list_head connq, *_p;
struct errormsg emsg;
struct msghdr msg;
+ mm_segment_t oldfs;
uint16_t port;
int local, err;
/* try and receive an error message */
msg.msg_name = &sin;
msg.msg_namelen = sizeof(sin);
+ msg.msg_iov = NULL;
+ msg.msg_iovlen = 0;
msg.msg_control = &emsg;
msg.msg_controllen = sizeof(emsg);
msg.msg_flags = 0;
- err = kernel_recvmsg(trans->socket, &msg, NULL, 0, 0,
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ err = sock_recvmsg(trans->socket, &msg, 0,
MSG_ERRQUEUE | MSG_DONTWAIT | MSG_TRUNC);
+ set_fs(oldfs);
if (err == -EAGAIN) {
_leave("");
#
# Traffic control configuration.
#
-choice
- prompt "Packet scheduler clock source"
- depends on NET_SCHED
- default NET_SCH_CLK_JIFFIES
- help
- Packet schedulers need a monotonic clock that increments at a static
- rate. The kernel provides several suitable interfaces, each with
- different properties:
-
- - high resolution (us or better)
- - fast to read (minimal locking, no i/o access)
- - synchronized on all processors
- - handles cpu clock frequency changes
-
- but nothing provides all of the above.
-
-config NET_SCH_CLK_JIFFIES
- bool "Timer interrupt"
- help
- Say Y here if you want to use the timer interrupt (jiffies) as clock
- source. This clock source is fast, synchronized on all processors and
- handles cpu clock frequency changes, but its resolution is too low
- for accurate shaping except at very low speed.
-
-config NET_SCH_CLK_GETTIMEOFDAY
- bool "gettimeofday"
- help
- Say Y here if you want to use gettimeofday as clock source. This clock
- source has high resolution, is synchronized on all processors and
- handles cpu clock frequency changes, but it is slow.
-
- Choose this if you need a high resolution clock source but can't use
- the CPU's cycle counter.
-
-config NET_SCH_CLK_CPU
- bool "CPU cycle counter"
- depends on X86_TSC || X86_64 || ALPHA || SPARC64 || PPC64 || IA64
- help
- Say Y here if you want to use the CPU's cycle counter as clock source.
- This is a cheap and high resolution clock source, but on some
- architectures it is not synchronized on all processors and doesn't
- handle cpu clock frequency changes.
-
- The useable cycle counters are:
-
- x86/x86_64 - Timestamp Counter
- alpha - Cycle Counter
- sparc64 - %ticks register
- ppc64 - Time base
- ia64 - Interval Time Counter
-
- Choose this if your CPU's cycle counter is working properly.
-
-endchoice
-
config NET_SCH_CBQ
tristate "CBQ packet scheduler"
depends on NET_SCHED
testing applications or protocols.
To compile this driver as a module, choose M here: the module
- will be called sch_netem.
-
- If unsure, say N.
+ will be called sch_delay.
config NET_SCH_INGRESS
tristate "Ingress Qdisc"
kfree(tp);
goto errout;
}
-
- qdisc_lock_tree(dev);
+ write_lock(&qdisc_tree_lock);
+ spin_lock_bh(&dev->queue_lock);
tp->next = *back;
*back = tp;
- qdisc_unlock_tree(dev);
-
+ spin_unlock_bh(&dev->queue_lock);
+ write_unlock(&qdisc_tree_lock);
} else if (tca[TCA_KIND-1] && rtattr_strcmp(tca[TCA_KIND-1], tp->ops->kind))
goto errout;
if (fh == 0) {
if (n->nlmsg_type == RTM_DELTFILTER && t->tcm_handle == 0) {
- qdisc_lock_tree(dev);
+ write_lock(&qdisc_tree_lock);
+ spin_lock_bh(&dev->queue_lock);
*back = tp->next;
- qdisc_unlock_tree(dev);
-
+ spin_unlock_bh(&dev->queue_lock);
+ write_unlock(&qdisc_tree_lock);
tfilter_notify(skb, n, tp, fh_s, RTM_DELTFILTER);
tcf_destroy(tp);
err = 0;
return err;
}
-unsigned long tcf_set_class(struct tcf_proto *tp, unsigned long *clp,
- unsigned long cl)
-{
- unsigned long old_cl;
-
- tcf_tree_lock(tp);
- old_cl = __cls_set_class(clp, cl);
- tcf_tree_unlock(tp);
-
- return old_cl;
-}
-
-
static int
tcf_fill_node(struct sk_buff *skb, struct tcf_proto *tp, unsigned long fh,
u32 pid, u32 seq, unsigned flags, int event)
if ((dev = dev_get_by_index(tcm->tcm_ifindex)) == NULL)
return skb->len;
- read_lock_bh(&qdisc_tree_lock);
+ read_lock(&qdisc_tree_lock);
if (!tcm->tcm_parent)
q = dev->qdisc_sleeping;
else
if (cl)
cops->put(q, cl);
out:
- read_unlock_bh(&qdisc_tree_lock);
+ read_unlock(&qdisc_tree_lock);
dev_put(dev);
return skb->len;
}
EXPORT_SYMBOL(register_tcf_proto_ops);
EXPORT_SYMBOL(unregister_tcf_proto_ops);
-EXPORT_SYMBOL(tcf_set_class);
struct tc_u_hnode *ht_up;
#ifdef CONFIG_NET_CLS_ACT
struct tc_action *action;
+#ifdef CONFIG_NET_CLS_IND
+ char indev[IFNAMSIZ];
+#endif
#else
#ifdef CONFIG_NET_CLS_POLICE
struct tcf_police *police;
#endif
-#endif
-#ifdef CONFIG_NET_CLS_IND
- char indev[IFNAMSIZ];
#endif
u8 fshift;
struct tcf_result res;
struct tc_u_hnode *ht_down;
-#ifdef CONFIG_CLS_U32_PERF
- struct tc_u32_pcnt *pf;
-#endif
struct tc_u32_sel sel;
};
int sdepth = 0;
int off2 = 0;
int sel = 0;
-#ifdef CONFIG_CLS_U32_PERF
- int j;
-#endif
int i;
next_ht:
struct tc_u32_key *key = n->sel.keys;
#ifdef CONFIG_CLS_U32_PERF
- n->pf->rcnt +=1;
- j = 0;
+ n->sel.rcnt +=1;
#endif
for (i = n->sel.nkeys; i>0; i--, key++) {
goto next_knode;
}
#ifdef CONFIG_CLS_U32_PERF
- n->pf->kcnts[j] +=1;
- j++;
+ key->kcnt +=1;
#endif
}
if (n->ht_down == NULL) {
if (n->sel.flags&TC_U32_TERMINAL) {
*res = n->res;
+#ifdef CONFIG_NET_CLS_ACT
#ifdef CONFIG_NET_CLS_IND
/* yes, i know it sucks but the feature is
** optional dammit! - JHS */
}
#endif
#ifdef CONFIG_CLS_U32_PERF
- n->pf->rhit +=1;
+ n->sel.rhit +=1;
#endif
-#ifdef CONFIG_NET_CLS_ACT
if (n->action) {
int pol_res = tcf_action_exec(skb, n->action);
if (skb->tc_classid > 0) {
#endif
if (n->ht_down)
n->ht_down->refcnt--;
-#ifdef CONFIG_CLS_U32_PERF
- if (n && (NULL != n->pf))
- kfree(n->pf);
-#endif
kfree(n);
return 0;
}
tcf_action_destroy(act, TCA_ACT_UNBIND);
}
-
-#else
-#ifdef CONFIG_NET_CLS_POLICE
- if (tb[TCA_U32_POLICE-1]) {
- struct tcf_police *police = tcf_police_locate(tb[TCA_U32_POLICE-1], est);
- sch_tree_lock(q);
- police = xchg(&n->police, police);
- sch_tree_unlock(q);
- tcf_police_release(police, TCA_ACT_UNBIND);
- }
-#endif
-#endif
#ifdef CONFIG_NET_CLS_IND
n->indev[0] = 0;
if(tb[TCA_U32_INDEV-1]) {
return -EINVAL;
}
sprintf(n->indev, "%s", (char*)RTA_DATA(input_dev));
- printk("got IND %s\n",n->indev);
}
#endif
+#else
+#ifdef CONFIG_NET_CLS_POLICE
+ if (tb[TCA_U32_POLICE-1]) {
+ struct tcf_police *police = tcf_police_locate(tb[TCA_U32_POLICE-1], est);
+ sch_tree_lock(q);
+ police = xchg(&n->police, police);
+ sch_tree_unlock(q);
+ tcf_police_release(police, TCA_ACT_UNBIND);
+ }
+#endif
+#endif
+
return 0;
}
s = RTA_DATA(tb[TCA_U32_SEL-1]);
+#ifdef CONFIG_CLS_U32_PERF
+ if (RTA_PAYLOAD(tb[TCA_U32_SEL-1]) <
+ (s->nkeys*sizeof(struct tc_u32_key)) + sizeof(struct tc_u32_sel)) {
+ printk("Please upgrade your iproute2 tools or compile proper options in!\n");
+ return -EINVAL;
+}
+#endif
n = kmalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL);
if (n == NULL)
return -ENOBUFS;
-
memset(n, 0, sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key));
-#ifdef CONFIG_CLS_U32_PERF
- n->pf = kmalloc(sizeof(struct tc_u32_pcnt) + s->nkeys*sizeof(__u64), GFP_KERNEL);
- if (n->pf == NULL) {
- kfree(n);
- return -ENOBUFS;
- }
- memset(n->pf, 0, sizeof(struct tc_u32_pcnt) + s->nkeys*sizeof(__u64));
-#endif
-
memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
n->ht_up = ht;
n->handle = handle;
*arg = (unsigned long)n;
return 0;
}
-#ifdef CONFIG_CLS_U32_PERF
- if (n && (NULL != n->pf))
- kfree(n->pf);
-#endif
kfree(n);
return err;
}
p_rta->rta_len = skb->tail - (u8*)p_rta;
}
+#ifdef CONFIG_NET_CLS_IND
+ if(strlen(n->indev)) {
+ struct rtattr * p_rta = (struct rtattr*)skb->tail;
+ RTA_PUT(skb, TCA_U32_INDEV, IFNAMSIZ, n->indev);
+ p_rta->rta_len = skb->tail - (u8*)p_rta;
+ }
+#endif
#else
#ifdef CONFIG_NET_CLS_POLICE
}
#endif
-#endif
-
-#ifdef CONFIG_NET_CLS_IND
- if(strlen(n->indev)) {
- struct rtattr * p_rta = (struct rtattr*)skb->tail;
- RTA_PUT(skb, TCA_U32_INDEV, IFNAMSIZ, n->indev);
- p_rta->rta_len = skb->tail - (u8*)p_rta;
- }
-#endif
-#ifdef CONFIG_CLS_U32_PERF
- RTA_PUT(skb, TCA_U32_PCNT,
- sizeof(struct tc_u32_pcnt) + n->sel.nkeys*sizeof(__u64),
- n->pf);
#endif
}
rta->rta_len = skb->tail - b;
#ifdef CONFIG_NET_CLS_ACT
- if (TC_U32_KEY(n->handle) != 0) {
- if (TC_U32_KEY(n->handle) && n->action && n->action->type == TCA_OLD_COMPAT) {
- if (tcf_action_copy_stats(skb,n->action))
- goto rtattr_failure;
- }
+ if (TC_U32_KEY(n->handle) && n->action && n->action->type == TCA_OLD_COMPAT) {
+ if (tcf_action_copy_stats(skb,n->action))
+ goto rtattr_failure;
}
#else
#ifdef CONFIG_NET_CLS_POLICE
static int __init init_u32(void)
{
- printk("u32 classifier\n");
-#ifdef CONFIG_CLS_U32_PERF
- printk(" Perfomance counters on\n");
-#endif
-#ifdef CONFIG_NET_CLS_POLICE
- printk(" OLD policer on \n");
-#endif
-#ifdef CONFIG_NET_CLS_IND
- printk(" input device check on \n");
-#endif
-#ifdef CONFIG_NET_CLS_ACT
- printk(" Actions configured \n");
-#endif
return register_tcf_proto_ops(&cls_u32_ops);
}
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kmod.h>
-#include <linux/list.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
{
struct Qdisc *q;
- list_for_each_entry(q, &dev->qdisc_list, list) {
+ for (q = dev->qdisc_list; q; q = q->next) {
if (q->handle == handle)
return q;
}
if (dev->flags & IFF_UP)
dev_deactivate(dev);
- qdisc_lock_tree(dev);
+ write_lock(&qdisc_tree_lock);
+ spin_lock_bh(&dev->queue_lock);
if (qdisc && qdisc->flags&TCQ_F_INGRES) {
oqdisc = dev->qdisc_ingress;
/* Prune old scheduler */
dev->qdisc = &noop_qdisc;
}
- qdisc_unlock_tree(dev);
+ spin_unlock_bh(&dev->queue_lock);
+ write_unlock(&qdisc_tree_lock);
if (dev->flags & IFF_UP)
dev_activate(dev);
{
int err;
struct rtattr *kind = tca[TCA_KIND-1];
- void *p = NULL;
- struct Qdisc *sch;
+ struct Qdisc *sch = NULL;
struct Qdisc_ops *ops;
int size;
if (ops == NULL)
goto err_out;
- /* ensure that the Qdisc and the private data are 32-byte aligned */
- size = ((sizeof(*sch) + QDISC_ALIGN_CONST) & ~QDISC_ALIGN_CONST);
- size += ops->priv_size + QDISC_ALIGN_CONST;
+ size = sizeof(*sch) + ops->priv_size;
- p = kmalloc(size, GFP_KERNEL);
+ sch = kmalloc(size, GFP_KERNEL);
err = -ENOBUFS;
- if (!p)
+ if (!sch)
goto err_out;
- memset(p, 0, size);
- sch = (struct Qdisc *)(((unsigned long)p + QDISC_ALIGN_CONST)
- & ~QDISC_ALIGN_CONST);
- sch->padded = (char *)sch - (char *)p;
/* Grrr... Resolve race condition with module unload */
if (ops != qdisc_lookup_ops(kind))
goto err_out;
- INIT_LIST_HEAD(&sch->list);
+ memset(sch, 0, size);
+
skb_queue_head_init(&sch->q);
if (handle == TC_H_INGRESS)
sch->enqueue = ops->enqueue;
sch->dequeue = ops->dequeue;
sch->dev = dev;
- dev_hold(dev);
atomic_set(&sch->refcnt, 1);
sch->stats_lock = &dev->queue_lock;
if (handle == 0) {
* before we set a netdevice's qdisc pointer to sch */
smp_wmb();
if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS-1])) == 0) {
- qdisc_lock_tree(dev);
- list_add_tail(&sch->list, &dev->qdisc_list);
- qdisc_unlock_tree(dev);
-
+ write_lock(&qdisc_tree_lock);
+ sch->next = dev->qdisc_list;
+ dev->qdisc_list = sch;
+ write_unlock(&qdisc_tree_lock);
#ifdef CONFIG_NET_ESTIMATOR
if (tca[TCA_RATE-1])
qdisc_new_estimator(&sch->stats, sch->stats_lock,
err_out:
*errp = err;
- if (p)
- kfree(p);
+ if (sch)
+ kfree(sch);
return NULL;
}
nlh->nlmsg_flags = flags;
tcm = NLMSG_DATA(nlh);
tcm->tcm_family = AF_UNSPEC;
- tcm->tcm_ifindex = q->dev->ifindex;
+ tcm->tcm_ifindex = q->dev ? q->dev->ifindex : 0;
tcm->tcm_parent = clid;
tcm->tcm_handle = q->handle;
tcm->tcm_info = atomic_read(&q->refcnt);
continue;
if (idx > s_idx)
s_q_idx = 0;
- read_lock_bh(&qdisc_tree_lock);
- q_idx = 0;
- list_for_each_entry(q, &dev->qdisc_list, list) {
- if (q_idx < s_q_idx) {
- q_idx++;
+ read_lock(&qdisc_tree_lock);
+ for (q = dev->qdisc_list, q_idx = 0; q;
+ q = q->next, q_idx++) {
+ if (q_idx < s_q_idx)
continue;
- }
if (tc_fill_qdisc(skb, q, 0, NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0) {
- read_unlock_bh(&qdisc_tree_lock);
+ read_unlock(&qdisc_tree_lock);
goto done;
}
- q_idx++;
}
- read_unlock_bh(&qdisc_tree_lock);
+ read_unlock(&qdisc_tree_lock);
}
done:
nlh->nlmsg_flags = flags;
tcm = NLMSG_DATA(nlh);
tcm->tcm_family = AF_UNSPEC;
- tcm->tcm_ifindex = q->dev->ifindex;
+ tcm->tcm_ifindex = q->dev ? q->dev->ifindex : 0;
tcm->tcm_parent = q->handle;
tcm->tcm_handle = q->handle;
tcm->tcm_info = 0;
return 0;
s_t = cb->args[0];
- t = 0;
-
- read_lock_bh(&qdisc_tree_lock);
- list_for_each_entry(q, &dev->qdisc_list, list) {
- if (t < s_t || !q->ops->cl_ops ||
- (tcm->tcm_parent &&
- TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
- t++;
+
+ read_lock(&qdisc_tree_lock);
+ for (q=dev->qdisc_list, t=0; q; q = q->next, t++) {
+ if (t < s_t) continue;
+ if (!q->ops->cl_ops) continue;
+ if (tcm->tcm_parent && TC_H_MAJ(tcm->tcm_parent) != q->handle)
continue;
- }
if (t > s_t)
memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
arg.w.fn = qdisc_class_dump;
cb->args[1] = arg.w.count;
if (arg.w.stop)
break;
- t++;
}
- read_unlock_bh(&qdisc_tree_lock);
+ read_unlock(&qdisc_tree_lock);
cb->args[0] = t;
};
#endif
-#ifdef CONFIG_NET_SCH_CLK_GETTIMEOFDAY
+#if PSCHED_CLOCK_SOURCE == PSCHED_GETTIMEOFDAY
int psched_tod_diff(int delta_sec, int bound)
{
int delta;
EXPORT_SYMBOL(psched_tod_diff);
#endif
-#ifdef CONFIG_NET_SCH_CLK_CPU
+psched_time_t psched_time_base;
+
+#if PSCHED_CLOCK_SOURCE == PSCHED_CPU
psched_tdiff_t psched_clock_per_hz;
int psched_clock_scale;
EXPORT_SYMBOL(psched_clock_per_hz);
EXPORT_SYMBOL(psched_clock_scale);
+#endif
-psched_time_t psched_time_base;
-cycles_t psched_time_mark;
+#ifdef PSCHED_WATCHER
+PSCHED_WATCHER psched_time_mark;
EXPORT_SYMBOL(psched_time_mark);
EXPORT_SYMBOL(psched_time_base);
-/*
- * Periodically adjust psched_time_base to avoid overflow
- * with 32-bit get_cycles(). Safe up to 4GHz CPU.
- */
static void psched_tick(unsigned long);
+
static struct timer_list psched_timer = TIMER_INITIALIZER(psched_tick, 0, 0);
static void psched_tick(unsigned long dummy)
{
- if (sizeof(cycles_t) == sizeof(u32)) {
- psched_time_t dummy_stamp;
- PSCHED_GET_TIME(dummy_stamp);
- psched_timer.expires = jiffies + 1*HZ;
- add_timer(&psched_timer);
- }
+#if PSCHED_CLOCK_SOURCE == PSCHED_CPU
+ psched_time_t dummy_stamp;
+ PSCHED_GET_TIME(dummy_stamp);
+ /* It is OK up to 4GHz cpu */
+ psched_timer.expires = jiffies + 1*HZ;
+#else
+ unsigned long now = jiffies;
+ psched_time_base += ((u64)(now-psched_time_mark))<<PSCHED_JSCALE;
+ psched_time_mark = now;
+ psched_timer.expires = now + 60*60*HZ;
+#endif
+ add_timer(&psched_timer);
}
+#endif
+#if PSCHED_CLOCK_SOURCE == PSCHED_CPU
int __init psched_calibrate_clock(void)
{
psched_time_t stamp, stamp1;
long rdelay;
unsigned long stop;
+#ifdef PSCHED_WATCHER
psched_tick(0);
+#endif
stop = jiffies + HZ/10;
PSCHED_GET_TIME(stamp);
do_gettimeofday(&tv);
{
struct rtnetlink_link *link_p;
-#ifdef CONFIG_NET_SCH_CLK_CPU
+#if PSCHED_CLOCK_SOURCE == PSCHED_CPU
if (psched_calibrate_clock() < 0)
return -1;
-#elif defined(CONFIG_NET_SCH_CLK_JIFFIES)
+#elif PSCHED_CLOCK_SOURCE == PSCHED_JIFFIES
psched_tick_per_us = HZ<<PSCHED_JSCALE;
psched_us_per_tick = 1000000;
+#ifdef PSCHED_WATCHER
+ psched_tick(0);
+#endif
#endif
link_p = rtnetlink_links[PF_UNSPEC];
*/
-#define PRIV(sch) qdisc_priv(sch)
+#define PRIV(sch) ((struct atm_qdisc_data *) (sch)->data)
#define VCC2FLOW(vcc) ((struct atm_flow_data *) ((vcc)->user_back))
static __inline__ struct atm_flow_data *lookup_flow(struct Qdisc *sch,
u32 classid)
{
- struct atm_qdisc_data *p = PRIV(sch);
struct atm_flow_data *flow;
- for (flow = p->flows; flow; flow = flow->next)
+ for (flow = PRIV(sch)->flows; flow; flow = flow->next)
if (flow->classid == classid) break;
return flow;
}
static struct cbq_class *
cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qres)
{
- struct cbq_sched_data *q = qdisc_priv(sch);
+ struct cbq_sched_data *q = (struct cbq_sched_data*)sch->data;
struct cbq_class *head = &q->link;
struct cbq_class **defmap;
struct cbq_class *cl = NULL;
static __inline__ void cbq_activate_class(struct cbq_class *cl)
{
- struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
+ struct cbq_sched_data *q = (struct cbq_sched_data*)cl->qdisc->data;
int prio = cl->cpriority;
struct cbq_class *cl_tail;
static void cbq_deactivate_class(struct cbq_class *this)
{
- struct cbq_sched_data *q = qdisc_priv(this->qdisc);
+ struct cbq_sched_data *q = (struct cbq_sched_data*)this->qdisc->data;
int prio = this->cpriority;
struct cbq_class *cl;
struct cbq_class *cl_prev = q->active[prio];
static int
cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
- struct cbq_sched_data *q = qdisc_priv(sch);
+ struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
int len = skb->len;
int ret = NET_XMIT_SUCCESS;
struct cbq_class *cl = cbq_classify(skb, sch,&ret);
static int
cbq_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
- struct cbq_sched_data *q = qdisc_priv(sch);
+ struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
struct cbq_class *cl;
int ret;
static void cbq_ovl_classic(struct cbq_class *cl)
{
- struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
+ struct cbq_sched_data *q = (struct cbq_sched_data *)cl->qdisc->data;
psched_tdiff_t delay = PSCHED_TDIFF(cl->undertime, q->now);
if (!cl->delayed) {
static void cbq_ovl_rclassic(struct cbq_class *cl)
{
- struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
+ struct cbq_sched_data *q = (struct cbq_sched_data *)cl->qdisc->data;
struct cbq_class *this = cl;
do {
static void cbq_ovl_delay(struct cbq_class *cl)
{
- struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
+ struct cbq_sched_data *q = (struct cbq_sched_data *)cl->qdisc->data;
psched_tdiff_t delay = PSCHED_TDIFF(cl->undertime, q->now);
if (!cl->delayed) {
static void cbq_ovl_lowprio(struct cbq_class *cl)
{
- struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
+ struct cbq_sched_data *q = (struct cbq_sched_data*)cl->qdisc->data;
cl->penalized = jiffies + cl->penalty;
static void cbq_undelay(unsigned long arg)
{
struct Qdisc *sch = (struct Qdisc*)arg;
- struct cbq_sched_data *q = qdisc_priv(sch);
+ struct cbq_sched_data *q = (struct cbq_sched_data*)sch->data;
long delay = 0;
unsigned pmask;
{
int len = skb->len;
struct Qdisc *sch = child->__parent;
- struct cbq_sched_data *q = qdisc_priv(sch);
+ struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
struct cbq_class *cl = q->rx_class;
q->rx_class = NULL;
static __inline__ struct cbq_class *
cbq_under_limit(struct cbq_class *cl)
{
- struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
+ struct cbq_sched_data *q = (struct cbq_sched_data*)cl->qdisc->data;
struct cbq_class *this_cl = cl;
if (cl->tparent == NULL)
static __inline__ struct sk_buff *
cbq_dequeue_prio(struct Qdisc *sch, int prio)
{
- struct cbq_sched_data *q = qdisc_priv(sch);
+ struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
struct cbq_class *cl_tail, *cl_prev, *cl;
struct sk_buff *skb;
int deficit;
static __inline__ struct sk_buff *
cbq_dequeue_1(struct Qdisc *sch)
{
- struct cbq_sched_data *q = qdisc_priv(sch);
+ struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
struct sk_buff *skb;
unsigned activemask;
cbq_dequeue(struct Qdisc *sch)
{
struct sk_buff *skb;
- struct cbq_sched_data *q = qdisc_priv(sch);
+ struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
psched_time_t now;
psched_tdiff_t incr;
static void cbq_sync_defmap(struct cbq_class *cl)
{
- struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
+ struct cbq_sched_data *q = (struct cbq_sched_data*)cl->qdisc->data;
struct cbq_class *split = cl->split;
unsigned h;
int i;
static void cbq_unlink_class(struct cbq_class *this)
{
struct cbq_class *cl, **clp;
- struct cbq_sched_data *q = qdisc_priv(this->qdisc);
+ struct cbq_sched_data *q = (struct cbq_sched_data*)this->qdisc->data;
for (clp = &q->classes[cbq_hash(this->classid)]; (cl = *clp) != NULL; clp = &cl->next) {
if (cl == this) {
static void cbq_link_class(struct cbq_class *this)
{
- struct cbq_sched_data *q = qdisc_priv(this->qdisc);
+ struct cbq_sched_data *q = (struct cbq_sched_data*)this->qdisc->data;
unsigned h = cbq_hash(this->classid);
struct cbq_class *parent = this->tparent;
static unsigned int cbq_drop(struct Qdisc* sch)
{
- struct cbq_sched_data *q = qdisc_priv(sch);
+ struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
struct cbq_class *cl, *cl_head;
int prio;
unsigned int len;
static void
cbq_reset(struct Qdisc* sch)
{
- struct cbq_sched_data *q = qdisc_priv(sch);
+ struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
struct cbq_class *cl;
int prio;
unsigned h;
static int cbq_set_wrr(struct cbq_class *cl, struct tc_cbq_wrropt *wrr)
{
- struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
+ struct cbq_sched_data *q = (struct cbq_sched_data *)cl->qdisc->data;
if (wrr->allot)
cl->allot = wrr->allot;
static int cbq_init(struct Qdisc *sch, struct rtattr *opt)
{
- struct cbq_sched_data *q = qdisc_priv(sch);
+ struct cbq_sched_data *q = (struct cbq_sched_data*)sch->data;
struct rtattr *tb[TCA_CBQ_MAX];
struct tc_ratespec *r;
static int cbq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
- struct cbq_sched_data *q = qdisc_priv(sch);
+ struct cbq_sched_data *q = (struct cbq_sched_data*)sch->data;
unsigned char *b = skb->tail;
struct rtattr *rta;
cbq_dump_class(struct Qdisc *sch, unsigned long arg,
struct sk_buff *skb, struct tcmsg *tcm)
{
- struct cbq_sched_data *q = qdisc_priv(sch);
+ struct cbq_sched_data *q = (struct cbq_sched_data*)sch->data;
struct cbq_class *cl = (struct cbq_class*)arg;
unsigned char *b = skb->tail;
struct rtattr *rta;
static unsigned long cbq_get(struct Qdisc *sch, u32 classid)
{
- struct cbq_sched_data *q = qdisc_priv(sch);
+ struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
struct cbq_class *cl = cbq_class_lookup(q, classid);
if (cl) {
static void
cbq_destroy(struct Qdisc* sch)
{
- struct cbq_sched_data *q = qdisc_priv(sch);
+ struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
struct cbq_class *cl;
unsigned h;
if (--cl->refcnt == 0) {
#ifdef CONFIG_NET_CLS_POLICE
- struct cbq_sched_data *q = qdisc_priv(sch);
+ struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
spin_lock_bh(&sch->dev->queue_lock);
if (q->rx_class == cl)
unsigned long *arg)
{
int err;
- struct cbq_sched_data *q = qdisc_priv(sch);
+ struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
struct cbq_class *cl = (struct cbq_class*)*arg;
struct rtattr *opt = tca[TCA_OPTIONS-1];
struct rtattr *tb[TCA_CBQ_MAX];
static int cbq_delete(struct Qdisc *sch, unsigned long arg)
{
- struct cbq_sched_data *q = qdisc_priv(sch);
+ struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
struct cbq_class *cl = (struct cbq_class*)arg;
if (cl->filters || cl->children || cl == &q->link)
static struct tcf_proto **cbq_find_tcf(struct Qdisc *sch, unsigned long arg)
{
- struct cbq_sched_data *q = qdisc_priv(sch);
+ struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
struct cbq_class *cl = (struct cbq_class *)arg;
if (cl == NULL)
static unsigned long cbq_bind_filter(struct Qdisc *sch, unsigned long parent,
u32 classid)
{
- struct cbq_sched_data *q = qdisc_priv(sch);
+ struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
struct cbq_class *p = (struct cbq_class*)parent;
struct cbq_class *cl = cbq_class_lookup(q, classid);
static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
- struct cbq_sched_data *q = qdisc_priv(sch);
+ struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
unsigned h;
if (arg->stop)
#endif
-#define PRIV(sch) qdisc_priv(sch)
+#define PRIV(sch) ((struct dsmark_qdisc_data *) (sch)->data)
/*
tcf_destroy(tp);
}
qdisc_destroy(p->q);
+ p->q = &noop_qdisc;
kfree(p->mask);
}
static int
bfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch)
{
- struct fifo_sched_data *q = qdisc_priv(sch);
+ struct fifo_sched_data *q = (struct fifo_sched_data *)sch->data;
if (sch->stats.backlog + skb->len <= q->limit) {
__skb_queue_tail(&sch->q, skb);
static int
pfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch)
{
- struct fifo_sched_data *q = qdisc_priv(sch);
+ struct fifo_sched_data *q = (struct fifo_sched_data *)sch->data;
if (sch->q.qlen < q->limit) {
__skb_queue_tail(&sch->q, skb);
static int fifo_init(struct Qdisc *sch, struct rtattr *opt)
{
- struct fifo_sched_data *q = qdisc_priv(sch);
+ struct fifo_sched_data *q = (void*)sch->data;
if (opt == NULL) {
unsigned int limit = sch->dev->tx_queue_len ? : 1;
static int fifo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
- struct fifo_sched_data *q = qdisc_priv(sch);
+ struct fifo_sched_data *q = (void*)sch->data;
unsigned char *b = skb->tail;
struct tc_fifo_qopt opt;
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
-#include <linux/list.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
The idea is the following:
- enqueue, dequeue are serialized via top level device
spinlock dev->queue_lock.
- - tree walking is protected by read_lock_bh(qdisc_tree_lock)
+ - tree walking is protected by read_lock(qdisc_tree_lock)
and this lock is used only in process context.
- - updates to tree are made under rtnl semaphore or
- from softirq context (__qdisc_destroy rcu-callback)
- hence this lock needs local bh disabling.
+ - updates to tree are made only under rtnl semaphore,
+ hence this lock may be made without local bh disabling.
qdisc_tree_lock must be grabbed BEFORE dev->queue_lock!
*/
rwlock_t qdisc_tree_lock = RW_LOCK_UNLOCKED;
-void qdisc_lock_tree(struct net_device *dev)
-{
- write_lock_bh(&qdisc_tree_lock);
- spin_lock_bh(&dev->queue_lock);
-}
-
-void qdisc_unlock_tree(struct net_device *dev)
-{
- spin_unlock_bh(&dev->queue_lock);
- write_unlock_bh(&qdisc_tree_lock);
-}
-
/*
dev->queue_lock serializes queue accesses for this device
AND dev->qdisc pointer itself.
static int
pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
{
- struct sk_buff_head *list = qdisc_priv(qdisc);
+ struct sk_buff_head *list;
- list += prio2band[skb->priority&TC_PRIO_MAX];
+ list = ((struct sk_buff_head*)qdisc->data) +
+ prio2band[skb->priority&TC_PRIO_MAX];
if (list->qlen < qdisc->dev->tx_queue_len) {
__skb_queue_tail(list, skb);
pfifo_fast_dequeue(struct Qdisc* qdisc)
{
int prio;
- struct sk_buff_head *list = qdisc_priv(qdisc);
+ struct sk_buff_head *list = ((struct sk_buff_head*)qdisc->data);
struct sk_buff *skb;
for (prio = 0; prio < 3; prio++, list++) {
static int
pfifo_fast_requeue(struct sk_buff *skb, struct Qdisc* qdisc)
{
- struct sk_buff_head *list = qdisc_priv(qdisc);
+ struct sk_buff_head *list;
- list += prio2band[skb->priority&TC_PRIO_MAX];
+ list = ((struct sk_buff_head*)qdisc->data) +
+ prio2band[skb->priority&TC_PRIO_MAX];
__skb_queue_head(list, skb);
qdisc->q.qlen++;
pfifo_fast_reset(struct Qdisc* qdisc)
{
int prio;
- struct sk_buff_head *list = qdisc_priv(qdisc);
+ struct sk_buff_head *list = ((struct sk_buff_head*)qdisc->data);
for (prio=0; prio < 3; prio++)
skb_queue_purge(list+prio);
static int pfifo_fast_init(struct Qdisc *qdisc, struct rtattr *opt)
{
int i;
- struct sk_buff_head *list = qdisc_priv(qdisc);
+ struct sk_buff_head *list;
+
+ list = ((struct sk_buff_head*)qdisc->data);
for (i=0; i<3; i++)
skb_queue_head_init(list+i);
struct Qdisc * qdisc_create_dflt(struct net_device *dev, struct Qdisc_ops *ops)
{
- void *p;
struct Qdisc *sch;
- int size;
-
- /* ensure that the Qdisc and the private data are 32-byte aligned */
- size = ((sizeof(*sch) + QDISC_ALIGN_CONST) & ~QDISC_ALIGN_CONST);
- size += ops->priv_size + QDISC_ALIGN_CONST;
+ int size = sizeof(*sch) + ops->priv_size;
- p = kmalloc(size, GFP_KERNEL);
- if (!p)
+ sch = kmalloc(size, GFP_KERNEL);
+ if (!sch)
return NULL;
- memset(p, 0, size);
+ memset(sch, 0, size);
- sch = (struct Qdisc *)(((unsigned long)p + QDISC_ALIGN_CONST)
- & ~QDISC_ALIGN_CONST);
- sch->padded = (char *)sch - (char *)p;
-
- INIT_LIST_HEAD(&sch->list);
skb_queue_head_init(&sch->q);
sch->ops = ops;
sch->enqueue = ops->enqueue;
sch->dequeue = ops->dequeue;
sch->dev = dev;
- dev_hold(dev);
sch->stats_lock = &dev->queue_lock;
atomic_set(&sch->refcnt, 1);
/* enqueue is accessed locklessly - make sure it's visible
if (!ops->init || ops->init(sch, NULL) == 0)
return sch;
- kfree(p);
+ kfree(sch);
return NULL;
}
#ifdef CONFIG_NET_ESTIMATOR
qdisc_kill_estimator(&qdisc->stats);
#endif
- write_lock(&qdisc_tree_lock);
if (ops->reset)
ops->reset(qdisc);
if (ops->destroy)
ops->destroy(qdisc);
- write_unlock(&qdisc_tree_lock);
module_put(ops->owner);
- dev_put(qdisc->dev);
if (!(qdisc->flags&TCQ_F_BUILTIN))
- kfree((char *) qdisc - qdisc->padded);
+ kfree(qdisc);
}
/* Under dev->queue_lock and BH! */
void qdisc_destroy(struct Qdisc *qdisc)
{
+ struct net_device *dev = qdisc->dev;
+
if (!atomic_dec_and_test(&qdisc->refcnt))
return;
- list_del(&qdisc->list);
+
+ if (dev) {
+ struct Qdisc *q, **qp;
+ for (qp = &qdisc->dev->qdisc_list; (q=*qp) != NULL; qp = &q->next) {
+ if (q == qdisc) {
+ *qp = q->next;
+ break;
+ }
+ }
+ }
+
call_rcu(&qdisc->q_rcu, __qdisc_destroy);
+
}
+
void dev_activate(struct net_device *dev)
{
/* No queueing discipline is attached to device;
printk(KERN_INFO "%s: activation failed\n", dev->name);
return;
}
- write_lock_bh(&qdisc_tree_lock);
- list_add_tail(&qdisc->list, &dev->qdisc_list);
- write_unlock_bh(&qdisc_tree_lock);
+
+ write_lock(&qdisc_tree_lock);
+ qdisc->next = dev->qdisc_list;
+ dev->qdisc_list = qdisc;
+ write_unlock(&qdisc_tree_lock);
+
} else {
qdisc = &noqueue_qdisc;
}
- write_lock_bh(&qdisc_tree_lock);
+ write_lock(&qdisc_tree_lock);
dev->qdisc_sleeping = qdisc;
- write_unlock_bh(&qdisc_tree_lock);
+ write_unlock(&qdisc_tree_lock);
}
spin_lock_bh(&dev->queue_lock);
void dev_init_scheduler(struct net_device *dev)
{
- qdisc_lock_tree(dev);
+ write_lock(&qdisc_tree_lock);
+ spin_lock_bh(&dev->queue_lock);
dev->qdisc = &noop_qdisc;
+ spin_unlock_bh(&dev->queue_lock);
dev->qdisc_sleeping = &noop_qdisc;
- INIT_LIST_HEAD(&dev->qdisc_list);
- qdisc_unlock_tree(dev);
+ dev->qdisc_list = NULL;
+ write_unlock(&qdisc_tree_lock);
dev_watchdog_init(dev);
}
{
struct Qdisc *qdisc;
- qdisc_lock_tree(dev);
+ write_lock(&qdisc_tree_lock);
+ spin_lock_bh(&dev->queue_lock);
qdisc = dev->qdisc_sleeping;
dev->qdisc = &noop_qdisc;
dev->qdisc_sleeping = &noop_qdisc;
qdisc_destroy(qdisc);
}
#endif
+ BUG_TRAP(dev->qdisc_list == NULL);
BUG_TRAP(!timer_pending(&dev->watchdog_timer));
- qdisc_unlock_tree(dev);
+ dev->qdisc_list = NULL;
+ spin_unlock_bh(&dev->queue_lock);
+ write_unlock(&qdisc_tree_lock);
}
EXPORT_SYMBOL(__netdev_watchdog_up);
EXPORT_SYMBOL(qdisc_destroy);
EXPORT_SYMBOL(qdisc_reset);
EXPORT_SYMBOL(qdisc_restart);
-EXPORT_SYMBOL(qdisc_lock_tree);
-EXPORT_SYMBOL(qdisc_unlock_tree);
+EXPORT_SYMBOL(qdisc_tree_lock);
{
psched_time_t now;
struct gred_sched_data *q=NULL;
- struct gred_sched *t= qdisc_priv(sch);
+ struct gred_sched *t= (struct gred_sched *)sch->data;
unsigned long qave=0;
int i=0;
gred_requeue(struct sk_buff *skb, struct Qdisc* sch)
{
struct gred_sched_data *q;
- struct gred_sched *t= qdisc_priv(sch);
+ struct gred_sched *t= (struct gred_sched *)sch->data;
q= t->tab[(skb->tc_index&0xf)];
/* error checking here -- probably unnecessary */
PSCHED_SET_PASTPERFECT(q->qidlestart);
{
struct sk_buff *skb;
struct gred_sched_data *q;
- struct gred_sched *t= qdisc_priv(sch);
+ struct gred_sched *t= (struct gred_sched *)sch->data;
skb = __skb_dequeue(&sch->q);
if (skb) {
struct sk_buff *skb;
struct gred_sched_data *q;
- struct gred_sched *t= qdisc_priv(sch);
+ struct gred_sched *t= (struct gred_sched *)sch->data;
skb = __skb_dequeue_tail(&sch->q);
if (skb) {
{
int i;
struct gred_sched_data *q;
- struct gred_sched *t= qdisc_priv(sch);
+ struct gred_sched *t= (struct gred_sched *)sch->data;
__skb_queue_purge(&sch->q);
static int gred_change(struct Qdisc *sch, struct rtattr *opt)
{
- struct gred_sched *table = qdisc_priv(sch);
+ struct gred_sched *table = (struct gred_sched *)sch->data;
struct gred_sched_data *q;
struct tc_gred_qopt *ctl;
struct tc_gred_sopt *sopt;
static int gred_init(struct Qdisc *sch, struct rtattr *opt)
{
- struct gred_sched *table = qdisc_priv(sch);
+ struct gred_sched *table = (struct gred_sched *)sch->data;
struct tc_gred_sopt *sopt;
struct rtattr *tb[TCA_GRED_STAB];
struct rtattr *tb2[TCA_GRED_DPS];
struct rtattr *rta;
struct tc_gred_qopt *opt = NULL ;
struct tc_gred_qopt *dst;
- struct gred_sched *table = qdisc_priv(sch);
+ struct gred_sched *table = (struct gred_sched *)sch->data;
struct gred_sched_data *q;
int i;
unsigned char *b = skb->tail;
static void gred_destroy(struct Qdisc *sch)
{
- struct gred_sched *table = qdisc_priv(sch);
+ struct gred_sched *table = (struct gred_sched *)sch->data;
int i;
for (i = 0;i < table->DPs; i++) {
/*
* macros
*/
-#ifdef CONFIG_NET_SCH_CLK_GETTIMEOFDAY
+#if PSCHED_CLOCK_SOURCE == PSCHED_GETTIMEOFDAY
#include <linux/time.h>
#undef PSCHED_GET_TIME
#define PSCHED_GET_TIME(stamp) \
* ism: (psched_us/byte) << ISM_SHIFT
* dx: psched_us
*
- * Clock source resolution (CONFIG_NET_SCH_CLK_*)
- * JIFFIES: for 48<=HZ<=1534 resolution is between 0.63us and 1.27us.
- * CPU: resolution is between 0.5us and 1us.
- * GETTIMEOFDAY: resolution is exactly 1us.
+ * Time source resolution
+ * PSCHED_JIFFIES: for 48<=HZ<=1534 resolution is between 0.63us and 1.27us.
+ * PSCHED_CPU: resolution is between 0.5us and 1us.
+ * PSCHED_GETTIMEOFDAY: resolution is exactly 1us.
*
* sm and ism are scaled in order to keep effective digits.
* SM_SHIFT and ISM_SHIFT are selected to keep at least 4 effective
static inline struct hfsc_class *
hfsc_find_class(u32 classid, struct Qdisc *sch)
{
- struct hfsc_sched *q = qdisc_priv(sch);
+ struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
struct hfsc_class *cl;
list_for_each_entry(cl, &q->clhash[hfsc_hash(classid)], hlist) {
hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
struct rtattr **tca, unsigned long *arg)
{
- struct hfsc_sched *q = qdisc_priv(sch);
+ struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
struct hfsc_class *cl = (struct hfsc_class *)*arg;
struct hfsc_class *parent = NULL;
struct rtattr *opt = tca[TCA_OPTIONS-1];
static void
hfsc_destroy_class(struct Qdisc *sch, struct hfsc_class *cl)
{
- struct hfsc_sched *q = qdisc_priv(sch);
+ struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
hfsc_destroy_filters(&cl->filter_list);
qdisc_destroy(cl->qdisc);
static int
hfsc_delete_class(struct Qdisc *sch, unsigned long arg)
{
- struct hfsc_sched *q = qdisc_priv(sch);
+ struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
struct hfsc_class *cl = (struct hfsc_class *)arg;
if (cl->level > 0 || cl->filter_cnt > 0 || cl == &q->root)
static struct hfsc_class *
hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qres)
{
- struct hfsc_sched *q = qdisc_priv(sch);
+ struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
struct hfsc_class *cl;
struct tcf_result res;
struct tcf_proto *tcf;
static struct tcf_proto **
hfsc_tcf_chain(struct Qdisc *sch, unsigned long arg)
{
- struct hfsc_sched *q = qdisc_priv(sch);
+ struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
struct hfsc_class *cl = (struct hfsc_class *)arg;
if (cl == NULL)
static void
hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
- struct hfsc_sched *q = qdisc_priv(sch);
+ struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
struct hfsc_class *cl;
unsigned int i;
static void
hfsc_schedule_watchdog(struct Qdisc *sch, u64 cur_time)
{
- struct hfsc_sched *q = qdisc_priv(sch);
+ struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
struct hfsc_class *cl;
u64 next_time = 0;
long delay;
static int
hfsc_init_qdisc(struct Qdisc *sch, struct rtattr *opt)
{
- struct hfsc_sched *q = qdisc_priv(sch);
+ struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
struct tc_hfsc_qopt *qopt;
unsigned int i;
static int
hfsc_change_qdisc(struct Qdisc *sch, struct rtattr *opt)
{
- struct hfsc_sched *q = qdisc_priv(sch);
+ struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
struct tc_hfsc_qopt *qopt;
if (opt == NULL || RTA_PAYLOAD(opt) < sizeof(*qopt))
static void
hfsc_reset_qdisc(struct Qdisc *sch)
{
- struct hfsc_sched *q = qdisc_priv(sch);
+ struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
struct hfsc_class *cl;
unsigned int i;
static void
hfsc_destroy_qdisc(struct Qdisc *sch)
{
- struct hfsc_sched *q = qdisc_priv(sch);
+ struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
struct hfsc_class *cl, *next;
unsigned int i;
static int
hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
{
- struct hfsc_sched *q = qdisc_priv(sch);
+ struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
unsigned char *b = skb->tail;
struct tc_hfsc_qopt qopt;
static struct sk_buff *
hfsc_dequeue(struct Qdisc *sch)
{
- struct hfsc_sched *q = qdisc_priv(sch);
+ struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
struct hfsc_class *cl;
struct sk_buff *skb;
u64 cur_time;
static int
hfsc_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
- struct hfsc_sched *q = qdisc_priv(sch);
+ struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
__skb_queue_head(&q->requeue, skb);
sch->q.qlen++;
static unsigned int
hfsc_drop(struct Qdisc *sch)
{
- struct hfsc_sched *q = qdisc_priv(sch);
+ struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
struct hfsc_class *cl;
unsigned int len;
#define HTB_HYSTERESIS 1/* whether to use mode hysteresis for speedup */
#define HTB_QLOCK(S) spin_lock_bh(&(S)->dev->queue_lock)
#define HTB_QUNLOCK(S) spin_unlock_bh(&(S)->dev->queue_lock)
-#define HTB_VER 0x30011 /* major must be matched with number suplied by TC as version */
+#define HTB_VER 0x30010 /* major must be matched with number suplied by TC as version */
#if HTB_VER >> 16 != TC_HTB_PROTOVER
#error "Mismatched sch_htb.c and pkt_sch.h"
struct htb_class_inner {
struct rb_root feed[TC_HTB_NUMPRIO]; /* feed trees */
struct rb_node *ptr[TC_HTB_NUMPRIO]; /* current class ptr */
- /* When class changes from state 1->2 and disconnects from
- parent's feed then we lost ptr value and start from the
- first child again. Here we store classid of the
- last valid ptr (used when ptr is NULL). */
- u32 last_ptr_id[TC_HTB_NUMPRIO];
} inner;
} un;
struct rb_node node[TC_HTB_NUMPRIO]; /* node for self or feed tree */
struct rb_root row[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
int row_mask[TC_HTB_MAXDEPTH];
struct rb_node *ptr[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
- u32 last_ptr_id[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
/* self wait list - roots of wait PQs per row */
struct rb_root wait_pq[TC_HTB_MAXDEPTH];
/* find class in global hash table using given handle */
static __inline__ struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
{
- struct htb_sched *q = qdisc_priv(sch);
+ struct htb_sched *q = (struct htb_sched *)sch->data;
struct list_head *p;
if (TC_H_MAJ(handle) != sch->handle)
return NULL;
static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch, int *qres)
{
- struct htb_sched *q = qdisc_priv(sch);
+ struct htb_sched *q = (struct htb_sched *)sch->data;
struct htb_class *cl;
struct tcf_result res;
struct tcf_proto *tcf;
int prio = ffz(~m);
m &= ~(1 << prio);
- if (p->un.inner.ptr[prio] == cl->node+prio) {
- /* we are removing child which is pointed to from
- parent feed - forget the pointer but remember
- classid */
- p->un.inner.last_ptr_id[prio] = cl->classid;
- p->un.inner.ptr[prio] = NULL;
- }
+ if (p->un.inner.ptr[prio] == cl->node+prio)
+ htb_next_rb_node(p->un.inner.ptr + prio);
htb_safe_rb_erase(cl->node + prio,p->un.inner.feed + prio);
static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
int ret = NET_XMIT_SUCCESS;
- struct htb_sched *q = qdisc_priv(sch);
+ struct htb_sched *q = (struct htb_sched *)sch->data;
struct htb_class *cl = htb_classify(skb,sch,&ret);
/* TODO: requeuing packet charges it to policers again !! */
static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
- struct htb_sched *q = qdisc_priv(sch);
+ struct htb_sched *q = (struct htb_sched *)sch->data;
int ret = NET_XMIT_SUCCESS;
struct htb_class *cl = htb_classify(skb,sch, &ret);
struct sk_buff *tskb;
static void htb_rate_timer(unsigned long arg)
{
struct Qdisc *sch = (struct Qdisc*)arg;
- struct htb_sched *q = qdisc_priv(sch);
+ struct htb_sched *q = (struct htb_sched *)sch->data;
struct list_head *p;
/* lock queue so that we can muck with it */
if (net_ratelimit())
printk(KERN_ERR "HTB: bad diff in charge, cl=%X diff=%lX now=%Lu then=%Lu j=%lu\n",
cl->classid, diff,
-#ifdef CONFIG_NET_SCH_CLK_GETTIMEOFDAY
- q->now.tv_sec * 1000000ULL + q->now.tv_usec,
- cl->t_c.tv_sec * 1000000ULL + cl->t_c.tv_usec,
-#else
(unsigned long long) q->now,
(unsigned long long) cl->t_c,
-#endif
q->jiffies);
diff = 1000;
}
if (net_ratelimit())
printk(KERN_ERR "HTB: bad diff in events, cl=%X diff=%lX now=%Lu then=%Lu j=%lu\n",
cl->classid, diff,
-#ifdef CONFIG_NET_SCH_CLK_GETTIMEOFDAY
- q->now.tv_sec * 1000000ULL + q->now.tv_usec,
- cl->t_c.tv_sec * 1000000ULL + cl->t_c.tv_usec,
-#else
(unsigned long long) q->now,
(unsigned long long) cl->t_c,
-#endif
q->jiffies);
diff = 1000;
}
return HZ/10;
}
-/* Returns class->node+prio from id-tree where classe's id is >= id. NULL
- is no such one exists. */
-static struct rb_node *
-htb_id_find_next_upper(int prio,struct rb_node *n,u32 id)
-{
- struct rb_node *r = NULL;
- while (n) {
- struct htb_class *cl = rb_entry(n,struct htb_class,node[prio]);
- if (id == cl->classid) return n;
-
- if (id > cl->classid) {
- n = n->rb_right;
- } else {
- r = n;
- n = n->rb_left;
- }
- }
- return r;
-}
-
/**
* htb_lookup_leaf - returns next leaf class in DRR order
*
* Find leaf where current feed pointers points to.
*/
static struct htb_class *
-htb_lookup_leaf(HTB_ARGQ struct rb_root *tree,int prio,struct rb_node **pptr,u32 *pid)
+htb_lookup_leaf(struct rb_root *tree,int prio,struct rb_node **pptr)
{
int i;
struct {
struct rb_node *root;
struct rb_node **pptr;
- u32 *pid;
} stk[TC_HTB_MAXDEPTH],*sp = stk;
BUG_TRAP(tree->rb_node);
sp->root = tree->rb_node;
sp->pptr = pptr;
- sp->pid = pid;
for (i = 0; i < 65535; i++) {
- HTB_DBG(4,2,"htb_lleaf ptr=%p pid=%X\n",*sp->pptr,*sp->pid);
-
- if (!*sp->pptr && *sp->pid) {
- /* ptr was invalidated but id is valid - try to recover
- the original or next ptr */
- *sp->pptr = htb_id_find_next_upper(prio,sp->root,*sp->pid);
- }
- *sp->pid = 0; /* ptr is valid now so that remove this hint as it
- can become out of date quickly */
if (!*sp->pptr) { /* we are at right end; rewind & go up */
*sp->pptr = sp->root;
while ((*sp->pptr)->rb_left)
return cl;
(++sp)->root = cl->un.inner.feed[prio].rb_node;
sp->pptr = cl->un.inner.ptr+prio;
- sp->pid = cl->un.inner.last_ptr_id+prio;
}
}
BUG_TRAP(0);
struct sk_buff *skb = NULL;
struct htb_class *cl,*start;
/* look initial class up in the row */
- start = cl = htb_lookup_leaf (HTB_PASSQ q->row[level]+prio,prio,
- q->ptr[level]+prio,q->last_ptr_id[level]+prio);
+ start = cl = htb_lookup_leaf (q->row[level]+prio,prio,q->ptr[level]+prio);
do {
next:
if ((q->row_mask[level] & (1 << prio)) == 0)
return NULL;
- next = htb_lookup_leaf (HTB_PASSQ q->row[level]+prio,
- prio,q->ptr[level]+prio,q->last_ptr_id[level]+prio);
-
+ next = htb_lookup_leaf (q->row[level]+prio,
+ prio,q->ptr[level]+prio);
if (cl == start) /* fix start if we just deleted it */
start = next;
cl = next;
}
q->nwc_hit++;
htb_next_rb_node((level?cl->parent->un.inner.ptr:q->ptr[0])+prio);
- cl = htb_lookup_leaf (HTB_PASSQ q->row[level]+prio,prio,q->ptr[level]+prio,
- q->last_ptr_id[level]+prio);
-
+ cl = htb_lookup_leaf (q->row[level]+prio,prio,q->ptr[level]+prio);
} while (cl != start);
if (likely(skb != NULL)) {
static void htb_delay_by(struct Qdisc *sch,long delay)
{
- struct htb_sched *q = qdisc_priv(sch);
+ struct htb_sched *q = (struct htb_sched *)sch->data;
if (delay <= 0) delay = 1;
if (unlikely(delay > 5*HZ)) {
if (net_ratelimit())
static struct sk_buff *htb_dequeue(struct Qdisc *sch)
{
struct sk_buff *skb = NULL;
- struct htb_sched *q = qdisc_priv(sch);
+ struct htb_sched *q = (struct htb_sched *)sch->data;
int level;
long min_delay;
#ifdef HTB_DEBUG
/* try to drop from each class (by prio) until one succeed */
static unsigned int htb_drop(struct Qdisc* sch)
{
- struct htb_sched *q = qdisc_priv(sch);
+ struct htb_sched *q = (struct htb_sched *)sch->data;
int prio;
for (prio = TC_HTB_NUMPRIO - 1; prio >= 0; prio--) {
/* always caled under BH & queue lock */
static void htb_reset(struct Qdisc* sch)
{
- struct htb_sched *q = qdisc_priv(sch);
+ struct htb_sched *q = (struct htb_sched *)sch->data;
int i;
HTB_DBG(0,1,"htb_reset sch=%p, handle=%X\n",sch,sch->handle);
static int htb_init(struct Qdisc *sch, struct rtattr *opt)
{
- struct htb_sched *q = qdisc_priv(sch);
+ struct htb_sched *q = (struct htb_sched*)sch->data;
struct rtattr *tb[TCA_HTB_INIT];
struct tc_htb_glob *gopt;
int i;
static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
{
- struct htb_sched *q = qdisc_priv(sch);
+ struct htb_sched *q = (struct htb_sched*)sch->data;
unsigned char *b = skb->tail;
struct rtattr *rta;
struct tc_htb_glob gopt;
struct sk_buff *skb, struct tcmsg *tcm)
{
#ifdef HTB_DEBUG
- struct htb_sched *q = qdisc_priv(sch);
+ struct htb_sched *q = (struct htb_sched*)sch->data;
#endif
struct htb_class *cl = (struct htb_class*)arg;
unsigned char *b = skb->tail;
sch_tree_lock(sch);
if ((*old = xchg(&cl->un.leaf.q, new)) != NULL) {
if (cl->prio_activity)
- htb_deactivate (qdisc_priv(sch),cl);
+ htb_deactivate ((struct htb_sched*)sch->data,cl);
/* TODO: is it correct ? Why CBQ doesn't do it ? */
sch->q.qlen -= (*old)->q.qlen;
static unsigned long htb_get(struct Qdisc *sch, u32 classid)
{
#ifdef HTB_DEBUG
- struct htb_sched *q = qdisc_priv(sch);
+ struct htb_sched *q = (struct htb_sched *)sch->data;
#endif
struct htb_class *cl = htb_find(classid,sch);
HTB_DBG(0,1,"htb_get clid=%X q=%p cl=%p ref=%d\n",classid,q,cl,cl?cl->refcnt:0);
static void htb_destroy_class(struct Qdisc* sch,struct htb_class *cl)
{
- struct htb_sched *q = qdisc_priv(sch);
+ struct htb_sched *q = (struct htb_sched *)sch->data;
HTB_DBG(0,1,"htb_destrycls clid=%X ref=%d\n", cl?cl->classid:0,cl?cl->refcnt:0);
if (!cl->level) {
BUG_TRAP(cl->un.leaf.q);
/* always caled under BH & queue lock */
static void htb_destroy(struct Qdisc* sch)
{
- struct htb_sched *q = qdisc_priv(sch);
+ struct htb_sched *q = (struct htb_sched *)sch->data;
HTB_DBG(0,1,"htb_destroy q=%p\n",q);
del_timer_sync (&q->timer);
static int htb_delete(struct Qdisc *sch, unsigned long arg)
{
- struct htb_sched *q = qdisc_priv(sch);
+ struct htb_sched *q = (struct htb_sched *)sch->data;
struct htb_class *cl = (struct htb_class*)arg;
HTB_DBG(0,1,"htb_delete q=%p cl=%X ref=%d\n",q,cl?cl->classid:0,cl?cl->refcnt:0);
static void htb_put(struct Qdisc *sch, unsigned long arg)
{
#ifdef HTB_DEBUG
- struct htb_sched *q = qdisc_priv(sch);
+ struct htb_sched *q = (struct htb_sched *)sch->data;
#endif
struct htb_class *cl = (struct htb_class*)arg;
HTB_DBG(0,1,"htb_put q=%p cl=%X ref=%d\n",q,cl?cl->classid:0,cl?cl->refcnt:0);
u32 parentid, struct rtattr **tca, unsigned long *arg)
{
int err = -EINVAL;
- struct htb_sched *q = qdisc_priv(sch);
+ struct htb_sched *q = (struct htb_sched *)sch->data;
struct htb_class *cl = (struct htb_class*)*arg,*parent;
struct rtattr *opt = tca[TCA_OPTIONS-1];
struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
static struct tcf_proto **htb_find_tcf(struct Qdisc *sch, unsigned long arg)
{
- struct htb_sched *q = qdisc_priv(sch);
+ struct htb_sched *q = (struct htb_sched *)sch->data;
struct htb_class *cl = (struct htb_class *)arg;
struct tcf_proto **fl = cl ? &cl->filter_list : &q->filter_list;
HTB_DBG(0,2,"htb_tcf q=%p clid=%X fref=%d fl=%p\n",q,cl?cl->classid:0,cl?cl->filter_cnt:q->filter_cnt,*fl);
static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
u32 classid)
{
- struct htb_sched *q = qdisc_priv(sch);
+ struct htb_sched *q = (struct htb_sched *)sch->data;
struct htb_class *cl = htb_find (classid,sch);
HTB_DBG(0,2,"htb_bind q=%p clid=%X cl=%p fref=%d\n",q,classid,cl,cl?cl->filter_cnt:q->filter_cnt);
/*if (cl && !cl->level) return 0;
static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
{
- struct htb_sched *q = qdisc_priv(sch);
+ struct htb_sched *q = (struct htb_sched *)sch->data;
struct htb_class *cl = (struct htb_class *)arg;
HTB_DBG(0,2,"htb_unbind q=%p cl=%p fref=%d\n",q,cl,cl?cl->filter_cnt:q->filter_cnt);
if (cl)
static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
- struct htb_sched *q = qdisc_priv(sch);
+ struct htb_sched *q = (struct htb_sched *)sch->data;
int i;
if (arg->stop)
#endif
-#define PRIV(sch) qdisc_priv(sch)
+#define PRIV(sch) ((struct ingress_qdisc_data *) (sch)->data)
/* Thanks to Doron Oz for this hack
*/
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
- struct netem_sched_data *q = qdisc_priv(sch);
+ struct netem_sched_data *q = (struct netem_sched_data *)sch->data;
struct netem_skb_cb *cb = (struct netem_skb_cb *)skb->cb;
psched_time_t now;
long delay;
PSCHED_TADD2(now, delay, cb->time_to_send);
/* Always queue at tail to keep packets in order */
- if (likely(q->delayed.qlen < q->limit)) {
- __skb_queue_tail(&q->delayed, skb);
- sch->q.qlen++;
- sch->stats.bytes += skb->len;
- sch->stats.packets++;
- return 0;
- }
-
- sch->stats.drops++;
- kfree_skb(skb);
- return NET_XMIT_DROP;
+ __skb_queue_tail(&q->delayed, skb);
+ sch->q.qlen++;
+ sch->stats.bytes += skb->len;
+ sch->stats.packets++;
+ return 0;
}
/* Requeue packets but don't change time stamp */
static int netem_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
- struct netem_sched_data *q = qdisc_priv(sch);
+ struct netem_sched_data *q = (struct netem_sched_data *)sch->data;
int ret;
if ((ret = q->qdisc->ops->requeue(skb, q->qdisc)) == 0)
static unsigned int netem_drop(struct Qdisc* sch)
{
- struct netem_sched_data *q = qdisc_priv(sch);
+ struct netem_sched_data *q = (struct netem_sched_data *)sch->data;
unsigned int len;
if ((len = q->qdisc->ops->drop(q->qdisc)) != 0) {
*/
static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
- struct netem_sched_data *q = qdisc_priv(sch);
+ struct netem_sched_data *q = (struct netem_sched_data *)sch->data;
struct sk_buff *skb;
psched_time_t now;
static void netem_reset(struct Qdisc *sch)
{
- struct netem_sched_data *q = qdisc_priv(sch);
+ struct netem_sched_data *q = (struct netem_sched_data *)sch->data;
qdisc_reset(q->qdisc);
skb_queue_purge(&q->delayed);
static int netem_change(struct Qdisc *sch, struct rtattr *opt)
{
- struct netem_sched_data *q = qdisc_priv(sch);
+ struct netem_sched_data *q = (struct netem_sched_data *)sch->data;
struct tc_netem_qopt *qopt = RTA_DATA(opt);
struct Qdisc *child;
int ret;
static int netem_init(struct Qdisc *sch, struct rtattr *opt)
{
- struct netem_sched_data *q = qdisc_priv(sch);
+ struct netem_sched_data *q = (struct netem_sched_data *)sch->data;
if (!opt)
return -EINVAL;
static void netem_destroy(struct Qdisc *sch)
{
- struct netem_sched_data *q = qdisc_priv(sch);
+ struct netem_sched_data *q = (struct netem_sched_data *)sch->data;
del_timer_sync(&q->timer);
- qdisc_destroy(q->qdisc);
}
static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
- struct netem_sched_data *q = qdisc_priv(sch);
+ struct netem_sched_data *q = (struct netem_sched_data *)sch->data;
unsigned char *b = skb->tail;
struct tc_netem_qopt qopt;
qopt.latency = q->latency;
qopt.jitter = q->jitter;
- qopt.limit = q->limit;
+ qopt.limit = sch->dev->tx_queue_len;
qopt.loss = q->loss;
qopt.gap = q->gap;
return -1;
}
-static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
- struct sk_buff *skb, struct tcmsg *tcm)
-{
- struct netem_sched_data *q = qdisc_priv(sch);
-
- if (cl != 1) /* only one class */
- return -ENOENT;
-
- tcm->tcm_handle |= TC_H_MIN(1);
- tcm->tcm_info = q->qdisc->handle;
-
- return 0;
-}
-
-static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
- struct Qdisc **old)
-{
- struct netem_sched_data *q = qdisc_priv(sch);
-
- if (new == NULL)
- new = &noop_qdisc;
-
- sch_tree_lock(sch);
- *old = xchg(&q->qdisc, new);
- qdisc_reset(*old);
- sch->q.qlen = 0;
- sch_tree_unlock(sch);
-
- return 0;
-}
-
-static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
-{
- struct netem_sched_data *q = qdisc_priv(sch);
- return q->qdisc;
-}
-
-static unsigned long netem_get(struct Qdisc *sch, u32 classid)
-{
- return 1;
-}
-
-static void netem_put(struct Qdisc *sch, unsigned long arg)
-{
-}
-
-static int netem_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
- struct rtattr **tca, unsigned long *arg)
-{
- return -ENOSYS;
-}
-
-static int netem_delete(struct Qdisc *sch, unsigned long arg)
-{
- return -ENOSYS;
-}
-
-static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
-{
- if (!walker->stop) {
- if (walker->count >= walker->skip)
- if (walker->fn(sch, 1, walker) < 0) {
- walker->stop = 1;
- return;
- }
- walker->count++;
- }
-}
-
-static struct tcf_proto **netem_find_tcf(struct Qdisc *sch, unsigned long cl)
-{
- return NULL;
-}
-
-static struct Qdisc_class_ops netem_class_ops = {
- .graft = netem_graft,
- .leaf = netem_leaf,
- .get = netem_get,
- .put = netem_put,
- .change = netem_change_class,
- .delete = netem_delete,
- .walk = netem_walk,
- .tcf_chain = netem_find_tcf,
- .dump = netem_dump_class,
-};
-
static struct Qdisc_ops netem_qdisc_ops = {
.id = "netem",
- .cl_ops = &netem_class_ops,
.priv_size = sizeof(struct netem_sched_data),
.enqueue = netem_enqueue,
.dequeue = netem_dequeue,
struct Qdisc *prio_classify(struct sk_buff *skb, struct Qdisc *sch,int *r)
{
- struct prio_sched_data *q = qdisc_priv(sch);
+ struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
u32 band = skb->priority;
struct tcf_result res;
prio_dequeue(struct Qdisc* sch)
{
struct sk_buff *skb;
- struct prio_sched_data *q = qdisc_priv(sch);
+ struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
int prio;
struct Qdisc *qdisc;
static unsigned int prio_drop(struct Qdisc* sch)
{
- struct prio_sched_data *q = qdisc_priv(sch);
+ struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
int prio;
unsigned int len;
struct Qdisc *qdisc;
prio_reset(struct Qdisc* sch)
{
int prio;
- struct prio_sched_data *q = qdisc_priv(sch);
+ struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
for (prio=0; prio<q->bands; prio++)
qdisc_reset(q->queues[prio]);
prio_destroy(struct Qdisc* sch)
{
int prio;
- struct prio_sched_data *q = qdisc_priv(sch);
+ struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
struct tcf_proto *tp;
while ((tp = q->filter_list) != NULL) {
tcf_destroy(tp);
}
- for (prio=0; prio<q->bands; prio++)
+ for (prio=0; prio<q->bands; prio++) {
qdisc_destroy(q->queues[prio]);
+ q->queues[prio] = &noop_qdisc;
+ }
}
static int prio_tune(struct Qdisc *sch, struct rtattr *opt)
{
- struct prio_sched_data *q = qdisc_priv(sch);
+ struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
struct tc_prio_qopt *qopt = RTA_DATA(opt);
int i;
static int prio_init(struct Qdisc *sch, struct rtattr *opt)
{
- struct prio_sched_data *q = qdisc_priv(sch);
+ struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
int i;
for (i=0; i<TCQ_PRIO_BANDS; i++)
static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
- struct prio_sched_data *q = qdisc_priv(sch);
+ struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
unsigned char *b = skb->tail;
struct tc_prio_qopt opt;
static int prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
struct Qdisc **old)
{
- struct prio_sched_data *q = qdisc_priv(sch);
+ struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
unsigned long band = arg - 1;
if (band >= q->bands)
static struct Qdisc *
prio_leaf(struct Qdisc *sch, unsigned long arg)
{
- struct prio_sched_data *q = qdisc_priv(sch);
+ struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
unsigned long band = arg - 1;
if (band >= q->bands)
static unsigned long prio_get(struct Qdisc *sch, u32 classid)
{
- struct prio_sched_data *q = qdisc_priv(sch);
+ struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
unsigned long band = TC_H_MIN(classid);
if (band - 1 >= q->bands)
static int prio_change(struct Qdisc *sch, u32 handle, u32 parent, struct rtattr **tca, unsigned long *arg)
{
unsigned long cl = *arg;
- struct prio_sched_data *q = qdisc_priv(sch);
+ struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
if (cl - 1 > q->bands)
return -ENOENT;
static int prio_delete(struct Qdisc *sch, unsigned long cl)
{
- struct prio_sched_data *q = qdisc_priv(sch);
+ struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
if (cl - 1 > q->bands)
return -ENOENT;
return 0;
static int prio_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *skb,
struct tcmsg *tcm)
{
- struct prio_sched_data *q = qdisc_priv(sch);
+ struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
if (cl - 1 > q->bands)
return -ENOENT;
static void prio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
- struct prio_sched_data *q = qdisc_priv(sch);
+ struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
int prio;
if (arg->stop)
static struct tcf_proto ** prio_find_tcf(struct Qdisc *sch, unsigned long cl)
{
- struct prio_sched_data *q = qdisc_priv(sch);
+ struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
if (cl)
return NULL;
static int
red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
{
- struct red_sched_data *q = qdisc_priv(sch);
+ struct red_sched_data *q = (struct red_sched_data *)sch->data;
psched_time_t now;
static int
red_requeue(struct sk_buff *skb, struct Qdisc* sch)
{
- struct red_sched_data *q = qdisc_priv(sch);
+ struct red_sched_data *q = (struct red_sched_data *)sch->data;
PSCHED_SET_PASTPERFECT(q->qidlestart);
red_dequeue(struct Qdisc* sch)
{
struct sk_buff *skb;
- struct red_sched_data *q = qdisc_priv(sch);
+ struct red_sched_data *q = (struct red_sched_data *)sch->data;
skb = __skb_dequeue(&sch->q);
if (skb) {
static unsigned int red_drop(struct Qdisc* sch)
{
struct sk_buff *skb;
- struct red_sched_data *q = qdisc_priv(sch);
+ struct red_sched_data *q = (struct red_sched_data *)sch->data;
skb = __skb_dequeue_tail(&sch->q);
if (skb) {
static void red_reset(struct Qdisc* sch)
{
- struct red_sched_data *q = qdisc_priv(sch);
+ struct red_sched_data *q = (struct red_sched_data *)sch->data;
__skb_queue_purge(&sch->q);
sch->stats.backlog = 0;
static int red_change(struct Qdisc *sch, struct rtattr *opt)
{
- struct red_sched_data *q = qdisc_priv(sch);
+ struct red_sched_data *q = (struct red_sched_data *)sch->data;
struct rtattr *tb[TCA_RED_STAB];
struct tc_red_qopt *ctl;
static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
{
- struct red_sched_data *q = qdisc_priv(sch);
+ struct red_sched_data *q = (struct red_sched_data *)sch->data;
unsigned char *b = skb->tail;
struct rtattr *rta;
struct tc_red_qopt opt;
return -1;
}
+static void red_destroy(struct Qdisc *sch)
+{
+}
+
static struct Qdisc_ops red_qdisc_ops = {
.next = NULL,
.cl_ops = NULL,
.drop = red_drop,
.init = red_init,
.reset = red_reset,
+ .destroy = red_destroy,
.change = red_change,
.dump = red_dump,
.owner = THIS_MODULE,
static unsigned int sfq_drop(struct Qdisc *sch)
{
- struct sfq_sched_data *q = qdisc_priv(sch);
+ struct sfq_sched_data *q = (struct sfq_sched_data *)sch->data;
sfq_index d = q->max_depth;
struct sk_buff *skb;
unsigned int len;
static int
sfq_enqueue(struct sk_buff *skb, struct Qdisc* sch)
{
- struct sfq_sched_data *q = qdisc_priv(sch);
+ struct sfq_sched_data *q = (struct sfq_sched_data *)sch->data;
unsigned hash = sfq_hash(q, skb);
sfq_index x;
static int
sfq_requeue(struct sk_buff *skb, struct Qdisc* sch)
{
- struct sfq_sched_data *q = qdisc_priv(sch);
+ struct sfq_sched_data *q = (struct sfq_sched_data *)sch->data;
unsigned hash = sfq_hash(q, skb);
sfq_index x;
static struct sk_buff *
sfq_dequeue(struct Qdisc* sch)
{
- struct sfq_sched_data *q = qdisc_priv(sch);
+ struct sfq_sched_data *q = (struct sfq_sched_data *)sch->data;
struct sk_buff *skb;
sfq_index a, old_a;
static void sfq_perturbation(unsigned long arg)
{
struct Qdisc *sch = (struct Qdisc*)arg;
- struct sfq_sched_data *q = qdisc_priv(sch);
+ struct sfq_sched_data *q = (struct sfq_sched_data *)sch->data;
q->perturbation = net_random()&0x1F;
q->perturb_timer.expires = jiffies + q->perturb_period;
static int sfq_change(struct Qdisc *sch, struct rtattr *opt)
{
- struct sfq_sched_data *q = qdisc_priv(sch);
+ struct sfq_sched_data *q = (struct sfq_sched_data *)sch->data;
struct tc_sfq_qopt *ctl = RTA_DATA(opt);
if (opt->rta_len < RTA_LENGTH(sizeof(*ctl)))
static int sfq_init(struct Qdisc *sch, struct rtattr *opt)
{
- struct sfq_sched_data *q = qdisc_priv(sch);
+ struct sfq_sched_data *q = (struct sfq_sched_data *)sch->data;
int i;
init_timer(&q->perturb_timer);
static void sfq_destroy(struct Qdisc *sch)
{
- struct sfq_sched_data *q = qdisc_priv(sch);
+ struct sfq_sched_data *q = (struct sfq_sched_data *)sch->data;
del_timer(&q->perturb_timer);
}
static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
- struct sfq_sched_data *q = qdisc_priv(sch);
+ struct sfq_sched_data *q = (struct sfq_sched_data *)sch->data;
unsigned char *b = skb->tail;
struct tc_sfq_qopt opt;
static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
{
- struct tbf_sched_data *q = qdisc_priv(sch);
+ struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
int ret;
if (skb->len > q->max_size) {
static int tbf_requeue(struct sk_buff *skb, struct Qdisc* sch)
{
- struct tbf_sched_data *q = qdisc_priv(sch);
+ struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
int ret;
if ((ret = q->qdisc->ops->requeue(skb, q->qdisc)) == 0)
static unsigned int tbf_drop(struct Qdisc* sch)
{
- struct tbf_sched_data *q = qdisc_priv(sch);
+ struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
unsigned int len;
if ((len = q->qdisc->ops->drop(q->qdisc)) != 0) {
static struct sk_buff *tbf_dequeue(struct Qdisc* sch)
{
- struct tbf_sched_data *q = qdisc_priv(sch);
+ struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
struct sk_buff *skb;
skb = q->qdisc->dequeue(q->qdisc);
static void tbf_reset(struct Qdisc* sch)
{
- struct tbf_sched_data *q = qdisc_priv(sch);
+ struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
qdisc_reset(q->qdisc);
sch->q.qlen = 0;
static int tbf_change(struct Qdisc* sch, struct rtattr *opt)
{
int err = -EINVAL;
- struct tbf_sched_data *q = qdisc_priv(sch);
+ struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
struct rtattr *tb[TCA_TBF_PTAB];
struct tc_tbf_qopt *qopt;
struct qdisc_rate_table *rtab = NULL;
static int tbf_init(struct Qdisc* sch, struct rtattr *opt)
{
- struct tbf_sched_data *q = qdisc_priv(sch);
+ struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
if (opt == NULL)
return -EINVAL;
static void tbf_destroy(struct Qdisc *sch)
{
- struct tbf_sched_data *q = qdisc_priv(sch);
+ struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
del_timer(&q->wd_timer);
qdisc_put_rtab(q->R_tab);
qdisc_destroy(q->qdisc);
+ q->qdisc = &noop_qdisc;
}
static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
{
- struct tbf_sched_data *q = qdisc_priv(sch);
+ struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
unsigned char *b = skb->tail;
struct rtattr *rta;
struct tc_tbf_qopt opt;
static int tbf_dump_class(struct Qdisc *sch, unsigned long cl,
struct sk_buff *skb, struct tcmsg *tcm)
{
- struct tbf_sched_data *q = qdisc_priv(sch);
+ struct tbf_sched_data *q = (struct tbf_sched_data*)sch->data;
if (cl != 1) /* only one class */
return -ENOENT;
static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
struct Qdisc **old)
{
- struct tbf_sched_data *q = qdisc_priv(sch);
+ struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
if (new == NULL)
new = &noop_qdisc;
static struct Qdisc *tbf_leaf(struct Qdisc *sch, unsigned long arg)
{
- struct tbf_sched_data *q = qdisc_priv(sch);
+ struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
return q->qdisc;
}
struct sk_buff_head q;
};
-#define NEXT_SLAVE(q) (((struct teql_sched_data*)qdisc_priv(q))->next)
+#define NEXT_SLAVE(q) (((struct teql_sched_data*)((q)->data))->next)
#define FMASK (IFF_BROADCAST|IFF_POINTOPOINT|IFF_BROADCAST)
teql_enqueue(struct sk_buff *skb, struct Qdisc* sch)
{
struct net_device *dev = sch->dev;
- struct teql_sched_data *q = qdisc_priv(sch);
+ struct teql_sched_data *q = (struct teql_sched_data *)sch->data;
__skb_queue_tail(&q->q, skb);
if (q->q.qlen <= dev->tx_queue_len) {
static int
teql_requeue(struct sk_buff *skb, struct Qdisc* sch)
{
- struct teql_sched_data *q = qdisc_priv(sch);
+ struct teql_sched_data *q = (struct teql_sched_data *)sch->data;
__skb_queue_head(&q->q, skb);
return 0;
static struct sk_buff *
teql_dequeue(struct Qdisc* sch)
{
- struct teql_sched_data *dat = qdisc_priv(sch);
+ struct teql_sched_data *dat = (struct teql_sched_data *)sch->data;
struct sk_buff *skb;
skb = __skb_dequeue(&dat->q);
static void
teql_reset(struct Qdisc* sch)
{
- struct teql_sched_data *dat = qdisc_priv(sch);
+ struct teql_sched_data *dat = (struct teql_sched_data *)sch->data;
skb_queue_purge(&dat->q);
sch->q.qlen = 0;
teql_destroy(struct Qdisc* sch)
{
struct Qdisc *q, *prev;
- struct teql_sched_data *dat = qdisc_priv(sch);
+ struct teql_sched_data *dat = (struct teql_sched_data *)sch->data;
struct teql_master *master = dat->m;
if ((prev = master->slaves) != NULL) {
{
struct net_device *dev = sch->dev;
struct teql_master *m = (struct teql_master*)sch->ops;
- struct teql_sched_data *q = qdisc_priv(sch);
+ struct teql_sched_data *q = (struct teql_sched_data *)sch->data;
if (dev->hard_header_len > m->dev->hard_header_len)
return -EINVAL;
static int
__teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *dev)
{
- struct teql_sched_data *q = qdisc_priv(dev->qdisc);
+ struct teql_sched_data *q = (void*)dev->qdisc->data;
struct neighbour *mn = skb->dst->neighbour;
struct neighbour *n = q->ncache;
config IP_SCTP
tristate "The SCTP Protocol (EXPERIMENTAL)"
depends on IPV6 || IPV6=n
- select CRYPTO if SCTP_HMAC_SHA1 || SCTP_HMAC_MD5
- select CRYPTO_HMAC if SCTP_HMAC_SHA1 || SCTP_HMAC_MD5
- select CRYPTO_SHA1 if SCTP_HMAC_SHA1
- select CRYPTO_MD5 if SCTP_HMAC_MD5
---help---
Stream Control Transmission Protocol
config SCTP_HMAC_SHA1
bool "HMAC-SHA1"
+ select CRYPTO
+ select CRYPTO_HMAC
+ select CRYPTO_SHA1
help
Enable the use of HMAC-SHA1 during association establishment. It
is advised to use either HMAC-MD5 or HMAC-SHA1.
config SCTP_HMAC_MD5
bool "HMAC-MD5"
+ select CRYPTO
+ select CRYPTO_HMAC
+ select CRYPTO_MD5
help
Enable the use of HMAC-MD5 during association establishment. It is
advised to use either HMAC-MD5 or HMAC-SHA1.
if (sctp_chunk_is_data(chunk))
asoc->peer.last_data_from = chunk->transport;
else
- SCTP_INC_STATS(SCTP_MIB_INCTRLCHUNKS);
+ SCTP_INC_STATS(SctpInCtrlChunks);
if (chunk->transport)
chunk->transport->last_time_heard = jiffies;
case SCTP_STATE_ESTABLISHED:
case SCTP_STATE_SHUTDOWN_PENDING:
case SCTP_STATE_SHUTDOWN_RECEIVED:
- case SCTP_STATE_SHUTDOWN_SENT:
if ((asoc->rwnd > asoc->a_rwnd) &&
((asoc->rwnd - asoc->a_rwnd) >=
min_t(__u32, (asoc->base.sk->sk_rcvbuf >> 1), asoc->pmtu)))
offset = 0;
if ((whole > 1) || (whole && over))
- SCTP_INC_STATS_USER(SCTP_MIB_FRAGUSRMSGS);
+ SCTP_INC_STATS_USER(SctpFragUsrMsgs);
/* Create chunks for all the full sized DATA chunks. */
for (i=0, len=first_len; i < whole; i++) {
if (asoc && sctp_chunk_is_data(chunk))
asoc->peer.last_data_from = chunk->transport;
else
- SCTP_INC_STATS(SCTP_MIB_INCTRLCHUNKS);
+ SCTP_INC_STATS(SctpInCtrlChunks);
if (chunk->transport)
chunk->transport->last_time_heard = jiffies;
if (val != cmp) {
/* CRC failure, dump it. */
- SCTP_INC_STATS_BH(SCTP_MIB_CHECKSUMERRORS);
+ SCTP_INC_STATS_BH(SctpChecksumErrors);
return -1;
}
return 0;
if (skb->pkt_type!=PACKET_HOST)
goto discard_it;
- SCTP_INC_STATS_BH(SCTP_MIB_INSCTPPACKS);
+ SCTP_INC_STATS_BH(SctpInSCTPPacks);
sh = (struct sctphdr *) skb->h.raw;
if (!asoc) {
ep = __sctp_rcv_lookup_endpoint(&dest);
if (sctp_rcv_ootb(skb)) {
- SCTP_INC_STATS_BH(SCTP_MIB_OUTOFBLUES);
+ SCTP_INC_STATS_BH(SctpOutOfBlues);
goto discard_release;
}
}
if (asoc) {
if (ntohl(sctphdr->vtag) != asoc->c.peer_vtag) {
- ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
+ ICMP_INC_STATS_BH(IcmpInErrors);
goto out;
}
sk = asoc->base.sk;
* servers this needs to be solved differently.
*/
if (sock_owned_by_user(sk))
- NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);
+ NET_INC_STATS_BH(LockDroppedIcmps);
*epp = ep;
*app = asoc;
int err;
if (skb->len < ((iph->ihl << 2) + 8)) {
- ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
+ ICMP_INC_STATS_BH(IcmpInErrors);
return;
}
skb->nh.raw = saveip;
skb->h.raw = savesctp;
if (!sk) {
- ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
+ ICMP_INC_STATS_BH(IcmpInErrors);
return;
}
/* Warning: The sock lock is held. Remember to call
skb->nh.raw = saveip;
skb->h.raw = savesctp;
if (!sk) {
- ICMP6_INC_STATS_BH(idev, ICMP6_MIB_INERRORS);
+ ICMP6_INC_STATS_BH(idev, Icmp6InErrors);
goto out;
}
__FUNCTION__, skb, skb->len,
NIP6(fl.fl6_src), NIP6(fl.fl6_dst));
- SCTP_INC_STATS(SCTP_MIB_OUTSCTPPACKS);
+ SCTP_INC_STATS(SctpOutSCTPPacks);
return ip6_xmit(sk, skb, &fl, np->opt, ipfragok);
}
return err;
no_route:
kfree_skb(nskb);
- IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
+ IP_INC_STATS_BH(OutNoRoutes);
/* FIXME: Returning the 'err' will effect all the associations
* associated with a socket, although only one of the paths of the
sctp_outq_tail_data(q, chunk);
if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
- SCTP_INC_STATS(SCTP_MIB_OUTUNORDERCHUNKS);
+ SCTP_INC_STATS(SctpOutUnorderChunks);
else
- SCTP_INC_STATS(SCTP_MIB_OUTORDERCHUNKS);
+ SCTP_INC_STATS(SctpOutOrderChunks);
q->empty = 0;
break;
};
} else {
__skb_queue_tail(&q->control, (struct sk_buff *) chunk);
- SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+ SCTP_INC_STATS(SctpOutCtrlChunks);
}
if (error < 0)
int rtx_timeout, int *start_timer)
{
struct list_head *lqueue;
- struct list_head *lchunk, *lchunk1;
+ struct list_head *lchunk;
struct sctp_transport *transport = pkt->transport;
sctp_xmit_t status;
- struct sctp_chunk *chunk, *chunk1;
+ struct sctp_chunk *chunk;
struct sctp_association *asoc;
int error = 0;
* the transmitted list.
*/
list_add_tail(lchunk, &transport->transmitted);
-
- /* Mark the chunk as ineligible for fast retransmit
- * after it is retransmitted.
- */
- chunk->fast_retransmit = 0;
-
*start_timer = 1;
q->empty = 0;
lchunk = sctp_list_dequeue(lqueue);
break;
};
-
- /* If we are here due to a retransmit timeout or a fast
- * retransmit and if there are any chunks left in the retransmit
- * queue that could not fit in the PMTU sized packet, they need * to be marked as ineligible for a subsequent fast retransmit.
- */
- if (rtx_timeout && !lchunk) {
- list_for_each(lchunk1, lqueue) {
- chunk1 = list_entry(lchunk1, struct sctp_chunk,
- transmitted_list);
- chunk1->fast_retransmit = 0;
- }
- }
}
return error;
if (ftsn_chunk) {
__skb_queue_tail(&q->control, (struct sk_buff *)ftsn_chunk);
- SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+ SCTP_INC_STATS(SctpOutCtrlChunks);
}
}
#include <linux/init.h>
#include <net/sctp/sctp.h>
-struct snmp_mib sctp_snmp_list[] = {
- SNMP_MIB_ITEM("SctpCurrEstab", SCTP_MIB_CURRESTAB),
- SNMP_MIB_ITEM("SctpActiveEstabs", SCTP_MIB_ACTIVEESTABS),
- SNMP_MIB_ITEM("SctpPassiveEstabs", SCTP_MIB_PASSIVEESTABS),
- SNMP_MIB_ITEM("SctpAborteds", SCTP_MIB_ABORTEDS),
- SNMP_MIB_ITEM("SctpShutdowns", SCTP_MIB_SHUTDOWNS),
- SNMP_MIB_ITEM("SctpOutOfBlues", SCTP_MIB_OUTOFBLUES),
- SNMP_MIB_ITEM("SctpChecksumErrors", SCTP_MIB_CHECKSUMERRORS),
- SNMP_MIB_ITEM("SctpOutCtrlChunks", SCTP_MIB_OUTCTRLCHUNKS),
- SNMP_MIB_ITEM("SctpOutOrderChunks", SCTP_MIB_OUTORDERCHUNKS),
- SNMP_MIB_ITEM("SctpOutUnorderChunks", SCTP_MIB_OUTUNORDERCHUNKS),
- SNMP_MIB_ITEM("SctpInCtrlChunks", SCTP_MIB_INCTRLCHUNKS),
- SNMP_MIB_ITEM("SctpInOrderChunks", SCTP_MIB_INORDERCHUNKS),
- SNMP_MIB_ITEM("SctpInUnorderChunks", SCTP_MIB_INUNORDERCHUNKS),
- SNMP_MIB_ITEM("SctpFragUsrMsgs", SCTP_MIB_FRAGUSRMSGS),
- SNMP_MIB_ITEM("SctpReasmUsrMsgs", SCTP_MIB_REASMUSRMSGS),
- SNMP_MIB_ITEM("SctpOutSCTPPacks", SCTP_MIB_OUTSCTPPACKS),
- SNMP_MIB_ITEM("SctpInSCTPPacks", SCTP_MIB_INSCTPPACKS),
+static char *sctp_snmp_list[] = {
+#define SCTP_SNMP_ENTRY(x) #x
+ SCTP_SNMP_ENTRY(SctpCurrEstab),
+ SCTP_SNMP_ENTRY(SctpActiveEstabs),
+ SCTP_SNMP_ENTRY(SctpPassiveEstabs),
+ SCTP_SNMP_ENTRY(SctpAborteds),
+ SCTP_SNMP_ENTRY(SctpShutdowns),
+ SCTP_SNMP_ENTRY(SctpOutOfBlues),
+ SCTP_SNMP_ENTRY(SctpChecksumErrors),
+ SCTP_SNMP_ENTRY(SctpOutCtrlChunks),
+ SCTP_SNMP_ENTRY(SctpOutOrderChunks),
+ SCTP_SNMP_ENTRY(SctpOutUnorderChunks),
+ SCTP_SNMP_ENTRY(SctpInCtrlChunks),
+ SCTP_SNMP_ENTRY(SctpInOrderChunks),
+ SCTP_SNMP_ENTRY(SctpInUnorderChunks),
+ SCTP_SNMP_ENTRY(SctpFragUsrMsgs),
+ SCTP_SNMP_ENTRY(SctpReasmUsrMsgs),
+ SCTP_SNMP_ENTRY(SctpOutSCTPPacks),
+ SCTP_SNMP_ENTRY(SctpInSCTPPacks),
+#undef SCTP_SNMP_ENTRY
};
/* Return the current value of a particular entry in the mib by adding its
{
int i;
- for (i = 0; sctp_snmp_list[i].name != NULL; i++)
- seq_printf(seq, "%-32s\t%ld\n", sctp_snmp_list[i].name,
- fold_field((void **)sctp_statistics,
- sctp_snmp_list[i].entry));
+ for (i = 0; i < sizeof(sctp_snmp_list) / sizeof(char *); i++)
+ seq_printf(seq, "%-32s\t%ld\n", sctp_snmp_list[i],
+ fold_field((void **)sctp_statistics, i));
return 0;
}
NIPQUAD(((struct rtable *)skb->dst)->rt_src),
NIPQUAD(((struct rtable *)skb->dst)->rt_dst));
- SCTP_INC_STATS(SCTP_MIB_OUTSCTPPACKS);
+ SCTP_INC_STATS(SctpOutSCTPPacks);
return ip_queue_xmit(skb, ipfragok);
}
if (unlikely(!idr_pre_get(&sctp_assocs_id, gfp)))
goto clean_up;
spin_lock_bh(&sctp_assocs_id_lock);
- error = idr_get_new_above(&sctp_assocs_id, (void *)asoc, 1,
- &assoc_id);
+ error = idr_get_new(&sctp_assocs_id,
+ (void *)asoc,
+ &assoc_id);
spin_unlock_bh(&sctp_assocs_id_lock);
if (error == -EAGAIN)
goto retry;
}
}
-/* Helper function to stop any pending T3-RTX timers */
-static void sctp_cmd_t3_rtx_timers_stop(sctp_cmd_seq_t *cmds,
- struct sctp_association *asoc)
-{
- struct sctp_transport *t;
- struct list_head *pos;
-
- list_for_each(pos, &asoc->peer.transport_addr_list) {
- t = list_entry(pos, struct sctp_transport, transports);
- if (timer_pending(&t->T3_rtx_timer) &&
- del_timer(&t->T3_rtx_timer)) {
- sctp_transport_put(t);
- }
- }
-}
-
-
/* Helper function to update the heartbeat timer. */
static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds,
struct sctp_association *asoc,
return;
}
-/* Helper function to remove the association non-primary peer
- * transports.
- */
-static void sctp_cmd_del_non_primary(struct sctp_association *asoc)
-{
- struct sctp_transport *t;
- struct list_head *pos;
- struct list_head *temp;
-
- list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
- t = list_entry(pos, struct sctp_transport, transports);
- if (!sctp_cmp_addr_exact(&t->ipaddr,
- &asoc->peer.primary_addr)) {
- sctp_assoc_del_peer(asoc, &t->ipaddr);
- }
- }
-
- return;
-}
-
/* These three macros allow us to pull the debugging code out of the
* main flow of sctp_do_sm() to keep attention focused on the real
* functionality there.
if (cmd->obj.ptr)
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(cmd->obj.ptr));
-
- /* FIXME - Eventually come up with a cleaner way to
- * enabling COOKIE-ECHO + DATA bundling during
- * multihoming stale cookie scenarios, the following
- * command plays with asoc->peer.retran_path to
- * avoid the problem of sending the COOKIE-ECHO and
- * DATA in different paths, which could result
- * in the association being ABORTed if the DATA chunk
- * is processed first by the server. Checking the
- * init error counter simply causes this command
- * to be executed only during failed attempts of
- * association establishment.
- */
- if ((asoc->peer.retran_path !=
- asoc->peer.primary_path) &&
- (asoc->counters[SCTP_COUNTER_INIT_ERROR] > 0)) {
- sctp_add_cmd_sf(commands,
- SCTP_CMD_FORCE_PRIM_RETRAN,
- SCTP_NULL());
- }
-
break;
case SCTP_CMD_GEN_SHUTDOWN:
case SCTP_CMD_CLEAR_INIT_TAG:
asoc->peer.i.init_tag = 0;
break;
- case SCTP_CMD_DEL_NON_PRIMARY:
- sctp_cmd_del_non_primary(asoc);
- break;
- case SCTP_CMD_T3_RTX_TIMERS_STOP:
- sctp_cmd_t3_rtx_timers_stop(commands, asoc);
- break;
- case SCTP_CMD_FORCE_PRIM_RETRAN:
- t = asoc->peer.retran_path;
- asoc->peer.retran_path = asoc->peer.primary_path;
- error = sctp_outq_uncork(&asoc->outqueue);
- local_cork = 0;
- asoc->peer.retran_path = t;
- break;
default:
printk(KERN_WARNING "Impossible command: %u, %p\n",
cmd->verb, cmd->obj.ptr);
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_CLOSED));
- SCTP_INC_STATS(SCTP_MIB_SHUTDOWNS);
- SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+ SCTP_INC_STATS(SctpShutdowns);
+ SCTP_DEC_STATS(SctpCurrEstab);
sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
if (packet) {
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
SCTP_PACKET(packet));
- SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+ SCTP_INC_STATS(SctpOutCtrlChunks);
return SCTP_DISPOSITION_CONSUME;
} else {
return SCTP_DISPOSITION_NOMEM;
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_CLOSED));
- SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
+ SCTP_INC_STATS(SctpAborteds);
sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
return SCTP_DISPOSITION_DELETE_TCB;
}
(sctp_init_chunk_t *)chunk->chunk_hdr, chunk,
&err_chunk)) {
- SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
+ SCTP_INC_STATS(SctpAborteds);
/* This chunk contains fatal error. It is to be discarded.
* Send an ABORT, with causes if there is any.
if (packet) {
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
SCTP_PACKET(packet));
- SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+ SCTP_INC_STATS(SctpOutCtrlChunks);
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_CLOSED));
sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB,
*/
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
+ sctp_add_cmd_sf(commands, SCTP_CMD_COUNTER_RESET,
+ SCTP_COUNTER(SCTP_COUNTER_INIT_ERROR));
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_ESTABLISHED));
- SCTP_INC_STATS(SCTP_MIB_CURRESTAB);
- SCTP_INC_STATS(SCTP_MIB_PASSIVEESTABS);
+ SCTP_INC_STATS(SctpCurrEstab);
+ SCTP_INC_STATS(SctpPassiveEstabs);
sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
if (new_asoc->autoclose)
if (!sctp_vtag_verify(chunk, asoc))
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
- /* Reset init error count upon receipt of COOKIE-ACK,
- * to avoid problems with the managemement of this
- * counter in stale cookie situations when a transition back
- * from the COOKIE-ECHOED state to the COOKIE-WAIT
- * state is performed.
- */
- sctp_add_cmd_sf(commands, SCTP_CMD_COUNTER_RESET,
- SCTP_COUNTER(SCTP_COUNTER_INIT_ERROR));
-
/* RFC 2960 5.1 Normal Establishment of an Association
*
* E) Upon reception of the COOKIE ACK, endpoint "A" will move
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_ESTABLISHED));
- SCTP_INC_STATS(SCTP_MIB_CURRESTAB);
- SCTP_INC_STATS(SCTP_MIB_ACTIVEESTABS);
+ SCTP_INC_STATS(SctpCurrEstab);
+ SCTP_INC_STATS(SctpActiveEstabs);
sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
if (asoc->autoclose)
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
/* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_U32(SCTP_ERROR_NO_ERROR));
- SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
- SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+ SCTP_INC_STATS(SctpAborteds);
+ SCTP_DEC_STATS(SctpCurrEstab);
return SCTP_DISPOSITION_DELETE_TCB;
}
goto out;
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, SCTP_PACKET(pkt));
- SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+ SCTP_INC_STATS(SctpOutCtrlChunks);
/* Discard the rest of the inbound packet. */
sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
if (packet) {
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
SCTP_PACKET(packet));
- SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+ SCTP_INC_STATS(SctpOutCtrlChunks);
retval = SCTP_DISPOSITION_CONSUME;
} else {
retval = SCTP_DISPOSITION_NOMEM;
sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_ESTABLISHED));
- SCTP_INC_STATS(SCTP_MIB_CURRESTAB);
+ SCTP_INC_STATS(SctpCurrEstab);
sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
repl = sctp_make_cookie_ack(new_asoc, chunk);
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_ESTABLISHED));
- SCTP_INC_STATS(SCTP_MIB_CURRESTAB);
+ SCTP_INC_STATS(SctpCurrEstab);
sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START,
SCTP_NULL());
time_t stale;
sctp_cookie_preserve_param_t bht;
sctp_errhdr_t *err;
+ struct list_head *pos;
+ struct sctp_transport *t;
struct sctp_chunk *reply;
struct sctp_bind_addr *bp;
int attempts;
/* Clear peer's init_tag cached in assoc as we are sending a new INIT */
sctp_add_cmd_sf(commands, SCTP_CMD_CLEAR_INIT_TAG, SCTP_NULL());
- /* Stop pending T3-rtx and heartbeat timers */
- sctp_add_cmd_sf(commands, SCTP_CMD_T3_RTX_TIMERS_STOP, SCTP_NULL());
- sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_STOP, SCTP_NULL());
-
- /* Delete non-primary peer ip addresses since we are transitioning
- * back to the COOKIE-WAIT state
- */
- sctp_add_cmd_sf(commands, SCTP_CMD_DEL_NON_PRIMARY, SCTP_NULL());
-
- /* If we've sent any data bundled with COOKIE-ECHO we will need to
- * resend
- */
- sctp_add_cmd_sf(commands, SCTP_CMD_RETRAN,
- SCTP_TRANSPORT(asoc->peer.primary_path));
-
/* Cast away the const modifier, as we want to just
* rerun it through as a sideffect.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_COUNTER_INC,
SCTP_COUNTER(SCTP_COUNTER_INIT_ERROR));
+ /* If we've sent any data bundled with COOKIE-ECHO we need to
+ * resend.
+ */
+ list_for_each(pos, &asoc->peer.transport_addr_list) {
+ t = list_entry(pos, struct sctp_transport, transports);
+ sctp_add_cmd_sf(commands, SCTP_CMD_RETRAN, SCTP_TRANSPORT(t));
+ }
+
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
/* ASSOC_FAILED will DELETE_TCB. */
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_U32(error));
- SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
- SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+ SCTP_INC_STATS(SctpAborteds);
+ SCTP_DEC_STATS(SctpCurrEstab);
return SCTP_DISPOSITION_ABORT;
}
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_CLOSED));
- SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
+ SCTP_INC_STATS(SctpAborteds);
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
- int error;
+ sctp_datahdr_t *data_hdr;
+ struct sctp_chunk *err;
+ size_t datalen;
+ sctp_verb_t deliver;
+ int tmp;
+ __u32 tsn;
if (!sctp_vtag_verify(chunk, asoc)) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
}
- error = sctp_eat_data(asoc, chunk, commands );
- switch (error) {
- case SCTP_IERROR_NO_ERROR:
- break;
- case SCTP_IERROR_HIGH_TSN:
- case SCTP_IERROR_BAD_STREAM:
+ data_hdr = chunk->subh.data_hdr = (sctp_datahdr_t *)chunk->skb->data;
+ skb_pull(chunk->skb, sizeof(sctp_datahdr_t));
+
+ tsn = ntohl(data_hdr->tsn);
+ SCTP_DEBUG_PRINTK("eat_data: TSN 0x%x.\n", tsn);
+
+ /* ASSERT: Now skb->data is really the user data. */
+
+ /* Process ECN based congestion.
+ *
+ * Since the chunk structure is reused for all chunks within
+ * a packet, we use ecn_ce_done to track if we've already
+ * done CE processing for this packet.
+ *
+ * We need to do ECN processing even if we plan to discard the
+ * chunk later.
+ */
+
+ if (!chunk->ecn_ce_done) {
+ struct sctp_af *af;
+ chunk->ecn_ce_done = 1;
+
+ af = sctp_get_af_specific(
+ ipver2af(chunk->skb->nh.iph->version));
+
+ if (af && af->is_ce(chunk->skb) && asoc->peer.ecn_capable) {
+ /* Do real work as sideffect. */
+ sctp_add_cmd_sf(commands, SCTP_CMD_ECN_CE,
+ SCTP_U32(tsn));
+ }
+ }
+
+ tmp = sctp_tsnmap_check(&asoc->peer.tsn_map, tsn);
+ if (tmp < 0) {
+ /* The TSN is too high--silently discard the chunk and
+ * count on it getting retransmitted later.
+ */
goto discard_noforce;
- case SCTP_IERROR_DUP_TSN:
- case SCTP_IERROR_IGNORE_TSN:
+ } else if (tmp > 0) {
+ /* This is a duplicate. Record it. */
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_DUP, SCTP_U32(tsn));
goto discard_force;
- case SCTP_IERROR_NO_DATA:
- goto consume;
- default:
- BUG();
}
+ /* This is a new TSN. */
+
+ /* Discard if there is no room in the receive window.
+ * Actually, allow a little bit of overflow (up to a MTU).
+ */
+ datalen = ntohs(chunk->chunk_hdr->length);
+ datalen -= sizeof(sctp_data_chunk_t);
+
+ deliver = SCTP_CMD_CHUNK_ULP;
+
+ /* Think about partial delivery. */
+ if ((datalen >= asoc->rwnd) && (!asoc->ulpq.pd_mode)) {
+
+ /* Even if we don't accept this chunk there is
+ * memory pressure.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_PART_DELIVER, SCTP_NULL());
+ }
+
+ /* Spill over rwnd a little bit. Note: While allowed, this spill over
+ * seems a bit troublesome in that frag_point varies based on
+ * PMTU. In cases, such as loopback, this might be a rather
+ * large spill over.
+ */
+ if (!asoc->rwnd || asoc->rwnd_over ||
+ (datalen > asoc->rwnd + asoc->frag_point)) {
+
+ /* If this is the next TSN, consider reneging to make
+ * room. Note: Playing nice with a confused sender. A
+ * malicious sender can still eat up all our buffer
+ * space and in the future we may want to detect and
+ * do more drastic reneging.
+ */
+ if (sctp_tsnmap_has_gap(&asoc->peer.tsn_map) &&
+ (sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map) + 1) == tsn) {
+ SCTP_DEBUG_PRINTK("Reneging for tsn:%u\n", tsn);
+ deliver = SCTP_CMD_RENEGE;
+ } else {
+ SCTP_DEBUG_PRINTK("Discard tsn: %u len: %Zd, "
+ "rwnd: %d\n", tsn, datalen,
+ asoc->rwnd);
+ goto discard_force;
+ }
+ }
+
+ /*
+ * Section 3.3.10.9 No User Data (9)
+ *
+ * Cause of error
+ * ---------------
+ * No User Data: This error cause is returned to the originator of a
+ * DATA chunk if a received DATA chunk has no user data.
+ */
+ if (unlikely(0 == datalen)) {
+ err = sctp_make_abort_no_data(asoc, chunk, tsn);
+ if (err) {
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
+ SCTP_CHUNK(err));
+ }
+ /* We are going to ABORT, so we might as well stop
+ * processing the rest of the chunks in the packet.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
+ sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
+ SCTP_U32(SCTP_ERROR_NO_DATA));
+ SCTP_INC_STATS(SctpAborteds);
+ SCTP_DEC_STATS(SctpCurrEstab);
+ return SCTP_DISPOSITION_CONSUME;
+ }
+
+ /* If definately accepting the DATA chunk, record its TSN, otherwise
+ * wait for renege processing.
+ */
+ if (SCTP_CMD_CHUNK_ULP == deliver)
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn));
+
+ /* Note: Some chunks may get overcounted (if we drop) or overcounted
+ * if we renege and the chunk arrives again.
+ */
+ if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
+ SCTP_INC_STATS(SctpInUnorderChunks);
+ else
+ SCTP_INC_STATS(SctpInOrderChunks);
+
+ /* RFC 2960 6.5 Stream Identifier and Stream Sequence Number
+ *
+ * If an endpoint receive a DATA chunk with an invalid stream
+ * identifier, it shall acknowledge the reception of the DATA chunk
+ * following the normal procedure, immediately send an ERROR chunk
+ * with cause set to "Invalid Stream Identifier" (See Section 3.3.10)
+ * and discard the DATA chunk.
+ */
+ if (ntohs(data_hdr->stream) >= asoc->c.sinit_max_instreams) {
+ err = sctp_make_op_error(asoc, chunk, SCTP_ERROR_INV_STRM,
+ &data_hdr->stream,
+ sizeof(data_hdr->stream));
+ if (err)
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
+ SCTP_CHUNK(err));
+ goto discard_noforce;
+ }
+
+ /* Send the data up to the user. Note: Schedule the
+ * SCTP_CMD_CHUNK_ULP cmd before the SCTP_CMD_GEN_SACK, as the SACK
+ * chunk needs the updated rwnd.
+ */
+ sctp_add_cmd_sf(commands, deliver, SCTP_CHUNK(chunk));
+
if (asoc->autoclose) {
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
SCTP_TO(SCTP_EVENT_TIMEOUT_SACK));
}
return SCTP_DISPOSITION_DISCARD;
-consume:
- return SCTP_DISPOSITION_CONSUME;
-
}
/*
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
- int error;
+ sctp_datahdr_t *data_hdr;
+ struct sctp_chunk *err;
+ size_t datalen;
+ int tmp;
+ __u32 tsn;
if (!sctp_vtag_verify(chunk, asoc)) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
}
- error = sctp_eat_data(asoc, chunk, commands );
- switch (error) {
- case SCTP_IERROR_NO_ERROR:
- case SCTP_IERROR_HIGH_TSN:
- case SCTP_IERROR_DUP_TSN:
- case SCTP_IERROR_IGNORE_TSN:
- case SCTP_IERROR_BAD_STREAM:
- break;
- case SCTP_IERROR_NO_DATA:
- goto consume;
- default:
- BUG();
+ data_hdr = chunk->subh.data_hdr = (sctp_datahdr_t *) chunk->skb->data;
+ skb_pull(chunk->skb, sizeof(sctp_datahdr_t));
+
+ tsn = ntohl(data_hdr->tsn);
+
+ SCTP_DEBUG_PRINTK("eat_data: TSN 0x%x.\n", tsn);
+
+ /* ASSERT: Now skb->data is really the user data. */
+
+ /* Process ECN based congestion.
+ *
+ * Since the chunk structure is reused for all chunks within
+ * a packet, we use ecn_ce_done to track if we've already
+ * done CE processing for this packet.
+ *
+ * We need to do ECN processing even if we plan to discard the
+ * chunk later.
+ */
+ if (!chunk->ecn_ce_done) {
+ struct sctp_af *af;
+ chunk->ecn_ce_done = 1;
+
+ af = sctp_get_af_specific(
+ ipver2af(chunk->skb->nh.iph->version));
+
+ if (af && af->is_ce(chunk->skb) && asoc->peer.ecn_capable) {
+ /* Do real work as sideffect. */
+ sctp_add_cmd_sf(commands, SCTP_CMD_ECN_CE,
+ SCTP_U32(tsn));
+ }
}
- /* Go a head and force a SACK, since we are shutting down. */
+ tmp = sctp_tsnmap_check(&asoc->peer.tsn_map, tsn);
+ if (tmp < 0) {
+ /* The TSN is too high--silently discard the chunk and
+ * count on it getting retransmitted later.
+ */
+ goto gen_shutdown;
+ } else if (tmp > 0) {
+ /* This is a duplicate. Record it. */
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_DUP, SCTP_U32(tsn));
+ goto gen_shutdown;
+ }
+
+ /* This is a new TSN. */
+
+ datalen = ntohs(chunk->chunk_hdr->length);
+ datalen -= sizeof(sctp_data_chunk_t);
+
+ /*
+ * Section 3.3.10.9 No User Data (9)
+ *
+ * Cause of error
+ * ---------------
+ * No User Data: This error cause is returned to the originator of a
+ * DATA chunk if a received DATA chunk has no user data.
+ */
+ if (unlikely(0 == datalen)) {
+ err = sctp_make_abort_no_data(asoc, chunk, tsn);
+ if (err) {
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
+ SCTP_CHUNK(err));
+ }
+ /* We are going to ABORT, so we might as well stop
+ * processing the rest of the chunks in the packet.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
+ sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
+ SCTP_U32(SCTP_ERROR_NO_DATA));
+ SCTP_INC_STATS(SctpAborteds);
+ SCTP_DEC_STATS(SctpCurrEstab);
+ return SCTP_DISPOSITION_CONSUME;
+ }
+
+ /* We are accepting this DATA chunk. */
+
+ /* Record the fact that we have received this TSN. */
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn));
+
+ if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
+ SCTP_INC_STATS(SctpInUnorderChunks);
+ else
+ SCTP_INC_STATS(SctpInOrderChunks);
+
+ /* RFC 2960 6.5 Stream Identifier and Stream Sequence Number
+ *
+ * If an endpoint receive a DATA chunk with an invalid stream
+ * identifier, it shall acknowledge the reception of the DATA chunk
+ * following the normal procedure, immediately send an ERROR chunk
+ * with cause set to "Invalid Stream Identifier" (See Section 3.3.10)
+ * and discard the DATA chunk.
+ */
+ if (ntohs(data_hdr->stream) >= asoc->c.sinit_max_instreams) {
+ err = sctp_make_op_error(asoc, chunk, SCTP_ERROR_INV_STRM,
+ &data_hdr->stream,
+ sizeof(data_hdr->stream));
+ if (err) {
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
+ SCTP_CHUNK(err));
+ }
+ }
+ /* Go a head and force a SACK, since we are shutting down. */
+gen_shutdown:
/* Implementor's Guide.
*
* While in SHUTDOWN-SENT state, the SHUTDOWN sender MUST immediately
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
}
-
-consume:
return SCTP_DISPOSITION_CONSUME;
}
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
SCTP_PACKET(packet));
- SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+ SCTP_INC_STATS(SctpOutCtrlChunks);
return SCTP_DISPOSITION_CONSUME;
}
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_CLOSED));
- SCTP_INC_STATS(SCTP_MIB_SHUTDOWNS);
- SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+ SCTP_INC_STATS(SctpShutdowns);
+ SCTP_DEC_STATS(SctpCurrEstab);
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
/* ...and remove all record of the association. */
__u8 *ch_end;
int ootb_shut_ack = 0;
- SCTP_INC_STATS(SCTP_MIB_OUTOFBLUES);
+ SCTP_INC_STATS(SctpOutOfBlues);
ch = (sctp_chunkhdr_t *) chunk->chunk_hdr;
do {
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
SCTP_PACKET(packet));
- SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+ SCTP_INC_STATS(SctpOutCtrlChunks);
return SCTP_DISPOSITION_CONSUME;
}
sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_U32(SCTP_ERROR_ASCONF_ACK));
- SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
- SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+ SCTP_INC_STATS(SctpAborteds);
+ SCTP_DEC_STATS(SctpCurrEstab);
return SCTP_DISPOSITION_ABORT;
}
sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_U32(SCTP_ERROR_ASCONF_ACK));
- SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
- SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+ SCTP_INC_STATS(SctpAborteds);
+ SCTP_DEC_STATS(SctpCurrEstab);
return SCTP_DISPOSITION_ABORT;
}
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_U32(SCTP_ERROR_USER_ABORT));
- SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
- SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+ SCTP_INC_STATS(SctpAborteds);
+ SCTP_DEC_STATS(SctpCurrEstab);
return retval;
}
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_CLOSED));
- SCTP_INC_STATS(SCTP_MIB_SHUTDOWNS);
+ SCTP_INC_STATS(SctpShutdowns);
sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_CLOSED));
- SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
+ SCTP_INC_STATS(SctpAborteds);
/* Even if we can't send the ABORT due to low memory delete the
* TCB. This is a departure from our typical NOMEM handling.
/* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_U32(SCTP_ERROR_NO_ERROR));
- SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
- SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+ SCTP_INC_STATS(SctpAborteds);
+ SCTP_DEC_STATS(SctpCurrEstab);
return SCTP_DISPOSITION_DELETE_TCB;
}
/* Note: CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_U32(SCTP_ERROR_NO_ERROR));
- SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
- SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+ SCTP_INC_STATS(SctpAborteds);
+ SCTP_DEC_STATS(SctpCurrEstab);
return SCTP_DISPOSITION_DELETE_TCB;
}
SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_U32(SCTP_ERROR_NO_ERROR));
- SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
- SCTP_INC_STATS(SCTP_MIB_CURRESTAB);
+ SCTP_INC_STATS(SctpAborteds);
+ SCTP_INC_STATS(SctpCurrEstab);
return SCTP_DISPOSITION_ABORT;
}
num_blocks = ntohs(sack->num_gap_ack_blocks);
num_dup_tsns = ntohs(sack->num_dup_tsns);
len = sizeof(struct sctp_sackhdr);
- len += (num_blocks + num_dup_tsns) * sizeof(__u32);
+ len = (num_blocks + num_dup_tsns) * sizeof(__u32);
if (len > chunk->skb->len)
return NULL;
sctp_packet_append_chunk(packet, err_chunk);
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
SCTP_PACKET(packet));
- SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+ SCTP_INC_STATS(SctpOutCtrlChunks);
} else
sctp_chunk_free (err_chunk);
}
}
-
-
-/* Process a data chunk */
-int sctp_eat_data(const struct sctp_association *asoc,
- struct sctp_chunk *chunk,
- sctp_cmd_seq_t *commands)
-{
- sctp_datahdr_t *data_hdr;
- struct sctp_chunk *err;
- size_t datalen;
- sctp_verb_t deliver;
- int tmp;
- __u32 tsn;
-
- data_hdr = chunk->subh.data_hdr = (sctp_datahdr_t *)chunk->skb->data;
- skb_pull(chunk->skb, sizeof(sctp_datahdr_t));
-
- tsn = ntohl(data_hdr->tsn);
- SCTP_DEBUG_PRINTK("eat_data: TSN 0x%x.\n", tsn);
-
- /* ASSERT: Now skb->data is really the user data. */
-
- /* Process ECN based congestion.
- *
- * Since the chunk structure is reused for all chunks within
- * a packet, we use ecn_ce_done to track if we've already
- * done CE processing for this packet.
- *
- * We need to do ECN processing even if we plan to discard the
- * chunk later.
- */
-
- if (!chunk->ecn_ce_done) {
- struct sctp_af *af;
- chunk->ecn_ce_done = 1;
-
- af = sctp_get_af_specific(
- ipver2af(chunk->skb->nh.iph->version));
-
- if (af && af->is_ce(chunk->skb) && asoc->peer.ecn_capable) {
- /* Do real work as sideffect. */
- sctp_add_cmd_sf(commands, SCTP_CMD_ECN_CE,
- SCTP_U32(tsn));
- }
- }
-
- tmp = sctp_tsnmap_check(&asoc->peer.tsn_map, tsn);
- if (tmp < 0) {
- /* The TSN is too high--silently discard the chunk and
- * count on it getting retransmitted later.
- */
- return SCTP_IERROR_HIGH_TSN;
- } else if (tmp > 0) {
- /* This is a duplicate. Record it. */
- sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_DUP, SCTP_U32(tsn));
- return SCTP_IERROR_DUP_TSN;
- }
-
- /* This is a new TSN. */
-
- /* Discard if there is no room in the receive window.
- * Actually, allow a little bit of overflow (up to a MTU).
- */
- datalen = ntohs(chunk->chunk_hdr->length);
- datalen -= sizeof(sctp_data_chunk_t);
-
- deliver = SCTP_CMD_CHUNK_ULP;
-
- /* Think about partial delivery. */
- if ((datalen >= asoc->rwnd) && (!asoc->ulpq.pd_mode)) {
-
- /* Even if we don't accept this chunk there is
- * memory pressure.
- */
- sctp_add_cmd_sf(commands, SCTP_CMD_PART_DELIVER, SCTP_NULL());
- }
-
- /* Spill over rwnd a little bit. Note: While allowed, this spill over
- * seems a bit troublesome in that frag_point varies based on
- * PMTU. In cases, such as loopback, this might be a rather
- * large spill over.
- */
- if (!asoc->rwnd || asoc->rwnd_over ||
- (datalen > asoc->rwnd + asoc->frag_point)) {
-
- /* If this is the next TSN, consider reneging to make
- * room. Note: Playing nice with a confused sender. A
- * malicious sender can still eat up all our buffer
- * space and in the future we may want to detect and
- * do more drastic reneging.
- */
- if (sctp_tsnmap_has_gap(&asoc->peer.tsn_map) &&
- (sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map) + 1) == tsn) {
- SCTP_DEBUG_PRINTK("Reneging for tsn:%u\n", tsn);
- deliver = SCTP_CMD_RENEGE;
- } else {
- SCTP_DEBUG_PRINTK("Discard tsn: %u len: %Zd, "
- "rwnd: %d\n", tsn, datalen,
- asoc->rwnd);
- return SCTP_IERROR_IGNORE_TSN;
- }
- }
-
- /*
- * Section 3.3.10.9 No User Data (9)
- *
- * Cause of error
- * ---------------
- * No User Data: This error cause is returned to the originator of a
- * DATA chunk if a received DATA chunk has no user data.
- */
- if (unlikely(0 == datalen)) {
- err = sctp_make_abort_no_data(asoc, chunk, tsn);
- if (err) {
- sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
- SCTP_CHUNK(err));
- }
- /* We are going to ABORT, so we might as well stop
- * processing the rest of the chunks in the packet.
- */
- sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
- sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
- SCTP_U32(SCTP_ERROR_NO_DATA));
- SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
- SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
- return SCTP_IERROR_NO_DATA;
- }
-
- /* If definately accepting the DATA chunk, record its TSN, otherwise
- * wait for renege processing.
- */
- if (SCTP_CMD_CHUNK_ULP == deliver)
- sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn));
-
- /* Note: Some chunks may get overcounted (if we drop) or overcounted
- * if we renege and the chunk arrives again.
- */
- if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
- SCTP_INC_STATS(SCTP_MIB_INUNORDERCHUNKS);
- else
- SCTP_INC_STATS(SCTP_MIB_INORDERCHUNKS);
-
- /* RFC 2960 6.5 Stream Identifier and Stream Sequence Number
- *
- * If an endpoint receive a DATA chunk with an invalid stream
- * identifier, it shall acknowledge the reception of the DATA chunk
- * following the normal procedure, immediately send an ERROR chunk
- * with cause set to "Invalid Stream Identifier" (See Section 3.3.10)
- * and discard the DATA chunk.
- */
- if (ntohs(data_hdr->stream) >= asoc->c.sinit_max_instreams) {
- err = sctp_make_op_error(asoc, chunk, SCTP_ERROR_INV_STRM,
- &data_hdr->stream,
- sizeof(data_hdr->stream));
- if (err)
- sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
- SCTP_CHUNK(err));
- return SCTP_IERROR_BAD_STREAM;
- }
-
- /* Send the data up to the user. Note: Schedule the
- * SCTP_CMD_CHUNK_ULP cmd before the SCTP_CMD_GEN_SACK, as the SACK
- * chunk needs the updated rwnd.
- */
- sctp_add_cmd_sf(commands, deliver, SCTP_CHUNK(chunk));
-
- return SCTP_IERROR_NO_ERROR;
-}
if (copy_from_user(¶ms, optval, optlen))
return -EFAULT;
- /*
- * API 7. Socket Options (setting the default value for the endpoint)
- * All options that support specific settings on an association by
- * filling in either an association id variable or a sockaddr_storage
- * SHOULD also support setting of the same value for the entire endpoint
- * (i.e. future associations). To accomplish this the following logic is
- * used when setting one of these options:
-
- * c) If neither the sockaddr_storage or association identification is
- * set i.e. the sockaddr_storage is set to all 0's (INADDR_ANY) and
- * the association identification is 0, the settings are a default
- * and to be applied to the endpoint (all future associations).
- */
-
- /* update default value for endpoint (all future associations) */
- if (!params.spp_assoc_id &&
- sctp_is_any(( union sctp_addr *)¶ms.spp_address)) {
- if (params.spp_hbinterval)
- sctp_sk(sk)->paddrparam.spp_hbinterval =
- params.spp_hbinterval;
- if (sctp_max_retrans_path)
- sctp_sk(sk)->paddrparam.spp_pathmaxrxt =
- params.spp_pathmaxrxt;
- return 0;
- }
-
trans = sctp_addr_id2transport(sk, ¶ms.spp_address,
params.spp_assoc_id);
if (!trans)
if (copy_from_user(¶ms, optval, len))
return -EFAULT;
- /* If no association id is specified retrieve the default value
- * for the endpoint that will be used for all future associations
- */
- if (!params.spp_assoc_id &&
- sctp_is_any(( union sctp_addr *)¶ms.spp_address)) {
- params.spp_hbinterval = sctp_sk(sk)->paddrparam.spp_hbinterval;
- params.spp_pathmaxrxt = sctp_sk(sk)->paddrparam.spp_pathmaxrxt;
-
- goto done;
- }
-
trans = sctp_addr_id2transport(sk, ¶ms.spp_address,
params.spp_assoc_id);
if (!trans)
*/
params.spp_pathmaxrxt = trans->error_threshold;
-done:
if (copy_to_user(optval, ¶ms, len))
return -EFAULT;
};
event = sctp_skb2event(f_frag);
- SCTP_INC_STATS(SCTP_MIB_REASMUSRMSGS);
+ SCTP_INC_STATS(SctpReasmUsrMsgs);
return event;
}
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
-#include <net/tux.h>
#include <linux/wanrouter.h>
#include <linux/if_bridge.h>
#include <linux/init.h>
* in the operation structures but are done directly via the socketcall() multiplexor.
*/
-struct file_operations socket_file_ops = {
+static struct file_operations socket_file_ops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.aio_read = sock_aio_read,
* but we take care of internal coherence yet.
*/
-struct file * sock_map_file(struct socket *sock)
+int sock_map_fd(struct socket *sock)
{
- struct file *file;
+ int fd;
struct qstr this;
char name[32];
- file = get_empty_filp();
-
- if (!file)
- return ERR_PTR(-ENFILE);
-
- sprintf(name, "[%lu]", SOCK_INODE(sock)->i_ino);
- this.name = name;
- this.len = strlen(name);
- this.hash = SOCK_INODE(sock)->i_ino;
-
- file->f_dentry = d_alloc(sock_mnt->mnt_sb->s_root, &this);
- if (!file->f_dentry) {
- put_filp(file);
- return ERR_PTR(-ENOMEM);
- }
- file->f_dentry->d_op = &sockfs_dentry_operations;
- d_add(file->f_dentry, SOCK_INODE(sock));
- file->f_vfsmnt = mntget(sock_mnt);
-file->f_mapping = file->f_dentry->d_inode->i_mapping;
-
- if (sock->file)
- BUG();
- sock->file = file;
- file->f_op = SOCK_INODE(sock)->i_fop = &socket_file_ops;
- file->f_mode = FMODE_READ | FMODE_WRITE;
- file->f_flags = O_RDWR;
- file->f_pos = 0;
-
- return file;
-}
-
-int sock_map_fd(struct socket *sock)
-{
- int fd;
- struct file *file;
-
/*
* Find a file descriptor suitable for return to the user.
*/
-
+
fd = get_unused_fd();
- if (fd < 0)
- return fd;
-
- file = sock_map_file(sock);
- if (IS_ERR(file)) {
- put_unused_fd(fd);
- return PTR_ERR(file);
+ if (fd >= 0) {
+ struct file *file = get_empty_filp();
+
+ if (!file) {
+ put_unused_fd(fd);
+ fd = -ENFILE;
+ goto out;
+ }
+
+ sprintf(name, "[%lu]", SOCK_INODE(sock)->i_ino);
+ this.name = name;
+ this.len = strlen(name);
+ this.hash = SOCK_INODE(sock)->i_ino;
+
+ file->f_dentry = d_alloc(sock_mnt->mnt_sb->s_root, &this);
+ if (!file->f_dentry) {
+ put_filp(file);
+ put_unused_fd(fd);
+ fd = -ENOMEM;
+ goto out;
+ }
+ file->f_dentry->d_op = &sockfs_dentry_operations;
+ d_add(file->f_dentry, SOCK_INODE(sock));
+ file->f_vfsmnt = mntget(sock_mnt);
+ file->f_mapping = file->f_dentry->d_inode->i_mapping;
+
+ sock->file = file;
+ file->f_op = SOCK_INODE(sock)->i_fop = &socket_file_ops;
+ file->f_mode = 3;
+ file->f_flags = O_RDWR;
+ file->f_pos = 0;
+ fd_install(fd, file);
}
- fd_install(fd, file);
-
+
+out:
return fd;
}
else
vx_sock_fail(sock->sk, size);
}
- vxdprintk(VXD_CBIT(net, 7),
- "__sock_sendmsg: %p[%p,%p,%p;%d]:%d/%d",
+ vxdprintk("__sock_sendmsg: %p[%p,%p,%p;%d]:%d/%d\n",
sock, sock->sk,
(sock->sk)?sock->sk->sk_nx_info:0,
(sock->sk)?sock->sk->sk_vx_info:0,
(sock->sk)?sock->sk->sk_xid:0,
- (unsigned int)size, len);
+ size, len);
return len;
}
return ret;
}
-int kernel_sendmsg(struct socket *sock, struct msghdr *msg,
- struct kvec *vec, size_t num, size_t size)
-{
- mm_segment_t oldfs = get_fs();
- int result;
-
- set_fs(KERNEL_DS);
- /*
- * the following is safe, since for compiler definitions of kvec and
- * iovec are identical, yielding the same in-core layout and alignment
- */
- msg->msg_iov = (struct iovec *)vec,
- msg->msg_iovlen = num;
- result = sock_sendmsg(sock, msg, size);
- set_fs(oldfs);
- return result;
-}
static inline int __sock_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t size, int flags)
len = sock->ops->recvmsg(iocb, sock, msg, size, flags);
if ((len >= 0) && sock->sk)
vx_sock_recv(sock->sk, len);
- vxdprintk(VXD_CBIT(net, 7),
- "__sock_recvmsg: %p[%p,%p,%p;%d]:%d/%d",
+ vxdprintk("__sock_recvmsg: %p[%p,%p,%p;%d]:%d/%d\n",
sock, sock->sk,
(sock->sk)?sock->sk->sk_nx_info:0,
(sock->sk)?sock->sk->sk_vx_info:0,
(sock->sk)?sock->sk->sk_xid:0,
- (unsigned int)size, len);
+ size, len);
return len;
}
return ret;
}
-int kernel_recvmsg(struct socket *sock, struct msghdr *msg,
- struct kvec *vec, size_t num,
- size_t size, int flags)
-{
- mm_segment_t oldfs = get_fs();
- int result;
-
- set_fs(KERNEL_DS);
- /*
- * the following is safe, since for compiler definitions of kvec and
- * iovec are identical, yielding the same in-core layout and alignment
- */
- msg->msg_iov = (struct iovec *)vec,
- msg->msg_iovlen = num;
- result = sock_recvmsg(sock, msg, size, flags);
- set_fs(oldfs);
- return result;
-}
-
static void sock_aio_dtor(struct kiocb *iocb)
{
kfree(iocb->private);
struct socket *sock;
int flags;
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
sock = SOCKET_I(file->f_dentry->d_inode);
flags = !(file->f_flags & O_NONBLOCK) ? 0 : MSG_DONTWAIT;
}
out:
- if (sock->sk != sk)
- BUG();
release_sock(sock->sk);
return 0;
}
#endif
}
-int tux_Dprintk;
-int tux_TDprintk;
-
-#ifdef CONFIG_TUX_MODULE
-
-asmlinkage long (*sys_tux_ptr) (unsigned int action, user_req_t *u_info) = NULL;
-
-struct module *tux_module = NULL;
-spinlock_t tux_module_lock = SPIN_LOCK_UNLOCKED;
-
-asmlinkage long sys_tux (unsigned int action, user_req_t *u_info)
-{
- int ret;
-
- if (current->tux_info)
- return sys_tux_ptr(action, u_info);
-
- ret = -ENOSYS;
- spin_lock(&tux_module_lock);
- if (!tux_module)
- goto out_unlock;
- if (!try_module_get(tux_module))
- goto out_unlock;
- spin_unlock(&tux_module_lock);
-
- if (!sys_tux_ptr)
- TUX_BUG();
- ret = sys_tux_ptr(action, u_info);
-
- spin_lock(&tux_module_lock);
- module_put(tux_module);
-out_unlock:
- spin_unlock(&tux_module_lock);
-
- return ret;
-}
-
-EXPORT_SYMBOL_GPL(tux_module);
-EXPORT_SYMBOL_GPL(tux_module_lock);
-EXPORT_SYMBOL_GPL(sys_tux_ptr);
-
-EXPORT_SYMBOL_GPL(tux_Dprintk);
-EXPORT_SYMBOL_GPL(tux_TDprintk);
-
-#endif
#ifdef CONFIG_PROC_FS
void socket_seq_show(struct seq_file *seq)
{
EXPORT_SYMBOL(sock_unregister);
EXPORT_SYMBOL(sock_wake_async);
EXPORT_SYMBOL(sockfd_lookup);
-EXPORT_SYMBOL(kernel_sendmsg);
-EXPORT_SYMBOL(kernel_recvmsg);
#include <linux/socket.h>
#include <linux/sunrpc/clnt.h>
#include <linux/spinlock.h>
-#include <linux/vserver/xid.h>
#ifdef RPC_DEBUG
# define RPCDBG_FACILITY RPCDBG_AUTH
get_group_info(current->group_info);
acred.uid = current->fsuid;
acred.gid = current->fsgid;
- acred.xid = current->xid;
acred.group_info = current->group_info;
dprintk("RPC: looking up %s cred\n",
get_group_info(current->group_info);
acred.uid = current->fsuid;
acred.gid = current->fsgid;
- acred.xid = current->xid;
acred.group_info = current->group_info;
dprintk("RPC: %4d looking up %s cred\n",
static ssize_t
gss_pipe_upcall(struct file *filp, struct rpc_pipe_msg *msg,
- char __user *dst, size_t buflen)
+ char *dst, size_t buflen)
{
char *data = (char *)msg->data + msg->copied;
ssize_t mlen = msg->len;
#define MSG_BUF_MAXSIZE 1024
static ssize_t
-gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
+gss_pipe_downcall(struct file *filp, const char *src, size_t mlen)
{
struct xdr_netobj obj = {
.len = mlen,
struct rpc_rqst *req = task->tk_rqstp;
u32 maj_stat = 0;
struct xdr_netobj mic;
- struct kvec iov;
+ struct iovec iov;
struct xdr_buf verf_buf;
u32 service;
gc_base);
struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
u32 seq, qop_state;
- struct kvec iov;
+ struct iovec iov;
struct xdr_buf verf_buf;
struct xdr_netobj mic;
u32 flav,len;
u32 *integ_len = NULL;
struct xdr_netobj mic;
u32 offset, *q;
- struct kvec *iov;
+ struct iovec *iov;
u32 maj_stat = 0;
int status = -EIO;
}
static inline int
-svc_safe_getnetobj(struct kvec *argv, struct xdr_netobj *o)
+svc_safe_getnetobj(struct iovec *argv, struct xdr_netobj *o)
{
int l;
}
static inline int
-svc_safe_putnetobj(struct kvec *resv, struct xdr_netobj *o)
+svc_safe_putnetobj(struct iovec *resv, struct xdr_netobj *o)
{
u32 *p;
struct xdr_buf rpchdr;
struct xdr_netobj checksum;
u32 flavor = 0;
- struct kvec *argv = &rqstp->rq_arg.head[0];
- struct kvec iov;
+ struct iovec *argv = &rqstp->rq_arg.head[0];
+ struct iovec iov;
/* data to compute the checksum over: */
iov.iov_base = rpcstart;
struct xdr_buf verf_data;
struct xdr_netobj mic;
u32 *p;
- struct kvec iov;
+ struct iovec iov;
svc_putu32(rqstp->rq_res.head, htonl(RPC_AUTH_GSS));
xdr_seq = htonl(seq);
static int
svcauth_gss_accept(struct svc_rqst *rqstp, u32 *authp)
{
- struct kvec *argv = &rqstp->rq_arg.head[0];
- struct kvec *resv = &rqstp->rq_res.head[0];
+ struct iovec *argv = &rqstp->rq_arg.head[0];
+ struct iovec *resv = &rqstp->rq_res.head[0];
u32 crlen;
struct xdr_netobj tmpobj;
struct gss_svc_data *svcdata = rqstp->rq_auth_data;
struct xdr_buf *resbuf = &rqstp->rq_res;
struct xdr_buf integ_buf;
struct xdr_netobj mic;
- struct kvec *resv;
+ struct iovec *resv;
u32 *p;
int integ_offset, integ_len;
int stat = -EINVAL;
#include <linux/in.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/auth.h>
-#include <linux/vserver/xid.h>
#define NFS_NGROUPS 16
struct unx_cred {
struct rpc_cred uc_base;
gid_t uc_gid;
- xid_t uc_xid;
uid_t uc_puid; /* process uid */
gid_t uc_pgid; /* process gid */
- xid_t uc_pxid; /* process xid */
gid_t uc_gids[NFS_NGROUPS];
};
#define uc_uid uc_base.cr_uid
if (flags & RPC_TASK_ROOTCREDS) {
cred->uc_uid = cred->uc_puid = 0;
cred->uc_gid = cred->uc_pgid = 0;
- cred->uc_xid = cred->uc_pxid = current->xid;
cred->uc_gids[0] = NOGROUP;
} else {
int groups = acred->group_info->ngroups;
cred->uc_uid = acred->uid;
cred->uc_gid = acred->gid;
- cred->uc_xid = acred->xid;
cred->uc_puid = current->uid;
cred->uc_pgid = current->gid;
- cred->uc_pxid = current->xid;
for (i = 0; i < groups; i++)
cred->uc_gids[i] = GROUP_AT(acred->group_info, i);
if (i < NFS_NGROUPS)
if (cred->uc_uid != acred->uid
|| cred->uc_gid != acred->gid
- || cred->uc_xid != acred->xid
|| cred->uc_puid != current->uid
- || cred->uc_pgid != current->gid
- || cred->uc_pxid != current->xid)
+ || cred->uc_pgid != current->gid)
return 0;
groups = acred->group_info->ngroups;
struct rpc_clnt *clnt = task->tk_client;
struct unx_cred *cred = (struct unx_cred *) task->tk_msg.rpc_cred;
u32 *base, *hold;
- int i, tagxid;
+ int i;
*p++ = htonl(RPC_AUTH_UNIX);
base = p++;
* Copy the UTS nodename captured when the client was created.
*/
p = xdr_encode_array(p, clnt->cl_nodename, clnt->cl_nodelen);
- tagxid = task->tk_client->cl_tagxid;
/* Note: we don't use real uid if it involves raising privilege */
if (ruid && cred->uc_puid != 0 && cred->uc_pgid != 0) {
- *p++ = htonl((u32) XIDINO_UID(tagxid,
- cred->uc_puid, cred->uc_pxid));
- *p++ = htonl((u32) XIDINO_GID(tagxid,
- cred->uc_pgid, cred->uc_pxid));
+ *p++ = htonl((u32) cred->uc_puid);
+ *p++ = htonl((u32) cred->uc_pgid);
} else {
- *p++ = htonl((u32) XIDINO_UID(tagxid,
- cred->uc_uid, cred->uc_xid));
- *p++ = htonl((u32) XIDINO_GID(tagxid,
- cred->uc_gid, cred->uc_xid));
+ *p++ = htonl((u32) cred->uc_uid);
+ *p++ = htonl((u32) cred->uc_gid);
}
hold = p++;
for (i = 0; i < 16 && cred->uc_gids[i] != (gid_t) NOGROUP; i++)
struct cache_detail *cd = PDE(filp->f_dentry->d_inode)->data;
int err;
+ if (ppos != &filp->f_pos)
+ return -ESPIPE;
+
if (count == 0)
return 0;
int err;
struct cache_detail *cd = PDE(filp->f_dentry->d_inode)->data;
+ if (ppos != &filp->f_pos)
+ return -ESPIPE;
+
if (count == 0)
return 0;
if (count >= sizeof(write_buf))
{
struct cache_reader *rp = NULL;
- nonseekable_open(inode, filp);
if (filp->f_mode & FMODE_READ) {
struct cache_detail *cd = PDE(inode)->data;
}
static struct file_operations cache_flush_operations = {
- .open = nonseekable_open,
.read = read_flush,
.write = write_flush,
};
struct svc_program *progp;
struct svc_version *versp = NULL; /* compiler food */
struct svc_procedure *procp = NULL;
- struct kvec * argv = &rqstp->rq_arg.head[0];
- struct kvec * resv = &rqstp->rq_res.head[0];
+ struct iovec * argv = &rqstp->rq_arg.head[0];
+ struct iovec * resv = &rqstp->rq_res.head[0];
kxdrproc_t xdr;
u32 *statp;
u32 dir, prog, vers, proc,
static int
svcauth_null_accept(struct svc_rqst *rqstp, u32 *authp)
{
- struct kvec *argv = &rqstp->rq_arg.head[0];
- struct kvec *resv = &rqstp->rq_res.head[0];
+ struct iovec *argv = &rqstp->rq_arg.head[0];
+ struct iovec *resv = &rqstp->rq_res.head[0];
int rv=0;
struct ip_map key, *ipm;
int
svcauth_unix_accept(struct svc_rqst *rqstp, u32 *authp)
{
- struct kvec *argv = &rqstp->rq_arg.head[0];
- struct kvec *resv = &rqstp->rq_res.head[0];
+ struct iovec *argv = &rqstp->rq_arg.head[0];
+ struct iovec *resv = &rqstp->rq_res.head[0];
struct svc_cred *cred = &rqstp->rq_cred;
u32 slen, i;
int len = argv->iov_len;
* Generic recvfrom routine.
*/
static int
-svc_recvfrom(struct svc_rqst *rqstp, struct kvec *iov, int nr, int buflen)
+svc_recvfrom(struct svc_rqst *rqstp, struct iovec *iov, int nr, int buflen)
{
+ mm_segment_t oldfs;
struct msghdr msg;
struct socket *sock;
int len, alen;
msg.msg_name = &rqstp->rq_addr;
msg.msg_namelen = sizeof(rqstp->rq_addr);
+ msg.msg_iov = iov;
+ msg.msg_iovlen = nr;
msg.msg_control = NULL;
msg.msg_controllen = 0;
msg.msg_flags = MSG_DONTWAIT;
- len = kernel_recvmsg(sock, &msg, iov, nr, buflen, MSG_DONTWAIT);
+ oldfs = get_fs(); set_fs(KERNEL_DS);
+ len = sock_recvmsg(sock, &msg, buflen, MSG_DONTWAIT);
+ set_fs(oldfs);
/* sock_recvmsg doesn't fill in the name/namelen, so we must..
* possibly we should cache this in the svc_sock structure
struct svc_sock *svsk = rqstp->rq_sock;
struct svc_serv *serv = svsk->sk_server;
int len;
- struct kvec vec[RPCSVC_MAXPAGES];
+ struct iovec vec[RPCSVC_MAXPAGES];
int pnum, vlen;
dprintk("svc: tcp_recv %p data %d conn %d close %d\n",
*/
if (svsk->sk_tcplen < 4) {
unsigned long want = 4 - svsk->sk_tcplen;
- struct kvec iov;
+ struct iovec iov;
iov.iov_base = ((char *) &svsk->sk_reclen) + svsk->sk_tcplen;
iov.iov_len = want;
int sent;
u32 reclen;
- /* Set up the first element of the reply kvec.
- * Any other kvecs that may be in use have been taken
+ /* Set up the first element of the reply iovec.
+ * Any other iovecs that may be in use have been taken
* care of by the server implementation itself.
*/
reclen = htonl(0x80000000|((xbufp->len ) - 4));
static int
proc_dodebug(ctl_table *table, int write, struct file *file,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
char tmpbuf[20], c, *s;
char __user *p;
unsigned int value;
size_t left, len;
- if ((*ppos && !write) || !*lenp) {
+ if ((file->f_pos && !write) || !*lenp) {
*lenp = 0;
return 0;
}
done:
*lenp -= left;
- *ppos += *lenp;
+ file->f_pos += *lenp;
return 0;
}
xdr_encode_pages(struct xdr_buf *xdr, struct page **pages, unsigned int base,
unsigned int len)
{
- struct kvec *tail = xdr->tail;
+ struct iovec *tail = xdr->tail;
u32 *p;
xdr->pages = pages;
xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
struct page **pages, unsigned int base, unsigned int len)
{
- struct kvec *head = xdr->head;
- struct kvec *tail = xdr->tail;
+ struct iovec *head = xdr->head;
+ struct iovec *tail = xdr->tail;
char *buf = (char *)head->iov_base;
unsigned int buflen = head->iov_len;
}
/*
- * Realign the kvec if the server missed out some reply elements
+ * Realign the iovec if the server missed out some reply elements
* (such as post-op attributes,...)
* Note: This is a simple implementation that assumes that
* len <= iov->iov_len !!!
* The RPC header (assumed to be the 1st element in the iov array)
* is not shifted.
*/
-void xdr_shift_iovec(struct kvec *iov, int nr, size_t len)
+void xdr_shift_iovec(struct iovec *iov, int nr, size_t len)
{
- struct kvec *pvec;
+ struct iovec *pvec;
for (pvec = iov + nr - 1; nr > 1; nr--, pvec--) {
- struct kvec *svec = pvec - 1;
+ struct iovec *svec = pvec - 1;
if (len > pvec->iov_len) {
printk(KERN_DEBUG "RPC: Urk! Large shift of short iovec.\n");
}
/*
- * Map a struct xdr_buf into an kvec array.
+ * Map a struct xdr_buf into an iovec array.
*/
-int xdr_kmap(struct kvec *iov_base, struct xdr_buf *xdr, size_t base)
+int xdr_kmap(struct iovec *iov_base, struct xdr_buf *xdr, size_t base)
{
- struct kvec *iov = iov_base;
+ struct iovec *iov = iov_base;
struct page **ppage = xdr->pages;
unsigned int len, pglen = xdr->page_len;
unsigned int len, pglen = xdr->page_len;
int err, ret = 0;
ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int);
+ mm_segment_t oldfs;
len = xdr->head[0].iov_len;
if (base < len || (addr != NULL && base == 0)) {
- struct kvec iov = {
+ struct iovec iov = {
.iov_base = xdr->head[0].iov_base + base,
.iov_len = len - base,
};
.msg_namelen = addrlen,
.msg_flags = msgflags,
};
+
+ if (iov.iov_len != 0) {
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+ }
if (xdr->len > len)
msg.msg_flags |= MSG_MORE;
-
- if (iov.iov_len != 0)
- err = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
- else
- err = kernel_sendmsg(sock, &msg, NULL, 0, 0);
+ oldfs = get_fs(); set_fs(get_ds());
+ err = sock_sendmsg(sock, &msg, iov.iov_len);
+ set_fs(oldfs);
if (ret == 0)
ret = err;
else if (err > 0)
copy_tail:
len = xdr->tail[0].iov_len;
if (base < len) {
- struct kvec iov = {
+ struct iovec iov = {
.iov_base = xdr->tail[0].iov_base + base,
.iov_len = len - base,
};
struct msghdr msg = {
+ .msg_iov = &iov,
+ .msg_iovlen = 1,
.msg_flags = msgflags,
};
- err = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
+ oldfs = get_fs(); set_fs(get_ds());
+ err = sock_sendmsg(sock, &msg, iov.iov_len);
+ set_fs(oldfs);
if (ret == 0)
ret = err;
else if (err > 0)
* @buf: xdr_buf
* @len: bytes to remove from buf->head[0]
*
- * Shrinks XDR buffer's header kvec buf->head[0] by
+ * Shrinks XDR buffer's header iovec buf->head[0] by
* 'len' bytes. The extra data is not lost, but is instead
* moved into the inlined pages and/or the tail.
*/
void
xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
{
- struct kvec *head, *tail;
+ struct iovec *head, *tail;
size_t copy, offs;
unsigned int pglen = buf->page_len;
void
xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
{
- struct kvec *tail;
+ struct iovec *tail;
size_t copy;
char *p;
unsigned int pglen = buf->page_len;
* @p: current pointer inside XDR buffer
*
* Note: at the moment the RPC client only passes the length of our
- * scratch buffer in the xdr_buf's header kvec. Previously this
+ * scratch buffer in the xdr_buf's header iovec. Previously this
* meant we needed to call xdr_adjust_iovec() after encoding the
* data. With the new scheme, the xdr_stream manages the details
- * of the buffer length, and takes care of adjusting the kvec
+ * of the buffer length, and takes care of adjusting the iovec
* length for us.
*/
void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, uint32_t *p)
{
- struct kvec *iov = buf->head;
+ struct iovec *iov = buf->head;
xdr->buf = buf;
xdr->iov = iov;
*
* Checks that we have enough buffer space to encode 'nbytes' more
* bytes of data. If so, update the total xdr_buf length, and
- * adjust the length of the current kvec.
+ * adjust the length of the current iovec.
*/
uint32_t * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
{
unsigned int len)
{
struct xdr_buf *buf = xdr->buf;
- struct kvec *iov = buf->tail;
+ struct iovec *iov = buf->tail;
buf->pages = pages;
buf->page_base = base;
buf->page_len = len;
*/
void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, uint32_t *p)
{
- struct kvec *iov = buf->head;
+ struct iovec *iov = buf->head;
unsigned int len = iov->iov_len;
if (len > buf->len)
void xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
{
struct xdr_buf *buf = xdr->buf;
- struct kvec *iov;
+ struct iovec *iov;
ssize_t shift;
unsigned int end;
int padding;
}
EXPORT_SYMBOL(xdr_read_pages);
-static struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};
+static struct iovec empty_iov = {.iov_base = NULL, .iov_len = 0};
void
-xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
+xdr_buf_from_iov(struct iovec *iov, struct xdr_buf *buf)
{
buf->head[0] = *iov;
buf->tail[0] = empty_iov;
* length of subiov to zero. Decrements len by length of subiov, sets base
* to zero (or decrements it by length of iov if subiov is empty). */
static void
-iov_subsegment(struct kvec *iov, struct kvec *subiov, int *base, int *len)
+iov_subsegment(struct iovec *iov, struct iovec *subiov, int *base, int *len)
{
if (*base > iov->iov_len) {
subiov->iov_base = NULL;
/*
* Reserve an RPC call slot.
*/
+void
+xprt_reserve(struct rpc_task *task)
+{
+ struct rpc_xprt *xprt = task->tk_xprt;
+
+ task->tk_status = -EIO;
+ if (!xprt->shutdown) {
+ spin_lock(&xprt->xprt_lock);
+ do_xprt_reserve(task);
+ spin_unlock(&xprt->xprt_lock);
+ if (task->tk_rqstp)
+ del_timer_sync(&xprt->timer);
+ }
+}
+
static inline void
do_xprt_reserve(struct rpc_task *task)
{
rpc_sleep_on(&xprt->backlog, task, NULL, NULL);
}
-void
-xprt_reserve(struct rpc_task *task)
-{
- struct rpc_xprt *xprt = task->tk_xprt;
-
- task->tk_status = -EIO;
- if (!xprt->shutdown) {
- spin_lock(&xprt->xprt_lock);
- do_xprt_reserve(task);
- spin_unlock(&xprt->xprt_lock);
- if (task->tk_rqstp)
- del_timer_sync(&xprt->timer);
- }
-}
-
/*
* Allocate a 'unique' XID
*/
+++ /dev/null
-
-config TUX
- tristate "TUX: Threaded linUX application protocol accelerator layer"
- default y if INET=y
- select ZLIB_DEFLATE
- help
- This is the TUX content-accelerator/server
-
-menu "TUX options"
- depends on TUX
-
-config TUX_EXTCGI
- bool "External CGI module"
- default y
-
-config TUX_EXTENDED_LOG
- bool "extended TUX logging format"
- default n
-
-config TUX_DEBUG
- bool "debug TUX"
- default n
-
-endmenu
-
+++ /dev/null
-#
-# Makefile for TUX
-#
-
-obj-$(CONFIG_TUX) += tux.o
-
-tux-y := accept.o input.o userspace.o cachemiss.o output.o \
- redirect.o postpone.o logger.o proto_http.o proto_ftp.o \
- proc.o main.o mod.o abuf.o times.o directory.o gzip.o
-
-tux-$(subst m,y,$(CONFIG_TUX_EXTCGI)) += cgi.o extcgi.o
-
+++ /dev/null
-/*
- * TUX - Integrated Application Protocols Layer and Object Cache
- *
- * Copyright (C) 2000, 2001, Ingo Molnar <mingo@redhat.com>
- *
- * abuf.c: async buffer-sending
- */
-
-#include <net/tux.h>
-
-/****************************************************************
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- ****************************************************************/
-
-char * get_abuf (tux_req_t *req, unsigned int max_size)
-{
- threadinfo_t *ti = req->ti;
- struct page *page;
- char *buf;
- unsigned int offset;
- unsigned int left;
-
- if (req->abuf.page || req->abuf.buf || req->abuf.size)
- TUX_BUG();
-
- if (max_size > PAGE_SIZE)
- BUG();
- offset = ti->header_offset;
- if (offset > PAGE_SIZE)
- TUX_BUG();
- left = PAGE_SIZE - offset;
- if (!max_size)
- BUG();
- page = ti->header_cache;
- if ((left < max_size) || !page) {
- while (!(page = alloc_pages(GFP_KERNEL, 0))) {
- if (net_ratelimit())
- printk(KERN_WARNING "tux: OOM in get_abuf()!\n");
- current->state = TASK_UNINTERRUPTIBLE;
- schedule_timeout(1);
- }
-
- if (ti->header_cache)
- __free_page(ti->header_cache);
- ti->header_cache = page;
- ti->header_offset = 0;
- offset = 0;
- }
- buf = page_address(page) + offset;
-
- if (!page)
- BUG();
- req->abuf.page = page;
- req->abuf.buf = buf;
- req->abuf.size = 0;
- req->abuf.offset = offset;
- req->abuf.flags = 0;
- get_page(req->abuf.page);
-
- return buf;
-}
-
-static void do_send_abuf (tux_req_t *req, int cachemiss);
-
-void send_abuf (tux_req_t *req, unsigned int size, unsigned long flags)
-{
- threadinfo_t *ti = req->ti;
-
- Dprintk("send_abuf(req: %p, sock: %p): %p(%p), size:%d, off:%d, flags:%08lx\n", req, req->sock, req->abuf.page, req->abuf.buf, size, req->abuf.offset, flags);
-
- ti->header_offset += size;
- if (ti->header_offset > PAGE_SIZE)
- TUX_BUG();
- if (req->abuf.offset + req->abuf.size > PAGE_SIZE)
- TUX_BUG();
-
- req->abuf.flags = flags | MSG_NOSIGNAL;
- req->abuf.size = size;
-
- add_tux_atom(req, do_send_abuf);
-}
-
-static void do_send_abuf (tux_req_t *req, int cachemiss)
-{
- int ret;
-
- if (req->magic != TUX_MAGIC)
- TUX_BUG();
- if (!req->sock)
- TUX_BUG();
- tcp_sk(req->sock->sk)->nonagle = 2;
-
-repeat:
- Dprintk("do_send_abuf(%p,%d): %p(%p), size:%d, off:%d, flags:%08lx\n",
- req, cachemiss,
- req->abuf.page, req->abuf.buf, req->abuf.size,
- req->abuf.offset, req->abuf.flags);
-
- if (tux_zerocopy_header)
- ret = tcp_sendpage(req->sock, req->abuf.page,
- req->abuf.offset, req->abuf.size, req->abuf.flags);
- else {
- mm_segment_t oldmm;
- oldmm = get_fs(); set_fs(KERNEL_DS);
- ret = send_sync_buf(req, req->sock, req->abuf.buf,
- req->abuf.size, req->abuf.flags);
- set_fs(oldmm);
- }
-
-
- Dprintk("do_send_abuf: ret: %d\n", ret);
- if (!ret)
- TUX_BUG();
-
- if (ret < 0) {
- if (ret != -EAGAIN) {
- TDprintk("ret: %d, req->error = TUX_ERROR_CONN_CLOSE.\n", ret);
- req->error = TUX_ERROR_CONN_CLOSE;
- req->atom_idx = 0;
- req->in_file.f_pos = 0;
- __free_page(req->abuf.page);
- memset(&req->abuf, 0, sizeof(req->abuf));
- zap_request(req, cachemiss);
- return;
- }
- add_tux_atom(req, do_send_abuf);
- if (add_output_space_event(req, req->sock)) {
- del_tux_atom(req);
- goto repeat;
- }
- return;
- }
-
- req->abuf.buf += ret;
- req->abuf.offset += ret;
- req->abuf.size -= ret;
-
- if ((int)req->abuf.size < 0)
- TUX_BUG();
- if (req->abuf.size > 0)
- goto repeat;
-
- Dprintk("DONE do_send_abuf: %p(%p), size:%d, off:%d, flags:%08lx\n",
- req->abuf.page, req->abuf.buf, req->abuf.size,
- req->abuf.offset, req->abuf.flags);
-
- __free_page(req->abuf.page);
-
- memset(&req->abuf, 0, sizeof(req->abuf));
-
- add_req_to_workqueue(req);
-}
-
-void __send_async_message (tux_req_t *req, const char *message,
- int status, unsigned int size, int push)
-{
- unsigned int flags;
- char *buf;
-
- Dprintk("TUX: sending %d reply (%d bytes)!\n", status, size);
- Dprintk("request %p, reply: %s\n", req, message);
- if (!size)
- TUX_BUG();
- buf = get_abuf(req, size);
- memcpy(buf, message, size);
-
- req->status = status;
- flags = MSG_DONTWAIT;
- if (!push)
- flags |= MSG_MORE;
- send_abuf(req, size, flags);
- add_req_to_workqueue(req);
-}
+++ /dev/null
-/*
- * TUX - Integrated Application Protocols Layer and Object Cache
- *
- * Copyright (C) 2000, 2001, Ingo Molnar <mingo@redhat.com>
- *
- * accept.c: accept new connections, allocate requests
- */
-
-#include <net/tux.h>
-
-/****************************************************************
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- ****************************************************************/
-
-unsigned int tux_ack_pingpong = 1;
-unsigned int tux_push_all = 0;
-unsigned int tux_zerocopy_parse = 1;
-
-static int __idle_event (tux_req_t *req);
-static int __output_space_event (tux_req_t *req);
-
-struct socket * start_listening(tux_socket_t *listen, int nr)
-{
- struct sockaddr_in sin;
- struct socket *sock = NULL;
- struct sock *sk;
- struct tcp_opt *tp;
- int err;
- u16 port = listen->port;
- u32 addr = listen->ip;
- tux_proto_t *proto = listen->proto;
-
- /* Create a listening socket: */
-
- err = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
- if (err) {
- printk(KERN_ERR "TUX: error %d creating socket.\n", err);
- goto error;
- }
-
- /* Bind the socket: */
-
- sin.sin_family = AF_INET;
- sin.sin_addr.s_addr = htonl(addr);
- sin.sin_port = htons(port);
-
- sk = sock->sk;
- sk->sk_reuse = 1;
- sock_set_flag(sk, SOCK_URGINLINE);
-
- err = sock->ops->bind(sock, (struct sockaddr*)&sin, sizeof(sin));
- if (err) {
- printk(KERN_ERR "TUX: error %d binding socket. This means that probably some other process is (or was a short time ago) using addr %s://%d.%d.%d.%d:%d.\n",
- err, proto->name, HIPQUAD(addr), port);
- goto error;
- }
-
- tp = tcp_sk(sk);
- Dprintk("listen sk accept_queue: %p/%p.\n",
- tp->accept_queue, tp->accept_queue_tail);
- tp->ack.pingpong = tux_ack_pingpong;
-
- sock_reset_flag(sk, SOCK_LINGER);
- sk->sk_lingertime = 0;
- tp->linger2 = tux_keepalive_timeout * HZ;
-
- if (proto->defer_accept && !tux_keepalive_timeout && tux_defer_accept)
- tp->defer_accept = 1;
-
- /* Now, start listening on the socket */
-
- err = sock->ops->listen(sock, tux_max_backlog);
- if (err) {
- printk(KERN_ERR "TUX: error %d listening on socket.\n", err);
- goto error;
- }
-
- printk(KERN_NOTICE "TUX: thread %d listens on %s://%d.%d.%d.%d:%d.\n",
- nr, proto->name, HIPQUAD(addr), port);
- return sock;
-
-error:
- if (sock)
- sock_release(sock);
- return NULL;
-}
-
-static inline void __kfree_req (tux_req_t *req, threadinfo_t * ti)
-{
- list_del(&req->all);
- DEBUG_DEL_LIST(&req->all);
- ti->nr_requests--;
- kfree(req);
-}
-
-int flush_freequeue (threadinfo_t * ti)
-{
- struct list_head *tmp;
- unsigned long flags;
- tux_req_t *req;
- int count = 0;
-
- spin_lock_irqsave(&ti->free_requests_lock,flags);
- while (ti->nr_free_requests) {
- ti->nr_free_requests--;
- tmp = ti->free_requests.next;
- req = list_entry(tmp, tux_req_t, free);
- list_del(tmp);
- DEBUG_DEL_LIST(tmp);
- DEC_STAT(nr_free_pending);
- __kfree_req(req, ti);
- count++;
- }
- spin_unlock_irqrestore(&ti->free_requests_lock,flags);
-
- return count;
-}
-
-static tux_req_t * kmalloc_req (threadinfo_t * ti)
-{
- struct list_head *tmp;
- unsigned long flags;
- tux_req_t *req;
-
- spin_lock_irqsave(&ti->free_requests_lock, flags);
- if (ti->nr_free_requests) {
- ti->nr_free_requests--;
- tmp = ti->free_requests.next;
- req = list_entry(tmp, tux_req_t, free);
- list_del(tmp);
- DEBUG_DEL_LIST(tmp);
- DEC_STAT(nr_free_pending);
- req->magic = TUX_MAGIC;
- spin_unlock_irqrestore(&ti->free_requests_lock, flags);
- } else {
- spin_unlock_irqrestore(&ti->free_requests_lock, flags);
- req = tux_kmalloc(sizeof(*req));
- ti->nr_requests++;
- memset (req, 0, sizeof(*req));
- list_add(&req->all, &ti->all_requests);
- }
- req->magic = TUX_MAGIC;
- INC_STAT(nr_allocated);
- init_waitqueue_entry(&req->sleep, current);
- init_waitqueue_entry(&req->ftp_sleep, current);
- INIT_LIST_HEAD(&req->work);
- INIT_LIST_HEAD(&req->free);
- INIT_LIST_HEAD(&req->lru);
- req->ti = ti;
- req->total_bytes = 0;
- SET_TIMESTAMP(req->accept_timestamp);
- req->first_timestamp = jiffies;
- req->fd = -1;
- init_timer(&req->keepalive_timer);
- init_timer(&req->output_timer);
-
- Dprintk("allocated NEW req %p.\n", req);
- return req;
-}
-
-void kfree_req (tux_req_t *req)
-{
- threadinfo_t * ti = req->ti;
- unsigned long flags;
-
- Dprintk("freeing req %p.\n", req);
-
- if (req->magic != TUX_MAGIC)
- TUX_BUG();
- spin_lock_irqsave(&ti->free_requests_lock,flags);
- req->magic = 0;
- DEC_STAT(nr_allocated);
- if (req->sock || req->dentry || req->private)
- TUX_BUG();
- if (ti->nr_free_requests > tux_max_free_requests)
- __kfree_req(req, ti);
- else {
- req->error = 0;
- ti->nr_free_requests++;
-
- // the free requests queue is LIFO
- list_add(&req->free, &ti->free_requests);
- INC_STAT(nr_free_pending);
- }
- spin_unlock_irqrestore(&ti->free_requests_lock,flags);
-}
-
-static void __add_req_to_workqueue (tux_req_t *req)
-{
- threadinfo_t *ti = req->ti;
-
- if (!list_empty(&req->work))
- TUX_BUG();
- Dprintk("work-queueing request %p at %p/%p.\n", req, __builtin_return_address(0), __builtin_return_address(1));
- if (connection_too_fast(req))
- list_add_tail(&req->work, &ti->work_pending);
- else
- list_add(&req->work, &ti->work_pending);
- INC_STAT(nr_work_pending);
- wake_up_process(ti->thread);
- return;
-}
-
-void add_req_to_workqueue (tux_req_t *req)
-{
- unsigned long flags;
- threadinfo_t *ti = req->ti;
-
- spin_lock_irqsave(&ti->work_lock, flags);
- __add_req_to_workqueue(req);
- spin_unlock_irqrestore(&ti->work_lock, flags);
-}
-
-void del_output_timer (tux_req_t *req)
-{
-#if CONFIG_SMP
- if (!spin_is_locked(&req->ti->work_lock))
- TUX_BUG();
-#endif
- if (!list_empty(&req->lru)) {
- list_del(&req->lru);
- DEBUG_DEL_LIST(&req->lru);
- req->ti->nr_lru--;
- }
- Dprintk("del output timeout for req %p.\n", req);
- del_timer(&req->output_timer);
-}
-
-static void output_timeout_fn (unsigned long data);
-
-#define OUTPUT_TIMEOUT HZ
-
-static void add_output_timer (tux_req_t *req)
-{
- struct timer_list *timer = &req->output_timer;
-
- timer->data = (unsigned long) req;
- timer->function = &output_timeout_fn;
- mod_timer(timer, jiffies + OUTPUT_TIMEOUT);
-}
-
-static void output_timeout_fn (unsigned long data)
-{
- tux_req_t *req = (tux_req_t *)data;
-
- if (connection_too_fast(req)) {
- add_output_timer(req);
-// mod_timer(&req->output_timer, jiffies + OUTPUT_TIMEOUT);
- return;
- }
- output_space_event(req);
-}
-
-void output_timeout (tux_req_t *req)
-{
- Dprintk("output timeout for req %p.\n", req);
- if (test_and_set_bit(0, &req->wait_output_space))
- TUX_BUG();
- INC_STAT(nr_output_space_pending);
- add_output_timer(req);
-}
-
-void __del_keepalive_timer (tux_req_t *req)
-{
-#if CONFIG_SMP
- if (!spin_is_locked(&req->ti->work_lock))
- TUX_BUG();
-#endif
- if (!list_empty(&req->lru)) {
- list_del(&req->lru);
- DEBUG_DEL_LIST(&req->lru);
- req->ti->nr_lru--;
- }
- Dprintk("del keepalive timeout for req %p.\n", req);
- del_timer(&req->keepalive_timer);
-}
-
-static void keepalive_timeout_fn (unsigned long data)
-{
- tux_req_t *req = (tux_req_t *)data;
-
-#if CONFIG_TUX_DEBUG
- Dprintk("req %p timed out after %d sec!\n", req, tux_keepalive_timeout);
- if (tux_Dprintk)
- print_req(req);
-#endif
- Dprintk("req->error = TUX_ERROR_CONN_TIMEOUT!\n");
- req->error = TUX_ERROR_CONN_TIMEOUT;
- if (!idle_event(req))
- output_space_event(req);
-}
-
-void __add_keepalive_timer (tux_req_t *req)
-{
- struct timer_list *timer = &req->keepalive_timer;
-
- if (!tux_keepalive_timeout)
- TUX_BUG();
-#if CONFIG_SMP
- if (!spin_is_locked(&req->ti->work_lock))
- TUX_BUG();
-#endif
-
- if (!list_empty(&req->lru))
- TUX_BUG();
- if (req->ti->nr_lru > tux_max_keepalives) {
- struct list_head *head, *last;
- tux_req_t *last_req;
-
- head = &req->ti->lru;
- last = head->prev;
- if (last == head)
- TUX_BUG();
- last_req = list_entry(last, tux_req_t, lru);
- list_del(last);
- DEBUG_DEL_LIST(last);
- req->ti->nr_lru--;
-
- Dprintk("LRU-aging req %p!\n", last_req);
- last_req->error = TUX_ERROR_CONN_TIMEOUT;
- if (!__idle_event(last_req))
- __output_space_event(last_req);
- }
- list_add(&req->lru, &req->ti->lru);
- req->ti->nr_lru++;
-
- timer->expires = jiffies + tux_keepalive_timeout * HZ;
- timer->data = (unsigned long) req;
- timer->function = &keepalive_timeout_fn;
- add_timer(timer);
-}
-
-static int __output_space_event (tux_req_t *req)
-{
- if (!req || (req->magic != TUX_MAGIC))
- TUX_BUG();
-
- if (!test_and_clear_bit(0, &req->wait_output_space)) {
- Dprintk("output space ready event at <%p>, on non-idle %p.\n", __builtin_return_address(0), req);
- return 0;
- }
-
- Dprintk("output space ready event at <%p>, %p was waiting!\n", __builtin_return_address(0), req);
- DEC_STAT(nr_output_space_pending);
-
- del_keepalive_timer(req);
- del_output_timer(req);
-
- __add_req_to_workqueue(req);
- return 1;
-}
-
-int output_space_event (tux_req_t *req)
-{
- int ret;
- unsigned long flags;
-
- spin_lock_irqsave(&req->ti->work_lock, flags);
- ret = __output_space_event(req);
- spin_unlock_irqrestore(&req->ti->work_lock, flags);
-
- return ret;
-}
-
-static int __idle_event (tux_req_t *req)
-{
- struct tcp_opt *tp;
- threadinfo_t *ti;
-
- if (!req || (req->magic != TUX_MAGIC))
- TUX_BUG();
- ti = req->ti;
-
- if (!test_and_clear_bit(0, &req->idle_input)) {
- Dprintk("data ready event at <%p>, on non-idle %p.\n", __builtin_return_address(0), req);
- return 0;
- }
-
- Dprintk("data ready event at <%p>, %p was idle!\n", __builtin_return_address(0), req);
- del_keepalive_timer(req);
- del_output_timer(req);
- DEC_STAT(nr_idle_input_pending);
-
- tp = tcp_sk(req->sock->sk);
-
- tp->ack.pingpong = tux_ack_pingpong;
- SET_TIMESTAMP(req->accept_timestamp);
-
- __add_req_to_workqueue(req);
-
- return 1;
-}
-
-int idle_event (tux_req_t *req)
-{
- int ret;
- unsigned long flags;
-
- spin_lock_irqsave(&req->ti->work_lock, flags);
- ret = __idle_event(req);
- spin_unlock_irqrestore(&req->ti->work_lock, flags);
-
- return ret;
-}
-
-#define HANDLE_CALLBACK_1(callback, tux_name, real_name, param...) \
- tux_req_t *req; \
- \
- read_lock(&sk->sk_callback_lock); \
- req = sk->sk_user_data; \
- \
- Dprintk("callback "#callback"(%p) req %p.\n", \
- sk->sk_##callback, req); \
- \
- if (!req) { \
- if (sk->sk_##callback == tux_name) { \
- printk("BUG: "#callback" "#tux_name" "#real_name" no req!"); \
- TUX_BUG(); \
- } \
- read_unlock(&sk->sk_callback_lock); \
- if (sk->sk_##callback) \
- sk->sk_##callback(param); \
- return; \
- } \
-
-#define HANDLE_CALLBACK_2(callback, tux_name, real_name, param...) \
- Dprintk(#tux_name"() on %p.\n", req); \
- if (req->magic != TUX_MAGIC) \
- TUX_BUG(); \
- if (req->real_name) \
- req->real_name(param);
-
-#define HANDLE_CALLBACK(callback, tux_name, real_name, param...) \
- HANDLE_CALLBACK_1(callback,tux_name,real_name,param) \
- HANDLE_CALLBACK_2(callback,tux_name,real_name,param)
-
-static void tux_data_ready (struct sock *sk, int len)
-{
- HANDLE_CALLBACK_1(data_ready, tux_data_ready, real_data_ready, sk, len);
-
- if (!idle_event(req))
- output_space_event(req);
- read_unlock(&sk->sk_callback_lock);
-}
-
-static void tux_write_space (struct sock *sk)
-{
- HANDLE_CALLBACK(write_space, tux_write_space, real_write_space, sk);
-
- Dprintk("sk->sk_wmem_queued: %d, sk->sk_sndbuf: %d.\n",
- sk->sk_wmem_queued, sk->sk_sndbuf);
-
- if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
- clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
- if (!idle_event(req))
- output_space_event(req);
- }
- read_unlock(&sk->sk_callback_lock);
-}
-
-static void tux_error_report (struct sock *sk)
-{
- HANDLE_CALLBACK(error_report, tux_error_report, real_error_report, sk);
-
- req->error = TUX_ERROR_CONN_CLOSE;
- if (!idle_event(req))
- output_space_event(req);
- read_unlock(&sk->sk_callback_lock);
-}
-
-static void tux_state_change (struct sock *sk)
-{
- HANDLE_CALLBACK(state_change, tux_state_change, real_state_change, sk);
-
- if (req->sock && req->sock->sk &&
- (req->sock->sk->sk_state > TCP_ESTABLISHED)) {
- Dprintk("req %p changed to TCP non-established!\n", req);
- Dprintk("req->sock: %p\n", req->sock);
- if (req->sock)
- Dprintk("req->sock->sk: %p\n", req->sock->sk);
- if (req->sock && req->sock->sk)
- Dprintk("TCP state: %d\n", req->sock->sk->sk_state);
- Dprintk("req->error = TUX_ERROR_CONN_CLOSE!\n");
- req->error = TUX_ERROR_CONN_CLOSE;
- }
- if (!idle_event(req))
- output_space_event(req);
- read_unlock(&sk->sk_callback_lock);
-}
-
-static void tux_destruct (struct sock *sk)
-{
- BUG();
-}
-
-static void tux_ftp_data_ready (struct sock *sk, int len)
-{
- HANDLE_CALLBACK_1(data_ready, tux_ftp_data_ready,
- ftp_real_data_ready, sk, len);
- if (!idle_event(req))
- output_space_event(req);
- read_unlock(&sk->sk_callback_lock);
-}
-
-static void tux_ftp_write_space (struct sock *sk)
-{
- HANDLE_CALLBACK_1(write_space, tux_ftp_write_space,
- ftp_real_write_space, sk);
-
- Dprintk("sk->sk_wmem_queued: %d, sk->sk_sndbuf: %d.\n",
- sk->sk_wmem_queued, sk->sk_sndbuf);
-
- if (sk_stream_wspace(sk) >= sk->sk_sndbuf/10*8) {
- clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
- if (!idle_event(req))
- output_space_event(req);
- }
- read_unlock(&sk->sk_callback_lock);
-}
-
-static void tux_ftp_error_report (struct sock *sk)
-{
- HANDLE_CALLBACK(error_report, tux_ftp_error_report,
- ftp_real_error_report, sk);
-
- TDprintk("req %p sock %p got TCP errors on FTP data connection!\n", req, sk);
- TDprintk("req->error = TUX_ERROR_CONN_CLOSE!\n");
- req->error = TUX_ERROR_CONN_CLOSE;
- if (!idle_event(req))
- output_space_event(req);
- read_unlock(&sk->sk_callback_lock);
-}
-
-static void tux_ftp_state_change (struct sock *sk)
-{
- HANDLE_CALLBACK(state_change, tux_ftp_state_change,
- ftp_real_state_change, sk);
-
- if (req->sock && req->sock->sk &&
- (req->sock->sk->sk_state > TCP_ESTABLISHED)) {
- Dprintk("req %p FTP control sock changed to TCP non-established!\n", req);
- Dprintk("req->sock: %p\n", req->sock);
- TDprintk("req->error = TUX_ERROR_CONN_CLOSE!\n");
-
- req->error = TUX_ERROR_CONN_CLOSE;
- }
- if (!idle_event(req))
- output_space_event(req);
- read_unlock(&sk->sk_callback_lock);
-}
-
-static void tux_ftp_create_child (struct sock *sk, struct sock *newsk)
-{
- HANDLE_CALLBACK(create_child, tux_ftp_create_child,
- ftp_real_create_child, sk, newsk);
-
- newsk->sk_user_data = NULL;
- newsk->sk_data_ready = req->ftp_real_data_ready;
- newsk->sk_state_change = req->ftp_real_state_change;
- newsk->sk_write_space = req->ftp_real_write_space;
- newsk->sk_error_report = req->ftp_real_error_report;
- newsk->sk_create_child = req->ftp_real_create_child;
- newsk->sk_destruct = req->ftp_real_destruct;
-
- if (!idle_event(req))
- output_space_event(req);
- read_unlock(&sk->sk_callback_lock);
-}
-
-static void tux_ftp_destruct (struct sock *sk)
-{
- BUG();
-}
-
-static void link_tux_socket (tux_req_t *req, struct socket *sock)
-{
- struct sock *sk = sock->sk;
-
- if (req->sock)
- TUX_BUG();
- if (sk->sk_destruct == tux_destruct)
- TUX_BUG();
- /*
- * (No need to lock the socket, we just want to
- * make sure that events from now on go through
- * tux_data_ready())
- */
- write_lock_irq(&sk->sk_callback_lock);
-
- req->sock = sock;
- sk->sk_user_data = req;
-
- req->real_data_ready = sk->sk_data_ready;
- req->real_state_change = sk->sk_state_change;
- req->real_write_space = sk->sk_write_space;
- req->real_error_report = sk->sk_error_report;
- req->real_destruct = sk->sk_destruct;
-
- sk->sk_data_ready = tux_data_ready;
- sk->sk_state_change = tux_state_change;
- sk->sk_write_space = tux_write_space;
- sk->sk_error_report = tux_error_report;
- sk->sk_destruct = tux_destruct;
-
- write_unlock_irq(&sk->sk_callback_lock);
-
- if (req->real_destruct == tux_destruct)
- TUX_BUG();
- req->client_addr = inet_sk(sk)->daddr;
- req->client_port = inet_sk(sk)->dport;
-
- add_wait_queue(sk->sk_sleep, &req->sleep);
-}
-
-void __link_data_socket (tux_req_t *req, struct socket *sock,
- struct sock *sk)
-{
- /*
- * (No need to lock the socket, we just want to
- * make sure that events from now on go through
- * tux_data_ready())
- */
- write_lock_irq(&sk->sk_callback_lock);
-
- req->data_sock = sock;
- sk->sk_user_data = req;
-
- req->ftp_real_data_ready = sk->sk_data_ready;
- req->ftp_real_state_change = sk->sk_state_change;
- req->ftp_real_write_space = sk->sk_write_space;
- req->ftp_real_error_report = sk->sk_error_report;
- req->ftp_real_create_child = sk->sk_create_child;
- req->ftp_real_destruct = sk->sk_destruct;
-
- sk->sk_data_ready = tux_ftp_data_ready;
- sk->sk_state_change = tux_ftp_state_change;
- sk->sk_write_space = tux_ftp_write_space;
- sk->sk_error_report = tux_ftp_error_report;
- sk->sk_create_child = tux_ftp_create_child;
- sk->sk_destruct = tux_ftp_destruct;
-
- if (req->ftp_real_destruct == tux_ftp_destruct)
- TUX_BUG();
-
- write_unlock_irq(&sk->sk_callback_lock);
-
- add_wait_queue(sk->sk_sleep, &req->ftp_sleep);
-}
-
-void link_tux_data_socket (tux_req_t *req, struct socket *sock)
-{
- struct sock *sk = sock->sk;
-
- if (req->data_sock)
- TUX_BUG();
- if (sk->sk_destruct == tux_ftp_destruct)
- TUX_BUG();
- __link_data_socket(req, sock, sk);
-}
-
-void unlink_tux_socket (tux_req_t *req)
-{
- struct sock *sk;
-
- if (!req->sock || !req->sock->sk)
- return;
- sk = req->sock->sk;
-
- write_lock_irq(&sk->sk_callback_lock);
- if (!sk->sk_user_data)
- TUX_BUG();
- if (req->real_destruct == tux_destruct)
- TUX_BUG();
-
- sk->sk_user_data = NULL;
-
- sk->sk_data_ready = req->real_data_ready;
- sk->sk_state_change = req->real_state_change;
- sk->sk_write_space = req->real_write_space;
- sk->sk_error_report = req->real_error_report;
- sk->sk_destruct = req->real_destruct;
-
- if (sk->sk_destruct == tux_destruct)
- TUX_BUG();
-
- req->real_data_ready = NULL;
- req->real_state_change = NULL;
- req->real_write_space = NULL;
- req->real_error_report = NULL;
- req->real_destruct = NULL;
-
- write_unlock_irq(&sk->sk_callback_lock);
-
- remove_wait_queue(sk->sk_sleep, &req->sleep);
-}
-
-void unlink_tux_data_socket (tux_req_t *req)
-{
- struct sock *sk;
-
- if (!req->data_sock || !req->data_sock->sk)
- return;
- sk = req->data_sock->sk;
-
- write_lock_irq(&sk->sk_callback_lock);
-
- if (req->real_destruct == tux_ftp_destruct)
- TUX_BUG();
-
- sk->sk_user_data = NULL;
- sk->sk_data_ready = req->ftp_real_data_ready;
- sk->sk_state_change = req->ftp_real_state_change;
- sk->sk_write_space = req->ftp_real_write_space;
- sk->sk_error_report = req->ftp_real_error_report;
- sk->sk_create_child = req->ftp_real_create_child;
- sk->sk_destruct = req->ftp_real_destruct;
-
- req->ftp_real_data_ready = NULL;
- req->ftp_real_state_change = NULL;
- req->ftp_real_write_space = NULL;
- req->ftp_real_error_report = NULL;
- req->ftp_real_create_child = NULL;
- req->ftp_real_destruct = NULL;
-
- write_unlock_irq(&sk->sk_callback_lock);
-
- if (sk->sk_destruct == tux_ftp_destruct)
- TUX_BUG();
-
- remove_wait_queue(sk->sk_sleep, &req->ftp_sleep);
-}
-
-void add_tux_atom (tux_req_t *req, atom_func_t *atom)
-{
- Dprintk("adding TUX atom %p to req %p, atom_idx: %d, at %p/%p.\n",
- atom, req, req->atom_idx, __builtin_return_address(0), __builtin_return_address(1));
- if (req->atom_idx == MAX_TUX_ATOMS)
- TUX_BUG();
- req->atoms[req->atom_idx] = atom;
- req->atom_idx++;
-}
-
-void del_tux_atom (tux_req_t *req)
-{
- if (!req->atom_idx)
- TUX_BUG();
- req->atom_idx--;
- Dprintk("removing TUX atom %p to req %p, atom_idx: %d, at %p.\n",
- req->atoms[req->atom_idx], req, req->atom_idx, __builtin_return_address(0));
-}
-
-void tux_schedule_atom (tux_req_t *req, int cachemiss)
-{
- if (!list_empty(&req->work))
- TUX_BUG();
- if (!req->atom_idx)
- TUX_BUG();
- req->atom_idx--;
- Dprintk("DOING TUX atom %p, req %p, atom_idx: %d, at %p.\n",
- req->atoms[req->atom_idx], req, req->atom_idx, __builtin_return_address(0));
- might_sleep();
- req->atoms[req->atom_idx](req, cachemiss);
- might_sleep();
- Dprintk("DONE TUX atom %p, req %p, atom_idx: %d, at %p.\n",
- req->atoms[req->atom_idx], req, req->atom_idx, __builtin_return_address(0));
-}
-
-/*
- * Puts newly accepted connections into the inputqueue. This is the
- * first step in the life of a TUX request.
- */
-int accept_requests (threadinfo_t *ti)
-{
- int count = 0, last_count = 0, error, socknr = 0;
- struct socket *sock, *new_sock;
- struct tcp_opt *tp1, *tp2;
- tux_req_t *req;
-
- if (ti->nr_requests > tux_max_connect)
- goto out;
-
-repeat:
- for (socknr = 0; socknr < CONFIG_TUX_NUMSOCKETS; socknr++) {
- tux_listen_t *tux_listen;
-
- tux_listen = ti->listen + socknr;
- sock = tux_listen->sock;
- if (!sock)
- break;
- if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
- break;
-
- tp1 = tcp_sk(sock->sk);
- /*
- * Quick test to see if there are connections on the queue.
- * This is cheaper than accept() itself because this saves us
- * the allocation of a new socket. (Which doesn't seem to be
- * used anyway)
- */
- if (tp1->accept_queue) {
- tux_proto_t *proto;
-
- if (!count++)
- __set_task_state(current, TASK_RUNNING);
-
- new_sock = sock_alloc();
- if (!new_sock)
- goto out;
-
- new_sock->type = sock->type;
- new_sock->ops = sock->ops;
-
- error = sock->ops->accept(sock, new_sock, O_NONBLOCK);
- if (error < 0)
- goto err;
- if (new_sock->sk->sk_state != TCP_ESTABLISHED)
- goto err;
-
- tp2 = tcp_sk(new_sock->sk);
- tp2->nonagle = 2;
- tp2->ack.pingpong = tux_ack_pingpong;
- new_sock->sk->sk_reuse = 1;
- sock_set_flag(new_sock->sk, SOCK_URGINLINE);
-
- /* Allocate a request-entry for the connection */
- req = kmalloc_req(ti);
- if (!req)
- BUG();
- link_tux_socket(req, new_sock);
-
- proto = req->proto = tux_listen->proto;
-
- proto->got_request(req);
- }
- }
- if (count != last_count) {
- last_count = count;
- goto repeat;
- }
-out:
- return count;
-err:
- sock_release(new_sock);
- goto out;
-}
-
+++ /dev/null
-/*
- * TUX - Integrated Application Protocols Layer and Object Cache
- *
- * Copyright (C) 2000, 2001, Ingo Molnar <mingo@redhat.com>
- *
- * cachemiss.c: handle the 'slow IO path' by queueing not-yet-cached
- * requests to the IO-thread pool. Dynamic load balancing is done
- * between IO threads, based on the number of requests they have pending.
- */
-
-#include <net/tux.h>
-#include <linux/delay.h>
-
-/****************************************************************
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- ****************************************************************/
-
-void queue_cachemiss (tux_req_t *req)
-{
- iothread_t *iot = req->ti->iot;
-
- Dprintk("queueing_cachemiss(req:%p) (req->cwd_dentry: %p) at %p:%p.\n",
- req, req->cwd_dentry, __builtin_return_address(0), __builtin_return_address(1));
- if (req->idle_input || req->wait_output_space)
- TUX_BUG();
- req->had_cachemiss = 1;
- if (!list_empty(&req->work))
- TUX_BUG();
- spin_lock(&iot->async_lock);
- if (connection_too_fast(req))
- list_add_tail(&req->work, &iot->async_queue);
- else
- list_add(&req->work, &iot->async_queue);
- iot->nr_async_pending++;
- INC_STAT(nr_cachemiss_pending);
- spin_unlock(&iot->async_lock);
-
- wake_up(&iot->async_sleep);
-}
-
-static tux_req_t * get_cachemiss (iothread_t *iot)
-{
- struct list_head *tmp;
- tux_req_t *req = NULL;
-
- spin_lock(&iot->async_lock);
- if (!list_empty(&iot->async_queue)) {
-
- tmp = iot->async_queue.next;
- req = list_entry(tmp, tux_req_t, work);
-
- Dprintk("get_cachemiss(%p): got req %p.\n", iot, req);
- list_del(tmp);
- DEBUG_DEL_LIST(tmp);
- iot->nr_async_pending--;
- DEC_STAT(nr_cachemiss_pending);
-
- if (req->ti->iot != iot)
- TUX_BUG();
- }
- spin_unlock(&iot->async_lock);
- return req;
-}
-
-struct file * tux_open_file (char *filename, int mode)
-{
- struct file *filp;
-
- if (!filename)
- TUX_BUG();
-
- /* Rule no. 3 -- Does the file exist ? */
-
- filp = filp_open(filename, mode, 0600);
-
- if (IS_ERR(filp) || !filp || !filp->f_dentry)
- goto err;
-
-out:
- return filp;
-err:
- Dprintk("filp_open() error: %d.\n", (int)filp);
- filp = NULL;
- goto out;
-}
-
-static int cachemiss_thread (void *data)
-{
- tux_req_t *req;
- struct k_sigaction *ka;
- DECLARE_WAITQUEUE(wait, current);
- iothread_t *iot = data;
- int nr = iot->ti->cpu, wake_up;
-
- Dprintk("iot %p/%p got started.\n", iot, current);
- drop_permissions();
-
- spin_lock(&iot->async_lock);
- iot->threads++;
- sprintf(current->comm, "async IO %d/%d", nr, iot->threads);
-
-
- spin_lock_irq(¤t->sighand->siglock);
- ka = current->sighand->action + SIGCHLD-1;
- ka->sa.sa_handler = SIG_IGN;
- siginitsetinv(¤t->blocked, sigmask(SIGCHLD));
- recalc_sigpending();
- spin_unlock_irq(¤t->sighand->siglock);
-
- spin_unlock(&iot->async_lock);
-#if CONFIG_SMP
- {
- cpumask_t mask;
-
- if (cpu_isset(nr, cpu_online_map)) {
- cpus_clear(mask);
- cpu_set(nr, mask);
- set_cpus_allowed(current, mask);
- }
-
- }
-#endif
-
- add_wait_queue_exclusive(&iot->async_sleep, &wait);
-
- for (;;) {
- while (!list_empty(&iot->async_queue) &&
- (req = get_cachemiss(iot))) {
-
- if (!req->atom_idx) {
- add_tux_atom(req, flush_request);
- add_req_to_workqueue(req);
- continue;
- }
- tux_schedule_atom(req, 1);
- if (signal_pending(current))
- flush_all_signals();
- }
- if (signal_pending(current))
- flush_all_signals();
- if (!list_empty(&iot->async_queue))
- continue;
- if (iot->shutdown) {
- Dprintk("iot %p/%p got shutdown!\n", iot, current);
- break;
- }
- __set_current_state(TASK_INTERRUPTIBLE);
- if (list_empty(&iot->async_queue)) {
- Dprintk("iot %p/%p going to sleep.\n", iot, current);
- schedule();
- Dprintk("iot %p/%p got woken up.\n", iot, current);
- }
- __set_current_state(TASK_RUNNING);
- }
-
- remove_wait_queue(&iot->async_sleep, &wait);
-
- wake_up = 0;
- spin_lock(&iot->async_lock);
- if (!--iot->threads)
- wake_up = 1;
- spin_unlock(&iot->async_lock);
- Dprintk("iot %p/%p has finished shutdown!\n", iot, current);
- if (wake_up) {
- Dprintk("iot %p/%p waking up master.\n", iot, current);
- wake_up(&iot->wait_shutdown);
- }
-
- return 0;
-}
-
-static void __stop_cachemiss_threads (iothread_t *iot)
-{
- DECLARE_WAITQUEUE(wait, current);
-
- Dprintk("stopping async IO threads %p.\n", iot);
- add_wait_queue(&iot->wait_shutdown, &wait);
-
- spin_lock(&iot->async_lock);
- if (iot->shutdown)
- TUX_BUG();
- if (!iot->threads)
- TUX_BUG();
- iot->shutdown = 1;
- wake_up_all(&iot->async_sleep);
- spin_unlock(&iot->async_lock);
-
- __set_current_state(TASK_UNINTERRUPTIBLE);
- Dprintk("waiting for async IO threads %p to exit.\n", iot);
- schedule();
- remove_wait_queue(&iot->wait_shutdown, &wait);
-
- if (iot->threads)
- TUX_BUG();
- if (iot->nr_async_pending)
- TUX_BUG();
- Dprintk("stopped async IO threads %p.\n", iot);
-}
-
-void stop_cachemiss_threads (threadinfo_t *ti)
-{
- iothread_t *iot = ti->iot;
-
- if (!iot)
- TUX_BUG();
- if (iot->nr_async_pending)
- TUX_BUG();
- __stop_cachemiss_threads(iot);
- ti->iot = NULL;
- kfree(iot);
-}
-
-int start_cachemiss_threads (threadinfo_t *ti)
-{
- int i, pid;
-
- iothread_t *iot;
-
- iot = kmalloc(sizeof(*iot), GFP_KERNEL);
- if (!iot)
- return -ENOMEM;
- memset(iot, 0, sizeof(*iot));
-
- iot->ti = ti;
- iot->async_lock = SPIN_LOCK_UNLOCKED;
- iot->nr_async_pending = 0;
- INIT_LIST_HEAD(&iot->async_queue);
- init_waitqueue_head(&iot->async_sleep);
- init_waitqueue_head(&iot->wait_shutdown);
-
- for (i = 0; i < NR_IO_THREADS; i++) {
- pid = kernel_thread(cachemiss_thread, (void *)iot, 0);
- if (pid < 0) {
- printk(KERN_ERR "TUX: error %d creating IO thread!\n",
- pid);
- __stop_cachemiss_threads(iot);
- kfree(iot);
- return pid;
- }
- }
- ti->iot = iot;
- /*
- * Wait for all cachemiss threads to start up:
- */
- while (iot->threads != NR_IO_THREADS) {
- __set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(HZ/10);
- }
- return 0;
-}
-
+++ /dev/null
-/*
- * TUX - Integrated Application Protocols Layer and Object Cache
- *
- * Copyright (C) 2000, 2001, Ingo Molnar <mingo@redhat.com>
- *
- * cgi.c: user-space CGI (and other) code execution.
- */
-
-#define __KERNEL_SYSCALLS__
-#define __KERNEL_SYSCALLS_NO_ERRNO__
-
-#include <net/tux.h>
-
-/****************************************************************
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- ****************************************************************/
-
-static int exec_usermode(char *program_path, char *argv[], char *envp[])
-{
- int i, err;
-
- err = tux_chroot(tux_cgiroot);
- if (err) {
- printk(KERN_ERR "TUX: CGI chroot returned %d, /proc/sys/net/tux/cgiroot is probably set up incorrectly! Aborting CGI execution.\n", err);
- return err;
- }
-
- /* Allow execve args to be in kernel space. */
- set_fs(KERNEL_DS);
-
- flush_signals(current);
- spin_lock_irq(¤t->sighand->siglock);
- flush_signal_handlers(current, 1);
- spin_unlock_irq(¤t->sighand->siglock);
-
- for (i = 3; i < current->files->max_fds; i++ )
- if (current->files->fd[i])
- tux_close(i);
-
- err = execve(program_path, argv, envp);
- if (err < 0)
- return err;
- return 0;
-}
-
-static inline long tux_dup(unsigned int fildes)
-{
- int ret = -EBADF;
- struct file * file = fget(fildes);
-
- if (file)
- ret = dupfd(file, 0);
- return ret;
-}
-
-static int exec_helper (void * data)
-{
- exec_param_t *param = data;
- char **tmp;
- int ret;
-
- sprintf(current->comm,"doexec - %d", current->pid);
-#if CONFIG_SMP
- if (!tux_cgi_inherit_cpu) {
-
- cpumask_t cgi_mask, map;
-
- mask_to_cpumask(tux_cgi_cpu_mask, &cgi_mask);
- cpus_and(map, cpu_online_map, cgi_mask);
-
- if (!(cpus_empty(map)))
- set_cpus_allowed(current, cgi_mask);
- else
- set_cpus_allowed(current, cpu_online_map);
- }
-#endif
-
- if (!param)
- TUX_BUG();
- Dprintk("doing exec(%s).\n", param->command);
-
- Dprintk("argv: ");
- tmp = param->argv;
- while (*tmp) {
- Dprintk("{%s} ", *tmp);
- tmp++;
- }
- Dprintk("\n");
- Dprintk("envp: ");
- tmp = param->envp;
- while (*tmp) {
- Dprintk("{%s} ", *tmp);
- tmp++;
- }
- Dprintk("\n");
- /*
- * Set up stdin, stdout and stderr of the external
- * CGI application.
- */
- if (param->pipe_fds) {
- tux_close(1);
- tux_close(2);
- tux_close(4);
- if (tux_dup(3) != 1)
- TUX_BUG();
- if (tux_dup(5) != 2)
- TUX_BUG();
- tux_close(3);
- tux_close(5);
- // do not close on exec.
-#if 0
- sys_fcntl(0, F_SETFD, 0);
- sys_fcntl(1, F_SETFD, 0);
- sys_fcntl(2, F_SETFD, 0);
-#else
- spin_lock(¤t->files->file_lock);
- FD_CLR(0, current->files->close_on_exec);
- FD_CLR(1, current->files->close_on_exec);
- FD_CLR(2, current->files->close_on_exec);
- spin_unlock(¤t->files->file_lock);
-#endif
- }
- ret = exec_usermode(param->command, param->argv, param->envp);
- if (ret < 0)
- Dprintk("bug: exec() returned %d.\n", ret);
- else
- Dprintk("exec()-ed successfully!\n");
- return 0;
-}
-
-pid_t tux_exec_process (char *command, char **argv,
- char **envp, int pipe_fds,
- exec_param_t *param, int wait)
-{
- exec_param_t param_local;
- pid_t pid;
- struct k_sigaction *ka;
-
- ka = current->sighand->action + SIGCHLD-1;
- ka->sa.sa_handler = SIG_IGN;
-
- if (!param && wait)
- param = ¶m_local;
-
- param->command = command;
- param->argv = argv;
- param->envp = envp;
- param->pipe_fds = pipe_fds;
-
-repeat_fork:
- pid = kernel_thread(exec_helper, (void*) param, CLONE_SIGHAND|SIGCHLD);
- Dprintk("kernel thread created PID %d.\n", pid);
- if (pid < 0) {
- printk(KERN_ERR "TUX: could not create new CGI kernel thread due to %d... retrying.\n", pid);
- current->state = TASK_UNINTERRUPTIBLE;
- schedule_timeout(HZ);
- goto repeat_fork;
- }
- return pid;
-}
+++ /dev/null
-/*
- * TUX - Integrated Application Protocols Layer and Object Cache
- *
- * Copyright (C) 2000, 2001, Ingo Molnar <mingo@redhat.com>
- *
- * directory.c: directory listing support
- */
-
-#define __KERNEL_SYSCALLS__
-#include <net/tux.h>
-
-/****************************************************************
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- ****************************************************************/
-
-char * tux_print_path (tux_req_t *req, struct dentry *dentry, struct vfsmount *mnt, char *buf, unsigned int max_len)
-{
- char *res;
- struct dentry *cwd, *root;
- struct vfsmount *cwd_mnt, *rootmnt;
-
- cwd = dget(dentry);
- cwd_mnt = mntget(mnt);
- root = dget(req->docroot_dentry);
- rootmnt = mntget(req->docroot_mnt);
-
- spin_lock(&dcache_lock);
- res = __d_path(cwd, cwd_mnt, root, rootmnt, buf, max_len);
- spin_unlock(&dcache_lock);
-
- dput(cwd);
- mntput(cwd_mnt);
- dput(root);
- mntput(rootmnt);
-
- return res;
-}
-
-/*
- * There are filesystems that do not fill in ->d_type correctly.
- * Determine file-type.
- */
-static int get_d_type (struct dentry *dentry)
-{
- unsigned int mode = dentry->d_inode->i_mode;
-
- if (S_ISREG(mode))
- return DT_REG;
- if (S_ISDIR(mode))
- return DT_DIR;
- if (S_ISLNK(mode))
- return DT_LNK;
- if (S_ISFIFO(mode))
- return DT_FIFO;
- if (S_ISSOCK(mode))
- return DT_SOCK;
- if (S_ISCHR(mode))
- return DT_CHR;
- if (S_ISBLK(mode))
- return DT_BLK;
- return 0;
-}
-
-static void do_dir_line (tux_req_t *req, int cachemiss)
-{
- struct linux_dirent64 *dirp, *dirp0;
- char string0[MAX_OBJECTNAME_LEN+200], *tmp;
- int len, curroff, total, str_len = 0;
- int err, flag = cachemiss ? 0 : LOOKUP_ATOMIC;
- struct nameidata base;
- struct dentry *dentry = NULL;
- struct inode *inode = NULL;
- struct vfsmount *mnt = NULL;
-
- if (req->proto->check_req_err(req, cachemiss))
- return;
-
- tmp = NULL;
- dirp0 = req->dirp0;
- curroff = req->curroff;
- total = req->total;
-
- dirp = (struct linux_dirent64 *)((char *)dirp0 + curroff);
- if (!dirp->d_name || !dirp->d_name[0])
- goto next_dir;
- /*
- * Hide .xxxxx files:
- */
- if (dirp->d_name[0] == '.')
- goto next_dir;
- Dprintk("<%s T:%d (off:%Ld) (len:%d)>\n", dirp->d_name, dirp->d_type, dirp->d_off, dirp->d_reclen);
- if (tux_hide_unreadable) {
- switch (dirp->d_type) {
- default:
- goto next_dir;
- case DT_UNKNOWN:
- case DT_REG:
- case DT_DIR:
- case DT_LNK:
- /* valid entries - fall through. */
- ;
- }
- }
- len = strlen(dirp->d_name);
- if (len >= MAX_OBJECTNAME_LEN) {
- dirp->d_name[MAX_OBJECTNAME_LEN] = 0;
- len = MAX_OBJECTNAME_LEN-1;
- }
-
- if (!req->dentry)
- TUX_BUG();
-
- base.flags = flag;
- base.last_type = LAST_ROOT;
- base.dentry = dget(req->dentry);
- base.mnt = mntget(req->cwd_mnt);
-
- switch_docroot(req);
- err = path_walk(dirp->d_name, &base);
-
- Dprintk("path_walk() returned %d.\n", err);
-
- if (err) {
- if (err == -EWOULDBLOCKIO) {
- add_tux_atom(req, do_dir_line);
- queue_cachemiss(req);
- return;
- }
- goto next_dir;
- }
-
- dentry = base.dentry;
- mnt = base.mnt;
- if (!dentry)
- TUX_BUG();
- if (IS_ERR(dentry))
- TUX_BUG();
- inode = dentry->d_inode;
- if (!inode)
- TUX_BUG();
- if (!dirp->d_type)
- dirp->d_type = get_d_type(dentry);
- if (tux_hide_unreadable) {
- umode_t mode;
-
- mode = inode->i_mode;
- if (mode & tux_mode_forbidden)
- goto out_dput;
- if (!(mode & tux_mode_allowed))
- goto out_dput;
-
- err = permission(inode, MAY_READ, NULL);
- if (err)
- goto out_dput;
- if (dirp->d_type == DT_DIR) {
- err = permission(inode, MAY_EXEC, NULL);
- if (err)
- goto out_dput;
- }
- }
-
- tmp = req->proto->print_dir_line(req, string0, dirp->d_name, len, dirp->d_type, dentry, inode);
- if (tmp)
- str_len = tmp-string0;
-out_dput:
- dput(dentry);
- mntput(mnt);
-next_dir:
- curroff += dirp->d_reclen;
-
- if (tmp && (tmp != string0))
- Dprintk("writing line (len: %d): <%s>\n", strlen(string0), string0);
-
- if (curroff < total) {
- req->dirp0 = dirp0;
- req->curroff = curroff;
- add_tux_atom(req, do_dir_line);
- } else {
- kfree(dirp0);
- req->dirp0 = NULL;
- req->curroff = 0;
- // falls back to the list_directory atom
- }
- if (tmp && (tmp != string0))
- __send_async_message(req, string0, 200, str_len, 0);
- else
- add_req_to_workqueue(req);
-}
-
-#define NAME_OFFSET(de) ((int) ((de)->d_name - (char *) (de)))
-#define ROUND_UP(x) (((x)+sizeof(long)-1) & ~(sizeof(long)-1))
-#define ROUND_UP64(x) (((x)+sizeof(u64)-1) & ~(sizeof(u64)-1))
-
-static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
- ino_t ino, unsigned int d_type)
-{
- struct linux_dirent64 * dirent, d;
- struct getdents_callback64 * buf = (struct getdents_callback64 *) __buf;
- int reclen = ROUND_UP64(NAME_OFFSET(dirent) + namlen + 1);
-
- buf->error = -EINVAL; /* only used if we fail.. */
- if (reclen > buf->count)
- return -EINVAL;
- dirent = buf->previous;
- if (dirent) {
- d.d_off = offset;
- copy_to_user(&dirent->d_off, &d.d_off, sizeof(d.d_off));
- }
- dirent = buf->current_dir;
- buf->previous = dirent;
- memset(&d, 0, NAME_OFFSET(&d));
- d.d_ino = ino;
- d.d_reclen = reclen;
- d.d_type = d_type;
- copy_to_user(dirent, &d, NAME_OFFSET(&d));
- copy_to_user(dirent->d_name, name, namlen);
- put_user(0, dirent->d_name + namlen);
- dirent = (void *)dirent + reclen;
- buf->current_dir = dirent;
- buf->count -= reclen;
- return 0;
-}
-#define DIRENT_SIZE 3000
-
-void list_directory (tux_req_t *req, int cachemiss)
-{
- struct getdents_callback64 buf;
- struct linux_dirent64 *dirp0;
- mm_segment_t oldmm;
- int total;
-
- Dprintk("list_directory(%p, %d), dentry: %p.\n", req, cachemiss, req->dentry);
- if (!req->cwd_dentry)
- TUX_BUG();
-
- if (!cachemiss) {
- add_tux_atom(req, list_directory);
- queue_cachemiss(req);
- return;
- }
-
- dirp0 = tux_kmalloc(DIRENT_SIZE);
-
- buf.current_dir = dirp0;
- buf.previous = NULL;
- buf.count = DIRENT_SIZE;
- buf.error = 0;
-
- oldmm = get_fs(); set_fs(KERNEL_DS);
- set_fs(KERNEL_DS);
- total = vfs_readdir(&req->in_file, filldir64, &buf);
- set_fs(oldmm);
-
- if (buf.previous)
- total = DIRENT_SIZE - buf.count;
-
- Dprintk("total: %d (buf.error: %d, buf.previous %p)\n",
- total, buf.error, buf.previous);
-
- if (total < 0) {
- kfree(dirp0);
- req_err(req);
- add_req_to_workqueue(req);
- return;
- }
- if (!total) {
- kfree(dirp0);
- req->in_file.f_pos = 0;
- add_req_to_workqueue(req);
- return;
- }
-
- if (!req->cwd_dentry)
- TUX_BUG();
- add_tux_atom(req, list_directory);
-
- req->dirp0 = dirp0;
- req->curroff = 0;
- req->total = total;
- add_tux_atom(req, do_dir_line);
-
- add_req_to_workqueue(req);
-}
-
+++ /dev/null
-/*
- * TUX - Integrated Application Protocols Layer and Object Cache
- *
- * Copyright (C) 2000, 2001, Ingo Molnar <mingo@redhat.com>
- *
- * extcgi.c: dynamic TUX module which forks and starts an external CGI
- */
-
-#define __KERNEL_SYSCALLS__
-#define __KERNEL_SYSCALLS_NO_ERRNO__
-
-#include <net/tux.h>
-#include "parser.h"
-
-/****************************************************************
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- ****************************************************************/
-
-#define MAX_ENVLEN 1000
-#define MAX_CGI_METAVARIABLES 32
-#define CGI_CHUNK_SIZE 1024
-#define MAX_CGI_COMMAND_LEN 256
-
-#if CONFIG_TUX_DEBUG
-#define PRINT_MESSAGE_LEFT \
- Dprintk("CGI message left at %s:%d:\n--->{%s}<---\n", \
- __FILE__, __LINE__, curr)
-#else
-#define PRINT_MESSAGE_LEFT do {} while(0)
-#endif
-
-#define GOTO_INCOMPLETE do { Dprintk("invalid CGI reply at %s:%d.\n", __FILE__, __LINE__); goto invalid; } while (0)
-
-/*
- * Please acknowledge our hard work by not changing this define, or
- * at least please acknowledge us by leaving "TUX/2.0 (Linux)" in
- * the ID string. Thanks! :-)
- */
-#define CGI_SUCCESS2 "HTTP/1.1 200 OK\r\nConnection: close\r\nServer: TUX/2.0 (Linux)\r\n"
-
-static int handle_cgi_reply (tux_req_t *req)
-{
- int first = 1;
- int len, left, total;
- char *buf, *tmp;
- mm_segment_t oldmm;
-
- buf = tux_kmalloc(CGI_CHUNK_SIZE+1);
- tux_close(3);
- tux_close(4);
- tux_close(5);
- oldmm = get_fs(); set_fs(KERNEL_DS);
- send_sync_buf(NULL, req->sock, CGI_SUCCESS2, sizeof(CGI_SUCCESS2)-1, MSG_MORE);
- set_fs(oldmm);
-
- req->bytes_sent = 0;
- /*
- * The new process is the new owner of the socket, it will
- * close it.
- */
-repeat:
- left = CGI_CHUNK_SIZE;
- len = 0;
- total = 0;
- tmp = buf;
- do {
- mm_segment_t oldmm;
-
- tmp += len;
- total += len;
- left -= len;
- if (!left)
- break;
-repeat_read:
- Dprintk("reading %d bytes via read().\n", left);
- oldmm = get_fs(); set_fs(KERNEL_DS);
- len = read(2, tmp, left);
- set_fs(oldmm);
- Dprintk("got %d bytes from read() (total: %d).\n", len, total);
- if (len > 0)
- tmp[len] = 0;
- Dprintk("CGI reply: (%d bytes, total %d).\n", len, total);
- if (len == -ERESTARTSYS) {
- flush_all_signals();
- goto repeat_read;
- }
- } while (len > 0);
- if (total > CGI_CHUNK_SIZE) {
- printk(KERN_ERR "TUX: CGI weirdness. total: %d, len: %d, left: %d.\n", total, len, left);
- TUX_BUG();
- }
- Dprintk("CGI done reply chunk: (%d bytes last, total %d).\n", len, total);
- if (total) {
- mm_segment_t oldmm;
-
- oldmm = get_fs(); set_fs(KERNEL_DS);
- if (!len)
- send_sync_buf(NULL, req->sock, buf, total, 0);
- else
- send_sync_buf(NULL, req->sock, buf, total, MSG_MORE);
- set_fs(oldmm);
- req->bytes_sent += total;
- }
-
- Dprintk("bytes_sent: %d\n", req->bytes_sent);
- if ((total > 0) && first) {
- first = 0;
-
- if (buf[total])
- TUX_BUG();
- tmp = strstr(buf, "\n\n");
- if (tmp) {
- req->bytes_sent -= (tmp-buf) + 2;
- Dprintk("new bytes_sent: %d\n", req->bytes_sent);
- } else {
- req->bytes_sent = 0;
- req_err(req);
- }
- }
- if (len < 0)
- Dprintk("sys_read returned with %d.\n", len);
- else {
- if (total > 0)
- goto repeat;
- }
- tux_close(2);
-
- req->status = 200;
- add_req_to_workqueue(req);
- kfree(buf);
-
- return -1;
-}
-
-static int exec_external_cgi (void *data)
-{
- exec_param_t param;
- tux_req_t *req = data;
- char *envp[MAX_CGI_METAVARIABLES+1], **envp_p;
- char *argv[] = { "extcgi", NULL};
- char *envstr, *tmp;
- unsigned int host;
- struct k_sigaction *ka;
- int in_pipe_fds[2], out_pipe_fds[2], err_pipe_fds[2], len, err;
- char *command;
- pid_t pid;
-
- len = strlen(tux_common_docroot);
- if (req->objectname_len + len + 12 > MAX_CGI_COMMAND_LEN)
- return -ENOMEM;
- sprintf(current->comm,"cgimain - %d", current->pid);
- host = inet_sk(req->sock->sk)->daddr;
-
- envstr = tux_kmalloc(MAX_ENVLEN);
- command = tux_kmalloc(MAX_CGI_COMMAND_LEN);
-
- tmp = envstr;
- envp_p = envp;
-
-#define WRITE_ENV(str...) \
- if (envp_p >= envp + MAX_CGI_METAVARIABLES) \
- TUX_BUG(); \
- len = sprintf(tmp, str); \
- *envp_p++ = tmp; \
- tmp += len + 1; \
- if (tmp >= envstr + MAX_ENVLEN) \
- TUX_BUG();
-
- #define WRITE_ENV_STR(str,field,len) \
- do { \
- int offset; \
- \
- offset = sizeof(str)-1; \
- err = -EFAULT; \
- if (tmp - envstr + offset + len >= MAX_ENVLEN) \
- goto out; \
- if (envp_p >= envp + MAX_CGI_METAVARIABLES) \
- TUX_BUG(); \
- memcpy(tmp, str, offset); \
- memcpy(tmp + offset, field, len); \
- offset += len; \
- tmp[offset] = 0; \
- *envp_p++ = tmp; \
- tmp += offset + 1; \
- } while (0)
-
- WRITE_ENV("GATEWAY_INTERFACE=CGI/1.1");
- WRITE_ENV("CONTENT_LENGTH=%d", req->post_data_len);
- WRITE_ENV("REMOTE_ADDR=%d.%d.%d.%d", NIPQUAD(host));
- WRITE_ENV("SERVER_PORT=%d", 80);
- WRITE_ENV("SERVER_SOFTWARE=TUX/2.0 (Linux)");
-
-#if 1
- WRITE_ENV("DOCUMENT_ROOT=/");
- WRITE_ENV("PATH_INFO=/");
-#else
- WRITE_ENV_STR("DOCUMENT_ROOT=", tux_common_docroot, len);
- WRITE_ENV_STR("PATH_INFO=", tux_common_docroot, len);
-#endif
- WRITE_ENV_STR("QUERY_STRING=", req->query_str, req->query_len);
- WRITE_ENV_STR("REQUEST_METHOD=", req->method_str, req->method_len);
- WRITE_ENV_STR("SCRIPT_NAME=", req->objectname, req->objectname_len);
- WRITE_ENV_STR("SERVER_PROTOCOL=", req->version_str, req->version_len);
-
- if (req->content_type_len)
- WRITE_ENV_STR("CONTENT_TYPE=",
- req->content_type_str, req->content_type_len);
- if (req->cookies_len)
- WRITE_ENV_STR("HTTP_COOKIE=",
- req->cookies_str, req->cookies_len);
-
- if (req->host_len)
- WRITE_ENV_STR("SERVER_NAME=", req->host, req->host_len);
- else {
- const char *host = "localhost";
- WRITE_ENV_STR("SERVER_NAME=", host, strlen(host));
- }
-
- *envp_p = NULL;
-
- spin_lock_irq(¤t->sighand->siglock);
- ka = current->sighand->action + SIGPIPE-1;
- ka->sa.sa_handler = SIG_IGN;
- siginitsetinv(¤t->blocked, sigmask(SIGCHLD));
- recalc_sigpending();
- spin_unlock_irq(¤t->sighand->siglock);
-
- tux_close(0); tux_close(1);
- tux_close(2); tux_close(3);
- tux_close(4); tux_close(5);
-
- in_pipe_fds[0] = in_pipe_fds[1] = -1;
- out_pipe_fds[0] = out_pipe_fds[1] = -1;
- err_pipe_fds[0] = err_pipe_fds[1] = -1;
-
- err = -ENFILE;
- if (do_pipe(in_pipe_fds))
- goto out;
- if (do_pipe(out_pipe_fds))
- goto out;
- if (do_pipe(err_pipe_fds))
- goto out;
-
- if (in_pipe_fds[0] != 0) TUX_BUG();
- if (in_pipe_fds[1] != 1) TUX_BUG();
- if (out_pipe_fds[0] != 2) TUX_BUG();
- if (out_pipe_fds[1] != 3) TUX_BUG();
- if (err_pipe_fds[0] != 4) TUX_BUG();
- if (err_pipe_fds[1] != 5) TUX_BUG();
-
- if (req->virtual && req->host_len)
- sprintf(command, "/%s/cgi-bin/%s", req->host, req->objectname);
- else
- sprintf(command, "/cgi-bin/%s", req->objectname);
- Dprintk("before CGI exec.\n");
- pid = tux_exec_process(command, argv, envp, 1, ¶m, 0);
- Dprintk("after CGI exec.\n");
-
- if (req->post_data_len) {
- mm_segment_t oldmm;
- int ret;
-
- Dprintk("POST data to CGI:\n");
- oldmm = get_fs(); set_fs(KERNEL_DS);
- ret = write(1, req->post_data_str, req->post_data_len);
- set_fs(oldmm);
- Dprintk("write() returned: %d.\n", ret);
- if (ret != req->post_data_len)
- Dprintk("write() returned: %d.\n", ret);
- }
-
- tux_close(0);
- tux_close(1);
-
- handle_cgi_reply(req);
- err = 0;
-
-out:
- kfree(envstr);
- kfree(command);
-
- return err;
-}
-
-void start_external_cgi (tux_req_t *req)
-{
- int pid;
-
-repeat:
- pid = kernel_thread(exec_external_cgi, (void*) req, SIGCHLD);
- if (pid == -1)
- return;
- if (pid < 0) {
- printk(KERN_INFO "TUX: Could not fork external CGI process due to %d, retrying!\n", pid);
- current->state = TASK_UNINTERRUPTIBLE;
- schedule_timeout(HZ);
- goto repeat;
- }
-}
-
-int query_extcgi (tux_req_t *req)
-{
- clear_keepalive(req);
- start_external_cgi(req);
- return -1;
-}
-
-#define EXTCGI_INVALID_HEADER \
- "HTTP/1.1 503 Service Unavailable\r\n" \
- "Content-Length: 23\r\n\r\n"
-
-#define EXTCGI_INVALID_BODY \
- "TUX: invalid CGI reply."
-
-#define EXTCGI_INVALID EXTCGI_INVALID_HEADER EXTCGI_INVALID_BODY
-
+++ /dev/null
-/* $Id: zlib.h,v 1.2 1997/12/23 10:47:44 paulus Exp $ */
-
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/zlib.h>
-#include <net/tux.h>
-
-#define STREAM_END_SPACE 12
-
-int tux_gzip_compress (tux_req_t *req, unsigned char *data_in, unsigned char *data_out, __u32 *in_len, __u32 *out_len)
-{
- z_stream *s = &req->ti->gzip_state;
- int ret, left;
-
- down(&req->ti->gzip_sem);
- if (zlib_deflateReset(s) != Z_OK)
- BUG();
-
- s->next_in = data_in;
- s->next_out = data_out;
- s->avail_in = *in_len;
- s->avail_out = *out_len;
-
- Dprintk("calling zlib_deflate with avail_in %d, avail_out %d\n", s->avail_in, s->avail_out);
- ret = zlib_deflate(s, Z_FINISH);
- Dprintk("deflate returned with avail_in %d, avail_out %d, total_in %ld, total_out %ld\n", s->avail_in, s->avail_out, s->total_in, s->total_out);
-
- if (ret != Z_STREAM_END) {
- printk("bad: deflate returned with %d! avail_in %d, avail_out %d, total_in %ld, total_out %ld\n", ret, s->avail_in, s->avail_out, s->total_in, s->total_out);
- BUG();
- }
- *in_len = s->avail_in;
- *out_len = s->avail_out;
- left = s->avail_in;
-
- up(&req->ti->gzip_sem);
-
- return left;
-}
-
+++ /dev/null
-/*
- * TUX - Integrated Application Protocols Layer and Object Cache
- *
- * Copyright (C) 2000, 2001, Ingo Molnar <mingo@redhat.com>
- *
- * input.c: handle requests arriving on accepted connections
- */
-
-#include <net/tux.h>
-#include <linux/kmod.h>
-
-/****************************************************************
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- ****************************************************************/
-
-void zap_request (tux_req_t *req, int cachemiss)
-{
- if (!req->error)
- TUX_BUG();
- if (req->error == TUX_ERROR_CONN_TIMEOUT) {
- if (req->proto->request_timeout) {
- clear_keepalive(req);
- req->proto->request_timeout(req, cachemiss);
- } else {
- clear_keepalive(req);
- if (!cachemiss)
- flush_request(req, 0);
- else {
- add_tux_atom(req, flush_request);
- add_req_to_workqueue(req);
- }
- }
- return;
- }
-
- if (!cachemiss && (req->error == TUX_ERROR_CONN_CLOSE)) {
- /*
- * Zap connection as fast as possible, there is
- * no valid client connection anymore:
- */
- clear_keepalive(req);
- flush_request(req, 0);
- } else {
- if (req->error == TUX_ERROR_CONN_CLOSE) {
- clear_keepalive(req);
- add_tux_atom(req, flush_request);
- } else
- /*
- * Potentially redirect to the secondary server:
- */
- add_tux_atom(req, redirect_request);
- add_req_to_workqueue(req);
- }
-}
-
-void __switch_docroot(tux_req_t *req)
-{
- if (!req->docroot_dentry || !req->docroot_mnt)
- TUX_BUG();
- set_fs_root(current->fs, req->docroot_mnt, req->docroot_dentry);
-}
-
-struct dentry * __tux_lookup (tux_req_t *req, const char *filename,
- struct nameidata *base, struct vfsmount **mnt)
-{
- int err;
-
- err = path_walk(filename, base);
- if (err) {
- Dprintk("path_walk() returned with %d!\n", err);
- return ERR_PTR(err);
- }
- if (*mnt)
- TUX_BUG();
- *mnt = base->mnt;
-
- return base->dentry;
-}
-
-int tux_permission (struct inode *inode)
-{
- umode_t mode;
- int err;
-
- mode = inode->i_mode;
- Dprintk("URL inode mode: %08x.\n", mode);
-
- if (mode & tux_mode_forbidden)
- return -2;
- /*
- * at least one bit in the 'allowed' set has to
- * be present to allow access.
- */
- if (!(mode & tux_mode_allowed))
- return -3;
- err = permission(inode,MAY_READ,NULL);
- return err;
-}
-
-struct dentry * tux_lookup (tux_req_t *req, const char *filename,
- const unsigned int flag, struct vfsmount **mnt)
-{
- struct dentry *dentry;
- struct nameidata base;
-
- Dprintk("tux_lookup(%p, %s, %d, virtual: %d, host: %s (%d).)\n", req, filename, flag, req->virtual, req->host, req->host_len);
-
- base.flags = LOOKUP_FOLLOW|flag;
- base.last_type = LAST_ROOT;
- if (req->objectname[0] == '/') {
- base.dentry = dget(req->docroot_dentry);
- base.mnt = mntget(req->docroot_mnt);
- } else {
- if (!req->cwd_dentry) {
- req->cwd_dentry = dget(req->docroot_dentry);
- req->cwd_mnt = mntget(req->docroot_mnt);
- }
- base.dentry = req->cwd_dentry;
- dget(base.dentry);
- base.mnt = mntget(req->cwd_mnt);
- }
-
- switch_docroot(req);
- dentry = __tux_lookup (req, filename, &base, mnt);
-
- Dprintk("looked up {%s} == dentry %p.\n", filename, dentry);
-
- if (dentry && !IS_ERR(dentry) && !dentry->d_inode)
- TUX_BUG();
- return dentry;
-}
-
-int lookup_object (tux_req_t *req, const unsigned int flag)
-{
- struct vfsmount *mnt = NULL;
- struct dentry *dentry = NULL;
- int perm;
-
- dentry = tux_lookup(req, req->objectname, flag, &mnt);
- if (!dentry || IS_ERR(dentry)) {
- if (PTR_ERR(dentry) == -EWOULDBLOCKIO)
- goto cachemiss;
- goto abort;
- }
- perm = tux_permission(dentry->d_inode);
- /*
- * Only regular files allowed.
- */
- if ((perm < 0) || !S_ISREG(dentry->d_inode->i_mode)) {
- req->status = 403;
- goto abort;
- }
- req->total_file_len = dentry->d_inode->i_size;
-out:
- install_req_dentry(req, dentry, mnt);
- return 0;
-cachemiss:
- return 1;
-abort:
- if (dentry) {
- if (!IS_ERR(dentry))
- dput(dentry);
- dentry = NULL;
- }
- if (mnt) {
- if (!IS_ERR(mnt))
- mntput(mnt);
- mnt = NULL;
- }
- req_err(req);
- goto out;
-}
-
-void install_req_dentry (tux_req_t *req, struct dentry *dentry, struct vfsmount *mnt)
-{
- if (req->dentry)
- TUX_BUG();
- req->dentry = dentry;
- if (req->mnt)
- TUX_BUG();
- req->mnt = mnt;
- if (req->in_file.f_dentry)
- TUX_BUG();
- if (dentry)
- open_private_file(&req->in_file, dentry, FMODE_READ);
-}
-
-void release_req_dentry (tux_req_t *req)
-{
- if (!req->dentry) {
- if (req->in_file.f_dentry)
- TUX_BUG();
- return;
- }
- if (req->in_file.f_op && req->in_file.f_op->release)
- req->in_file.f_op->release(req->dentry->d_inode, &req->in_file);
- memset(&req->in_file, 0, sizeof(req->in_file));
-
- dput(req->dentry);
- req->dentry = NULL;
- mntput(req->mnt);
- req->mnt = NULL;
-}
-
-int __connection_too_fast (tux_req_t *req)
-{
- unsigned long curr_bw, delta, bytes;
-
- bytes = req->total_bytes + req->bytes_sent;
- if (!bytes)
- return 1;
-
- delta = jiffies - req->first_timestamp;
- if (!delta)
- delta++;
- curr_bw = bytes * HZ / delta;
-
- if (curr_bw > tux_max_output_bandwidth)
- return 2;
- return 0;
-}
-
-void unidle_req (tux_req_t *req)
-{
- threadinfo_t *ti = req->ti;
-
- Dprintk("UNIDLE req %p <%p> (sock %p, sk %p) (keepalive: %d, status: %d)\n", req, __builtin_return_address(0), req->sock, req->sock->sk, req->keep_alive, req->status);
- spin_lock_irq(&ti->work_lock);
- if (req->magic != TUX_MAGIC)
- TUX_BUG();
- if (!test_and_clear_bit(0, &req->idle_input)) {
- Dprintk("unidling %p, wasnt idle!\n", req);
- if (list_empty(&req->work))
- TUX_BUG();
- list_del(&req->work);
- DEBUG_DEL_LIST(&req->work);
- DEC_STAT(nr_work_pending);
- } else {
- del_keepalive_timer(req);
- DEC_STAT(nr_idle_input_pending);
- Dprintk("unidled %p.\n", req);
- }
- if (req->idle_input)
- TUX_BUG();
- spin_unlock_irq(&ti->work_lock);
-}
-
-#define GOTO_INCOMPLETE do { Dprintk("incomplete at %s:%d.\n", __FILE__, __LINE__); goto incomplete; } while (0)
-#define GOTO_REDIRECT do { TDprintk("redirect at %s:%d.\n", __FILE__, __LINE__); goto redirect; } while (0)
-#define GOTO_REDIRECT_NONIDLE do { TDprintk("redirect at %s:%d.\n", __FILE__, __LINE__); goto redirect_nonidle; } while (0)
-
-static int read_request (struct socket *sock, char *buf, int max_size)
-{
- mm_segment_t oldmm;
- struct kiocb iocb;
- struct msghdr msg;
- struct iovec iov;
-
- int len;
-
- msg.msg_name = 0;
- msg.msg_namelen = 0;
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
- msg.msg_control = NULL;
- msg.msg_controllen = 0;
- msg.msg_flags = 0;
-
- msg.msg_iov->iov_base = buf;
- msg.msg_iov->iov_len = max_size;
-
- oldmm = get_fs(); set_fs(KERNEL_DS);
-
-read_again:
- init_sync_kiocb(&iocb, NULL);
- len = sock->sk->sk_prot->recvmsg(&iocb, sock->sk, &msg, max_size,
- MSG_DONTWAIT, MSG_PEEK, NULL);
- if (-EIOCBQUEUED == len)
- len = wait_on_sync_kiocb(&iocb);
-
- /*
- * We must not get a signal inbetween
- */
- if ((len == -EAGAIN) || (len == -ERESTARTSYS)) {
- if (!signal_pending(current)) {
- len = 0;
- goto out;
- }
- flush_all_signals();
- goto read_again;
- }
-out:
- set_fs(oldmm);
- return len;
-}
-
-/*
- * We inline URG data so it's at the head of the normal receive queue.
- */
-static int zap_urg_data (struct socket *sock)
-{
- mm_segment_t oldmm;
- struct msghdr msg;
- struct iovec iov;
- struct kiocb iocb;
- int len;
- char buf[10];
-
- oldmm = get_fs(); set_fs(KERNEL_DS);
-
- msg.msg_name = 0;
- msg.msg_namelen = 0;
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
- msg.msg_control = NULL;
- msg.msg_controllen = 0;
- msg.msg_flags = 0;
-
- msg.msg_iov->iov_base = buf;
- msg.msg_iov->iov_len = 2;
-
-read_again:
- init_sync_kiocb(&iocb, NULL);
- len = sock->sk->sk_prot->recvmsg(&iocb, sock->sk, &msg, 2,
- MSG_DONTWAIT, 0, NULL);
- if (-EIOCBQUEUED == len)
- len = wait_on_sync_kiocb(&iocb);
- Dprintk("recvmsg(MSG_OOB) returned %d.\n", len);
-
- /*
- * We must not get a signal inbetween
- */
- if ((len == -EAGAIN) || (len == -ERESTARTSYS)) {
- if (!signal_pending(current)) {
- len = 0;
- goto out;
- }
- flush_all_signals();
- goto read_again;
- }
-out:
- set_fs(oldmm);
-
- Dprintk("in out:.. and will return %d.!\n", len);
-
- return len;
-}
-
-void trunc_headers (tux_req_t *req)
-{
- struct sock *sk = req->sock->sk;
- int len, addr_len = 0;
- struct kiocb iocb;
-
- if (!req->parsed_len)
- TUX_BUG();
-repeat_trunc:
- init_sync_kiocb(&iocb, NULL);
- len = sk->sk_prot->recvmsg(&iocb, sk, NULL, req->parsed_len, 1, MSG_TRUNC, &addr_len);
- if (-EIOCBQUEUED == len)
- len = wait_on_sync_kiocb(&iocb);
- if ((len == -ERESTARTSYS) || (len == -EAGAIN)) {
- flush_all_signals();
- goto repeat_trunc;
- }
- Dprintk("truncated (TRUNC) %d bytes at %p. (wanted: %d.)\n", len, __builtin_return_address(0), req->parsed_len);
-
-
-
- req->parsed_len = 0;
-}
-
-void print_req (tux_req_t *req)
-{
- struct sock *sk;
-
- printk("PRINT req %p <%p>, sock %p\n",
- req, __builtin_return_address(0), req->sock);
- printk("... idx: %d\n", req->atom_idx);
- if (req->sock) {
- sk = req->sock->sk;
- printk("... sock %p, sk %p, sk->state: %d, sk->err: %d\n", req->sock, sk, sk->sk_state, sk->sk_err);
- printk("... write_queue: %d, receive_queue: %d, error_queue: %d, keepalive: %d, status: %d\n", !skb_queue_empty(&sk->sk_write_queue), !skb_queue_empty(&sk->sk_receive_queue), !skb_queue_empty(&sk->sk_error_queue), req->keep_alive, req->status);
- printk("...tp->send_head: %p\n", sk->sk_send_head);
- printk("...tp->snd_una: %08x\n", tcp_sk(sk)->snd_una);
- printk("...tp->snd_nxt: %08x\n", tcp_sk(sk)->snd_nxt);
- printk("...tp->packets_out: %08x\n", tcp_sk(sk)->packets_out);
- }
- printk("... meth:{%s}, uri:{%s}, query:{%s}, ver:{%s}\n", req->method_str ? req->method_str : "<null>", req->uri_str ? req->uri_str : "<null>", req->query_str ? req->query_str : "<null>", req->version_str ? req->version_str : "<null>");
- printk("... post_data:{%s}(%d).\n", req->post_data_str, req->post_data_len);
- printk("... headers: {%s}\n", req->headers);
-}
-/*
- * parse_request() reads all available TCP/IP data and prepares
- * the request if the TUX request is complete. (we can get TUX
- * requests in several packets.) Invalid requests are redirected
- * to the secondary server.
- */
-
-void parse_request (tux_req_t *req, int cachemiss)
-{
- int len, parsed_len;
- struct sock *sk = req->sock->sk;
- struct tcp_opt *tp = tcp_sk(sk);
- int was_keepalive = req->keep_alive;
-
- if (req->magic != TUX_MAGIC)
- TUX_BUG();
-
- SET_TIMESTAMP(req->parse_timestamp);
-
- spin_lock_irq(&req->ti->work_lock);
- add_keepalive_timer(req);
- if (test_and_set_bit(0, &req->idle_input))
- TUX_BUG();
- INC_STAT(nr_idle_input_pending);
- spin_unlock_irq(&req->ti->work_lock);
-
- Dprintk("idled request %p.\n", req);
-
-restart:
-
- if (tp->urg_data && !(tp->urg_data & TCP_URG_READ)) {
- len = zap_urg_data(req->sock);
- if (tp->urg_data && !(tp->urg_data & TCP_URG_READ)) {
- req->error = TUX_ERROR_CONN_CLOSE;
- goto redirect_error;
- }
- }
-
- INC_STAT(input_slowpath);
-
- if (!req->headers)
- req->headers = tux_kmalloc(tux_max_header_len);
-
- /* First, read the data */
- len = read_request(req->sock, (char *)req->headers, tux_max_header_len-1);
- if (len < 0) {
- req->error = TUX_ERROR_CONN_CLOSE;
- goto redirect_error;
- }
- if (!len)
- GOTO_INCOMPLETE;
-
- /*
- * Make it a zero-delimited string to automatically get
- * protection against various buffer overflow situations.
- * Then pass it to the TUX application protocol stack.
- */
- ((char *)req->headers)[len] = 0;
- req->headers_len = len;
-
- parsed_len = req->proto->parse_message(req, len);
-
- /*
- * Is the request fully read? (or is there any error)
- */
- if (parsed_len < 0)
- GOTO_REDIRECT;
- if (!parsed_len) {
- /*
- * Push pending ACK which was delayed due to the
- * pingpong optimization:
- */
- if (was_keepalive) {
- lock_sock(sk);
- tp->ack.pingpong = 0;
- tp->ack.pending |= TCP_ACK_PUSHED;
- cleanup_rbuf(sk, 1);
- release_sock(sk);
- }
- if (len >= tux_max_header_len-1)
- GOTO_REDIRECT;
- GOTO_INCOMPLETE;
- }
- unidle_req(req);
-
- tp->nonagle = 2;
-
- add_req_to_workqueue(req);
- return;
-
-redirect:
- TDprintk("req %p will be redirected!\n", req);
- req_err(req);
-
-redirect_error:
- unidle_req(req);
-
- if (len < 0)
- req->parsed_len = 0;
- else
- req->parsed_len = len;
-
- INC_STAT(parse_static_redirect);
- if (req->headers)
- kfree(req->headers);
- req->headers = NULL;
- if (req->error)
- zap_request(req, cachemiss);
- return;
-
-incomplete:
- if (req->error)
- goto redirect_error;
- if (tp->urg_data && !(tp->urg_data & TCP_URG_READ))
- goto restart;
-
- add_tux_atom(req, parse_request);
- INC_STAT(parse_static_incomplete);
- tux_push_req(req);
-}
-
-int process_requests (threadinfo_t *ti, tux_req_t **user_req)
-{
- struct list_head *head, *curr;
- int count = 0;
- tux_req_t *req;
-
- *user_req = NULL;
-
-restart_loop:
- spin_lock_irq(&ti->work_lock);
- head = &ti->work_pending;
- curr = head->next;
-
- if (curr != head) {
- int i;
-
- req = list_entry(curr, tux_req_t, work);
- Dprintk("PROCESS req %p <%p>.\n",
- req, __builtin_return_address(0));
- for (i = 0; i < req->atom_idx; i++)
- Dprintk("... atom %d: %p\n", i, req->atoms[i]);
-
- if (req->ti != ti)
- TUX_BUG();
- if (req->magic != TUX_MAGIC)
- TUX_BUG();
-
- if (list_empty(&req->work))
- TUX_BUG();
- list_del(curr);
- DEBUG_DEL_LIST(&req->work);
- spin_unlock_irq(&ti->work_lock);
-
- if (!req->atom_idx) {
- if (req->usermode) {
- *user_req = req;
- return count;
- }
- /*
- * idx == 0 requests are flushed automatically.
- */
- flush_request(req, 0);
- } else
- tux_schedule_atom(req, 0);
- count++;
- goto restart_loop;
- }
- spin_unlock_irq(&ti->work_lock);
-
- return count;
-}
-
-int tux_flush_workqueue (threadinfo_t *ti)
-{
- struct list_head *head, *curr, *next;
- tux_req_t *req;
- int count = 0;
-
-restart:
- spin_lock_irq(&ti->work_lock);
- head = &ti->work_pending;
- curr = head->next;
-
- if (curr != head) {
- req = list_entry(curr, tux_req_t, work);
- next = curr->next;
- clear_bit(0, &req->idle_input);
- clear_bit(0, &req->wait_output_space);
- if (list_empty(&req->work))
- TUX_BUG();
- list_del(curr);
- DEBUG_DEL_LIST(curr);
- DEC_STAT(nr_input_pending);
- spin_unlock_irq(&ti->work_lock);
-#if CONFIG_TUX_DEBUG
- req->bytes_expected = 0;
-#endif
- req->in_file.f_pos = 0;
- req->atom_idx = 0;
- clear_keepalive(req);
- req->status = -1;
- if (req->usermode) {
- req->usermode = 0;
- req->private = 0;
- }
- flush_request(req, 0);
- count++;
- goto restart;
- }
- spin_unlock_irq(&ti->work_lock);
-
- return count;
-}
-
-int print_all_requests (threadinfo_t *ti)
-{
- struct list_head *head, *curr;
- tux_req_t *req;
- int count = 0;
-
- spin_lock_irq(&ti->work_lock);
- head = &ti->all_requests;
- curr = head->next;
-
- while (curr != head) {
- req = list_entry(curr, tux_req_t, all);
- curr = curr->next;
- print_req(req);
- count++;
- }
- spin_unlock_irq(&ti->work_lock);
-
- return count;
-}
-
+++ /dev/null
-/*
- * TUX - Integrated Application Protocols Layer and Object Cache
- *
- * Copyright (C) 2000, 2001, Ingo Molnar <mingo@redhat.com>
- *
- * Cleaned up logger output for Alpha.
- * -- Phil Ezolt (Phillip.Ezolt@compaq.com) & Bill Carr (wcarr92@yahoo.com)
- *
- * logger.c: log requests finished by TUX.
- */
-
-#define __KERNEL_SYSCALLS__
-#include <net/tux.h>
-
-/****************************************************************
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- ****************************************************************/
-
-static spinlock_t log_lock = SPIN_LOCK_UNLOCKED;
-static unsigned int log_head, log_tail;
-static char * log_buffer = NULL;
-static DECLARE_WAIT_QUEUE_HEAD(log_wait);
-static DECLARE_WAIT_QUEUE_HEAD(log_full);
-static int logger_pid = 0;
-
-/*
- * High-speed TUX logging architecture:
- *
- * All fast threads share a common log-ringbuffer. (default size 1MB)
- * Log entries are binary and are padded to be cacheline aligned, this
- * ensures that there is no cache-pingpong between fast threads.
- *
- * The logger thread writes out pending log entries within 1 second
- * (buffer-cache writes data out within 5 seconds). The logger thread
- * gets activated once we have more than 25% of the log ringbuffer
- * filled - or the 1 second log timeout expires. Fast threads block
- * if if more than 95% of the ringbuffer is filled and unblock only
- * if used logbuffer space drops below 90%.
- *
- * This architecture guarantees that 1) logging is reliable (no
- * log entry is ever lost), 2) timely (touches disk within 6 seconds),
- * 3) in the log-contention case the saturation behavior is still
- * write-clustered, but 4) if the logger thread can keep up then
- * the coupling is completely asynchron and parallel.
- *
- * The binary log format gives us about 50% saved IO/memory bandwith
- * and 50% less on-disk used log space than the traditional W3C ASCII
- * format.
- *
- * (We might switch to raw IO though to write the logfile.)
- */
-
-#define SOFT_LIMIT (LOG_LEN*25/100)
-#define HARD_LIMIT (LOG_LEN*95/100)
-#define HARD_RELAX_LIMIT (LOG_LEN*90/100)
-
-unsigned int tux_logentry_align_order = 5;
-
-#if SMP_CACHE_BYTES == 8
-# define TUX_LOGENTRY_ALIGN 3
-#else
-#if SMP_CACHE_BYTES == 16
-# define TUX_LOGENTRY_ALIGN 4
-#else
-#if SMP_CACHE_BYTES == 32
-# define TUX_LOGENTRY_ALIGN 5
-#else
-#if SMP_CACHE_BYTES == 64
-# define TUX_LOGENTRY_ALIGN 6
-#else
-#if SMP_CACHE_BYTES == 128
-# define TUX_LOGENTRY_ALIGN 7
-#else
-#if SMP_CACHE_BYTES == 256
-# define TUX_LOGENTRY_ALIGN 8
-#else
-#error Add entry!
-#endif
-#endif
-#endif
-#endif
-#endif
-#endif
-
-#define ROUND_UP(x) (((((x)-1) >> TUX_LOGENTRY_ALIGN) + 1) \
- << TUX_LOGENTRY_ALIGN)
-
-static void __throttle_logging (void)
-{
- DECLARE_WAITQUEUE(wait, current);
- int pending;
-
- add_wait_queue(&log_full, &wait);
- for (;;) {
- static unsigned long last_warning = 0;
-
- if (jiffies - last_warning > 10*HZ) {
- last_warning = jiffies;
- printk(KERN_NOTICE "TUX: log buffer overflow, have to throttle TUX thread!\n");
- }
-
- current->state = TASK_INTERRUPTIBLE;
-
- spin_lock(&log_lock);
- pending = log_head-log_tail;
- spin_unlock(&log_lock);
-
- if ((pending % LOG_LEN) < HARD_LIMIT)
- break;
-
- schedule();
- }
- current->state = TASK_RUNNING;
- remove_wait_queue(&log_full, &wait);
-}
-
-#if CONFIG_TUX_DEBUG
-#define CHECK_LOGPTR(ptr) \
-do { \
- if ((ptr < log_buffer) || (ptr > log_buffer + LOG_LEN)) { \
- printk(KERN_ERR "TUX: ouch: log ptr %p > %p + %ld!\n", \
- ptr, log_buffer, LOG_LEN); \
- TUX_BUG(); \
- } \
-} while (0)
-#else
-#define CHECK_LOGPTR(ptr) do { } while (0)
-#endif
-
-void __log_request (tux_req_t *req)
-{
- char *str, *next;
- const char *uri_str;
- unsigned int inc, len, uri_len, pending, next_head, def_vhost_len = 0;
- unsigned long flags;
-
- if (req->proto->pre_log)
- req->proto->pre_log(req);
- /*
- * Log the reply status (success, or type of failure)
- */
- if (!tux_log_incomplete && (!req->status || (req->bytes_sent == -1))) {
-
- Dprintk("not logging req %p: {%s} [%d/%d]\n", req, req->uri_str, req->status, req->bytes_sent);
- return;
- }
- Dprintk("uri: {%s} [%d]\n", req->uri_str, req->uri_len);
-
-#define NO_URI "<none>"
- if (req->uri_len) {
- uri_len = req->uri_len;
- uri_str = req->uri_str;
- } else {
- uri_str = NO_URI;
- uri_len = sizeof(NO_URI)-1;
- }
- len = uri_len + 1;
-
- if (req->virtual) {
- if (req->host_len)
- len += req->host_len;
- else {
- def_vhost_len = strlen(tux_default_vhost);
- len += def_vhost_len;
- }
- }
-
- Dprintk("method_str: {%s} [%d]\n", req->method_str, req->method_len);
- len += req->method_len + 1;
-
- Dprintk("version_str: {%s} [%d]\n", req->version_str, req->version_len);
- len += req->version_len + 1;
-
-#if CONFIG_TUX_EXTENDED_LOG
- Dprintk("user_agent_str: {%s} [%d]\n", req->user_agent_str, req->user_agent_len);
- len += req->user_agent_len + 1;
-#endif
- if (tux_referer_logging) {
- Dprintk("referer_str: {%s} [%d]\n", req->referer_str, req->referer_len);
- len += req->referer_len;
- }
- len++;
-
- inc = 5*sizeof(u32) + len;
-#if CONFIG_TUX_EXTENDED_LOG
- inc += 7*sizeof(u32);
-#endif
-
- spin_lock_irqsave(&log_lock, flags);
-
- next_head = ROUND_UP(log_head + inc);
-
- if (next_head < LOG_LEN) {
- str = log_buffer + log_head;
- if (str > log_buffer + LOG_LEN)
- TUX_BUG();
- log_head = next_head;
- } else {
- if (log_head < LOG_LEN)
- memset(log_buffer+log_head, 0, LOG_LEN-log_head);
- str = log_buffer;
- log_head = ROUND_UP(inc);
- }
-
- if (str < log_buffer || str+inc >= log_buffer+LOG_LEN)
- TUX_BUG();
-
- /*
- * Log record signature - this makes finding the next entry
- * easier (since record length is variable), and makes the
- * binary logfile more robust against potential data corruption
- * and other damage. The signature also servers as a log format
- * version identifier.
- */
-#if CONFIG_TUX_EXTENDED_LOG
- *(u32 *)str = 0x2223beef;
-#else
- *(u32 *)str = 0x1112beef;
-#endif
- str += sizeof(u32);
- CHECK_LOGPTR(str);
-
- *(u32 *)str = 0;
- /*
- * Log the client IP address:
- */
- if (tux_ip_logging)
- *(u32 *)str = req->client_addr;
- str += sizeof(u32);
- CHECK_LOGPTR(str);
-
-#if CONFIG_TUX_EXTENDED_LOG
- /*
- * Log the client port number:
- */
- *(u32 *)str = 0;
- if (tux_ip_logging)
- *(u32 *)str = req->client_port;
- str += sizeof(u32);
- CHECK_LOGPTR(str);
-#endif
-
- /*
- * Log the request timestamp, in units of 'seconds since 1970'.
- */
- *(u32 *)str = CURRENT_TIME.tv_sec;
- str += sizeof(u32);
- CHECK_LOGPTR(str);
-
-#if CONFIG_TUX_EXTENDED_LOG
- *(u32 *)str = req->accept_timestamp; str += sizeof(u32);
- *(u32 *)str = req->parse_timestamp; str += sizeof(u32);
- *(u32 *)str = req->output_timestamp; str += sizeof(u32);
- *(u32 *)str = req->flush_timestamp; str += sizeof(u32);
- *(u32 *)str = req->had_cachemiss; str += sizeof(u32);
- *(u32 *)str = req->keep_alive; str += sizeof(u32);
-#endif
- /*
- * Log the requested file size (in fact, log actual bytes sent.)
- */
- *(u32 *)str = req->bytes_sent;
- str += sizeof(u32);
- CHECK_LOGPTR(str);
-
- *(u32 *)str = req->status;
- str += sizeof(u32);
- CHECK_LOGPTR(str);
-
- /*
- * Zero-terminated method, (base) URI, query and version string.
- */
- if (req->method_len) {
- memcpy(str, req->method_str, req->method_len);
- str += req->method_len;
- CHECK_LOGPTR(str);
- }
- *str++ = 0;
-
- if (req->virtual) {
- if (req->host_len) {
- memcpy(str, req->host, req->host_len);
- str += req->host_len;
- } else {
- memcpy(str, tux_default_vhost, def_vhost_len);
- str += def_vhost_len;
- }
- CHECK_LOGPTR(str);
- }
-
- memcpy(str, uri_str, uri_len);
- str += uri_len;
- *str++ = 0;
-
- CHECK_LOGPTR(str);
-
- if (req->version_len) {
- memcpy(str, req->version_str, req->version_len);
- str += req->version_len;
- CHECK_LOGPTR(str);
- }
- *str++ = 0;
-#if CONFIG_TUX_EXTENDED_LOG
- if (req->user_agent_len) {
- memcpy(str, req->user_agent_str, req->user_agent_len);
- str += req->user_agent_len;
- CHECK_LOGPTR(str);
- }
- *str++ = 0;
-#endif
- CHECK_LOGPTR(str);
-
- if (tux_referer_logging && req->referer_len) {
- memcpy(str, req->referer_str, req->referer_len);
- str += req->referer_len;
- CHECK_LOGPTR(str);
- }
- *str++ = 0;
- CHECK_LOGPTR(str);
- /*
- * pad with spaces to next cacheline, with an ending newline.
- * (not needed for the user-space log utility, but results in
- * a more readable binary log file, and reduces the amount
- * of cache pingpong.)
- */
- next = (char *)ROUND_UP((unsigned long)str);
-
- CHECK_LOGPTR(next);
- len = next-str;
- memset(str, ' ', len);
-
- pending = (log_head-log_tail) % LOG_LEN;
- spin_unlock_irqrestore(&log_lock, flags);
-
- if (pending >= SOFT_LIMIT)
- wake_up(&log_wait);
-
- if (pending >= HARD_LIMIT)
- __throttle_logging();
-}
-
-void tux_push_pending (struct sock *sk)
-{
- struct tcp_opt *tp = tcp_sk(sk);
-
- Dprintk("pushing pending frames on sock %p.\n", sk);
- lock_sock(sk);
- if ((sk->sk_state == TCP_ESTABLISHED) && !sk->sk_err) {
- tp->ack.pingpong = tux_ack_pingpong;
- tp->nonagle = 1;
- __tcp_push_pending_frames(sk, tp, tcp_current_mss(sk, 0), TCP_NAGLE_OFF);
- }
- release_sock(sk);
-}
-
-inline void tux_push_req (tux_req_t *req)
-{
- if (req->sock)
- tux_push_pending(req->sock->sk);
- if (req->data_sock)
- tux_push_pending(req->data_sock->sk);
-}
-
-void __put_data_sock (tux_req_t *req)
-{
- unlink_tux_data_socket(req);
- if (req->data_sock->file)
- fput(req->data_sock->file);
- else
- sock_release(req->data_sock);
- req->data_sock = NULL;
-}
-
-/* open-coded sys_close */
-
-long tux_close(unsigned int fd)
-{
- struct file * filp;
- struct files_struct *files = current->files;
-
- spin_lock(&files->file_lock);
- if (fd >= files->max_fds)
- goto out_unlock;
- filp = files->fd[fd];
- if (!filp)
- goto out_unlock;
- files->fd[fd] = NULL;
- FD_CLR(fd, files->close_on_exec);
- /* __put_unused_fd(files, fd); */
- __FD_CLR(fd, files->open_fds);
- if (fd < files->next_fd)
- files->next_fd = fd;
- spin_unlock(&files->file_lock);
- return filp_close(filp, files);
-
-out_unlock:
- spin_unlock(&files->file_lock);
- return -EBADF;
-}
-
-void flush_request (tux_req_t *req, int cachemiss)
-{
- struct socket *sock;
- struct sock *sk;
- int keep_alive;
-
- if (cachemiss)
- TUX_BUG();
- __set_task_state(current, TASK_RUNNING);
-
- if (req->magic != TUX_MAGIC)
- TUX_BUG();
- if (req->ti->thread != current)
- TUX_BUG();
-#if CONFIG_TUX_DEBUG
- if (req->bytes_expected && (req->bytes_sent != req->bytes_expected)) {
- printk("hm, bytes_expected: %d != bytes_sent: %d!\n",
- req->bytes_expected, req->bytes_sent);
- TUX_BUG();
- }
-#endif
- SET_TIMESTAMP(req->flush_timestamp);
-
- log_request(req);
- sock = req->sock;
- sk = NULL;
- if (sock)
- sk = sock->sk;
- Dprintk("FLUSHING req %p <%p> (sock %p, sk %p) (keepalive: %d, status: %d)\n", req, __builtin_return_address(0), sock, sk, req->keep_alive, req->status);
- if (req->in_file.f_pos)
- /*TUX_BUG()*/;
- release_req_dentry(req);
- req->private = 0;
-
- if (req->docroot_dentry) {
- dput(req->docroot_dentry);
- req->docroot_dentry = NULL;
- if (!req->docroot_mnt)
- TUX_BUG();
- }
- if (req->docroot_mnt) {
- mntput(req->docroot_mnt);
- req->docroot_mnt = NULL;
- }
-
- req->offset_start = 0;
- req->offset_end = 0;
- req->output_len = 0;
- req->total_file_len = 0;
- req->lendigits = 0;
- req->mtime = 0;
- req->etaglen = 0;
- req->etag[0] = 0;
- req->ftp_command = 0;
-
- if (req->postponed)
- TUX_BUG();
- if (test_bit(0, &req->idle_input))
- TUX_BUG();
- if (test_bit(0, &req->wait_output_space))
- TUX_BUG();
- if (req->parsed_len)
- trunc_headers(req);
- if (req->parsed_len)
- TUX_BUG();
- req->attr = NULL;
- req->usermode = 0;
- req->usermodule_idx = 0;
- req->atom_idx = 0;
- if (req->module_dentry) {
- dput(req->module_dentry);
- req->module_dentry = NULL;
- }
- if (req->headers)
- kfree(req->headers);
- req->headers = NULL;
- req->headers_len = 0;
-
- req->method = METHOD_NONE;
- req->method_len = 0;
- req->method_str = NULL;
- req->version = 0;
- req->version_str = NULL;
- req->version_len = 0;
-
- req->uri_str = NULL;
- req->uri_len = 0;
-
- req->objectname[0] = 0;
- req->objectname_len = 0;
-
- req->query_str = NULL;
- req->query_len = 0;
-
- req->cookies_str = NULL;
- req->cookies_len = 0;
- req->parse_cookies = 0;
-
- req->contentlen_str = NULL;
- req->contentlen_len = 0;
- req->content_len = 0;
-
- req->user_agent_str = NULL;
- req->user_agent_len = 0;
-
- req->may_send_gzip = 0;
- req->content_gzipped = 0;
-
- req->content_type_str = NULL;
- req->content_type_len = 0;
-
- req->accept_str = NULL;
- req->accept_len = 0;
-
- req->accept_charset_str = NULL;
- req->accept_charset_len = 0;
-
- req->accept_encoding_str = NULL;
- req->accept_encoding_len = 0;
-
- req->accept_language_str = NULL;
- req->accept_language_len = 0;
-
- req->cache_control_str = NULL;
- req->cache_control_len = 0;
-
- req->if_modified_since_str = NULL;
- req->if_modified_since_len = 0;
-
- req->if_none_match_str = NULL;
- req->if_none_match_len = 0;
-
- req->if_range_str = NULL;
- req->if_range_len = 0;
-
- req->negotiate_str = NULL;
- req->negotiate_len = 0;
-
- req->pragma_str = NULL;
- req->pragma_len = 0;
-
- req->referer_str = NULL;
- req->referer_len = 0;
-
- req->post_data_str = NULL;
- req->post_data_len = 0;
-
- SET_TIMESTAMP(req->accept_timestamp);
-#if CONFIG_TUX_EXTENDED_LOG
- req->parse_timestamp = 0;
- req->output_timestamp = 0;
- req->flush_timestamp = 0;
-#endif
- req->status = 0;
-
- req->total_bytes += req->bytes_sent;
- req->bytes_sent = 0;
-#if CONFIG_TUX_DEBUG
- req->bytes_expected = 0;
-#endif
- req->body_len = 0;
- keep_alive = req->keep_alive;
- clear_keepalive(req);
- req->had_cachemiss = 0;
- // first_timestamp and total_bytes is kept!
- req->event = 0;
- req->lookup_dir = 0;
- req->lookup_404 = 0;
-
- req->error = 0;
- req->user_error = 0;
-
- if (req->abuf.page)
- __free_page(req->abuf.page);
- memset(&req->abuf, 0, sizeof(req->abuf));
-
- if (sk && keep_alive) {
- add_tux_atom(req, parse_request);
- if (skb_queue_empty(&sk->sk_receive_queue)) {
- spin_lock_irq(&req->ti->work_lock);
- add_keepalive_timer(req);
- if (test_and_set_bit(0, &req->idle_input))
- TUX_BUG();
- /*
- * Avoid the race with the event callback:
- */
- if (skb_queue_empty(&sk->sk_receive_queue) ||
- !test_and_clear_bit(0, &req->idle_input)) {
- INC_STAT(nr_idle_input_pending);
- spin_unlock_irq(&req->ti->work_lock);
- tux_push_req(req);
- goto out;
- }
- del_keepalive_timer(req);
- spin_unlock_irq(&req->ti->work_lock);
- }
- Dprintk("KEEPALIVE PENDING req %p <%p> (sock %p, sk %p) (keepalive: %d, status: %d)\n", req, __builtin_return_address(0), req->sock, req->sock->sk, req->keep_alive, req->status);
- add_req_to_workqueue(req);
- INC_STAT(nr_keepalive_optimized);
- goto out;
- }
-
- del_timer_sync(&req->keepalive_timer);
- del_timer_sync(&req->output_timer);
-
- if (timer_pending(&req->keepalive_timer))
- TUX_BUG();
- if (timer_pending(&req->output_timer))
- TUX_BUG();
- if (!list_empty(&req->lru))
- TUX_BUG();
- req->nr_keepalives = 0;
- req->client_addr = 0;
- req->client_port = 0;
- req->virtual = 0;
- req->ftp_offset_start = 0;
-
- req->host[0] = 0;
- req->host_len = 0;
-
- if (req->cwd_dentry) {
- dput(req->cwd_dentry);
- req->cwd_dentry = NULL;
- if (!req->cwd_mnt)
- TUX_BUG();
- }
- if (req->cwd_mnt) {
- mntput(req->cwd_mnt);
- req->cwd_mnt = NULL;
- }
- put_data_sock(req);
- req->prev_pos = 0;
- req->curroff = 0;
- req->total = 0;
- if (req->dirp0) {
- kfree(req->dirp0);
- req->dirp0 = NULL;
- }
-
- if (sk)
- unlink_tux_socket(req);
- req->sock = NULL;
- /*
- * Close potential user-space file descriptors.
- */
- {
- int fd = req->fd, ret;
-
- if (fd != -1) {
- req->fd = -1;
- ret = tux_close(fd);
- if (ret)
- TUX_BUG();
- } else
- if (sock)
- sock_release(sock);
- }
- kfree_req(req);
-out:
- ;
-}
-
-static int warn_once = 1;
-
-static unsigned int writeout_log (void)
-{
- unsigned int len, pending, next_log_tail;
- mm_segment_t oldmm = get_fs();
- struct file *log_filp;
- char * str;
- unsigned int ret;
-
- if (tux_logging)
- Dprintk("TUX logger: opening log file {%s}.\n", tux_logfile);
- log_filp = tux_open_file(tux_logfile, O_CREAT|O_APPEND|O_WRONLY|O_LARGEFILE);
- if (!log_filp) {
- if (warn_once) {
- printk(KERN_ERR "TUX: could not open log file {%s}!\n",
- tux_logfile);
- warn_once = 0;
- }
- __set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(HZ);
- return 0;
- }
- spin_lock(&log_lock);
- str = log_buffer + log_tail;
- if (log_head < log_tail) {
- len = LOG_LEN-log_tail;
- next_log_tail = 0;
- } else {
- len = log_head-log_tail;
- next_log_tail = log_head;
- }
- if (!len)
- goto out;
- spin_unlock(&log_lock);
-
- set_fs(KERNEL_DS);
- ret = log_filp->f_op->write(log_filp, str, len, &log_filp->f_pos);
- set_fs(oldmm);
-
- if (len != ret) {
- if (ret == -ENOSPC) {
- printk(KERN_ERR "TUX: trying to write TUX logfile %s, but filesystem is full! Lost %d bytes of log data.\n", tux_logfile, len);
- } else {
- printk(KERN_ERR "TUX: log write %d != %d.\n", ret, len);
- printk(KERN_ERR "TUX: log_filp: %p, str: %p, len: %d str[len-1]: %d.\n", log_filp, str, len, str[len-1]);
- }
- goto out_lock;
- }
-
- /*
- * Sync log data to disk:
- */
- if (log_filp->f_op && log_filp->f_op->fsync) {
- down(&log_filp->f_dentry->d_inode->i_sem);
- log_filp->f_op->fsync(log_filp, log_filp->f_dentry, 1);
- up(&log_filp->f_dentry->d_inode->i_sem);
- }
-
- /*
- * Reduce the cache footprint of the logger file - it's
- * typically write-once.
- */
- invalidate_inode_pages(log_filp->f_dentry->d_inode->i_mapping);
-
-out_lock:
- spin_lock(&log_lock);
-out:
- log_tail = next_log_tail;
- pending = (log_head-log_tail) % LOG_LEN;
- spin_unlock(&log_lock);
-
- if (pending < HARD_LIMIT)
- wake_up(&log_full);
-
- fput(log_filp);
- return pending;
-}
-
-static DECLARE_WAIT_QUEUE_HEAD(stop_logger_wait);
-static int stop_logger = 0;
-
-static int logger_thread (void *data)
-{
- DECLARE_WAITQUEUE(wait, current);
- mm_segment_t oldmm;
-
- daemonize("TUX logger");
-
- oldmm = get_fs();
- set_fs(KERNEL_DS);
- printk(KERN_NOTICE "TUX: logger thread started.\n");
-#if CONFIG_SMP
- {
- cpumask_t log_mask, map;
-
- mask_to_cpumask(log_cpu_mask, &log_mask);
- cpus_and(map, cpu_online_map, log_mask);
- if(!(cpus_empty(map)))
- set_cpus_allowed(current, map);
-
- }
-#endif
-
-
- spin_lock_irq(¤t->sighand->siglock);
- siginitsetinv(¤t->blocked, 0);
- recalc_sigpending();
- spin_unlock_irq(¤t->sighand->siglock);
-
- if (log_buffer)
- TUX_BUG();
- log_buffer = vmalloc(LOG_LEN);
- memset(log_buffer, 0, LOG_LEN);
- log_head = log_tail = 0;
-
- current->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
-
- add_wait_queue(&log_wait, &wait);
- for (;;) {
- if (tux_logging)
- Dprintk("logger does writeout - stop:%d.\n", stop_logger);
-
- while (writeout_log() >= SOFT_LIMIT) {
- if (stop_logger)
- break;
- }
- if (stop_logger)
- break;
- /* nothing */;
-
- if (tux_logging)
- Dprintk("logger does sleep - stop:%d.\n", stop_logger);
- __set_current_state(TASK_INTERRUPTIBLE);
- if (log_head != log_tail) {
- __set_current_state(TASK_RUNNING);
- continue;
- }
- schedule_timeout(HZ);
- if (tux_logging)
- Dprintk("logger back from sleep - stop:%d.\n", stop_logger);
- if (signal_pending(current))
- flush_all_signals();
- }
- remove_wait_queue(&log_wait, &wait);
-
- vfree(log_buffer);
- log_buffer = NULL;
- stop_logger = 0;
- wake_up(&stop_logger_wait);
-
- set_fs(oldmm);
-
- return 0;
-}
-
-void start_log_thread (void)
-{
- warn_once = 1;
-
- logger_pid = kernel_thread(logger_thread, NULL, 0);
- if (logger_pid < 0)
- TUX_BUG();
-}
-
-void stop_log_thread (void)
-{
- DECLARE_WAITQUEUE(wait, current);
-
- Dprintk("stopping logger thread %d ...\n", logger_pid);
-
- __set_current_state(TASK_UNINTERRUPTIBLE);
- add_wait_queue(&stop_logger_wait, &wait);
- stop_logger = 1;
- wake_up(&log_wait);
- schedule();
- __set_current_state(TASK_RUNNING);
- remove_wait_queue(&stop_logger_wait, &wait);
-
- Dprintk("logger thread stopped!\n");
-}
+++ /dev/null
-/*
- * TUX - Integrated Application Protocols Layer and Object Cache
- *
- * Copyright (C) 2000, 2001, Ingo Molnar <mingo@redhat.com>
- *
- * main.c: main management and initialization routines
- */
-
-#define __KERNEL_SYSCALLS__
-#define __KERNEL_SYSCALLS_NO_ERRNO__
-
-#include <net/tux.h>
-
-/****************************************************************
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- ****************************************************************/
-
-/*
- * Threads information.
- */
-unsigned int nr_tux_threads;
-static atomic_t nr_tux_threads_running = ATOMIC_INIT(0);
-static int stop_threads = 0;
-
-threadinfo_t threadinfo[CONFIG_TUX_NUMTHREADS];
-
-static void flush_all_requests (threadinfo_t *ti);
-
-void flush_all_signals (void)
-{
- spin_lock_irq(¤t->sighand->siglock);
- flush_signals(current);
- recalc_sigpending();
- spin_unlock_irq(¤t->sighand->siglock);
-}
-
-int nr_requests_used (void)
-{
- unsigned int i, nr = 0;
-
- for (i = 0; i < nr_tux_threads; i++) {
- threadinfo_t *ti = threadinfo + i;
- nr += ti->nr_requests - ti->nr_free_requests;
- }
-
- return nr;
-}
-
-static inline int accept_pending (threadinfo_t *ti)
-{
- int j;
-
- for (j = 0; j < CONFIG_TUX_NUMSOCKETS; j++) {
- if (!ti->listen[j].proto)
- break;
- if (!ti->listen[j].sock)
- break;
- if (tcp_sk(ti->listen[j].sock->sk)->accept_queue)
- return 1;
- }
- return 0;
-}
-
-static inline int requests_pending (threadinfo_t *ti)
-{
- if (!list_empty(&ti->work_pending))
- return 1;
- return 0;
-}
-
-static int event_loop (threadinfo_t *ti)
-{
- tux_req_t *req;
- int work_done;
-
-repeat_accept:
- if (ti->thread != current)
- TUX_BUG();
-
- /*
- * Any (relevant) event on the socket will change this
- * thread to TASK_RUNNING because we add it to both
- * the main listening and the connection request socket
- * waitqueues. Thus we can do 'lazy checking' of work
- * to be done and schedule away only if the thread is
- * still TASK_INTERRUPTIBLE. This makes TUX fully
- * event driven.
- */
- set_task_state(current, TASK_INTERRUPTIBLE);
- current->flags |= PF_MEMALLOC;
- work_done = 0;
- if (accept_pending(ti))
- work_done = accept_requests(ti);
-
- if (requests_pending(ti)) {
- work_done = process_requests(ti, &req);
- if (req)
- goto handle_userspace_req;
- }
-
- /*
- * Be nice to other processes:
- */
- if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) {
- __set_task_state(current, TASK_RUNNING);
- schedule();
- goto repeat_accept;
- }
-
- if (ti->userspace_req)
- TUX_BUG();
- if (unlikely(stop_threads))
- goto handle_stop;
-
- /* Any signals? */
- if (unlikely(signal_pending(current)))
- goto handle_signal;
-
- if (work_done)
- goto repeat_accept;
- /*
- * Any socket event either on the listen socket
- * or on the request sockets will wake us up:
- */
- if ((current->state != TASK_RUNNING) &&
- !requests_pending(ti) && !accept_pending(ti)) {
- Dprintk("fast thread: no work to be done, sleeping.\n");
- schedule();
- Dprintk("fast thread: back from sleep!\n");
- goto repeat_accept;
- }
- goto repeat_accept;
-
-handle_userspace_req:
- if (req->attr)
- TUX_BUG();
- switch_docroot(req);
- ti->userspace_req = req;
- __set_task_state(current, TASK_RUNNING);
- return TUX_RETURN_USERSPACE_REQUEST;
-
-handle_signal:
- __set_task_state(current, TASK_RUNNING);
- return TUX_RETURN_SIGNAL;
-
-handle_stop:
- __set_task_state(current, TASK_RUNNING);
- return TUX_RETURN_EXIT;
-}
-
-static int init_queues (int nr_tux_threads)
-{
- int i;
-
- for (i = 0; i < nr_tux_threads; i++) {
- threadinfo_t *ti = threadinfo + i;
-
- INIT_LIST_HEAD(&ti->all_requests);
-
- ti->free_requests_lock = SPIN_LOCK_UNLOCKED;
- INIT_LIST_HEAD(&ti->free_requests);
-
- ti->work_lock = SPIN_LOCK_UNLOCKED;
- INIT_LIST_HEAD(&ti->work_pending);
- INIT_LIST_HEAD(&ti->lru);
-
- }
- return 0;
-}
-
-int tux_chroot (char *dir)
-{
- kernel_cap_t saved_cap = current->cap_effective;
- mm_segment_t oldmm;
- int err;
-
- /* Allow chroot dir to be in kernel space. */
- oldmm = get_fs(); set_fs(KERNEL_DS);
- set_fs(KERNEL_DS);
- cap_raise (current->cap_effective, CAP_SYS_CHROOT);
-
- err = chroot(dir);
- if (!err)
- chdir("/");
-
- current->cap_effective = saved_cap;
- set_fs(oldmm);
-
- return err;
-}
-
-/*
- * Right now this is not fully SMP-safe against multiple TUX
- * managers. It's just a rudimentary protection against typical
- * mistakes.
- */
-static int initialized = 0;
-
-#define MAX_DOCROOTLEN 500
-
-static int lookup_docroot(struct nameidata *docroot, const char *name)
-{
- int err;
-
- docroot->mnt = mntget(current->fs->rootmnt);
- docroot->dentry = dget(current->fs->root);
- docroot->last.len = 0;
- docroot->flags = LOOKUP_FOLLOW;
-
- err = path_walk(name, docroot);
- if (err) {
- mntput(docroot->mnt);
- docroot->mnt = NULL;
- return err;
- }
- return 0;
-}
-
-static int user_req_startup (void)
-{
- char name[MAX_DOCROOTLEN];
- struct nameidata *docroot;
- unsigned int i;
- int err;
-
- if (initialized)
- return -EINVAL;
- initialized = 1;
-
- /*
- * Look up the HTTP and FTP document root.
- * (typically they are shared, but can be
- * different directories.)
- */
- docroot = &tux_proto_http.main_docroot;
- if (docroot->mnt)
- TUX_BUG();
- strcpy(name, tux_common_docroot);
- strcat(name, tux_http_subdocroot);
-
- err = lookup_docroot(docroot, name);
- if (err) {
- initialized = 0;
- printk(KERN_ERR "TUX: could not look up HTTP documentroot: \"%s\"\n", name);
- return err;
- }
-
- docroot = &tux_proto_ftp.main_docroot;
- if (docroot->mnt)
- TUX_BUG();
- strcpy(name, tux_common_docroot);
- strcat(name, tux_ftp_subdocroot);
-
- err = lookup_docroot(docroot, name);
- if (err) {
-abort:
- docroot = &tux_proto_http.main_docroot;
- path_release(docroot);
- memset(docroot, 0, sizeof(*docroot));
- initialized = 0;
- printk(KERN_ERR "TUX: could not look up FTP documentroot: \"%s\"\n", name);
- return err;
- }
-
- /*
- * Start up the logger thread. (which opens the logfile)
- */
- start_log_thread();
-
- nr_tux_threads = tux_threads;
- if (nr_tux_threads < 1)
- nr_tux_threads = 1;
- if (nr_tux_threads > CONFIG_TUX_NUMTHREADS)
- nr_tux_threads = CONFIG_TUX_NUMTHREADS;
- tux_threads = nr_tux_threads;
-
- /*
- * Set up per-thread work-queues:
- */
- memset(threadinfo, 0, CONFIG_TUX_NUMTHREADS*sizeof(threadinfo_t));
- init_queues(nr_tux_threads);
-
- /*
- * Prepare the worker thread structures.
- */
- for (i = 0; i < nr_tux_threads; i++) {
- threadinfo_t *ti = threadinfo + i;
- ti->cpu = i;
- ti->gzip_state.workspace =
- vmalloc(zlib_deflate_workspacesize());
- if (!ti->gzip_state.workspace ||
- (zlib_deflateInit(&ti->gzip_state, 6) != Z_OK)) {
- stop_log_thread();
- goto abort;
- }
- init_MUTEX(&ti->gzip_sem);
- }
-
- __module_get(tux_module);
-
- return 0;
-}
-
-static DECLARE_WAIT_QUEUE_HEAD(wait_stop);
-static DECLARE_WAIT_QUEUE_HEAD(thread_stopped);
-
-static int user_req_shutdown (void)
-{
- DECLARE_WAITQUEUE(wait, current);
- struct nameidata *docroot;
- int i, err = -EINVAL;
-
- lock_kernel();
- if (!initialized) {
- Dprintk("TUX is not up - cannot shut down.\n");
- goto err;
- }
- initialized = 0;
- stop_threads = 1;
- add_wait_queue(&thread_stopped, &wait);
-
-wait_more:
- /*
- * Wake up all the worker threads so they notice
- * that we are being stopped.
- */
- set_task_state(current, TASK_UNINTERRUPTIBLE);
- if (atomic_read(&nr_tux_threads_running)) {
- Dprintk("TUX: shutdown, %d threads still running.\n",
- atomic_read(&nr_tux_threads_running));
- wake_up(&wait_stop);
- schedule();
- goto wait_more;
- }
- set_task_state(current, TASK_RUNNING);
- stop_threads = 0;
- remove_wait_queue(&thread_stopped, &wait);
-
- if (nr_async_io_pending())
- TUX_BUG();
-
- stop_log_thread();
-
- docroot = &tux_proto_http.main_docroot;
- path_release(docroot);
- memset(docroot, 0, sizeof(*docroot));
- docroot = &tux_proto_ftp.main_docroot;
- path_release(docroot);
- memset(docroot, 0, sizeof(*docroot));
- err = 0;
-
- flush_dentry_attributes();
- free_mimetypes();
- unregister_all_tuxmodules();
-
- for (i = 0; i < nr_tux_threads; i++) {
- threadinfo_t *ti = threadinfo + i;
- vfree(ti->gzip_state.workspace);
- }
-
- module_put(tux_module);
-
-err:
- unlock_kernel();
- return err;
-}
-
-void drop_permissions (void)
-{
- /*
- * Userspace drops privileges already, and group
- * membership is important to keep.
- */
- /* Give the new process no privileges.. */
- current->uid = current->euid =
- current->suid = current->fsuid = tux_cgi_uid;
- current->gid = current->egid =
- current->sgid = current->fsgid = tux_cgi_gid;
- cap_clear(current->cap_permitted);
- cap_clear(current->cap_inheritable);
- cap_clear(current->cap_effective);
-}
-
-static int wait_for_others (void)
-{
- threadinfo_t *ti;
- unsigned int cpu;
-
-repeat:
- if (signal_pending(current))
- return -1;
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(HZ/10);
-
- for (cpu = 0; cpu < nr_tux_threads; cpu++) {
- ti = threadinfo + cpu;
- if (ti->listen_error)
- return -1;
- if (!ti->started)
- goto repeat;
- }
- /* ok, all threads have started up. */
- return 0;
-}
-
-static void zap_listen_sockets (threadinfo_t *ti)
-{
- struct socket *sock;
- int i;
-
- for (i = 0; i < CONFIG_TUX_NUMSOCKETS; i++) {
- if (!ti->listen[i].proto)
- break;
- sock = ti->listen[i].sock;
- if (!ti->listen[i].cloned && sock) {
- while (waitqueue_active(sock->sk->sk_sleep))
- yield();
- sock_release(sock);
- }
- ti->listen[i].sock = NULL;
- ti->listen[i].proto = NULL;
- ti->listen[i].cloned = 0;
- }
-}
-
-static DECLARE_MUTEX(serialize_startup);
-
-static int user_req_start_thread (threadinfo_t *ti)
-{
- unsigned int err, cpu, i, j, k;
- struct k_sigaction *ka;
-
- cpu = ti->cpu;
-#if CONFIG_SMP
- {
- unsigned int mask;
- cpumask_t cpu_mask, map;
-
- mask = 1 << ((cpu + tux_cpu_offset) % num_online_cpus());
-
- mask_to_cpumask(mask, &cpu_mask);
- cpus_and(map, cpu_mask, cpu_online_map);
- if(!(cpus_empty(map)))
- set_cpus_allowed(current, map);
- }
-#endif
- ti->thread = current;
- atomic_inc(&nr_tux_threads_running);
-
- err = start_cachemiss_threads(ti);
- if (err)
- goto out;
-
- init_waitqueue_entry(&ti->stop, current);
- for (j = 0; j < CONFIG_TUX_NUMSOCKETS; j++)
- init_waitqueue_entry(ti->wait_event + j, current);
-
- ka = current->sighand->action + SIGCHLD-1;
- ka->sa.sa_handler = SIG_IGN;
-
- /* Block all signals except SIGKILL, SIGSTOP, SIGHUP and SIGCHLD */
- spin_lock_irq(¤t->sighand->siglock);
- siginitsetinv(¤t->blocked, sigmask(SIGKILL) |
- sigmask(SIGSTOP)| sigmask(SIGHUP) | sigmask(SIGCHLD));
- recalc_sigpending();
- spin_unlock_irq(¤t->sighand->siglock);
-
- if (!tux_listen[cpu][0].proto) {
- printk(KERN_ERR "no listen socket specified for TUX thread %d, in /proc/net/tux/%d/listen/, aborting.\n", cpu, cpu);
- goto error;
- }
-
- /*
- * Serialize startup so that listen sockets can be
- * created race-free.
- */
- down(&serialize_startup);
-
- Dprintk("thread %d initializing sockets.\n", cpu);
-
- for (k = 0; k < CONFIG_TUX_NUMSOCKETS; k++) {
- tux_socket_t *e1, *e2;
-
- e1 = tux_listen[cpu] + k;
- if (!e1->proto)
- break;
- for (i = 0; i < CONFIG_TUX_NUMTHREADS; i++) {
- if (i == cpu)
- continue;
- for (j = 0; j < CONFIG_TUX_NUMSOCKETS; j++) {
- e2 = tux_listen[i] + j;
- if (!e2->proto)
- continue;
- if ((e1->ip == e2->ip) && (e1->port == e2->port) && (e1->proto == e2->proto) && threadinfo[i].listen[j].proto) {
- ti->listen[k] = threadinfo[i].listen[j];
- ti->listen[k].cloned = 1;
- Dprintk("cloned socket %d from thread %d's socket %d.\n", k, i, j);
- goto next_socket;
- }
- }
- }
-
- ti->listen[k].sock = start_listening(tux_listen[cpu] + k, cpu);
- if (!ti->listen[k].sock)
- goto error_unlock;
- ti->listen[k].cloned = 0;
- ti->listen[k].proto = tux_listen[cpu][k].proto;
- Dprintk("thread %d got sock %p (%d), proto %s.\n", cpu, ti->listen[k].sock, k, ti->listen[k].proto->name);
-next_socket:
- ;
- }
- Dprintk("thread %d done initializing sockets.\n", cpu);
- up(&serialize_startup);
-
- if (wait_for_others())
- goto error_nomsg;
-
- if (!ti->listen[0].proto) {
- printk("hm, socket 0 has no protocol.\n");
- goto error;
- }
-
- add_wait_queue(&wait_stop, &ti->stop);
- for (j = 0; j < CONFIG_TUX_NUMSOCKETS; j++)
- if (ti->listen[j].proto)
- add_wait_queue_exclusive(ti->listen[j].sock->sk->sk_sleep,
- ti->wait_event + j);
- drop_permissions();
-
- __module_get(tux_module);
- return 0;
-
-error_unlock:
- up(&serialize_startup);
-error:
- printk(KERN_NOTICE "TUX: could not start worker thread %d.\n", ti->cpu);
-
-error_nomsg:
- ti->listen_error = 1;
- ti->started = 0;
-
- zap_listen_sockets(ti);
- flush_all_requests(ti);
- stop_cachemiss_threads(ti);
-
- err = -EINVAL;
-
-out:
- /*
- * Last thread close the door:
- */
- if (atomic_dec_and_test(&nr_tux_threads_running))
- user_req_shutdown();
-
- return -err;
-}
-
-static int flush_idleinput (threadinfo_t * ti)
-{
- struct list_head *head, *tmp;
- tux_req_t *req;
- int count = 0;
-
- head = &ti->all_requests;
- tmp = head->next;
-
- while (tmp != head) {
- req = list_entry(tmp, tux_req_t, all);
- tmp = tmp->next;
- if (test_bit(0, &req->idle_input)) {
- idle_event(req);
- count++;
- }
- }
- return count;
-}
-
-static int flush_waitoutput (threadinfo_t * ti)
-{
- struct list_head *head, *tmp;
- tux_req_t *req;
- int count = 0;
-
- head = &ti->all_requests;
- tmp = head->next;
-
- while (tmp != head) {
- req = list_entry(tmp, tux_req_t, all);
- tmp = tmp->next;
- if (test_bit(0, &req->wait_output_space)) {
- output_space_event(req);
- count++;
- }
- }
- return count;
-}
-
-static void flush_all_requests (threadinfo_t *ti)
-{
- for (;;) {
- int count;
-
- count = flush_idleinput(ti);
- count += flush_waitoutput(ti);
- count += tux_flush_workqueue(ti);
- count += flush_freequeue(ti);
- if (!ti->nr_requests)
- break;
- /*
- * Go through again if we advanced:
- */
- if (count)
- continue;
- Dprintk("flush_all_requests: %d requests still waiting.\n", ti->nr_requests);
-#if TUX_DEBUG
- count = print_all_requests(ti);
- Dprintk("flush_all_requests: printed %d requests.\n", count);
-#endif
- current->state = TASK_UNINTERRUPTIBLE;
- schedule_timeout(HZ/10);
- }
-}
-
-int nr_async_io_pending (void)
-{
- unsigned int i, sum = 0;
-
- for (i = 0; i < nr_tux_threads; i++) {
- threadinfo_t *ti = threadinfo + i;
- if (ti->iot)
- sum += ti->iot->nr_async_pending;
- }
- return sum;
-}
-
-static int user_req_stop_thread (threadinfo_t *ti)
-{
- int j;
-
- printk(KERN_NOTICE "TUX: thread %d stopping ...\n",
- (int)(ti-threadinfo));
-
- if (!ti->started)
- TUX_BUG();
- for (j = 0; j < CONFIG_TUX_NUMSOCKETS; j++)
- if (ti->listen[j].proto)
- remove_wait_queue(ti->listen[j].sock->sk->sk_sleep,
- ti->wait_event + j);
- remove_wait_queue(&wait_stop, &ti->stop);
-
- Dprintk(KERN_NOTICE "TUX: thread %d waiting for sockets to go inactive ...\n", (int)(ti-threadinfo));
- zap_listen_sockets(ti);
-
- Dprintk(KERN_NOTICE "TUX: thread %d has all sockets inactive.\n", (int)(ti-threadinfo));
-
- flush_all_requests(ti);
- stop_cachemiss_threads(ti);
-
- if (ti->nr_requests)
- TUX_BUG();
- ti->started = 0;
-
- printk(KERN_INFO "TUX: thread %d stopped.\n", ti->cpu);
-
- ti->thread = NULL;
- current->tux_info = NULL;
- current->tux_exit = NULL;
- atomic_dec(&nr_tux_threads_running);
- wake_up(&thread_stopped);
-
- module_put(tux_module);
-
- return 0;
-}
-
-#define COPY_INT(u_field, k_field) \
-do { \
- if (__copy_to_user(&u_info->u_field, &req->k_field, \
- sizeof(req->k_field))) \
- return_EFAULT; \
-} while (0)
-
-#define GETLEN(k_field, maxlen) \
- ((req->k_field##_len < maxlen) ? \
- req->k_field##_len : maxlen-1)
-
-#define COPY_STR(u_field, k_field, maxlen) \
-do { \
- if (__copy_to_user(u_info->u_field, req->k_field##_str, \
- GETLEN(k_field, maxlen))) \
- return_EFAULT; \
-} while (0)
-
-#define COPY_COND_STR(u_field,k_field,maxlen) \
-do { \
- if (req->k_field##_len) \
- COPY_STR(u_field, k_field, maxlen); \
- if (__put_user((char)0, u_info->u_field + \
- GETLEN(k_field, maxlen))) \
- return_EFAULT; \
-} while (0)
-
-static void finish_userspace_req (tux_req_t *req)
-{
- threadinfo_t *ti = req->ti;
-
- ti->userspace_req = NULL;
- req->usermode = 0;
- req->private = 0;
- req->error = 0;
- DEC_STAT(nr_userspace_pending);
- flush_request(req, 0);
-}
-
-static void zap_userspace_req (tux_req_t *req)
-{
- clear_keepalive(req);
- finish_userspace_req(req);
-}
-
-/*
- * Fills in the user-space request structure:
- */
-static int prepare_userspace_req (threadinfo_t *ti, user_req_t *u_info)
-{
- u64 u_req;
- tux_req_t *req = ti->userspace_req;
- unsigned int tmp;
- int filelen;
- int fd;
-
- Dprintk("prepare_userspace_req(%p).\n", req);
- if (!req)
- TUX_BUG();
- if (req->error) {
- TDprintk("userspace request has error %d.\n", req->error);
- return -1;
- }
- fd = req->fd;
- if (fd == -1) {
- fd = sock_map_fd(req->sock);
- Dprintk("sock_map_fd(%p) :%d.\n", req, fd);
- if (fd < 0) {
- Dprintk("sock_map_fd() returned %d.\n", fd);
- return -EMFILE;
- }
- req->fd = fd;
- }
-
-#define return_EFAULT do { Dprintk("-EFAULT at %d:%s.\n", __LINE__, __FILE__); return -EFAULT; } while (0)
-
- if (!access_ok(VERIFY_WRITE, u_info, sizeof(*u_info)))
- return_EFAULT;
- if (__copy_to_user(&u_info->sock, &fd, sizeof(fd)))
- return_EFAULT;
- if (req->attr)
- TUX_BUG();
-
- COPY_INT(module_index, usermodule_idx);
-
- COPY_COND_STR(query, query, MAX_URI_LEN);
-
- COPY_INT(event, event);
- Dprintk("prepare userspace, user error: %d, event %d.\n", req->user_error, req->event);
- COPY_INT(error, user_error);
- req->user_error = 0;
-
- filelen = req->total_file_len;
- if (filelen < 0)
- filelen = 0;
- if (__copy_to_user(&u_info->objectlen, &filelen, sizeof(filelen)))
- return_EFAULT;
- if ((req->method == METHOD_POST) && !filelen)
- if (__copy_to_user(&u_info->objectlen,
- &req->content_len, sizeof(filelen)))
- return_EFAULT;
- if (req->objectname_len) {
- if (req->objectname[req->objectname_len])
- TUX_BUG();
- if (__copy_to_user(u_info->objectname, req->objectname,
- req->objectname_len + 1))
- return_EFAULT;
- } else
- if (__put_user((char)0, u_info->objectname))
- return_EFAULT;
-
- COPY_INT(http_version, version);
- COPY_INT(http_method, method);
- COPY_INT(keep_alive, keep_alive);
-
- COPY_INT(cookies_len, cookies_len);
- if (req->cookies_len)
- COPY_STR(cookies, cookies, MAX_COOKIE_LEN);
- if (__put_user((char)0, u_info->cookies + req->cookies_len))
- return_EFAULT;
-
- u_req = (u64)(unsigned long)req;
- if (__copy_to_user(&u_info->id, &u_req, sizeof(u_req)))
- return_EFAULT;
- COPY_INT(priv, private);
- COPY_INT(bytes_sent, bytes_sent);
-
- tmp = inet_sk(req->sock->sk)->daddr;
- if (__copy_to_user(&u_info->client_host, &tmp, sizeof(tmp)))
- return_EFAULT;
-
- COPY_COND_STR(content_type, content_type, MAX_FIELD_LEN);
- COPY_COND_STR(user_agent, user_agent, MAX_FIELD_LEN);
- COPY_COND_STR(accept, accept, MAX_FIELD_LEN);
- COPY_COND_STR(accept_charset, accept_charset, MAX_FIELD_LEN);
- COPY_COND_STR(accept_encoding, accept_encoding, MAX_FIELD_LEN);
- COPY_COND_STR(accept_language, accept_language, MAX_FIELD_LEN);
- COPY_COND_STR(cache_control, cache_control, MAX_FIELD_LEN);
- COPY_COND_STR(if_modified_since, if_modified_since, MAX_FIELD_LEN);
- COPY_COND_STR(negotiate, negotiate, MAX_FIELD_LEN);
- COPY_COND_STR(pragma, pragma, MAX_FIELD_LEN);
- COPY_COND_STR(referer, referer, MAX_FIELD_LEN);
-
- return TUX_RETURN_USERSPACE_REQUEST;
-}
-
-#define GOTO_ERR_no_unlock do { Dprintk("sys_tux() ERR at %s:%d.\n", __FILE__, __LINE__); goto err_no_unlock; } while (0)
-#define GOTO_ERR_unlock do { Dprintk("sys_tux() ERR at %s:%d.\n", __FILE__, __LINE__); goto err_unlock; } while (0)
-
-static int register_mimetype(user_req_t *u_info)
-{
- char extension[MAX_URI_LEN], mimetype[MAX_URI_LEN], expires[MAX_URI_LEN];
- u64 u_addr;
- char *addr;
- int ret;
-
- ret = strncpy_from_user(extension, u_info->objectname, MAX_URI_LEN);
- if (ret <= 0)
- GOTO_ERR_no_unlock;
- extension[ret] = 0;
- Dprintk("got MIME extension: %s.\n", extension);
- ret = copy_from_user(&u_addr, &u_info->object_addr, sizeof(u_addr));
- if (ret)
- GOTO_ERR_no_unlock;
- addr = (char *)(unsigned long)u_addr;
- ret = strncpy_from_user(mimetype, addr, MAX_URI_LEN);
- if (ret <= 0)
- GOTO_ERR_no_unlock;
- mimetype[ret] = 0;
- Dprintk("got MIME type: %s.\n", mimetype);
- ret = strncpy_from_user(expires, u_info->cache_control, MAX_URI_LEN);
- if (ret >= 0)
- expires[ret] = 0;
- else
- expires[0] = 0;
- Dprintk("got expires header: %s.\n", expires);
-
- add_mimetype(extension, mimetype, expires);
- ret = 0;
-err_no_unlock:
- return ret;
-}
-
-void user_send_buffer (tux_req_t *req, int cachemiss)
-{
- int ret;
-
-
- SET_TIMESTAMP(req->output_timestamp);
-
-repeat:
- ret = send_sync_buf(req, req->sock, req->userbuf, req->userlen, MSG_DONTWAIT | MSG_MORE);
- switch (ret) {
- case -EAGAIN:
- add_tux_atom(req, user_send_buffer);
- if (add_output_space_event(req, req->sock)) {
- del_tux_atom(req);
- goto repeat;
- }
- INC_STAT(user_sendbuf_write_misses);
- break;
- default:
- if (ret <= 0) {
- req_err(req);
- req->usermode = 0;
- req->private = 0;
- add_req_to_workqueue(req);
- break;
- }
- req->userbuf += ret;
- req->userlen -= ret;
- if ((int)req->userlen < 0)
- TUX_BUG();
- if (req->userlen)
- goto repeat;
- add_req_to_workqueue(req);
- break;
- }
-}
-
-void user_send_object (tux_req_t *req, int cachemiss)
-{
- int ret;
-
-
- SET_TIMESTAMP(req->output_timestamp);
-
-repeat:
- ret = generic_send_file(req, req->sock, cachemiss);
- switch (ret) {
- case -5:
- add_tux_atom(req, user_send_object);
- output_timeout(req);
- break;
- case -4:
- add_tux_atom(req, user_send_object);
- if (add_output_space_event(req, req->sock)) {
- del_tux_atom(req);
- goto repeat;
- }
- INC_STAT(user_sendobject_write_misses);
- break;
- case -3:
- INC_STAT(user_sendobject_cachemisses);
- add_tux_atom(req, user_send_object);
- queue_cachemiss(req);
- break;
- case -1:
- break;
- default:
- req->in_file.f_pos = 0;
- add_req_to_workqueue(req);
- break;
- }
-}
-
-void user_get_object (tux_req_t *req, int cachemiss)
-{
- int missed;
-
- if (!req->dentry) {
- req->usermode = 0;
- missed = lookup_object(req, cachemiss ? 0 : LOOKUP_ATOMIC);
- if (req->usermode)
- TUX_BUG();
- req->usermode = 1;
- if (!missed && !req->dentry) {
- req->error = 0;
- req->user_error = -ENOENT;
- add_req_to_workqueue(req);
- return;
- }
- if (missed) {
- if (cachemiss)
- TUX_BUG();
- INC_STAT(user_lookup_cachemisses);
-fetch_missed:
- req->ti->userspace_req = NULL;
- DEC_STAT(nr_userspace_pending);
- add_tux_atom(req, user_get_object);
- queue_cachemiss(req);
- return;
- }
- }
- req->total_file_len = req->dentry->d_inode->i_size;
- if (!req->output_len)
- req->output_len = req->total_file_len;
- if (tux_fetch_file(req, !cachemiss)) {
- INC_STAT(user_fetch_cachemisses);
- goto fetch_missed;
- }
- req->in_file.f_pos = 0;
- add_req_to_workqueue(req);
-}
-
-asmlinkage long __sys_tux (unsigned int action, user_req_t *u_info)
-{
- int ret = -1;
- threadinfo_t *ti;
- tux_req_t *req;
-
- if (action != TUX_ACTION_CURRENT_DATE)
- Dprintk("got sys_tux(%d, %p).\n", action, u_info);
-
- if (action >= MAX_TUX_ACTION)
- GOTO_ERR_no_unlock;
-
- ti = (threadinfo_t *) current->tux_info;
- if (ti)
- if (ti->thread != current)
- TUX_BUG();
-
- if (!capable(CAP_SYS_ADMIN)
- && (action != TUX_ACTION_CONTINUE_REQ) &&
- (action != TUX_ACTION_STOPTHREAD))
- goto userspace_actions;
-
- switch (action) {
- case TUX_ACTION_CONTINUE_REQ:
- ret = continue_request((int)(long)u_info);
- goto out;
-
- case TUX_ACTION_STARTUP:
- lock_kernel();
- ret = user_req_startup();
- unlock_kernel();
- goto out;
-
- case TUX_ACTION_SHUTDOWN:
- lock_kernel();
- ret = user_req_shutdown();
- unlock_kernel();
- goto out;
-
- case TUX_ACTION_REGISTER_MODULE:
- ret = user_register_module(u_info);
- goto out;
-
- case TUX_ACTION_UNREGISTER_MODULE:
- ret = user_unregister_module(u_info);
- goto out;
-
- case TUX_ACTION_STARTTHREAD:
- {
- unsigned int nr;
-
- ret = copy_from_user(&nr, &u_info->thread_nr,
- sizeof(int));
- if (ret)
- GOTO_ERR_no_unlock;
- if (nr >= nr_tux_threads)
- GOTO_ERR_no_unlock;
- ti = threadinfo + nr;
- if (ti->started)
- GOTO_ERR_unlock;
- ti->started = 1;
- current->tux_info = ti;
- current->tux_exit = tux_exit;
- if (ti->thread)
- TUX_BUG();
- Dprintk("TUX: current open files limit for TUX%d: %ld.\n", nr, current->rlim[RLIMIT_NOFILE].rlim_cur);
- lock_kernel();
- ret = user_req_start_thread(ti);
- unlock_kernel();
- if (ret) {
- current->tux_info = NULL;
- current->tux_exit = NULL;
- } else {
- if (ti->thread != current)
- TUX_BUG();
- }
- goto out_userreq;
- }
-
- case TUX_ACTION_STOPTHREAD:
- if (!ti)
- GOTO_ERR_no_unlock;
- if (!ti->started)
- GOTO_ERR_unlock;
- req = ti->userspace_req;
- if (req)
- zap_userspace_req(req);
-
- lock_kernel();
- ret = user_req_stop_thread(ti);
- unlock_kernel();
- goto out_userreq;
-
- case TUX_ACTION_CURRENT_DATE:
- ret = strncpy_from_user(tux_date, u_info->new_date,
- DATE_LEN);
- if (ret <= 0)
- GOTO_ERR_no_unlock;
- goto out;
-
- case TUX_ACTION_REGISTER_MIMETYPE:
- ret = register_mimetype(u_info);
- if (ret)
- GOTO_ERR_no_unlock;
- goto out;
-
- case TUX_ACTION_QUERY_VERSION:
- ret = (TUX_MAJOR_VERSION << 24) | (TUX_MINOR_VERSION << 16) | TUX_PATCHLEVEL_VERSION;
- goto out;
- default:
- ;
- }
-
-userspace_actions:
-
- if (!ti)
- GOTO_ERR_no_unlock;
-
- if (!ti->started)
- GOTO_ERR_unlock;
-
- req = ti->userspace_req;
- if (!req) {
- if (action == TUX_ACTION_EVENTLOOP)
- goto eventloop;
- GOTO_ERR_unlock;
- }
- if (!req->usermode)
- TUX_BUG();
-
- ret = copy_from_user(&req->event, &u_info->event, sizeof(int));
- if (ret)
- GOTO_ERR_unlock;
- ret = copy_from_user(&req->status, &u_info->http_status, sizeof(int));
- if (ret)
- GOTO_ERR_unlock;
- ret = copy_from_user(&req->bytes_sent, &u_info->bytes_sent, sizeof(int));
- if (ret)
- GOTO_ERR_unlock;
- ret = copy_from_user(&req->private, &u_info->priv, sizeof(req->private));
- if (ret)
- GOTO_ERR_unlock;
-
- switch (action) {
-
- case TUX_ACTION_EVENTLOOP:
-eventloop:
- req = ti->userspace_req;
- if (req)
- zap_userspace_req(req);
- ret = event_loop(ti);
- goto out_userreq;
-
- /*
- * Module forces keepalive off, server will close
- * the connection.
- */
- case TUX_ACTION_FINISH_CLOSE_REQ:
- clear_keepalive(req);
-
- case TUX_ACTION_FINISH_REQ:
- finish_userspace_req(req);
- goto eventloop;
-
- case TUX_ACTION_REDIRECT_REQ:
-
- ti->userspace_req = NULL;
- req->usermode = 0;
- req->private = 0;
- req->error = TUX_ERROR_REDIRECT;
- DEC_STAT(nr_userspace_pending);
- add_tux_atom(req, redirect_request);
- add_req_to_workqueue(req);
-
- goto eventloop;
-
- case TUX_ACTION_POSTPONE_REQ:
-
- postpone_request(req);
- ti->userspace_req = NULL;
- ret = TUX_RETURN_USERSPACE_REQUEST;
- break;
-
- case TUX_ACTION_GET_OBJECT:
- release_req_dentry(req);
- ret = strncpy_from_user(req->objectname,
- u_info->objectname, MAX_URI_LEN-1);
- if (ret <= 0) {
- req->objectname[0] = 0;
- req->objectname_len = 0;
- GOTO_ERR_unlock;
- }
- req->objectname[ret] = 0; // string delimit
- req->objectname_len = ret;
-
- Dprintk("got objectname {%s} (%d) from user-space req %p (req: %p).\n", req->objectname, req->objectname_len, u_info, req);
- req->ti->userspace_req = NULL;
- DEC_STAT(nr_userspace_pending);
- user_get_object(req, 0);
- goto eventloop;
-
- case TUX_ACTION_READ_OBJECT:
- {
- u64 u_addr;
- char *addr;
- loff_t ppos = 0;
- struct file *filp;
-
- if (!req->dentry)
- GOTO_ERR_unlock;
-
- ret = copy_from_user(&u_addr, &u_info->object_addr,
- sizeof(u_addr));
- if (ret)
- GOTO_ERR_unlock;
- addr = (char *)(unsigned long)u_addr;
- filp = dentry_open(req->dentry, O_RDONLY, 0);
- dget(req->dentry);
- generic_file_read(filp, addr, req->total_file_len, &ppos);
- fput(filp);
- ret = TUX_RETURN_USERSPACE_REQUEST;
- break;
- }
-
- case TUX_ACTION_SEND_OBJECT:
- if (!req->dentry)
- GOTO_ERR_unlock;
- req->ti->userspace_req = NULL;
- DEC_STAT(nr_userspace_pending);
- user_send_object(req, 0);
- goto eventloop;
-
- case TUX_ACTION_SEND_BUFFER:
- {
- u64 u_addr;
- char *addr;
- unsigned int len;
-
- ret = copy_from_user(&u_addr,
- &u_info->object_addr, sizeof(u_addr));
- if (ret)
- GOTO_ERR_unlock;
- addr = (char *)(unsigned long)u_addr;
- ret = copy_from_user(&len,
- &u_info->objectlen, sizeof(addr));
- if (ret)
- GOTO_ERR_unlock;
- if ((int)len <= 0)
- GOTO_ERR_unlock;
-
- ret = -EFAULT;
- if (!access_ok(VERIFY_READ, addr, len))
- GOTO_ERR_unlock;
- req->userbuf = addr;
- req->userlen = len;
-
- req->ti->userspace_req = NULL;
- DEC_STAT(nr_userspace_pending);
- user_send_buffer(req, 0);
- ret = 0;
- goto eventloop;
- }
-
- case TUX_ACTION_READ_HEADERS:
- {
- char *addr;
- u64 u_addr;
-
- ret = copy_from_user(&u_addr, &u_info->object_addr,
- sizeof(u_addr));
- if (ret)
- GOTO_ERR_unlock;
- addr = (char *)(unsigned long)u_addr;
- ret = copy_to_user(&u_info->objectlen,
- &req->headers_len, sizeof(req->headers_len));
- if (ret)
- GOTO_ERR_unlock;
- ret = copy_to_user(addr,req->headers, req->headers_len);
- if (ret)
- GOTO_ERR_unlock;
- break;
- }
-
- case TUX_ACTION_READ_POST_DATA:
- {
- char *addr;
- unsigned int size;
- u64 u_addr;
-
- ret = copy_from_user(&u_addr, &u_info->object_addr,
- sizeof(u_addr));
- if (ret)
- GOTO_ERR_unlock;
- addr = (char *)(unsigned long)u_addr;
-
- ret = copy_from_user(&size, &u_info->objectlen,
- sizeof(size));
- if (ret)
- GOTO_ERR_unlock;
- Dprintk("READ_POST_DATA: got %p(%d).\n", addr, size);
- if (req->post_data_len < size)
- size = req->post_data_len;
- Dprintk("READ_POST_DATA: writing %d.\n", size);
- ret = copy_to_user(&u_info->objectlen,
- &size, sizeof(size));
- if (ret)
- GOTO_ERR_unlock;
- ret = copy_to_user(addr, req->post_data_str, size);
- if (ret)
- GOTO_ERR_unlock;
- goto out;
- }
-
- case TUX_ACTION_WATCH_PROXY_SOCKET:
- {
- struct socket *sock;
- int err;
- long fd;
- u64 u_addr;
-
- ret = copy_from_user(&u_addr, &u_info->object_addr,
- sizeof(u_addr));
- if (ret)
- GOTO_ERR_unlock;
- fd = (int)(unsigned long)u_addr;
-
- sock = sockfd_lookup(fd, &err);
- if (!sock)
- GOTO_ERR_unlock;
- put_data_sock(req);
- link_tux_data_socket(req, sock);
-
- ret = 0;
- goto out;
- }
-
- case TUX_ACTION_WAIT_PROXY_SOCKET:
- {
- if (!req->data_sock)
- GOTO_ERR_unlock;
- if (socket_input(req->data_sock)) {
- ret = TUX_RETURN_USERSPACE_REQUEST;
- goto out_userreq;
- }
- spin_lock_irq(&req->ti->work_lock);
- add_keepalive_timer(req);
- if (test_and_set_bit(0, &req->idle_input))
- TUX_BUG();
- spin_unlock_irq(&req->ti->work_lock);
- if (socket_input(req->data_sock)) {
- unidle_req(req);
- ret = TUX_RETURN_USERSPACE_REQUEST;
- goto out_userreq;
- }
- req->ti->userspace_req = NULL;
- goto eventloop;
- }
-
- default:
- GOTO_ERR_unlock;
- }
-
-out_userreq:
- req = ti->userspace_req;
- if (req) {
- ret = prepare_userspace_req(ti, u_info);
- if (ret < 0) {
- TDprintk("hm, user req %p returned %d, zapping.\n",
- req, ret);
- zap_userspace_req(req);
- goto eventloop;
- }
- }
-out:
- if (action != TUX_ACTION_CURRENT_DATE)
- Dprintk("sys_tux(%d, %p) returning %d.\n", action, u_info, ret);
- while (unlikely(test_thread_flag(TIF_NEED_RESCHED))) {
- __set_task_state(current, TASK_RUNNING);
- schedule();
- }
- return ret;
-err_unlock:
-err_no_unlock:
- Dprintk("sys_tux(%d, %p) returning -EINVAL (ret:%d)!\n", action, u_info, ret);
- while (unlikely(test_thread_flag(TIF_NEED_RESCHED))) {
- __set_task_state(current, TASK_RUNNING);
- schedule();
- }
- return -EINVAL;
-}
-
-/*
- * This gets called if a TUX thread does an exit().
- */
-void tux_exit (void)
-{
- __sys_tux(TUX_ACTION_STOPTHREAD, NULL);
-}
-
-int tux_init(void)
-{
- start_sysctl();
-
-#if CONFIG_TUX_MODULE
- spin_lock(&tux_module_lock);
- sys_tux_ptr = __sys_tux;
- tux_module = THIS_MODULE;
- spin_unlock(&tux_module_lock);
-#endif
-
- return 0;
-}
-
-void tux_cleanup (void)
-{
-#if CONFIG_TUX_MODULE
- spin_lock(&tux_module_lock);
- tux_module = NULL;
- sys_tux_ptr = NULL;
- spin_unlock(&tux_module_lock);
-#endif
-
- end_sysctl();
-}
-
-module_init(tux_init)
-module_exit(tux_cleanup)
-
-MODULE_LICENSE("GPL");
-
+++ /dev/null
-/*
- * TUX - Integrated Application Protocols Layer and Object Cache
- *
- * Copyright (C) 2000, 2001, Ingo Molnar <mingo@redhat.com>
- *
- * mod.c: loading/registering of dynamic TUX modules
- */
-
-#include <net/tux.h>
-#include <linux/kmod.h>
-
-/****************************************************************
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- ****************************************************************/
-
-spinlock_t tuxmodules_lock = SPIN_LOCK_UNLOCKED;
-static LIST_HEAD(tuxmodules_list);
-
-tcapi_template_t * get_first_usermodule (void)
-{
- tcapi_template_t *tcapi;
- struct list_head *head, *curr, *next;
-
- spin_lock(&tuxmodules_lock);
- head = &tuxmodules_list;
- next = head->next;
-
- while ((curr = next) != head) {
- tcapi = list_entry(curr, tcapi_template_t, modules);
- next = curr->next;
- if (tcapi->userspace_id) {
- spin_unlock(&tuxmodules_lock);
- return tcapi;
- }
- }
- spin_unlock(&tuxmodules_lock);
- return NULL;
-}
-
-static tcapi_template_t * lookup_module (const char *vfs_name)
-{
- tcapi_template_t *tcapi;
- struct list_head *head, *curr, *next;
-
- while (*vfs_name == '/')
- vfs_name++;
- Dprintk("looking up TUX module {%s}.\n", vfs_name);
- head = &tuxmodules_list;
- next = head->next;
-
- while ((curr = next) != head) {
- tcapi = list_entry(curr, tcapi_template_t, modules);
- next = curr->next;
- Dprintk("checking module {%s} == {%s}?\n", vfs_name, tcapi->vfs_name);
- if (!strcmp(tcapi->vfs_name, vfs_name))
- return tcapi;
- }
- return NULL;
-}
-
-/*
- * Attempt to load a TUX application module.
- * This is the slow path, we cache ('link') the module's
- * API vector to the inode.
- * The module loading path is serialized, and we handshake
- * with the loaded module and fetch its API vector.
- */
-tcapi_template_t * lookup_tuxmodule (const char *filename)
-{
- tcapi_template_t *tcapi;
-
- spin_lock(&tuxmodules_lock);
- tcapi = lookup_module(filename);
- if (!tcapi)
- Dprintk("did not find module vfs:{%s}\n", filename);
- spin_unlock(&tuxmodules_lock);
- return tcapi;
-}
-
-
-int register_tuxmodule (tcapi_template_t *tcapi)
-{
- int ret = -EEXIST;
-
- spin_lock(&tuxmodules_lock);
-
- if (lookup_module(tcapi->vfs_name)) {
- Dprintk("module with VFS binding '%s' already registered!\n",
- tcapi->vfs_name);
- goto out;
- }
-
- list_add(&tcapi->modules, &tuxmodules_list);
- ret = 0;
- Dprintk("TUX module %s registered.\n", tcapi->vfs_name);
-out:
- spin_unlock(&tuxmodules_lock);
-
- return ret;
-}
-
-void unregister_all_tuxmodules (void)
-{
- tcapi_template_t *tcapi;
- struct list_head *curr;
-
- spin_lock(&tuxmodules_lock);
- while (((curr = tuxmodules_list.next)) != &tuxmodules_list) {
- tcapi = list_entry(curr, tcapi_template_t, modules);
- list_del(curr);
- kfree(tcapi->vfs_name);
- kfree(tcapi);
- }
- spin_unlock(&tuxmodules_lock);
-}
-
-tcapi_template_t * unregister_tuxmodule (char *vfs_name)
-{
- tcapi_template_t *tcapi;
- int err = 0;
-
- spin_lock(&tuxmodules_lock);
- tcapi = lookup_module(vfs_name);
- if (!tcapi) {
- Dprintk("huh, module %s not registered??\n", vfs_name);
- err = -1;
- } else {
- list_del(&tcapi->modules);
- Dprintk("TUX module %s unregistered.\n", vfs_name);
- }
- spin_unlock(&tuxmodules_lock);
-
- return tcapi;
-}
-
-static int check_module_version (user_req_t *u_info)
-{
- int major, minor, patch, ret;
-
- ret = copy_from_user(&major, &u_info->version_major, sizeof(int));
- ret += copy_from_user(&minor, &u_info->version_minor, sizeof(int));
- ret += copy_from_user(&patch, &u_info->version_patch, sizeof(int));
- if (ret)
- return -EFAULT;
-
- if ((major != TUX_MAJOR_VERSION) || (minor > TUX_MINOR_VERSION)) {
-
- printk(KERN_ERR "TUX: module version %d:%d incompatible with kernel version %d:%d!\n", major, minor, TUX_MAJOR_VERSION, TUX_MINOR_VERSION);
- return -EINVAL;
- }
- return 0;
-}
-
-int user_register_module (user_req_t *u_info)
-{
- int idx, len, ret;
- tcapi_template_t *tcapi;
- char modulename [MAX_URI_LEN+1];
-
- ret = check_module_version(u_info);
- if (ret)
- return ret;
-
- /*
- * Check module name length.
- */
- ret = strnlen_user(u_info->objectname, MAX_URI_LEN+2);
- if (ret < 0)
- goto out;
- ret = -EINVAL;
- if (ret >= MAX_URI_LEN)
- goto out;
-
- Dprintk("register user-module, %p.\n", u_info);
- ret = strncpy_from_user(modulename, u_info->objectname, MAX_URI_LEN);
- if (ret < 0)
- goto out;
- modulename[ret] = 0;
- Dprintk("... user-module is: {%s}.\n", modulename);
- len = strlen(modulename);
- if (!len)
- printk(KERN_ERR "no module name provided: please upgrade your TUX user-space utilities!\n");
- if (!len || (len > MAX_URI_LEN))
- return -EINVAL;
- Dprintk("... user-module len is: %d.\n", len);
-
- ret = copy_from_user(&idx, &u_info->module_index, sizeof(int));
- if (ret || !idx)
- goto out;
- Dprintk("... user-module index is: %d.\n", idx);
-
- ret = -ENOMEM;
- tcapi = (tcapi_template_t *) kmalloc(sizeof(*tcapi), GFP_KERNEL);
- if (!tcapi)
- goto out;
- memset(tcapi, 0, sizeof(*tcapi));
-
- tcapi->vfs_name = (char *) kmalloc(len+1, GFP_KERNEL);
- if (!tcapi->vfs_name) {
- kfree(tcapi);
- goto out;
- }
- strcpy(tcapi->vfs_name, modulename);
- tcapi->userspace_id = idx;
-
- Dprintk("... registering module {%s}.\n", tcapi->vfs_name);
- ret = register_tuxmodule(tcapi);
-out:
- return ret;
-}
-
-int user_unregister_module (user_req_t *u_info)
-{
- int len, ret;
- tcapi_template_t *tcapi;
- char modulename [MAX_URI_LEN+1];
-
- /*
- * Check module name length.
- */
- ret = strnlen_user(u_info->objectname, MAX_URI_LEN+2);
- if (ret < 0)
- goto out;
- ret = -EINVAL;
- if (ret >= MAX_URI_LEN)
- goto out;
- Dprintk("unregister user-module, %p.\n", u_info);
- ret = strncpy_from_user(modulename, u_info->objectname, MAX_URI_LEN);
- if (ret <= 0)
- goto out;
- modulename[ret] = 0;
- Dprintk("... user-module is: {%s}.\n", modulename);
- len = strlen(modulename);
- if (!len || (len > MAX_URI_LEN))
- return -EINVAL;
- Dprintk("... user-module len is: %d.\n", len);
-
- Dprintk("... unregistering module {%s}.\n", modulename);
- tcapi = unregister_tuxmodule(modulename);
- ret = -EINVAL;
- if (tcapi) {
- ret = 0;
- kfree(tcapi->vfs_name);
- kfree(tcapi);
- }
-out:
- return ret;
-}
-
+++ /dev/null
-/*
- * TUX - Integrated Application Protocols Layer and Object Cache
- *
- * Copyright (C) 2000, 2001, Ingo Molnar <mingo@redhat.com>
- *
- * output.c: Send data to clients
- */
-
-#include <net/tux.h>
-
-/****************************************************************
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- ****************************************************************/
-
-int send_sync_buf (tux_req_t *req, struct socket *sock, const char *buf, const size_t length, unsigned long flags)
-{
- struct msghdr msg;
- struct iovec iov;
- int len, written = 0, left = length;
- struct tcp_opt *tp = tcp_sk(sock->sk);
-
- tp->nonagle = 2;
-
- msg.msg_name = 0;
- msg.msg_namelen = 0;
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
- msg.msg_control = NULL;
- msg.msg_controllen = 0;
- msg.msg_flags = flags | MSG_NOSIGNAL;
-repeat_send:
- msg.msg_iov->iov_len = left;
- msg.msg_iov->iov_base = (char *) buf + written;
-
- len = sock_sendmsg(sock, &msg, left);
-
- Dprintk("sendmsg ret: %d, written: %d, left: %d.\n", len,written,left);
- if ((len == -ERESTARTSYS) || (!(flags & MSG_DONTWAIT) &&
- (len == -EAGAIN))) {
- flush_all_signals();
- goto repeat_send;
- }
- if (len > 0) {
- written += len;
- left -= len;
- if (left)
- goto repeat_send;
- }
- if (len >= 0) {
- if (written != length)
- TUX_BUG();
- if (left)
- TUX_BUG();
- }
- if (req && (written > 0))
- req->bytes_sent += written;
- Dprintk("sendmsg FINAL ret: %d, written: %d, left: %d.\n", len,written,left);
- return written ? written : len;
-}
-
-unsigned int tux_zerocopy_sendfile = 1;
-
-typedef struct sock_send_desc
-{
- struct socket *sock;
- tux_req_t *req;
-} sock_send_desc_t;
-
-static int sock_send_actor (read_descriptor_t * desc, struct page *page,
- unsigned long offset, unsigned long orig_size)
-{
- sock_send_desc_t *sock_desc = (sock_send_desc_t *)desc->arg.buf;
- struct socket *sock = sock_desc->sock;
- tux_req_t *req = sock_desc->req;
- unsigned int flags;
- ssize_t written;
- char *buf = NULL;
- unsigned int size;
-
- flags = MSG_DONTWAIT | MSG_NOSIGNAL;
- if (desc->count < orig_size)
- orig_size = desc->count;
- if (desc->count > orig_size)
- flags |= MSG_MORE;
- Dprintk("sock_send_actor(), page: %p, offset: %ld, orig_size: %ld, sock: %p, desc->count: %d, desc->written: %d, MSG_MORE: %d.\n", page, offset, orig_size, sock, desc->count, desc->written, flags & MSG_MORE);
-
- if (req->content_gzipped >= 2) {
- unsigned int gzip_left;
- struct msghdr msg;
- struct iovec iov;
- mm_segment_t oldmm;
- char *kaddr = kmap(page);
- __u32 in_len, out_len;
- out_len = orig_size*101/100 + 12;
- buf = tux_kmalloc(out_len);
- in_len = orig_size;
- size = out_len;
- gzip_left = 0;
-// 8b1f 0808 fdc4 3bd8 0300 79
-buf[1] = 0x8b; buf[0] = 0x1f; buf[3] = 0x08; buf[2] = 0x08;
-buf[5] = 0xfd; buf[4] = 0xc4; buf[7] = 0x3b; buf[6] = 0xd8;
-buf[9] = 0x03; buf[8] = 0x00; buf[10] = 0x79;
- size += 11;
- Dprintk("pre-compress: in_len: %d, out_len: %d, gzip_left: %d, uncompressed size: %d.\n", in_len, out_len, gzip_left, size);
- gzip_left = tux_gzip_compress(req, kaddr, buf+11, &in_len, &out_len);
- size -= out_len;
- buf[11] = 0x79; buf[12] = 0x00;
-
- Dprintk("post-compress: in_len: %d, out_len: %d, gzip_left: %d, compressed size: %d.\n", in_len, out_len, gzip_left, size);
- kunmap(page);
- msg.msg_name = NULL;
- msg.msg_namelen = 0;
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
- msg.msg_control = NULL;
- msg.msg_controllen = 0;
- flags &= ~MSG_DONTWAIT;
- msg.msg_flags = flags;
- iov.iov_base = buf;
- iov.iov_len = size;
-
- oldmm = get_fs(); set_fs(KERNEL_DS);
- written = sock_sendmsg(sock, &msg, size);
- set_fs(oldmm);
-
- Dprintk("buf: %p, offset: %ld, size: %d, written: %d.\n", buf, offset, size, written);
- if (written == size)
- written = orig_size;
- else
- written = size;
-
- } else {
- size = orig_size;
- if (tux_zerocopy_sendfile && sock->ops->sendpage &&
- (sock->sk->sk_route_caps&NETIF_F_SG)) {
- written = sock->ops->sendpage(sock, page, offset, size, flags);
- } else {
- struct msghdr msg;
- struct iovec iov;
- char *kaddr;
- mm_segment_t oldmm;
-
- if (offset+size > PAGE_SIZE)
- return -EFAULT;
-
- kaddr = kmap(page);
-
- msg.msg_name = NULL;
- msg.msg_namelen = 0;
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
- msg.msg_control = NULL;
- msg.msg_controllen = 0;
- msg.msg_flags = flags;
- iov.iov_base = kaddr + offset;
- iov.iov_len = size;
-
- oldmm = get_fs(); set_fs(KERNEL_DS);
- written = sock_sendmsg(sock, &msg, size);
- set_fs(oldmm);
-
- Dprintk("kaddr: %p, offset: %ld, size: %d, written: %d.\n", kaddr, offset, size, written);
- kunmap(page);
- }
- }
- if (written < 0) {
- desc->error = written;
- written = 0;
- }
- Dprintk("desc->count: %d, desc->written: %d, written: %d.\n", desc->count, desc->written, written);
- desc->count -= written;
- if ((int)desc->count < 0)
- TUX_BUG();
- desc->written += written;
-
- if (buf)
- kfree(buf);
-
- return written;
-}
-
-/*
- * Return 1 if the output space condition went away
- * before adding the handler.
- */
-int add_output_space_event (tux_req_t *req, struct socket *sock)
-{
- struct sock *sk = sock->sk;
- /*
- * blocked due to socket IO?
- */
- spin_lock_irq(&req->ti->work_lock);
- add_keepalive_timer(req);
- if (test_and_set_bit(0,&req->wait_output_space))
- TUX_BUG();
- INC_STAT(nr_output_space_pending);
-
- if ((sk->sk_state == TCP_ESTABLISHED) && enough_wspace(sk)) {
- if (test_and_clear_bit(0, &req->wait_output_space)) {
- DEC_STAT(nr_output_space_pending);
- del_keepalive_timer(req);
- spin_unlock_irq(&req->ti->work_lock);
- return 1;
- }
- }
- spin_unlock_irq(&req->ti->work_lock);
-
- return 0;
-}
-
-#define SEND_BLOCKSIZE (164*1024)
-
-int generic_send_file (tux_req_t *req, struct socket *sock, int cachemiss)
-{
- sock_send_desc_t sock_desc;
- int len, want, nonblock = !cachemiss;
- struct tcp_opt *tp = tcp_sk(sock->sk);
-
- tp->nonagle = 2;
-
- sock_desc.sock = sock;
- sock_desc.req = req;
-
-repeat:
- Dprintk("generic_send_file(%p,%d,%p) called, f_pos: %Ld, output_len: %Ld.\n", req, nonblock, sock, req->in_file.f_pos, req->output_len);
-
- if (req->proto->check_req_err(req, cachemiss))
- return -1;
- if (connection_too_fast(req) == 2) {
- len = -5;
- goto out;
- }
- if (req->total_file_len < req->in_file.f_pos)
- TUX_BUG();
-
- req->desc.written = 0;
- /*
- * Careful, output_len can be 64-bit, while 'want' can be 32-bit.
- */
- if (req->output_len > SEND_BLOCKSIZE)
- want = SEND_BLOCKSIZE;
- else
- want = req->output_len;
- req->desc.count = want;
- req->desc.arg.buf = (char *) &sock_desc;
- req->desc.error = 0;
- Dprintk("sendfile(), desc.count: %d.\n", req->desc.count);
- do_generic_file_read(&req->in_file, &req->in_file.f_pos, &req->desc, sock_send_actor, nonblock);
- if (req->desc.written > 0) {
- req->bytes_sent += req->desc.written;
- req->output_len -= req->desc.written;
- }
- if (!nonblock && (req->desc.error == -EWOULDBLOCKIO))
- TUX_BUG();
- Dprintk("sendfile() wrote: %d bytes.\n", req->desc.written);
- if (req->output_len && !req->desc.written && !req->desc.error) {
-#if CONFIG_TUX_DEBUG
- req->bytes_expected = 0;
-#endif
- req->in_file.f_pos = 0;
- req->error = TUX_ERROR_CONN_CLOSE;
- zap_request(req, cachemiss);
- return -1;
- }
-
- switch (req->desc.error) {
-
- case -EWOULDBLOCKIO:
- len = -3;
- break;
- case -EAGAIN:
-no_write_space:
- Dprintk("sk->wmem_queued: %d, sk->sndbuf: %d.\n",
- sock->sk->sk_wmem_queued, sock->sk->sk_sndbuf);
- len = -4;
- break;
- default:
- len = req->desc.written;
-#if CONFIG_TUX_DEBUG
- if (req->desc.error)
- TDprintk("TUX: sendfile() returned error %d (signals pending: %08lx)!\n", req->desc.error, current->pending.signal.sig[0]);
-#endif
- if (!req->desc.error) {
- if (req->output_len < 0)
- BUG();
- if (req->output_len) {
- if (test_bit(SOCK_NOSPACE, &sock->flags))
- goto no_write_space;
- goto repeat;
- }
- }
-#if CONFIG_TUX_DEBUG
- if (req->desc.written != want)
- TDprintk("TUX: sendfile() wrote %d bytes, wanted %d! (pos %Ld) (signals pending: %08lx).\n", req->desc.written, want, req->in_file.f_pos, current->pending.signal.sig[0]);
- else
- Dprintk("TUX: sendfile() FINISHED for req %p, wrote %d bytes.\n", req, req->desc.written);
- req->bytes_expected = 0;
-#endif
- break;
- }
-
-out:
- Dprintk("sendfile() wrote %d bytes.\n", len);
-
- return len;
-}
-
-static int file_fetch_actor (read_descriptor_t * desc, struct page *page,
- unsigned long offset, unsigned long size)
-{
- if (desc->count < size)
- size = desc->count;
-
- desc->count -= size;
- desc->written += size;
-
- return size;
-}
-
-int tux_fetch_file (tux_req_t *req, int nonblock)
-{
- int len;
-
- req->desc.written = 0;
- req->desc.count = req->output_len;
- req->desc.arg.buf = NULL;
- req->desc.error = 0;
-
- do_generic_file_read(&req->in_file, &req->in_file.f_pos, &req->desc,
- file_fetch_actor, nonblock);
- if (nonblock && (req->desc.error == -EWOULDBLOCKIO))
- return 1;
- len = req->desc.written;
- if (req->desc.error)
- Dprintk("fetchfile() returned %d error!\n", req->desc.error);
- Dprintk("fetchfile() fetched %d bytes.\n", len);
- return 0;
-}
-
+++ /dev/null
-/*
- * TUX - Integrated Application Protocols Layer and Object Cache
- *
- * Copyright (C) 2000, Ingo Molnar <mingo@redhat.com>
- *
- * parser.h: generic parsing routines
- */
-
-#define get_c(ptr,left) \
-({ \
- char __ret; \
- \
- if (!left) \
- GOTO_INCOMPLETE; \
- left--; \
- __ret = *((ptr)++); \
- if (!__ret) \
- GOTO_REDIR; \
- __ret; \
-})
-
-#define PARSE_TOKEN(ptr,str,left) \
- ({ \
- int __ret; \
- \
- if (!left) \
- GOTO_INCOMPLETE; \
- if (sizeof(str)-1 > left) { \
- if (memcmp(ptr, str, left)) \
- GOTO_REDIR; \
- GOTO_INCOMPLETE; \
- } \
- \
- if (memcmp(ptr, str, sizeof(str)-1)) \
- __ret = 0; \
- else { \
- ptr += sizeof(str)-1; \
- left -= sizeof(str)-1; \
- __ret = 1; \
- } \
- __ret; \
- })
-
-#define PARSE_METHOD(req,ptr,name,left) \
- ({ \
- int __ret; \
- \
- if (PARSE_TOKEN(ptr,#name" ",left)) { \
- req->method = METHOD_##name; \
- __ret = 1; \
- } else \
- __ret = 0; \
- __ret; \
- })
-
-#define COPY_LINE(ptr,target,left) \
- do { \
- char prev_c = 0, c; \
- while (((c = get_c(ptr,left))) != '\n') \
- *target++ = prev_c = c; \
- if (prev_c != '\r') \
- GOTO_REDIR; \
- } while (0)
-
-#define COPY_LINE_TOLOWER(ptr,target,left,limit) \
- do { \
- char prev_c = 0, c; \
- while (((c = get_c(ptr,left))) != '\n') { \
- if ((c >= 'A') && (c <= 'Z')) \
- c -= 'A'-'a'; \
- *target++ = prev_c = c; \
- if (target == (limit)) \
- GOTO_REDIR; \
- } \
- if (prev_c != '\r') \
- GOTO_REDIR; \
- } while (0)
-
-#define COPY_FIELD(ptr,target,left) \
- do { \
- char c; \
- while ((c = get_c(ptr,left)) != ' ') \
- *target++ = c; \
- } while (0)
-
-#define SKIP_LINE(ptr,left) \
- do { \
- char prev_c = 0, c; \
- while (((c = get_c(ptr,left))) != '\n') \
- prev_c = c; \
- if (prev_c != '\r') \
- GOTO_REDIR; \
- } while (0)
-
-#define SKIP_WHITESPACE(curr,left) \
-do { \
- while ((left) && (*(curr) == ' ')) \
- (curr)++, (left)--; \
- if (!(left)) \
- GOTO_REDIR; \
-} while (0)
-
+++ /dev/null
-/*
- * TUX - Integrated Application Protocols Layer and Object Cache
- *
- * Copyright (C) 2000, 2001, Ingo Molnar <mingo@redhat.com>
- *
- * postpone.c: postpone/continue userspace requests
- */
-
-#include <net/tux.h>
-
-/****************************************************************
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- ****************************************************************/
-
-void postpone_request (tux_req_t *req)
-{
- if (!req->usermode)
- TUX_BUG();
- INC_STAT(nr_postpone_pending);
- req->postponed = 1;
-}
-
-/*
- * Continue a postponed request. The request will show up in the
- * userspace queue and will be handled by the fast thread.
- * A request can only be postponed in a TUX process, but can be
- * continued from any process that has access to the socket file
- * descriptor.
- */
-int continue_request (int fd)
-{
- threadinfo_t *ti;
- struct socket *sock;
- tux_req_t *req;
- int err;
-
- sock = sockfd_lookup(fd, &err);
- if (!sock || !sock->sk)
- goto out;
- req = sock->sk->sk_user_data;
-
- err = -EINVAL;
- if (!req)
- goto out_put;
- ti = req->ti;
- if (!req->postponed)
- goto out_unlock_put;
- if (!req->usermode)
- TUX_BUG();
-
- req->postponed = 0;
- DEC_STAT(nr_postpone_pending);
-
- Dprintk("continuing postponed req %p.\n", req);
- add_req_to_workqueue(req);
-
-out_unlock_put:
- err = 0;
-out_put:
- fput(sock->file);
-out:
- return err;
-}
-
+++ /dev/null
-/*
- * TUX - Integrated Application Protocols Layer and Object Cache
- *
- * Copyright (C) 2000, 2001, Ingo Molnar <mingo@redhat.com>
- *
- * proc.c: /proc/sys/tux handling
- */
-
-#include <net/tux.h>
-
-/****************************************************************
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- ****************************************************************/
-
-char tux_common_docroot[200] = "/var/www/tux/";
-char tux_http_subdocroot[200] = "";
-char tux_ftp_subdocroot[200] = "";
-char tux_logfile[200] = "/var/log/tux";
-char tux_cgiroot[200] = "/var/www/tux/cgiroot/";
-char tux_404_page[200] = "404.html";
-char tux_default_vhost[200] = "default";
-char tux_extra_html_header[600];
-unsigned int tux_extra_html_header_size = 0;
-
-int tux_cgi_uid = -1;
-int tux_cgi_gid = -1;
-unsigned int tux_clientport = 8080;
-unsigned int tux_logging = 0;
-unsigned int tux_threads = 2;
-unsigned int tux_max_connect = 10000;
-unsigned int tux_max_keepalives = 10000;
-unsigned int tux_max_backlog = 2048;
-unsigned int tux_keepalive_timeout = 0;
-unsigned int tux_max_output_bandwidth = 0;
-unsigned int tux_defer_accept = 1;
-unsigned int tux_mode_forbidden = 0 /*S_IXUGO*/; /* do not allow executable (CGI) files */
-unsigned int tux_mode_allowed = S_IROTH; /* allow access if read-other is set */
-unsigned int tux_virtual_server = 0;
-unsigned int tux_ftp_virtual_server = 0;
-unsigned int mass_hosting_hash = 0;
-unsigned int strip_host_tail = 0;
-unsigned int tux_max_object_size = 0;
-unsigned int log_cpu_mask = ~0;
-unsigned int tux_compression = 0;
-unsigned int tux_noid = 0;
-unsigned int tux_cgi_inherit_cpu = 0;
-unsigned int tux_cgi_cpu_mask = ~0;
-unsigned int tux_zerocopy_header = 1;
-unsigned int tux_max_free_requests = 1000;
-unsigned int tux_ignore_query = 0;
-unsigned int tux_all_userspace = 0;
-unsigned int tux_redirect_logging = 1;
-unsigned int tux_max_header_len = 3000;
-unsigned int tux_referer_logging = 0;
-unsigned int tux_generate_etags = 1;
-unsigned int tux_generate_last_mod = 1;
-unsigned int tux_generate_cache_control = 1;
-unsigned int tux_ip_logging = 1;
-unsigned int tux_ftp_wait_close = 1;
-unsigned int tux_ftp_log_retr_only = 0;
-unsigned int tux_hide_unreadable = 1;
-unsigned int tux_http_dir_indexing = 0;
-unsigned int tux_log_incomplete = 0;
-unsigned int tux_cpu_offset = 0;
-unsigned int tux_ftp_login_message = 0;
-
-static struct ctl_table_header *tux_table_header;
-
-static ctl_table tux_table[] = {
- { NET_TUX_DOCROOT,
- "documentroot",
- &tux_common_docroot,
- sizeof(tux_common_docroot),
- 0644,
- NULL,
- proc_dostring,
- &sysctl_string,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_DOCROOT,
- "http_subdocroot",
- &tux_http_subdocroot,
- sizeof(tux_http_subdocroot),
- 0644,
- NULL,
- proc_dostring,
- &sysctl_string,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_DOCROOT,
- "ftp_subdocroot",
- &tux_ftp_subdocroot,
- sizeof(tux_ftp_subdocroot),
- 0644,
- NULL,
- proc_dostring,
- &sysctl_string,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_LOGFILE,
- "logfile",
- &tux_logfile,
- sizeof(tux_logfile),
- 0644,
- NULL,
- proc_dostring,
- &sysctl_string,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_THREADS,
- "threads",
- &tux_threads,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_KEEPALIVE_TIMEOUT,
- "keepalive_timeout",
- &tux_keepalive_timeout,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_MAX_KEEPALIVE_BW,
- "max_output_bandwidth",
- &tux_max_output_bandwidth,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_DEFER_ACCEPT,
- "defer_accept",
- &tux_defer_accept,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_MAX_BACKLOG,
- "max_backlog",
- &tux_max_backlog,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_MAX_CONNECT,
- "max_connect",
- &tux_max_connect,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_MAX_KEEPALIVES,
- "max_keepalives",
- &tux_max_keepalives,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_MODE_FORBIDDEN,
- "mode_forbidden",
- &tux_mode_forbidden,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_MODE_ALLOWED,
- "mode_allowed",
- &tux_mode_allowed,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_CGI_UID,
- "cgi_uid",
- &tux_cgi_uid,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_CGI_GID,
- "cgi_gid",
- &tux_cgi_gid,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_CGIROOT,
- "cgiroot",
- &tux_cgiroot,
- sizeof(tux_cgiroot),
- 0644,
- NULL,
- proc_dostring,
- &sysctl_string,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_404_PAGE,
- "404_page",
- &tux_404_page,
- sizeof(tux_404_page),
- 0644,
- NULL,
- proc_dostring,
- &sysctl_string,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_404_PAGE,
- "default_vhost",
- &tux_default_vhost,
- sizeof(tux_default_vhost),
- 0644,
- NULL,
- proc_dostring,
- &sysctl_string,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_404_PAGE,
- "extra_html_header",
- &tux_extra_html_header,
- sizeof(tux_extra_html_header),
- 0644,
- NULL,
- proc_dostring,
- &sysctl_string,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_CLIENTPORT,
- "extra_html_header_size",
- &tux_extra_html_header_size,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_CLIENTPORT,
- "clientport",
- &tux_clientport,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_CLIENTPORT,
- "generate_etags",
- &tux_generate_etags,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_CLIENTPORT,
- "generate_last_mod",
- &tux_generate_last_mod,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_CLIENTPORT,
- "generate_cache_control",
- &tux_generate_cache_control,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_CLIENTPORT,
- "ip_logging",
- &tux_ip_logging,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_CLIENTPORT,
- "ftp_wait_close",
- &tux_ftp_wait_close,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_CLIENTPORT,
- "ftp_log_retr_only",
- &tux_ftp_log_retr_only,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_CLIENTPORT,
- "http_dir_indexing",
- &tux_http_dir_indexing,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_CLIENTPORT,
- "hide_unreadable",
- &tux_hide_unreadable,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_CLIENTPORT,
- "log_incomplete",
- &tux_log_incomplete,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_LOGGING,
- "TDprintk",
- &tux_TDprintk,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_LOGGING,
- "Dprintk",
- &tux_Dprintk,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
-#if TUX_DPRINTK
-#endif
- { NET_TUX_LOGGING,
- "logging",
- &tux_logging,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_LOGENTRY_ALIGN_ORDER,
- "logentry_align_order",
- &tux_logentry_align_order,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_ACK_PINGPONG,
- "ack_pingpong",
- &tux_ack_pingpong,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_PUSH_ALL,
- "push_all",
- &tux_push_all,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_ZEROCOPY_PARSE,
- "zerocopy_parse",
- &tux_zerocopy_parse,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_VIRTUAL_SERVER,
- "virtual_server",
- &tux_virtual_server,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_VIRTUAL_SERVER,
- "mass_hosting_hash",
- &mass_hosting_hash,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_VIRTUAL_SERVER,
- "strip_host_tail",
- &strip_host_tail,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_VIRTUAL_SERVER,
- "ftp_virtual_server",
- &tux_ftp_virtual_server,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_MAX_OBJECT_SIZE,
- "max_object_size",
- &tux_max_object_size,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_COMPRESSION,
- "compression",
- &tux_compression,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_NOID,
- "noid",
- &tux_noid,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_CGI_INHERIT_CPU,
- "cgi_inherit_cpu",
- &tux_cgi_inherit_cpu,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_CGI_CPU_MASK,
- "cgi_cpu_mask",
- &tux_cgi_cpu_mask,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_ZEROCOPY_HEADER,
- "zerocopy_header",
- &tux_zerocopy_header,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_ZEROCOPY_SENDFILE,
- "zerocopy_sendfile",
- &tux_zerocopy_sendfile,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_MAX_FREE_REQUESTS,
- "max_free_requests",
- &tux_max_free_requests,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_ALL_USERSPACE,
- "all_userspace",
- &tux_all_userspace,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_REDIRECT_LOGGING,
- "redirect_logging",
- &tux_redirect_logging,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_IGNORE_QUERY,
- "ignore_query",
- &tux_ignore_query,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_REFERER_LOGGING,
- "referer_logging",
- &tux_referer_logging,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_REFERER_LOGGING,
- "cpu_offset",
- &tux_cpu_offset,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_REFERER_LOGGING,
- "ftp_login_message",
- &tux_ftp_login_message,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_MAX_HEADER_LEN,
- "max_header_len",
- &tux_max_header_len,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- {0,0,0,0,0,0,0,0,0,0,0} };
-
-
-static ctl_table tux_dir_table[] = {
- {NET_TUX, "tux", NULL, 0, 0555, tux_table,0,0,0,0,0},
- {0,0,0,0,0,0,0,0,0,0,0}
-};
-
-static ctl_table tux_root_table[] = {
- {CTL_NET, "net", NULL, 0, 0555, tux_dir_table,0,0,0,0,0},
- {0,0,0,0,0,0,0,0,0,0,0}
-};
-
-
-static struct proc_dir_entry * root_tux_dir;
-static struct proc_dir_entry * log_cpu_mask_entry;
-static struct proc_dir_entry * stat_entry;
-static struct proc_dir_entry * tux_dir [CONFIG_TUX_NUMTHREADS];
-static struct proc_dir_entry * listen_dir [CONFIG_TUX_NUMTHREADS];
-
-tux_socket_t tux_listen [CONFIG_TUX_NUMTHREADS][CONFIG_TUX_NUMSOCKETS] =
- { [0 ... CONFIG_TUX_NUMTHREADS-1] = { {&tux_proto_http, 0, 80, NULL}, } };
-
-#define HEX_DIGITS 8
-
-static int hex_read_proc (char *page, char **start, off_t off,
- int count, int *eof, void *data)
-{
- if (count < HEX_DIGITS+1)
- return -EINVAL;
- return sprintf (page, "%08x\n", *(unsigned int *)data);
-}
-
-static int hex_write_proc (struct file *file, const char *buffer,
- unsigned long count, void *data)
-{
- char hexnum [HEX_DIGITS];
- unsigned int new_value;
- unsigned int i, full_count = count;
-
- if (!count)
- return -EINVAL;
- if (count > HEX_DIGITS)
- count = HEX_DIGITS;
- if (copy_from_user(hexnum, buffer, count))
- return -EFAULT;
-
- /*
- * Parse the first 8 characters as a hex string, any non-hex char
- * is end-of-string. '00e1', 'e1', '00E1', 'E1' are the same.
- */
- new_value = 0;
-
- for (i = 0; i < count; i++) {
- unsigned int c = hexnum[i];
-
- switch (c) {
- case '0' ... '9': c -= '0'; break;
- case 'a' ... 'f': c -= 'a'-10; break;
- case 'A' ... 'F': c -= 'A'-10; break;
- default:
- goto out;
- }
- new_value = (new_value << 4) | c;
- }
-out:
- *(int *)data = new_value;
-
- return full_count;
-}
-
-#define LINE_SIZE 1024
-#define LINE_MASK (LINE_SIZE-1)
-
-static int print_request_stats (threadinfo_t *ti, char *page, unsigned int skip_count, unsigned int max_count)
-{
- struct list_head *head, *curr;
- tux_req_t *req;
- unsigned int count = 0, size, line_off, len;
- char stat_line [LINE_SIZE];
-
- if (!max_count)
- BUG();
-
- head = &ti->all_requests;
- curr = head->next;
-
- while (curr != head) {
- req = list_entry(curr, tux_req_t, all);
- curr = curr->next;
- count++;
- if (count <= skip_count)
- continue;
- line_off = 0;
-#define SP(x...) \
- line_off += sprintf(stat_line + line_off, x)
-
- if (req->proto == &tux_proto_http)
- SP("0 ");
- else
- SP("1 ");
-
- SP("%p ", req);
- SP("%d ", req->atom_idx);
- if (req->atom_idx >= 1)
- SP("%p ", req->atoms[0]);
- else
- SP("........ ");
- if (req->atom_idx >= 2)
- SP("%p ", req->atoms[1]);
- else
- SP("........ ");
- if (!list_empty(&req->work)) SP("W"); else SP(".");
- if (!list_empty(&req->free)) SP("F"); else SP(".");
- if (!list_empty(&req->lru)) SP("L"); else SP(".");
- if (req->keep_alive) SP("K"); else SP(".");
- if (req->idle_input) SP("I"); else SP(".");
- if (timer_pending(&req->keepalive_timer))
- SP("T(%lu/%lu)",jiffies,req->keepalive_timer.expires); else SP(".");
- if (req->wait_output_space) SP("O"); else SP(".");
- if (timer_pending(&req->output_timer))
- SP("T"); else SP(".");
- SP(" %d ", req->error);
- SP(" %d ", req->status);
-
-#define SP_HOST(ip,port) \
- SP("%d.%d.%d.%d:%d ",NIPQUAD(ip),port)
-
- if (req->sock) {
- if (req->sock->sk)
- SP("%d:", req->sock->sk->sk_state);
- else
- SP("-2:");
- } else
- SP("-1:");
- SP_HOST(req->client_addr, req->client_port);
-
- SP("%Ld ", req->total_file_len);
- SP("%Ld ", req->in_file.f_pos);
- if (req->proto == &tux_proto_http) {
- SP("%d ", req->method);
- SP("%d ", req->version);
- }
- if (req->proto == &tux_proto_ftp) {
- SP("%d ", req->ftp_command);
- if (req->data_sock) {
- if (req->data_sock->sk)
- SP("%d:",req->data_sock->sk->sk_state);
- else
- SP("-2:");
- if (req->data_sock->sk)
- SP_HOST(inet_sk(req->data_sock->sk)->daddr,
- inet_sk(req->data_sock->sk)->dport);
- else
- SP("-1:-1 ");
- } else
- SP("-1 ");
- }
- SP("%p/%p %p/%p ", req->sock, req->sock ? req->sock->sk : (void *)-1, req->data_sock, req->data_sock ? req->data_sock->sk : (void *)-1);
-
- SP("%d\n", req->parsed_len);
- len = req->headers_len;
- if (len > 500)
- len = 500;
- SP("\n%d\n", len);
- memcpy(stat_line + line_off, req->headers, len);
- line_off += len;
- len = req->objectname_len;
- if (len > 100)
- len = 100;
- SP("\n%d\n", len);
- memcpy(stat_line + line_off, req->objectname, len);
- line_off += len;
- SP("\n\n<END>");
- if (line_off >= LINE_SIZE)
- BUG();
- Dprintk("printing req %p, count %d, page %p: {%s}.\n", req, count, page, stat_line);
- size = sprintf(page, "%-*s\n", LINE_SIZE-1, stat_line);
- if (size != LINE_SIZE)
- BUG();
- page += LINE_SIZE;
- if (count-skip_count >= max_count)
- break;
- }
-
- Dprintk("count: %d.\n", count-skip_count);
- return count - skip_count;
-}
-
-static int stat_read_proc (char *page, char **start, off_t off,
- int max_size, int *eof, void *data)
-{
- unsigned int i, nr_total = 0, nr, nr_off, nr_skip, size = 0, nr_wanted;
-
- Dprintk("START, page: %p, max_size: %d, off: %ld.\n", page, max_size, off);
- *eof = 1;
- if (max_size & LINE_MASK)
- return 0;
- if (off & LINE_MASK)
- return 0;
- if (!max_size)
- return 0;
-
- nr_off = off/LINE_SIZE;
-
- for (i = 0; i < nr_tux_threads; i++) {
- threadinfo_t *ti = threadinfo + i;
- spin_lock_irq(&ti->work_lock);
- nr = ti->nr_requests;
- Dprintk("ti: %p, nr: %d, nr_total: %d, nr_off: %d.\n", ti, nr, nr_total, nr_off);
- nr_total += nr;
- if (nr_off >= nr_total) {
- spin_unlock_irq(&ti->work_lock);
- continue;
- }
- nr_skip = nr_off - (nr_total - nr);
- nr_wanted = (max_size-size) / LINE_SIZE;
- Dprintk("nr_skip: %d, nr_wanted: %d.\n", nr_skip, nr_wanted);
- nr = print_request_stats(ti, page + size, nr_skip, nr_wanted);
- spin_unlock_irq(&ti->work_lock);
- nr_off += nr;
- size += nr * LINE_SIZE;
- Dprintk("ret: %d requests, size: %d.\n", nr, size);
- if (size > max_size)
- BUG();
- if (size == max_size)
- break;
- }
- Dprintk("DONE: size: %d.\n", size);
-
- *start = page;
-
- if (size)
- *eof = 0;
- return size;
-}
-
-static int stat_write_proc (struct file *file, const char *buffer,
- unsigned long count, void *data)
-{
- return -EINVAL;
-}
-
-#define MAX_STRING "http://255.255.255.255:65535"
-#define MAX_STRINGLEN (sizeof(MAX_STRING))
-
-#define INACTIVE_1 "[inactive]\n"
-#define INACTIVE_2 "0\n"
-
-static int listen_read_proc (char *page, char **start, off_t off,
- int count, int *eof, void *data)
-{
- tux_socket_t *listen = data;
-
- if (count < MAX_STRINGLEN)
- return -EINVAL;
-
- if (!listen->proto)
- return sprintf(page, INACTIVE_1);
-
- return sprintf (page, "%s://%u.%u.%u.%u:%hu\n", listen->proto->name,
- HIPQUAD(listen->ip), listen->port);
-}
-
-static int listen_write_proc (struct file *file, const char *buffer,
- unsigned long count, void *data)
-{
- char string [MAX_STRINGLEN];
- unsigned int d1, d2, d3, d4;
- unsigned short port;
- tux_socket_t *listen = data;
-
- if (!count)
- return -EINVAL;
- if (count > MAX_STRINGLEN)
- count = MAX_STRINGLEN;
- if (copy_from_user(string, buffer, count))
- return -EFAULT;
- string[count] = 0;
-
- if (!strcmp(string, INACTIVE_1) || !strcmp(string, INACTIVE_2)) {
- listen->proto = NULL;
- listen->ip = 0;
- listen->port = 0;
- return count;
- }
-
-#define MK_IP(a,b,c,d) ((a << 24) | (b << 16) | (c << 8) | d)
-
- if (sscanf(string, "http://%u.%u.%u.%u:%hu\n",
- &d1, &d2, &d3, &d4, &port) == 5) {
- listen->ip = MK_IP(d1,d2,d3,d4);
- listen->port = port;
- listen->proto = &tux_proto_http;
- return count;
- }
-
- if (sscanf(string, "ftp://%u.%u.%u.%u:%hu\n",
- &d1, &d2, &d3, &d4, &port) == 5) {
- listen->ip = MK_IP(d1,d2,d3,d4);
- listen->port = port;
- listen->proto = &tux_proto_ftp;
- return count;
- }
- printk(KERN_ERR "tux: invalid listen-socket parameters: %s\n", string);
- return -EINVAL;
-}
-
-#define MAX_NAMELEN 10
-
-static void register_tux_proc (unsigned int nr)
-{
- struct proc_dir_entry *entry;
- char name [MAX_NAMELEN];
- int i;
-
- if (!root_tux_dir)
- TUX_BUG();
-
- sprintf(name, "%d", nr);
-
- /* create /proc/net/tux/1234/ */
- tux_dir[nr] = proc_mkdir(name, root_tux_dir);
-
- /* create /proc/net/tux/1234/listen/ */
- listen_dir[nr] = proc_mkdir("listen", tux_dir[nr]);
-
- /* create /proc/net/tux/1234/listen/ */
- for (i = 0; i < CONFIG_TUX_NUMSOCKETS; i++) {
- sprintf(name, "%d", i);
- entry = create_proc_entry(name, 0700, listen_dir[nr]);
-
- entry->nlink = 1;
- entry->data = (void *)(tux_listen[nr] + i);
- entry->read_proc = listen_read_proc;
- entry->write_proc = listen_write_proc;
- tux_listen[nr][i].entry = entry;
- }
-}
-
-static void unregister_tux_proc (unsigned int nr)
-{
- int i;
-
- for (i = 0; i < CONFIG_TUX_NUMSOCKETS; i++) {
- remove_proc_entry(tux_listen[nr][i].entry->name,listen_dir[nr]);
- tux_listen[nr][i].entry = NULL;
- }
-
- remove_proc_entry(listen_dir[nr]->name, tux_dir[nr]);
-
- remove_proc_entry(tux_dir[nr]->name, root_tux_dir);
-}
-
-static void cleanup_tux_proc (void)
-{
- int i;
-
- Dprintk("cleaning up /proc/net/tux/\n");
-
- for (i = 0; i < CONFIG_TUX_NUMTHREADS; i++)
- unregister_tux_proc(i);
- remove_proc_entry(stat_entry->name, root_tux_dir);
- remove_proc_entry(log_cpu_mask_entry->name, root_tux_dir);
- remove_proc_entry(root_tux_dir->name, proc_net);
-}
-
-static void init_tux_proc (void)
-{
- struct proc_dir_entry *entry;
- int i;
-
- if (root_tux_dir)
- return;
-
- /* create /proc/net/tux */
- root_tux_dir = proc_mkdir("tux", proc_net);
-
- entry = create_proc_entry("log_cpu_mask", 0700, root_tux_dir);
-
- entry->nlink = 1;
- entry->data = (void *)&log_cpu_mask;
- entry->read_proc = hex_read_proc;
- entry->write_proc = hex_write_proc;
-
- log_cpu_mask_entry = entry;
-
- entry = create_proc_entry("stat", 0700, root_tux_dir);
-
- entry->nlink = 1;
- entry->data = NULL;
- entry->read_proc = stat_read_proc;
- entry->write_proc = stat_write_proc;
-
- stat_entry = entry;
-
- /*
- * Create entries for all existing threads.
- */
- for (i = 0; i < CONFIG_TUX_NUMTHREADS; i++)
- register_tux_proc(i);
-}
-
-void start_sysctl(void)
-{
- init_tux_proc();
- tux_table_header = register_sysctl_table(tux_root_table,1);
-}
-
-void end_sysctl(void)
-{
- cleanup_tux_proc();
- unregister_sysctl_table(tux_table_header);
-}
-
-#if CONFIG_SMP
-void mask_to_cpumask(unsigned int mask, cpumask_t *cpu_mask)
-{
-
- unsigned int bit_mask, i;
-
- bit_mask = 1 << 31;
-
- for (i=NR_CPUS-1; i--; i >= 0) {
- if(mask & bit_mask)
- cpu_set(i, *cpu_mask);
- else
- cpu_clear(i, *cpu_mask);
- mask <<= 1;
- }
-
-}
-#endif
-
+++ /dev/null
-/*
- * TUX - Integrated Application Protocols Layer and Object Cache
- *
- * Copyright (C) 2000, 2001, Ingo Molnar <mingo@redhat.com>
- *
- * ftp_proto.c: FTP application protocol support
- */
-
-#define __KERNEL_SYSCALLS__
-#include <net/tux.h>
-
-/****************************************************************
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- ****************************************************************/
-
-#define HELLO "220 Linux/TUX 3.0 FTP server welcomes you!\r\n"
-#define WRITE_DONE "226 Transfer complete.\r\n"
-#define BAD_FILENAME "550 No such file or directory.\r\n"
-#define GOOD_DIR "250 CWD command successful.\r\n"
-#define LIST_ERR "503 LIST without PORT! Closing connection.\r\n"
-#define LIST_ERR_MEM "503 LIST could not allocate memory! Closing connection.\r\n"
-#define WRITE_FILE "150 Opening BINARY mode data connection.\r\n"
-#define WRITE_LIST "150 Opening ASCII mode data connection.\r\n"
-#define RETR_ERR "503 RETR without PORT! Closing connection.\r\n"
-#define PORT_OK "200 PORT command successful.\r\n"
-#define LOGIN_OK "230-There are currently %d users logged in, out of %d maximum.\r\n230-Bandwidth served by TUX currently: %d KB/sec\r\n230 TUX Guest login ok.\r\n"
-#define LOGIN_OK_ONE "230-There is currently 1 user logged in, out of %d maximum.\r\n230-Bandwidth served by TUX currently: %d KB/sec\r\n230 TUX Guest login ok.\r\n"
-#define LOGIN_OK_PASS "230 TUX Guest login ok.\r\n"
-#define LOGIN_FORBIDDEN "530 Sorry, Login Denied!\r\n"
-#define TYPE_OK "200 Type set to I.\r\n"
-#define BYE "221 Thank You for using TUX!\r\n"
-#define NOT_IMPLEMENTED "502 Command not implemented.\r\n"
-#define CLOSE_2 "221 Cannot handle request, closing connection!\r\n"
-#define CLOSE "500 Unknown command.\r\n"
-#define CLOSE_TIMEOUT "421 Timeout, closing connection!\r\n"
-#define LINUX_SYST "215 UNIX Type: L8, Linux/TUX/3.0\r\n"
-#define COMMAND_OK "200 Command OK.\r\n"
-#define REST_OK "350 Restart offset OK.\r\n"
-#define WRITE_ABORTED "426 Transfer aborted, data connection closed.\r\n"
-#define SITE "214 No SITE commands are recognized.\r\n"
-
-#define INTERVAL 10
-
-unsigned long last_measurement;
-unsigned int ftp_bytes_sent;
-unsigned int ftp_bandwidth;
-
-static void __update_bandwidth (tux_req_t *req, unsigned int bytes)
-{
- /*
- * Bandwidth measurement. Not completely accurate,
- * but it's good enough and lightweight enough.
- */
- if (jiffies >= last_measurement + INTERVAL*HZ) {
- ftp_bandwidth = (ftp_bytes_sent + 1023)/INTERVAL/1024;
- ftp_bytes_sent = 0;
- last_measurement = jiffies;
- }
- if (bytes)
- atomic_add(bytes, (atomic_t *)&ftp_bytes_sent);
- Dprintk("update_bandwidth(%p,%d), bytes_sent: %d, bandwidth: %d.\n",
- req, bytes, ftp_bytes_sent, ftp_bandwidth);
-}
-
-#define update_bandwidth(req,bytes) \
- do { \
- if (unlikely(tux_ftp_login_message)) \
- __update_bandwidth(req, bytes); \
- } while (0)
-
-static inline void __ftp_send_async_message (tux_req_t *req,
- const char *message, int status, unsigned int size)
-{
- update_bandwidth(req, size);
- __send_async_message(req, message, status, size, 1);
-}
-
-#define ftp_send_async_message(req,str,status) \
- __ftp_send_async_message(req,str,status,sizeof(str)-1)
-
-
-static void ftp_flush_req (tux_req_t *req, int cachemiss)
-{
- tux_push_pending(req->sock->sk);
- add_req_to_workqueue(req);
-}
-
-static void ftp_execute_command (tux_req_t *req, int cachemiss);
-
-static void ftp_lookup_vhost (tux_req_t *req, int cachemiss)
-{
- struct dentry *dentry;
- struct nameidata base;
- struct vfsmount *mnt = NULL;
- unsigned int flag = cachemiss ? 0 : LOOKUP_ATOMIC;
- char ip[3+1+3+1+3+1+3 + 2];
-
- sprintf(ip, "%d.%d.%d.%d", NIPQUAD(inet_sk(req->sock->sk)->rcv_saddr));
- Dprintk("ftp_lookup_vhost(%p, %d, virtual: %d, host: %s.)\n",
- req, flag, req->virtual, ip);
-
- base.flags = LOOKUP_FOLLOW|flag;
- base.last_type = LAST_ROOT;
- base.dentry = dget(req->proto->main_docroot.dentry);
- base.mnt = mntget(req->proto->main_docroot.mnt);
-
- dentry = __tux_lookup(req, ip, &base, &mnt);
-
- Dprintk("looked up dentry %p.\n", dentry);
- if (dentry && !IS_ERR(dentry) && !dentry->d_inode)
- TUX_BUG();
-
- if (!dentry || IS_ERR(dentry)) {
- if (PTR_ERR(dentry) == -EWOULDBLOCKIO) {
- add_tux_atom(req, ftp_lookup_vhost);
- queue_cachemiss(req);
- return;
- }
- goto abort;
- }
-
- req->docroot_dentry = dentry;
- req->docroot_mnt = mnt;
-
- add_tux_atom(req, ftp_execute_command);
- add_req_to_workqueue(req);
- return;
-abort:
- if (dentry) {
- if (!IS_ERR(dentry))
- dput(dentry);
- dentry = NULL;
- }
- if (mnt) {
- if (!IS_ERR(mnt))
- mntput(mnt);
- mnt = NULL;
- }
- req_err(req);
- add_req_to_workqueue(req);
-}
-
-static void ftp_got_request (tux_req_t *req)
-{
- add_tux_atom(req, parse_request);
- add_tux_atom(req, ftp_flush_req);
- ftp_send_async_message(req, HELLO, 220);
-}
-
-#define GOTO_ERR { TDprintk("FTP protocol error at: %s:%d\n", \
- __FILE__, __LINE__); goto error; }
-
-static void zap_data_socket (tux_req_t *req)
-{
- if (!req->data_sock)
- return;
- Dprintk("zapping req %p's data socket %p.\n", req, req->data_sock);
-
- unlink_tux_data_socket(req);
- sock_release(req->data_sock);
- req->data_sock = NULL;
-}
-
-static int parse_ftp_message (tux_req_t *req, const int total_len)
-{
- int comm, comm1 = 0, comm2 = 0, comm3 = 0, comm4 = 0;
- int newline_pos, i;
- const char *mess, *curr;
-
- curr = mess = req->headers;
-
- Dprintk("FTP parser got %d bytes: --->{%s}<---\n", total_len, curr);
-
- newline_pos = -1;
- for (i = 0; i < total_len; i++, curr++) {
- if (!*curr)
- GOTO_ERR;
- if (!(*curr == '\r') || !(*(curr+1) == '\n'))
- continue;
- newline_pos = i;
- break;
- }
- Dprintk("Newline pos: %d\n", newline_pos);
- if (newline_pos == -1) {
- Dprintk("incomplete mess on req %p!\n", req);
- return 0;
- }
- if (newline_pos < 3)
- GOTO_ERR;
-
-#define toup(c) ((((c) >= 'a') && ((c) <= 'z')) ? ((c) + 'A' - 'a') : (c))
-
-#define STRING_VAL(c1,c2,c3,c4) \
- (toup(c1) + (toup(c2) << 8) + (toup(c3) << 16) + (toup(c4) << 24))
-
-#define STRING_VAL_STR(str) \
- STRING_VAL(str[0], str[1], str[2], str[3])
-
- Dprintk("string val (%c%c%c%c): %08x\n",
- mess[0], mess[1], mess[2], mess[3],
- STRING_VAL_STR(mess));
-
-#define PARSE_FTP_COMM(c1,c2,c3,c4,name,num) \
- if (STRING_VAL_STR(mess) == STRING_VAL(c1,c2,c3,c4)) \
- { \
- Dprintk("parsed "#name".\n"); \
- comm##num = FTP_COMM_##name; \
- }
-
- PARSE_FTP_COMM('A','C','C','T', ACCT,2);
- PARSE_FTP_COMM('C','D','U','P', CDUP,3);
- PARSE_FTP_COMM('S','M','N','T', SMNT,4);
- PARSE_FTP_COMM('Q','U','I','T', QUIT,1);
- PARSE_FTP_COMM('R','E','I','N', REIN,2);
- PARSE_FTP_COMM('P','A','S','V', PASV,3);
- PARSE_FTP_COMM('S','T','R','U', STRU,4);
- PARSE_FTP_COMM('S','T','O','R', STOR,2);
- PARSE_FTP_COMM('S','T','O','U', STOU,3);
- PARSE_FTP_COMM('A','P','P','E', APPE,4);
- PARSE_FTP_COMM('A','L','L','O', ALLO,1);
- PARSE_FTP_COMM('R','N','F','R', RNFR,2);
- PARSE_FTP_COMM('R','N','T','O', RNTO,3);
- PARSE_FTP_COMM('A','B','O','R', ABOR,4);
- PARSE_FTP_COMM('D','E','L','E', DELE,1);
- PARSE_FTP_COMM('R','M','D',' ', RMD, 2);
- PARSE_FTP_COMM('M','K','D',' ', MKD, 3);
- PARSE_FTP_COMM('P','W','D',' ', PWD, 4);
- PARSE_FTP_COMM('S','Y','S','T', SYST,2);
- PARSE_FTP_COMM('N','O','O','P', NOOP,3);
- PARSE_FTP_COMM('F','E','A','T', FEAT,4);
-
- comm = comm1 | comm2 | comm3 | comm4;
-
- if (comm) {
- if (newline_pos != 4)
- GOTO_ERR;
- req->ftp_command = comm;
- goto out;
- }
-
- switch (STRING_VAL(mess[0], mess[1], mess[2], mess[3])) {
-
-#define PARSE_FTP_COMM_3CHAR(c1,c2,c3,name) \
- case STRING_VAL(c1,c2,c3,'\r'): \
- { \
- Dprintk("parsed "#name".\n"); \
- req->ftp_command = FTP_COMM_##name; \
- if (newline_pos != 3) \
- GOTO_ERR; \
- }
-
-#define PARSE_FTP_3CHAR_COMM_IGNORE(c1,c2,c3,name) \
- case STRING_VAL(c1,c2,c3,' '): \
- { \
- Dprintk("parsed "#name".\n"); \
- req->ftp_command = FTP_COMM_##name; \
- }
-
-#define PARSE_FTP_COMM_IGNORE(c1,c2,c3,c4,name) \
- case STRING_VAL(c1,c2,c3,c4): \
- { \
- Dprintk("parsed "#name".\n"); \
- req->ftp_command = FTP_COMM_##name; \
- }
-
-#define PARSE_FTP_3CHAR_COMM_1_FIELD(c1,c2,c3,name,field,field_len,max) \
- case STRING_VAL(c1,c2,c3,' '): \
- { \
- Dprintk("parsed "#name".\n"); \
- req->ftp_command = FTP_COMM_##name; \
- if (newline_pos == 4) \
- GOTO_ERR; \
- if (newline_pos >= 5) { \
- curr = mess + 3; \
- if (*curr++ != ' ') \
- GOTO_ERR; \
- *(field_len) = newline_pos-4; \
- if (*(field_len) >= max) \
- GOTO_ERR; \
- memcpy(field, curr, *(field_len)); \
- (field)[*(field_len)] = 0; \
- } \
- }
-
-#define PARSE_FTP_COMM_1_FIELD(c1,c2,c3,c4,name,field,field_len,max) \
- case STRING_VAL(c1,c2,c3,c4): \
- { \
- Dprintk("parsed "#name".\n"); \
- req->ftp_command = FTP_COMM_##name; \
- if (newline_pos < 4) \
- GOTO_ERR; \
- if (newline_pos == 4) \
- *(field_len) = 0; \
- else { \
- curr = mess + 4; \
- if (*curr++ != ' ') \
- GOTO_ERR; \
- *(field_len) = newline_pos-5; \
- if (*(field_len) >= max) \
- GOTO_ERR; \
- memcpy(field, curr, *(field_len)); \
- (field)[*(field_len)] = 0; \
- } \
- }
-
- PARSE_FTP_COMM_1_FIELD('U','S','E','R', USER,
- req->username, &req->username_len,
- MAX_USERNAME_LEN-1);
- if (!req->username_len)
- GOTO_ERR;
- break;
-
- {
- #define MAX_PASS_LEN 100
- char pass[MAX_PASS_LEN];
- unsigned int pass_len;
- PARSE_FTP_COMM_1_FIELD('P','A','S','S', PASS,
- pass, &pass_len,
- MAX_PASS_LEN-1);
- if (!pass_len)
- GOTO_ERR;
- break;
- }
-
- PARSE_FTP_3CHAR_COMM_1_FIELD('C','W','D', CWD,
- req->objectname, &req->objectname_len,
- MAX_OBJECTNAME_LEN-1);
- if (!req->objectname_len)
- GOTO_ERR;
- req->uri_str = req->objectname;
- req->uri_len = req->objectname_len;
- break;
-
- PARSE_FTP_COMM_3CHAR('P','W','D', PWD); break;
-
- {
- char type[3];
- unsigned int type_len;
-
- PARSE_FTP_COMM_1_FIELD('T','Y','P','E', TYPE,
- type, &type_len, 2);
- if (!type_len)
- GOTO_ERR;
- if ((type[0] != 'I') && (type[0] != 'A'))
- GOTO_ERR;
- }
- break;
-
- PARSE_FTP_COMM_1_FIELD('R','E','T','R', RETR,
- req->objectname, &req->objectname_len,
- MAX_OBJECTNAME_LEN-1);
- if (!req->objectname_len) {
- zap_data_socket(req);
- req->ftp_command = FTP_COMM_NONE;
- }
- req->uri_str = req->objectname;
- req->uri_len = req->objectname_len;
- break;
-
- PARSE_FTP_COMM_1_FIELD('S','I','Z','E', SIZE,
- req->objectname, &req->objectname_len,
- MAX_OBJECTNAME_LEN-1);
- if (!req->objectname_len)
- req->ftp_command = FTP_COMM_NONE;
- req->uri_str = req->objectname;
- req->uri_len = req->objectname_len;
- break;
-
- PARSE_FTP_COMM_1_FIELD('M','D','T','M', MDTM,
- req->objectname, &req->objectname_len,
- MAX_OBJECTNAME_LEN-1);
- if (!req->objectname_len)
- req->ftp_command = FTP_COMM_NONE;
- req->uri_str = req->objectname;
- req->uri_len = req->objectname_len;
- break;
-
- PARSE_FTP_COMM_IGNORE('M','O','D','E', MODE);
- break;
-
- PARSE_FTP_COMM_IGNORE('S','T','A','T', STAT);
- break;
-
- PARSE_FTP_COMM_IGNORE('S','I','T','E', SITE);
- break;
-
- PARSE_FTP_COMM_1_FIELD('L','I','S','T', LIST,
- req->objectname, &req->objectname_len,
- MAX_OBJECTNAME_LEN-1);
- if (req->objectname[0] == '-') {
- req->objectname_len = 0;
- req->objectname[0] = 0;
- }
- if (req->objectname_len) {
- req->uri_str = req->objectname;
- req->uri_len = req->objectname_len;
- }
- break;
-
- PARSE_FTP_COMM_1_FIELD('N','L','S','T', NLST,
- req->objectname, &req->objectname_len,
- MAX_OBJECTNAME_LEN-1);
- if (req->objectname[0] == '-') {
- req->objectname_len = 0;
- req->objectname[0] = 0;
- }
- if (req->objectname_len) {
- req->uri_str = req->objectname;
- req->uri_len = req->objectname_len;
- }
- break;
-
- PARSE_FTP_COMM_IGNORE('H','E','L','P', HELP);
- break;
-
- PARSE_FTP_COMM_IGNORE('C','L','N','T', CLNT);
- break;
-
-#define IS_NUM(n) (((n) >= '0') && ((n) <= '9'))
-
-#define GET_DIGIT(curr,n) \
- n += (*curr) - '0'; \
- curr++; \
- if (IS_NUM(*curr)) { \
- n *= 10;
-
-#define PARSE_PORTNUM(curr,n) \
-do { \
- Dprintk("PORT NUM parser:--->{%s}<---\n", curr);\
- if (!IS_NUM(*curr)) \
- GOTO_ERR; \
- n = 0; \
- GET_DIGIT(curr,n); \
- GET_DIGIT(curr,n); \
- GET_DIGIT(curr,n); \
- }}} \
- if (n > 255) \
- GOTO_ERR; \
- Dprintk("PORT NUM parser:--->{%s}<---\n", curr);\
- Dprintk("PORT NUM parser parsed %d.\n", n); \
-} while (0)
-
-#define PARSE_NUM(curr,n) \
-do { \
- Dprintk("NUM parser:--->{%s}<---\n", curr); \
- if (!IS_NUM(*curr)) \
- GOTO_ERR; \
- n = 0; \
- GET_DIGIT(curr,n); \
- GET_DIGIT(curr,n); \
- GET_DIGIT(curr,n); \
- GET_DIGIT(curr,n); \
- GET_DIGIT(curr,n); \
- GET_DIGIT(curr,n); \
- GET_DIGIT(curr,n); \
- GET_DIGIT(curr,n); \
- GET_DIGIT(curr,n); \
- GET_DIGIT(curr,n); \
- }}}}}}}}}} \
- Dprintk("NUM parser:--->{%s}<---\n", curr); \
- Dprintk("NUM parser parsed %d.\n", n); \
-} while (0)
-
- case STRING_VAL('P','O','R','T'):
- {
- unsigned int h1, h2, h3, h4, p1, p2;
- if (req->data_sock)
- zap_data_socket(req);
- /*
- * Minimum size: "PORT 0,0,0,0,0,0", 16 bytes.
- */
- if (newline_pos < 16)
- GOTO_ERR;
- Dprintk("parsed PORT.\n");
- if (req->data_sock)
- GOTO_ERR;
- curr = mess + 4;
- if (*curr++ != ' ')
- GOTO_ERR;
- PARSE_PORTNUM(curr,h1);
- if (*curr++ != ',')
- GOTO_ERR;
- PARSE_PORTNUM(curr,h2);
- if (*curr++ != ',')
- GOTO_ERR;
- PARSE_PORTNUM(curr,h3);
- if (*curr++ != ',')
- GOTO_ERR;
- PARSE_PORTNUM(curr,h4);
- if (*curr++ != ',')
- GOTO_ERR;
- PARSE_PORTNUM(curr,p1);
- if (*curr++ != ',')
- GOTO_ERR;
- PARSE_PORTNUM(curr,p2);
- if (curr-mess != newline_pos)
- GOTO_ERR;
- req->ftp_command = FTP_COMM_PORT;
- req->ftp_user_addr = (h1<<24) + (h2<<16) + (h3<<8) + h4;
- req->ftp_user_port = (p1<<8) + p2;
- Dprintk("FTP PORT got: %d.%d.%d.%d:%d.\n",
- h1, h2, h3, h4, req->ftp_user_port);
- Dprintk("FTP user-addr: %08x (htonl: %08x), socket: %08x.\n",
- req->ftp_user_addr, htonl(req->ftp_user_addr),
- inet_sk(req->sock->sk)->daddr);
- /*
- * Do not allow redirection of connections, and do
- * not allow reserved ports to be accessed.
- */
- if (inet_sk(req->sock->sk)->daddr != htonl(req->ftp_user_addr))
- GOTO_ERR;
- if (req->ftp_user_port < 1024)
- GOTO_ERR;
- break;
- }
- case STRING_VAL('R','E','S','T'):
- {
- unsigned int offset;
-
- /*
- * Minimum size: "REST 0", 6 bytes.
- */
- if (newline_pos < 6)
- GOTO_ERR;
- Dprintk("parsed REST.\n");
- curr = mess + 4;
- if (*curr++ != ' ')
- GOTO_ERR;
- PARSE_NUM(curr,offset);
- if (curr-mess != newline_pos)
- GOTO_ERR;
- req->ftp_command = FTP_COMM_REST;
- req->ftp_offset_start = offset;
- Dprintk("FTP REST got: %d bytes offset.\n", offset);
-
- break;
- }
- default:
- req->ftp_command = FTP_COMM_NONE;
- break;
- }
-
-out:
- req->parsed_len = newline_pos + 2;
-
- req->virtual = tux_ftp_virtual_server;
- if (req->virtual)
- add_tux_atom(req, ftp_lookup_vhost);
- else {
- req->docroot_dentry = dget(req->proto->main_docroot.dentry);
- req->docroot_mnt = mntget(req->proto->main_docroot.mnt);
- add_tux_atom(req, ftp_execute_command);
- }
-
- return req->parsed_len;
-error:
- clear_keepalive(req);
- TDprintk("rejecting FTP session!\n");
- TDprintk("mess :--->{%s}<---\n", mess);
- TDprintk("mess left:--->{%s}<---\n", curr);
- req_err(req);
- return -1;
-}
-
-static void ftp_wait_close (tux_req_t *req, int cachemiss);
-static void ftp_wait_syn (tux_req_t *req, int cachemiss);
-
-static int ftp_check_req_err (tux_req_t *req, int cachemiss)
-{
- int state = req->sock->sk->sk_state;
- int err = req->sock->sk->sk_err | req->error;
- int urg = tcp_sk(req->sock->sk)->urg_data;
-
- if (req->data_sock) {
- urg |= tcp_sk(req->data_sock->sk)->urg_data;
- state |= req->data_sock->sk->sk_state;
- err |= req->data_sock->sk->sk_err;
- }
-
- if ((state <= TCP_SYN_RECV) && !err) {
- if (!urg)
- return 0;
- req->in_file.f_pos = 0;
- add_tux_atom(req, flush_request);
- zap_data_socket(req);
- ftp_send_async_message(req, WRITE_ABORTED, 426);
- return 1;
- }
-#if CONFIG_TUX_DEBUG
- req->bytes_expected = 0;
- if (tux_TDprintk)
- dump_stack();
-#endif
- req->in_file.f_pos = 0;
- TDprintk("zapping, data sock state: %d (err: %d, urg: %d)\n",
- state, err, urg);
- /*
- * We are in the middle of a file transfer,
- * zap it immediately:
- */
- req->error = TUX_ERROR_CONN_CLOSE;
- zap_request(req, cachemiss);
- return 1;
-}
-
-void ftp_send_file (tux_req_t *req, int cachemiss)
-{
- int ret;
-
- SET_TIMESTAMP(req->output_timestamp);
-repeat:
- ret = generic_send_file(req, req->data_sock, cachemiss);
- update_bandwidth(req, req->in_file.f_pos - req->prev_pos);
- req->prev_pos = req->in_file.f_pos;
-
- switch (ret) {
- case -5:
- add_tux_atom(req, ftp_send_file);
- output_timeout(req);
- break;
- case -4:
- add_tux_atom(req, ftp_send_file);
- if (add_output_space_event(req, req->data_sock)) {
- del_tux_atom(req);
- goto repeat;
- }
- break;
- case -3:
- add_tux_atom(req, ftp_send_file);
- queue_cachemiss(req);
- break;
- case -1:
- break;
- default:
- req->in_file.f_pos = 0;
-
- if (tux_ftp_wait_close) {
- req->data_sock->ops->shutdown(req->data_sock, SEND_SHUTDOWN);
- add_tux_atom(req, ftp_wait_close);
- add_req_to_workqueue(req);
- return;
- }
- Dprintk("FTP send file req %p finished!\n", req);
- zap_data_socket(req);
- add_tux_atom(req, ftp_flush_req);
- if (req->error)
- ftp_send_async_message(req, BAD_FILENAME, 200);
- else
- ftp_send_async_message(req, WRITE_DONE, 200);
- break;
- }
-}
-
-#define sk_syn(sk) \
- (!(sk)->sk_err && ((1 << (sk)->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)))
-#define req_syn(req) \
- (!(req)->error && sk_syn((req)->data_sock->sk))
-
-static void ftp_wait_syn (tux_req_t *req, int cachemiss)
-{
- Dprintk("ftp_wait_syn in: data socket state %d.\n", req->data_sock->state);
- if (req_syn(req)) {
- spin_lock_irq(&req->ti->work_lock);
- add_keepalive_timer(req);
- if (test_and_set_bit(0, &req->idle_input))
- TUX_BUG();
- spin_unlock_irq(&req->ti->work_lock);
- if (req_syn(req)) {
- add_tux_atom(req, ftp_wait_syn);
- return;
- }
- unidle_req(req);
- }
- Dprintk("ftp_wait_syn out: data socket state %d.\n", req->data_sock->state);
- add_req_to_workqueue(req);
-}
-
-static void ftp_wait_close (tux_req_t *req, int cachemiss)
-{
- struct sock *sk = req->data_sock->sk;
-
- Dprintk("ftp_wait_close: data socket state %d.\n", sk->sk_state);
-
- if (!req->error && (sk->sk_state <= TCP_FIN_WAIT1) && !sk->sk_err) {
- spin_lock_irq(&req->ti->work_lock);
- add_keepalive_timer(req);
- if (test_and_set_bit(0, &req->idle_input))
- TUX_BUG();
- spin_unlock_irq(&req->ti->work_lock);
- if (!req->error && (sk->sk_state <= TCP_FIN_WAIT1) && !sk->sk_err) {
- add_tux_atom(req, ftp_wait_close);
- return;
- }
- unidle_req(req);
- }
- zap_data_socket(req);
- add_tux_atom(req, ftp_flush_req);
- if (req->error)
- ftp_send_async_message(req, BAD_FILENAME, 200);
- else
- ftp_send_async_message(req, WRITE_DONE, 200);
-}
-
-void ftp_get_size (tux_req_t *req, int cachemiss)
-{
- char file_size[200];
- int missed, len;
-
- if (!req->dentry) {
- missed = lookup_object(req, cachemiss ? 0 : LOOKUP_ATOMIC);
- if (!missed && !req->dentry) {
- ftp_send_async_message(req, BAD_FILENAME, 200);
- return;
- }
- if (missed) {
- if (cachemiss)
- TUX_BUG();
- add_tux_atom(req, ftp_get_size);
- queue_cachemiss(req);
- return;
- }
- }
- req->in_file.f_pos = 0;
- len = sprintf(file_size, "213 %Li\r\n", req->dentry->d_inode->i_size);
- __ftp_send_async_message(req, file_size, 200, len);
-}
-
-void ftp_get_mdtm (tux_req_t *req, int cachemiss)
-{
- unsigned int flag = cachemiss ? 0 : LOOKUP_ATOMIC;
- struct dentry *dentry;
- struct vfsmount *mnt = NULL;
- char file_mdtm[200];
- unsigned int len;
- int err;
-
- dentry = tux_lookup(req, req->objectname, flag, &mnt);
- if (!dentry || IS_ERR(dentry)) {
- if (PTR_ERR(dentry) == -EWOULDBLOCKIO) {
- if (cachemiss)
- TUX_BUG();
- add_tux_atom(req, ftp_get_mdtm);
- queue_cachemiss(req);
- return;
- }
- goto out_err;
- }
- err = permission(dentry->d_inode, MAY_READ, NULL);
- if (err)
- goto out_err_put;
-
- req->in_file.f_pos = 0;
- len = mdtm_time (file_mdtm, dentry->d_inode->i_mtime.tv_sec);
- dput(dentry);
- mntput(mnt);
- __ftp_send_async_message(req, file_mdtm, 200, len);
- return;
-
-out_err_put:
- dput(dentry);
- mntput(mnt);
-out_err:
- ftp_send_async_message(req, BAD_FILENAME, 550);
-}
-
-static void ftp_get_file (tux_req_t *req, int cachemiss)
-{
- int missed;
-
- if (!req->dentry) {
- missed = lookup_object(req, cachemiss ? 0 : LOOKUP_ATOMIC);
- if (!missed && !req->dentry) {
- ftp_send_async_message(req, BAD_FILENAME, 200);
- return;
- }
- if (missed) {
- if (cachemiss)
- TUX_BUG();
- add_tux_atom(req, ftp_get_file);
- queue_cachemiss(req);
- return;
- }
- }
- Dprintk("ftp_send_file %p, ftp_offset: %Ld, total_len: %Ld.\n", req, req->ftp_offset_start, req->total_file_len);
- req->in_file.f_pos = 0;
- if (req->ftp_offset_start) {
- if (req->ftp_offset_start <= req->total_file_len) {
- req->offset_start = req->ftp_offset_start;
- req->in_file.f_pos = req->offset_start;
- }
- req->ftp_offset_start = 0;
- }
- req->output_len = req->total_file_len - req->offset_start;
- req->prev_pos = req->in_file.f_pos;
- Dprintk("ftp_send_file %p, f_pos: %Ld (out_len: %Ld).\n", req, req->in_file.f_pos, req->output_len);
- add_tux_atom(req, ftp_send_file);
- add_tux_atom(req, ftp_wait_syn);
- add_tux_atom(req, ftp_flush_req);
- ftp_send_async_message(req, WRITE_FILE, 200);
-}
-
-static void __exchange_sockets (tux_req_t *req)
-{
- struct socket *tmp;
-
- tmp = req->data_sock;
- req->data_sock = req->sock;
- req->sock = tmp;
-
- req->in_file.f_pos = 0;
-}
-
-static void ftp_do_ls_start (tux_req_t *req, int cachemiss)
-{
- Dprintk("ftp_do_ls_start(%p, %d).\n", req, cachemiss);
- if (!req->cwd_dentry)
- TUX_BUG();
- __exchange_sockets(req);
- queue_cachemiss(req);
-}
-
-static void ftp_do_ls_end (tux_req_t *req, int cachemiss)
-{
- Dprintk("ftp_do_ls_end(%p, %d).\n", req, cachemiss);
- __exchange_sockets(req);
- if (tux_ftp_wait_close) {
- req->data_sock->ops->shutdown(req->data_sock, SEND_SHUTDOWN);
- add_tux_atom(req, ftp_wait_close);
- add_req_to_workqueue(req);
- return;
- }
- zap_data_socket(req);
- add_tux_atom(req, ftp_flush_req);
- if (req->error)
- ftp_send_async_message(req, BAD_FILENAME, 200);
- else
- ftp_send_async_message(req, WRITE_DONE, 200);
-}
-
-static void ftp_chdir (tux_req_t *req, int cachemiss)
-{
- unsigned int flag = cachemiss ? 0 : LOOKUP_ATOMIC;
- struct dentry *dentry;
- struct vfsmount *mnt = NULL;
- int err;
-
- Dprintk("ftp_chdir(%p, %d, {%s})\n", req, cachemiss, req->objectname);
- dentry = tux_lookup(req, req->objectname, flag, &mnt);
- if (!dentry || IS_ERR(dentry)) {
- if (PTR_ERR(dentry) == -EWOULDBLOCKIO) {
- if (cachemiss)
- TUX_BUG();
- add_tux_atom(req, ftp_chdir);
- queue_cachemiss(req);
- return;
- }
- goto out_err;
- }
- err = permission(dentry->d_inode, MAY_EXEC, NULL);
- if (err)
- goto out_err_put;
- req->cwd_dentry = dentry;
- req->cwd_mnt = mnt;
- ftp_send_async_message(req, GOOD_DIR, 200);
- return;
-
-out_err_put:
- dput(dentry);
- mntput(mnt);
-out_err:
- ftp_send_async_message(req, BAD_FILENAME, 550);
-}
-
-void ftp_accept_pasv (tux_req_t *req, int cachemiss)
-{
- struct socket *sock, *new_sock = NULL;
- struct tcp_opt *tp1, *tp2;
- int err;
-
- tp1 = tcp_sk(req->data_sock->sk);
-
- Dprintk("PASV accept on req %p, accept_queue: %p.\n",
- req, tp1->accept_queue);
- if (req->error || (req->data_sock->sk->sk_state != TCP_LISTEN))
- goto error;
-new_socket:
- if (!tp1->accept_queue) {
- spin_lock_irq(&req->ti->work_lock);
- add_keepalive_timer(req);
- if (test_and_set_bit(0, &req->idle_input))
- TUX_BUG();
- spin_unlock_irq(&req->ti->work_lock);
- if (!tp1->accept_queue) {
- add_tux_atom(req, ftp_accept_pasv);
- return;
- }
- unidle_req(req);
- }
- new_sock = sock_alloc();
- if (!new_sock)
- goto error;
- sock = req->data_sock;
- new_sock->type = sock->type;
- new_sock->ops = sock->ops;
-
- err = sock->ops->accept(sock, new_sock, O_NONBLOCK);
- Dprintk("PASV accept() returned %d (state %d).\n", err, new_sock->sk->sk_state);
- if (err < 0)
- goto error;
- if (new_sock->sk->sk_state != TCP_ESTABLISHED)
- goto error;
- /*
- * Do not allow other clients to steal the FTP connection!
- */
- if (inet_sk(new_sock->sk)->daddr != inet_sk(req->sock->sk)->daddr) {
- Dprintk("PASV: ugh, unauthorized connect?\n");
- sock_release(new_sock);
- new_sock = NULL;
- goto new_socket;
- }
- /*
- * Zap the listen socket:
- */
- zap_data_socket(req);
-
- tp2 = tcp_sk(new_sock->sk);
- tp2->nonagle = 2;
- tp2->ack.pingpong = tux_ack_pingpong;
- new_sock->sk->sk_reuse = 1;
- sock_set_flag(new_sock->sk, SOCK_URGINLINE);
- sock_reset_flag(new_sock->sk, SOCK_LINGER);
-
- link_tux_data_socket(req, new_sock);
- add_req_to_workqueue(req);
- return;
-
-error:
- if (new_sock)
- sock_release(new_sock);
- req_err(req);
- zap_data_socket(req);
- ftp_send_async_message(req, CLOSE, 500);
-}
-
-static char * ftp_print_dir_line (tux_req_t *req, char *tmp, char *d_name, int d_len, int d_type, struct dentry *dentry, struct inode *inode)
-{
- char *string0 = tmp;
- unsigned int size;
-
- if (req->ftp_command == FTP_COMM_NLST) {
- memcpy(tmp, d_name, d_len);
- tmp += d_len;
- *tmp++ = '\r';
- *tmp++ = '\n';
- *tmp = 0;
- return tmp;
- }
- switch (d_type) {
- default:
- case DT_UNKNOWN:
- case DT_WHT:
- if (tux_hide_unreadable)
- goto out_dput;
- *tmp++ = '?';
- break;
-
- case DT_FIFO:
- if (tux_hide_unreadable)
- goto out_dput;
- *tmp++ = 'p';
- break;
-
- case DT_CHR:
- if (tux_hide_unreadable)
- goto out_dput;
- *tmp++ = 'c';
- break;
-
- case DT_DIR:
- *tmp++ = 'd';
- break;
-
- case DT_BLK:
- if (tux_hide_unreadable)
- goto out_dput;
- *tmp++ = 'b';
- break;
-
- case DT_REG:
- *tmp++ = '-';
- break;
-
- case DT_LNK:
- *tmp++ = 'l';
- break;
-
- case DT_SOCK:
- if (tux_hide_unreadable)
- goto out_dput;
- *tmp++ = 's';
- break;
- }
-
- if (inode->i_mode & S_IRUSR) *tmp++ = 'r'; else *tmp++ = '-';
- if (inode->i_mode & S_IWUSR) *tmp++ = 'w'; else *tmp++ = '-';
- if (inode->i_mode & S_IXUSR) *tmp++ = 'x'; else *tmp++ = '-';
- if (inode->i_mode & S_IRGRP) *tmp++ = 'r'; else *tmp++ = '-';
- if (inode->i_mode & S_IWGRP) *tmp++ = 'w'; else *tmp++ = '-';
- if (inode->i_mode & S_IXGRP) *tmp++ = 'x'; else *tmp++ = '-';
- if (inode->i_mode & S_IROTH) *tmp++ = 'r'; else *tmp++ = '-';
- if (inode->i_mode & S_IWOTH) *tmp++ = 'w'; else *tmp++ = '-';
- if (inode->i_mode & S_IXOTH) *tmp++ = 'x'; else *tmp++ = '-';
-
- *tmp++ = ' ';
-
- size = sprintf(tmp, "%4i %d", inode->i_nlink, inode->i_uid);
- tmp += size;
-
- size = 14 - size;
- if (size <= 0)
- size = 1;
- memset(tmp, ' ', size);
- tmp += size;
-
- size = sprintf(tmp, "%d", inode->i_gid);
- tmp += size;
-
- size = 9 - size;
- if (size <= 0)
- size = 1;
- memset(tmp, ' ', size);
- tmp += size;
-
- tmp += sprintf(tmp, "%8Li", inode->i_size);
- *tmp++ = ' ';
-
- tmp += time_unix2ls(inode->i_mtime.tv_sec, tmp);
- *tmp++ = ' ';
-
- memcpy(tmp, d_name, d_len);
- tmp += d_len;
-
- if (d_type == DT_LNK) {
- int len = 0, max_len;
- #define ARROW " -> "
-
- memcpy(tmp, ARROW, sizeof(ARROW)-1);
- tmp += sizeof(ARROW)-1;
- max_len = MAX_OBJECTNAME_LEN-(tmp-string0);
- if (inode->i_op && inode->i_op->readlink) {
- mm_segment_t oldmm;
-
- oldmm = get_fs(); set_fs(KERNEL_DS);
- set_fs(KERNEL_DS);
- len = inode->i_op->readlink(dentry, tmp, max_len);
- set_fs(oldmm);
- }
- if (len > 0)
- tmp += len;
- else
- Dprintk("hm, readlink() returned %d.\n", len);
- }
- *tmp++ = '\r';
- *tmp++ = '\n';
- *tmp = 0;
-
- return tmp;
-out_dput:
- return NULL;
-}
-
-static void ftp_do_ls_onefile (tux_req_t *req, int cachemiss)
-{
- char string0[MAX_OBJECTNAME_LEN+200], *tmp;
-
- tmp = ftp_print_dir_line(req, string0, req->objectname, req->objectname_len,
-DT_REG, req->dentry, req->dentry->d_inode);
- if (!tmp) {
- req_err(req);
- add_req_to_workqueue(req);
- return;
- }
- if (tmp - string0 >= MAX_OBJECTNAME_LEN+200)
- BUG();
- __ftp_send_async_message(req, string0, 200, tmp - string0);
-}
-
-static void ftp_lookup_listfile (tux_req_t *req, int cachemiss)
-{
- unsigned int flag = cachemiss ? 0 : LOOKUP_ATOMIC;
- struct dentry *dentry;
- struct vfsmount *mnt = NULL;
- int err;
-
- Dprintk("ftp_lookup_listfile(%p, %d, {%s})\n", req, cachemiss, req->objectname);
- dentry = tux_lookup(req, req->objectname, flag, &mnt);
- if (!dentry || IS_ERR(dentry)) {
- if (PTR_ERR(dentry) == -EWOULDBLOCKIO) {
- if (cachemiss)
- TUX_BUG();
- add_tux_atom(req, ftp_lookup_listfile);
- queue_cachemiss(req);
- return;
- }
- goto out_err;
- }
-
- if (S_ISDIR(dentry->d_inode->i_mode)) {
- err = permission(dentry->d_inode, MAY_EXEC, NULL);
- if (err) {
- Dprintk("Directory permission error: %d.\n", err);
- goto out_err_put;
- }
- install_req_dentry(req, dentry, mnt);
-
- add_tux_atom(req, ftp_do_ls_end);
- if (!req->cwd_dentry)
- TUX_BUG();
- add_tux_atom(req, list_directory);
- } else {
- install_req_dentry(req, dentry, mnt);
-
- add_tux_atom(req, ftp_do_ls_end);
- add_tux_atom(req, ftp_do_ls_onefile);
- }
-
- add_tux_atom(req, ftp_do_ls_start);
- add_tux_atom(req, ftp_wait_syn);
- add_tux_atom(req, ftp_flush_req);
- ftp_send_async_message(req, WRITE_LIST, 200);
- return;
-
-out_err_put:
- dput(dentry);
- mntput(mnt);
-out_err:
- ftp_send_async_message(req, BAD_FILENAME, 550);
-}
-
-static void ftp_execute_command (tux_req_t *req, int cachemiss)
-{
- if (!req->parsed_len)
- TUX_BUG();
- trunc_headers(req);
- req->keep_alive = 1;
-
- switch (req->ftp_command) {
-
-#define ABORTED \
- "226 Abort successful.\r\n"
-
- case FTP_COMM_ABOR:
- {
- zap_data_socket(req);
- ftp_send_async_message(req, ABORTED, 226);
- break;
- }
-
- case FTP_COMM_PWD:
- {
- unsigned int str_len;
- char *buf, *path;
-
- buf = (char *)__get_free_page(GFP_KERNEL);
- if (!buf) {
- req_err(req);
- ftp_send_async_message(req, LIST_ERR_MEM, 200);
- GOTO_ERR;
- }
-
- if (!req->cwd_dentry) {
- req->cwd_dentry = dget(req->docroot_dentry);
- req->cwd_mnt = mntget(req->docroot_mnt);
- }
-
-// "257 "/" is current directory.\r\n"
-
-#define PART_1 "257 \""
-#define PART_1_LEN (sizeof(PART_1)-1)
-
-#define PART_3 "\" is current directory.\r\n"
-#define PART_3_LEN sizeof(PART_3)
-
- path = tux_print_path(req, req->cwd_dentry, req->cwd_mnt,
- buf+PART_1_LEN, PAGE_SIZE - PART_3_LEN - PART_1_LEN);
-
- if (path < buf + PART_1_LEN)
- BUG();
-
- memcpy(path - PART_1_LEN, PART_1, PART_1_LEN);
- memcpy(buf + PAGE_SIZE-PART_3_LEN-1, PART_3, PART_3_LEN);
- str_len = buf + PAGE_SIZE-1 - (path - PART_1_LEN) - 1;
-
- __ftp_send_async_message(req, path - PART_1_LEN, 226, str_len);
- free_page((unsigned long)buf);
- break;
- }
-
- case FTP_COMM_CDUP:
- {
- memcpy(req->objectname, "..", 3);
- req->objectname_len = 2;
- req->uri_str = req->objectname;
- req->uri_len = req->objectname_len;
-
- // fall through to CWD:
- }
- case FTP_COMM_CWD:
- {
- ftp_chdir(req, cachemiss);
- break;
- }
-
- case FTP_COMM_NLST:
- case FTP_COMM_LIST:
- {
- if (!req->data_sock) {
- req_err(req);
- ftp_send_async_message(req, LIST_ERR, 200);
- GOTO_ERR;
- }
- if (req->dentry)
- TUX_BUG();
- if (!req->cwd_dentry) {
- req->cwd_dentry = dget(req->docroot_dentry);
- req->cwd_mnt = mntget(req->docroot_mnt);
- }
- if (req->objectname_len)
- ftp_lookup_listfile(req, cachemiss);
- else {
- dget(req->cwd_dentry);
- mntget(req->cwd_mnt);
- install_req_dentry(req, req->cwd_dentry, req->cwd_mnt);
- if (!req->dentry)
- TUX_BUG();
- add_tux_atom(req, ftp_do_ls_end);
- if (!req->cwd_dentry)
- TUX_BUG();
- add_tux_atom(req, list_directory);
- add_tux_atom(req, ftp_do_ls_start);
- add_tux_atom(req, ftp_wait_syn);
- add_tux_atom(req, ftp_flush_req);
- ftp_send_async_message(req, WRITE_LIST, 200);
- }
- break;
- }
-
- case FTP_COMM_RETR:
- {
- if (!req->data_sock) {
- req_err(req);
- ftp_send_async_message(req, RETR_ERR, 200);
- GOTO_ERR;
- }
- ftp_get_file(req, cachemiss);
- break;
- }
-
- case FTP_COMM_SIZE:
- {
- ftp_get_size(req, cachemiss);
- break;
- }
-
- case FTP_COMM_MDTM:
- {
- ftp_get_mdtm(req, cachemiss);
- break;
- }
-
- case FTP_COMM_PASV:
- {
- char buf [36 + 4*3 + 5 + 10];
- struct socket *data_sock;
- struct sockaddr_in addr;
- unsigned int str_len;
- struct tcp_opt *tp;
- u32 local_addr;
- int err;
-
- if (req->data_sock)
- zap_data_socket(req);
- /*
- * Create FTP data connection to client:
- */
- err = sock_create(AF_INET, SOCK_STREAM, IPPROTO_IP, &data_sock);
- if (err < 0) {
- Dprintk("sock create err: %d\n", err);
- req_err(req);
- ftp_send_async_message(req, CLOSE, 500);
- GOTO_ERR;
- }
-
- local_addr = inet_sk(req->sock->sk)->rcv_saddr;
- addr.sin_family = AF_INET;
- addr.sin_port = 0;
- addr.sin_addr.s_addr = local_addr;
- Dprintk("client address: (%d,%d,%d,%d).\n",
- NIPQUAD(inet_sk(req->sock->sk)->daddr));
-
- data_sock->sk->sk_reuse = 1;
- sock_set_flag(data_sock->sk, SOCK_URGINLINE);
- sock_reset_flag(data_sock->sk, SOCK_LINGER);
-
- err = data_sock->ops->bind(data_sock,
- (struct sockaddr*)&addr, sizeof(addr));
- tp = tcp_sk(data_sock->sk);
- tp->nonagle = 2;
- Dprintk("PASV bind() ret: %d.\n", err);
- if (err < 0) {
- req_err(req);
- sock_release(data_sock);
- ftp_send_async_message(req, CLOSE, 500);
- GOTO_ERR;
- }
-
- tp->ack.pingpong = tux_ack_pingpong;
-
- if (!tux_keepalive_timeout)
- tp->linger2 = 0;
- else
- tp->linger2 = tux_keepalive_timeout * HZ;
-
- err = data_sock->ops->listen(data_sock, 1);
- Dprintk("PASV listen() ret: %d\n", err);
- if (err) {
- req_err(req);
- sock_release(data_sock);
- ftp_send_async_message(req, CLOSE, 500);
- GOTO_ERR;
- }
- link_tux_data_socket(req, data_sock);
-
- Dprintk("FTP PASV listen sock state: %d, sk state: %d\n",
- data_sock->state, data_sock->sk->sk_state);
-
- str_len = sprintf(buf,
- "227 Entering Passive Mode (%d,%d,%d,%d,%d,%d)\r\n",
- NIPQUAD(local_addr),
- ntohs(inet_sk(data_sock->sk)->sport) / 256,
- ntohs(inet_sk(data_sock->sk)->sport) & 255 );
- Dprintk("PASV mess: {%s}\n", buf);
-
- add_tux_atom(req, ftp_accept_pasv);
- add_tux_atom(req, ftp_flush_req);
- __ftp_send_async_message(req, buf, 227, str_len);
- break;
- }
-
- case FTP_COMM_PORT:
- {
- struct socket *data_sock;
- struct sockaddr_in addr;
- kernel_cap_t saved_cap;
- u32 local_addr;
- int err;
-
- /*
- * Create FTP data connection to client:
- */
- err = sock_create(AF_INET, SOCK_STREAM, IPPROTO_IP, &data_sock);
- if (err < 0) {
- Dprintk("sock create err: %d\n", err);
- req_err(req);
- ftp_send_async_message(req, CLOSE, 500);
- GOTO_ERR;
- }
-
- local_addr = inet_sk(req->sock->sk)->rcv_saddr;
- addr.sin_family = AF_INET;
- addr.sin_port = htons(20);
- addr.sin_addr.s_addr = local_addr;
-
- Dprintk("data socket address: (%d,%d,%d,%d).\n",
- NIPQUAD(local_addr));
-
- data_sock->sk->sk_reuse = 1;
- sock_set_flag(data_sock->sk, SOCK_URGINLINE);
- sock_reset_flag(data_sock->sk, SOCK_LINGER);
-
- saved_cap = current->cap_effective;
- cap_raise (current->cap_effective, CAP_NET_BIND_SERVICE);
- err = data_sock->ops->bind(data_sock,
- (struct sockaddr*)&addr, sizeof(addr));
- current->cap_effective = saved_cap;
-
- Dprintk("ACTIVE bind() ret: %d.\n", err);
- if (err) {
- sock_release(data_sock);
- req_err(req);
- ftp_send_async_message(req, CLOSE, 500);
- GOTO_ERR;
- }
- tcp_sk(data_sock->sk)->nonagle = 2;
-
- link_tux_data_socket(req, data_sock);
-
- addr.sin_family = AF_INET;
- addr.sin_port = htons(req->ftp_user_port);
- addr.sin_addr.s_addr = htonl(req->ftp_user_addr);
-
- err = data_sock->ops->connect(data_sock, (struct sockaddr *) &addr, sizeof(addr), O_RDWR|O_NONBLOCK);
- if (err && (err != -EINPROGRESS)) {
- Dprintk("connect error: %d\n", err);
- zap_data_socket(req);
- req_err(req);
- ftp_send_async_message(req, CLOSE, 500);
- GOTO_ERR;
- }
- Dprintk("FTP data sock state: %d, sk state: %d\n", data_sock->state, data_sock->sk->sk_state);
- ftp_send_async_message(req, PORT_OK, 200);
- break;
- }
-
- case FTP_COMM_USER:
- {
- if (!strcmp(req->username, "ftp")
- || !strcmp(req->username, "FTP")
- || !strcmp(req->username, "anonymous")
- || !strcmp(req->username, "ANONYMOUS")) {
- unsigned int str_len;
- char login_ok [200];
-
- if (!tux_ftp_login_message) {
- ftp_send_async_message(req, LOGIN_OK_PASS, 230);
- break;
- }
- update_bandwidth(req, 0); /* get current bandwidth */
- if (nr_requests_used() == 1)
- str_len = sprintf(login_ok, LOGIN_OK_ONE,
- tux_max_connect, ftp_bandwidth);
- else
- str_len = sprintf(login_ok, LOGIN_OK,
- nr_requests_used(), tux_max_connect, ftp_bandwidth);
- __ftp_send_async_message(req, login_ok, 200, str_len);
- } else {
- clear_keepalive(req);
- ftp_send_async_message(req, LOGIN_FORBIDDEN, 530);
- }
- break;
- }
- case FTP_COMM_PASS:
- {
- ftp_send_async_message(req, LOGIN_OK_PASS, 230);
- break;
- }
- case FTP_COMM_SITE:
- {
- ftp_send_async_message(req, SITE, 214);
- break;
- }
- case FTP_COMM_SYST:
- {
- ftp_send_async_message(req, LINUX_SYST, 200);
- break;
- }
- case FTP_COMM_TYPE:
- {
- ftp_send_async_message(req, TYPE_OK, 200);
- break;
- }
-#define EXTRA_FEATURES "211-Extensions supported:\r\n SIZE\r\n MDTM\r\n211 End\r\n"
-
- case FTP_COMM_FEAT:
- {
- ftp_send_async_message(req, EXTRA_FEATURES, 211);
- break;
- }
- case FTP_COMM_HELP:
- case FTP_COMM_CLNT:
- case FTP_COMM_NOOP:
- {
- ftp_send_async_message(req, COMMAND_OK, 200);
- break;
- }
- case FTP_COMM_REST:
- {
- ftp_send_async_message(req, REST_OK, 200);
- break;
- }
- case FTP_COMM_QUIT:
- {
- clear_keepalive(req);
- ftp_send_async_message(req, BYE, 200);
- break;
- }
-
- default:
- {
- req->keep_alive = 1;
- ftp_send_async_message(req, CLOSE, 500);
- break;
- }
- }
- return;
-error:
- Dprintk("rejecting FTP session!\n");
- return;
-}
-
-
-static void ftp_timeout (tux_req_t *req, int cachemiss)
-{
- Dprintk("called ftp_timeout(%p)\n", req);
- if (req->error != TUX_ERROR_CONN_TIMEOUT)
- TUX_BUG();
- ftp_send_async_message(req, CLOSE_TIMEOUT, 421);
-}
-
-static void ftp_close (tux_req_t *req, int cachemiss)
-{
- Dprintk("called ftp_close(%p)\n", req);
- ftp_send_async_message(req, CLOSE, 500);
-}
-
-static void ftp_pre_log (tux_req_t *req)
-{
- if (tux_ftp_log_retr_only && (req->ftp_command != FTP_COMM_RETR))
- req->status = 0;
- else
- req->status = req->ftp_command;
-}
-
-tux_proto_t tux_proto_ftp = {
- defer_accept: 0,
- can_redirect: 0,
- got_request: ftp_got_request,
- parse_message: parse_ftp_message,
- illegal_request: ftp_close,
- request_timeout: ftp_timeout,
- pre_log: ftp_pre_log,
- check_req_err: ftp_check_req_err,
- print_dir_line: ftp_print_dir_line,
- name: "ftp",
-};
-
+++ /dev/null
-/*
- * TUX - Integrated Application Protocols Layer and Object Cache
- *
- * Copyright (C) 2000, 2001, Ingo Molnar <mingo@redhat.com>
- *
- * proto_http.c: HTTP application protocol support
- *
- * Right now we detect simple GET headers, anything more
- * subtle gets redirected to secondary server port.
- */
-
-#include <net/tux.h>
-#include "parser.h"
-
-/****************************************************************
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- ****************************************************************/
-
-/*
- * Parse the HTTP message and put results into the request structure.
- * CISAPI extensions do not see the actual message buffer.
- *
- * Any perceived irregularity is honored with a redirect to the
- * secondary server - which in most cases should be Apache. So
- * if TUX gets confused by some strange request we fall back
- * to Apache to be RFC-correct.
- *
- * The parser is 'optimistic', ie. it's optimized for the case where
- * the whole message is available and correct. The parser is also
- * supposed to be 'robust', ie. it can be called multiple times with
- * an incomplete message, as new packets arrive.
- */
-
-static inline int TOHEX (char c)
-{
- switch (c) {
- case '0' ... '9': c -= '0'; break;
- case 'a' ... 'f': c -= 'a'-10; break;
- case 'A' ... 'F': c -= 'A'-10; break;
- default:
- c = -1;
- }
- return c;
-}
-
-/*
- * This function determines whether the client supports
- * gzip-type content-encoding.
- */
-static int may_gzip (const char *str, int len)
-{
- const char *tmp, *curr;
- int i;
-
- if (len <= 4)
- return 0;
- tmp = str;
- for (i = 0; i <= len-6; i++) {
- Dprintk("gzip-checking: {%s}\n", tmp);
- if (memcmp(tmp, " gzip", 5)) {
- tmp++;
- continue;
- }
- curr = tmp + 5;
-
- if (*curr == ',' || *curr == '\r')
- return 1;
- if (memcmp(curr, ";q=", 3))
- return 0;
- curr += 3;
- /*
- * Every qvalue except explicitly zero is accepted.
- * Zero values are "q=0.0", "q=0.00", "q=0.000".
- * Parsing is optimized.
- */
- if (*curr == '0') {
- curr += 2;
- if (*curr == '0') {
- curr++;
- if (*curr == ' ' || *curr == '\r')
- return 0;
- if (*curr == '0') {
- curr++;
- if (*curr == ' ' || *curr == '\r')
- return 0;
- if (*curr == '0') {
- curr++;
- if (*curr == ' ' ||
- *curr == '\r')
- return 0;
- }
- }
- }
- }
- return 1;
- }
- return 0;
-}
-
-/*
- * This function strips off 'strip_host_tail' number of hostname
- * components from the tail of the hostname.
- *
- * Eg. with a value of '1', the "somesite.hosting.com" hostname gets
- * transformed into the "somesite" string.
- */
-static void strip_hostname(tux_req_t *req)
-{
- int strip = strip_host_tail;
- int left = req->host_len;
- int component = 0;
-
- if (!strip || !left)
- return;
-
- while (--left) {
- if (req->host[left] != '.')
- continue;
- if (++component == strip)
- break;
- }
- if (!left)
- return;
- req->host[left] = 0;
- req->host_len = left;
-}
-
-static void http_lookup_vhost (tux_req_t *req, int cachemiss);
-static void http_process_message (tux_req_t *req, int cachemiss);
-
-int parse_http_message (tux_req_t *req, const int total_len)
-{
- int hexhex = 0, hex_val_0 = 0, hex_val_1 = 0;
- const char *curr, *uri, *message;
- unsigned int objectname_len, left;
- unsigned int have_r = 0;
- char c;
-
- left = total_len;
- message = req->headers;
- Dprintk("parsing request:\n---\n%s\n---\n", message);
-/*
- * RFC 2616, 5.1:
- *
- * Request-Line = Method SP Request-URI SP HTTP-Version CRLF
- */
-
- if (!total_len)
- TUX_BUG();
-
- curr = message;
-
-#define GOTO_INCOMPLETE do { Dprintk("incomplete at %s:%d.\n", __FILE__, __LINE__); goto incomplete_message; } while (0)
-#define GOTO_REDIR do { TDprintk("redirect secondary at %s:%d.\n", __FILE__, __LINE__); goto error; } while (0)
-
-#define PRINT_MESSAGE_LEFT \
- Dprintk("message left (%d) at %s:%d:\n--->{%s}<---\n", left, __FILE__, __LINE__, curr)
-
- switch (*curr) {
- case 'G':
- if (PARSE_METHOD(req,curr,GET,left))
- break;
- GOTO_REDIR;
-
- case 'H':
- if (PARSE_METHOD(req,curr,HEAD,left))
- break;
- GOTO_REDIR;
-
- case 'P':
- if (PARSE_METHOD(req,curr,POST,left))
- break;
- if (PARSE_METHOD(req,curr,PUT,left))
- break;
- GOTO_REDIR;
-
- default:
- GOTO_REDIR;
- }
-
- req->method_str = message;
- req->method_len = curr-message-1;
-
- Dprintk("got method %d\n", req->method);
-
- PRINT_MESSAGE_LEFT;
-
- /*
- * Ok, we got one of the methods we can handle, parse
- * the URI:
- */
-
- {
- // Do not allow leading "../" and intermediate "/../"
- int dotdot = 1;
- char *tmp = req->objectname;
- int slashcheck = 1;
-
- req->uri_str = uri = curr;
-
- for (;;) {
- c = get_c(curr,left);
- if (slashcheck) {
- if (c == '/')
- continue;
- slashcheck = 0;
- }
-
- PRINT_MESSAGE_LEFT;
- if (c == ' ' || ((c == '?') && (tux_ignore_query != 1)) || c == '\r' || c == '\n')
- break;
- if (c == '#')
- GOTO_REDIR;
-
- Dprintk("hexhex: %d.\n", hexhex);
- /*
- * First handle HEX HEX encoding
- */
- switch (hexhex) {
- case 0:
- if (c == '%') {
- hexhex = 1;
- goto continue_parsing;
- }
- break;
- case 1:
- hex_val_0 = TOHEX(c);
- if (hex_val_0 < 0)
- GOTO_REDIR;
- hexhex = 2;
- goto continue_parsing;
- case 2:
- hex_val_1 = TOHEX(c);
- if (hex_val_1 < 0)
- GOTO_REDIR;
- c = (hex_val_0 << 4) | hex_val_1;
- if (!c)
- GOTO_REDIR;
- hexhex = 0;
- break;
- default:
- TUX_BUG();
- }
- if (hexhex)
- TUX_BUG();
-
- switch (dotdot) {
- case 0:
- break;
- case 1:
- if (c == '.')
- dotdot = 2;
- else
- dotdot = 0;
- break;
- case 2:
- if (c == '.')
- dotdot = 3;
- else
- dotdot = 0;
- break;
- case 3:
- if (c == '/')
- GOTO_REDIR;
- else
- dotdot = 0;
- break;
- default:
- TUX_BUG();
- }
- if (!dotdot && (c == '/'))
- dotdot = 1;
-
- *(tmp++) = c;
-continue_parsing:
- if (curr - uri >= MAX_OBJECTNAME_LEN)
- GOTO_REDIR;
- }
- PRINT_MESSAGE_LEFT;
- *tmp = 0;
-
- // handle trailing "/.."
- if (dotdot == 3)
- GOTO_REDIR;
-
- objectname_len = tmp - req->objectname;
- req->objectname_len = objectname_len;
- }
- Dprintk("got filename %s (%d)\n", req->objectname, req->objectname_len);
-
- PRINT_MESSAGE_LEFT;
-
- /*
- * Parse optional query string. Copy until end-of-string or space.
- */
- if (c == '?') {
- int query_len;
- const char *query;
-
- req->query_str = query = curr;
-
- for (;;) {
- c = get_c(curr,left);
- if (c == ' ')
- break;
- if (c == '#')
- GOTO_REDIR;
- }
- if (unlikely(tux_ignore_query == 2))
- req->query_str = NULL;
- else {
- query_len = curr-query-1;
- req->query_len = query_len;
- }
- }
- if (req->query_len)
- Dprintk("got query string %s (%d)\n", req->query_str, req->query_len);
- req->uri_len = curr-uri-1;
- if (!req->uri_len)
- GOTO_REDIR;
- Dprintk("got URI %s (%d)\n", req->uri_str, req->uri_len);
-
- PRINT_MESSAGE_LEFT;
- /*
- * Parse the HTTP version field:
- */
- req->version_str = curr;
- if (!PARSE_TOKEN(curr,"HTTP/1.",left))
- GOTO_REDIR;
-
- switch (get_c(curr,left)) {
- case '0':
- req->version = HTTP_1_0;
- break;
- case '1':
- req->version = HTTP_1_1;
- break;
- default:
- GOTO_REDIR;
- }
- /*
- * We default to keepalive in the HTTP/1.1 case and default
- * to non-keepalive in the HTTP/1.0 case. If max_keepalives
- * is 0 then we do no keepalives.
- */
- clear_keepalive(req);
- if (tux_max_keepalives && (req->version == HTTP_1_1))
- req->keep_alive = 1;
- req->version_len = curr - req->version_str;
-
- if (get_c(curr,left) != '\r')
- GOTO_REDIR;
- if (get_c(curr,left) != '\n')
- GOTO_REDIR;
-
- Dprintk("got version %d [%d]\n", req->version, req->version_len);
- PRINT_MESSAGE_LEFT;
-
- /*
- * Now parse (optional) request header fields:
- */
- for (;;) {
- char c;
-
- c = get_c(curr,left);
- switch (c) {
- case '\r':
- if (have_r)
- GOTO_REDIR;
- have_r = 1;
- continue;
- case '\n':
- if (!have_r)
- GOTO_REDIR;
- goto out;
- default:
- if (have_r)
- GOTO_REDIR;
- }
-
-#define PARSE_STR_FIELD(char,field,str,len) \
- if (PARSE_TOKEN(curr,field,left)) { \
- req->str = curr; \
- SKIP_LINE(curr,left); \
- req->len = curr - req->str - 2; \
- Dprintk(char field "field: %s.\n", req->str); \
- break; \
- }
-
-#define ALLOW_UNKNOWN_FIELDS 1
-#ifdef ALLOW_UNKNOWN_FIELDS
-# define UNKNOWN_FIELD { SKIP_LINE(curr,left); break; }
-#else
-# define UNKNOWN_FIELD GOTO_REDIR
-#endif
-
- switch (c) {
- case 'A':
- PARSE_STR_FIELD("A","ccept: ",
- accept_str,accept_len);
- if (PARSE_TOKEN(curr,"ccept-Encoding: ",left)) {
- const char *str = curr-1;
-
- req->accept_encoding_str = curr;
- SKIP_LINE(curr,left);
- req->accept_encoding_len = curr - req->accept_encoding_str - 2;
- Dprintk("Accept-Encoding field: {%s}.\n", str);
-
- if (tux_compression && may_gzip(str,curr-str)) {
- Dprintk("client accepts gzip!.\n");
- req->may_send_gzip = 1;
- }
- break;
- }
- PARSE_STR_FIELD("A","ccept-Charset: ",
- accept_charset_str,accept_charset_len);
- PARSE_STR_FIELD("A","ccept-Language: ",
- accept_language_str,accept_language_len);
- UNKNOWN_FIELD;
-
- case 'C':
- if (PARSE_TOKEN(curr,"onnection: ",left)) {
-next_token:
- switch (get_c(curr,left)) {
- case 'K':
- if (!PARSE_TOKEN(curr,"eep-Alive",left))
- GOTO_REDIR;
- if (tux_max_keepalives)
- req->keep_alive = 1;
- break;
-
- case 'C':
- case 'c':
- if (!PARSE_TOKEN(curr,"lose",left))
- GOTO_REDIR;
- clear_keepalive(req);
- break;
-
- case 'k':
- if (!PARSE_TOKEN(curr,"eep-alive",left))
- GOTO_REDIR;
- if (tux_max_keepalives)
- req->keep_alive = 1;
- break;
- case 'T':
- if (PARSE_TOKEN(curr,"E",left))
- break;
- if (PARSE_TOKEN(curr,"railers",left))
- break;
- if (PARSE_TOKEN(curr,"ransfer-Encoding",left))
- break;
- GOTO_REDIR;
- case 'P':
- if (PARSE_TOKEN(curr,"roxy-Authenticate",left))
- break;
- if (PARSE_TOKEN(curr,"roxy-Authorization",left))
- break;
- GOTO_REDIR;
- case 'U':
- if (!PARSE_TOKEN(curr,"pgrade",left))
- GOTO_REDIR;
- break;
- case ' ':
- PRINT_MESSAGE_LEFT;
- goto next_token;
- case ',':
- PRINT_MESSAGE_LEFT;
- goto next_token;
- default:
- GOTO_REDIR;
- }
- PRINT_MESSAGE_LEFT;
- if (*curr != '\r')
- goto next_token;
- // allow other tokens.
- SKIP_LINE(curr,left);
- break;
- }
-
- PARSE_STR_FIELD("C","ookie: ",
- cookies_str,cookies_len);
- PARSE_STR_FIELD("C","ontent-Type: ",
- content_type_str,content_type_len);
-
- if (PARSE_TOKEN(curr,"ontent-Length: ",left) ||
- PARSE_TOKEN(curr,"ontent-length: ",left)) {
- const char *tmp;
- req->contentlen_str = curr;
- SKIP_LINE(curr,left);
- req->contentlen_len = curr - req->contentlen_str - 2;
- if (req->contentlen_len) {
- tmp = req->contentlen_str;
- req->content_len = simple_strtoul(tmp, NULL, 10);
- }
- Dprintk("Content-Length field: %s [%d].\n", req->contentlen_str, req->contentlen_len);
- Dprintk("Content-Length value: %d.\n", req->content_len);
- break;
- }
- PARSE_STR_FIELD("C","ache-Control: ",
- cache_control_str,cache_control_len);
- UNKNOWN_FIELD;
-
- case 'H':
- if (PARSE_TOKEN(curr,"ost: ",left)) {
- const char *tmp = curr;
- char *tmp2 = req->host;
-
- /*
- * canonize the hostname:
- *
- * 1) strip off preceding 'www.' variants,
- * 2) transform it to lowercase.
- * 3) strip trailing dots
- * 4) potentially strip off tail
- */
-
-#define is_w(n) ((curr[n] == 'w') || (curr[n] == 'W'))
-
- if ((left > 4) && is_w(0) && is_w(1) &&
- is_w(2) && curr[3] == '.') {
- curr += 4;
- left -= 4;
- tmp = curr;
- }
-
- COPY_LINE_TOLOWER(curr, tmp2, left, req->host+MAX_HOST_LEN-2);
- req->host_len = curr - tmp - 2;
- while (req->host[req->host_len] == '.') {
- if (!req->host_len)
- break;
- req->host_len--;
- }
- req->host[req->host_len] = 0;
- if (strip_host_tail)
- strip_hostname(req);
- Dprintk("Host field: %s [%d].\n", req->host, req->host_len);
- break;
- }
- UNKNOWN_FIELD;
-
- case 'I':
- PARSE_STR_FIELD("I","f-None-Match: ",
- if_none_match_str,if_none_match_len);
- PARSE_STR_FIELD("I","f-Modified-Since: ",
- if_modified_since_str,if_modified_since_len);
- PARSE_STR_FIELD("I","f-Range: ",
- if_range_str,if_range_len);
- UNKNOWN_FIELD;
-
- case 'N':
- PARSE_STR_FIELD("N","egotiate: ",
- negotiate_str,negotiate_len);
- UNKNOWN_FIELD;
-
- case 'P':
- PARSE_STR_FIELD("P","ragma: ",
- pragma_str,pragma_len);
- UNKNOWN_FIELD;
-
- case 'R':
-
- PARSE_STR_FIELD("R","eferer: ",
- referer_str,referer_len);
- if (!PARSE_TOKEN(curr,"ange: bytes=",left))
- UNKNOWN_FIELD;
- {
- const char *tmp = curr;
- char *tmp2 = (char *)curr;
- unsigned int offset_start = 0, offset_end = 0;
-
- if (*tmp2 != '-')
- offset_start = simple_strtoul(tmp2, &tmp2, 10);
- if (*tmp2 == '-') {
- tmp2++;
- if (*tmp2 != '\r')
- offset_end = simple_strtoul(tmp2, &tmp2, 10) +1;
- }
- curr = tmp2;
- left -= tmp2-tmp;
-
- req->offset_start = offset_start;
- req->offset_end = offset_end;
-
- SKIP_LINE(curr,left);
- Dprintk("Range field: %s [%d] (%d-%d).\n", tmp, curr-tmp, offset_start, offset_end);
- break;
- }
-
- case 'U':
- PARSE_STR_FIELD("U","ser-Agent: ",
- user_agent_str,user_agent_len);
- UNKNOWN_FIELD;
-
- default:
- UNKNOWN_FIELD;
- }
- PRINT_MESSAGE_LEFT;
- }
-out:
- /*
- * POST data.
- */
- if ((req->method == METHOD_POST) && req->content_len) {
- PRINT_MESSAGE_LEFT;
- if (curr + req->content_len > message + total_len)
- GOTO_INCOMPLETE;
- req->post_data_str = curr;
- req->post_data_len = req->content_len;
- curr += req->content_len;
- left -= req->content_len;
- Dprintk("POST-ed data: {%s}\n", req->post_data_str);
- }
-
- switch (req->method) {
- default:
- GOTO_REDIR;
- case METHOD_GET:
- case METHOD_HEAD:
- case METHOD_POST:
- case METHOD_PUT:
- ;
- }
-
-#define TUX_SCHEME "http://"
-#define TUX_SCHEME_LEN (sizeof(TUX_SCHEME)-1)
-
- if (!memcmp(req->objectname, TUX_SCHEME, TUX_SCHEME_LEN)) {
-
- /* http://user:password@host:port/object */
-
- const char *head, *tail, *end, *host, *port;
- int host_len, objectname_len;
-
- head = req->objectname + TUX_SCHEME_LEN;
- end = req->objectname + req->objectname_len;
-
- tail = memchr(head, '/', end - head);
- if (!tail)
- GOTO_REDIR;
- host = memchr(head, '@', tail - head);
- if (!host)
- host = head;
- else
- host++;
- if (!*host)
- GOTO_REDIR;
- port = memchr(host, ':', tail - host);
- if (port)
- host_len = port - host;
- else
- host_len = tail - host;
- if (host_len >= MAX_HOST_LEN)
- GOTO_REDIR;
- memcpy(req->host, host, host_len);
- req->host_len = host_len;
- req->host[host_len] = 0;
-
- if (*tail != '/')
- TUX_BUG();
-
- req->uri_str = tail;
- req->uri_len = end - tail;
-
- tail++;
- while (*tail == '/')
- tail++;
-
- objectname_len = end - tail;
- memcpy(req->objectname, tail, objectname_len);
- req->objectname_len = objectname_len;
- req->objectname[objectname_len] = 0;
- } else
- if (req->uri_str[0] != '/')
- GOTO_REDIR;
-
- if ((req->version == HTTP_1_1) && !req->host_len)
- GOTO_REDIR;
- if (req->objectname[0] == '/')
- GOTO_REDIR;
- /*
- * Lets make sure nobody plays games with the host
- * header in a virtual hosting environment:
- */
- if (req->virtual && req->host_len) {
- if (memchr(req->host, '/', req->host_len))
- GOTO_REDIR;
- if (req->host[0] == '.') {
- if (req->host_len == 1)
- GOTO_REDIR;
- if ((req->host_len == 2) && (req->host[0] == '.'))
- GOTO_REDIR;
- }
- }
- /*
- * From this point on the request is for the main TUX engine:
- */
- Dprintk("ok, request accepted.\n");
-
- if (req->keep_alive) {
- req->nr_keepalives++;
- if (req->nr_keepalives == -1)
- req->nr_keepalives--;
- INC_STAT(nr_keepalive_reqs);
- } else
- INC_STAT(nr_nonkeepalive_reqs);
- INC_STAT(keepalive_hist[req->nr_keepalives]);
-
- PRINT_MESSAGE_LEFT;
- req->parsed_len = curr-message;
- if (req->dentry)
- TUX_BUG();
- req->virtual = tux_virtual_server;
- if (req->virtual)
- add_tux_atom(req, http_lookup_vhost);
- else {
- req->docroot_dentry = dget(req->proto->main_docroot.dentry);
- req->docroot_mnt = mntget(req->proto->main_docroot.mnt);
- add_tux_atom(req, http_process_message);
- }
-
- return req->parsed_len;
-
-incomplete_message:
- Dprintk("incomplete message!\n");
- PRINT_MESSAGE_LEFT;
-
- return 0;
-
-error:
- if (total_len > 0)
- req->parsed_len = total_len;
- else
- req->parsed_len = 0;
- PRINT_MESSAGE_LEFT;
- if (tux_TDprintk) {
- TDprintk("redirecting message to secondary server.\n");
- print_req(req);
- }
- return -1;
-}
-
-static int lookup_url (tux_req_t *req, const unsigned int flag)
-{
- /*
- * -1 : no previous checks made
- * 0 : previous check failed, do not check farther,
- * 1 : previous check successed, check farther
- */
- int not_modified = -1;
- int perm = 0, i;
- struct dentry *dentry = NULL;
- struct vfsmount *mnt = NULL;
- struct inode *inode;
- const char *filename;
-
- /*
- * Do not do any etag or last_modified header checking
- * if both unset.
- */
- if (!tux_generate_etags && !tux_generate_last_mod)
- not_modified = 0;
-
-repeat_lookup:
- if (req->dentry)
- TUX_BUG();
-
- filename = req->objectname;
- Dprintk("will look up {%s} (%d)\n", filename, req->objectname_len);
- Dprintk("current->fsuid: %d, current->fsgid: %d, ngroups: %d\n",
- current->fsuid, current->fsgid, current->group_info->ngroups);
- for (i = 0; i < current->group_info->ngroups; i++)
- Dprintk(".. group #%d: %d.\n", i, current->groups[i]);
-
- dentry = tux_lookup(req, filename, flag, &mnt);
-
-#define INDEX "/index.html"
-
- if (!dentry || IS_ERR(dentry)) {
- if (PTR_ERR(dentry) == -EWOULDBLOCKIO)
- goto cachemiss;
-
- if (tux_http_dir_indexing && (req->lookup_dir == 1)) {
- // undo the index.html appending:
- req->objectname_len -= sizeof(INDEX)-1;
- req->objectname[req->objectname_len] = 0;
- req->lookup_dir = 2;
- goto repeat_lookup;
- }
- if (!req->lookup_404) {
- int len = strlen(tux_404_page);
- memcpy(req->objectname, tux_404_page, len);
- req->objectname[len] = 0;
- req->objectname_len = len;
- req->lookup_404 = 1;
- req->status = 404;
- goto repeat_lookup;
- }
- TDprintk("abort - lookup error.\n");
- goto abort;
- }
-
- Dprintk("SUCCESS, looked up {%s} == dentry %p (inode %p, count %d.)\n", filename, dentry, dentry->d_inode, atomic_read(&dentry->d_count));
- inode = dentry->d_inode;
-
- /*
- * At this point we have a real, non-negative dentry.
- */
- perm = tux_permission(inode);
-
- if ((perm < 0) || (!S_ISDIR(dentry->d_inode->i_mode)
- && !S_ISREG(dentry->d_inode->i_mode))) {
- Dprintk("FAILED trusted dentry %p permission %d.\n", dentry, perm);
- req->status = 403;
- goto abort;
- }
- if ((req->lookup_dir != 2) && S_ISDIR(dentry->d_inode->i_mode)) {
- if (req->lookup_dir || (req->objectname_len +
- sizeof(INDEX) >= MAX_OBJECTNAME_LEN)) {
- req->status = 403;
- goto abort;
- }
- if (req->objectname_len && (req->objectname[req->objectname_len-1] != '/')) {
- dput(dentry);
- mntput(mnt);
- req->lookup_dir = 0;
- return 2;
- }
- memcpy(req->objectname + req->objectname_len,
- INDEX, sizeof(INDEX));
- req->objectname_len += sizeof(INDEX)-1;
- req->lookup_dir = 1;
- dput(dentry);
- mntput(mnt);
- mnt = NULL;
- dentry = NULL;
- goto repeat_lookup;
- }
- if (tux_max_object_size && (inode->i_size > tux_max_object_size)) {
- TDprintk("too big object, %Ld bytes.\n", inode->i_size);
- req->status = 403;
- goto abort;
- }
- req->total_file_len = inode->i_size;
- req->mtime = inode->i_mtime.tv_sec;
-
- {
- loff_t num = req->total_file_len;
- int nr_digits = 0;
- unsigned long modulo;
- char * etag_p = req->etag;
- char digits [30];
-
- do {
- modulo = do_div(num, 10);
- digits[nr_digits++] = '0' + modulo;
- } while (num);
-
- req->lendigits = nr_digits;
- req->etaglen = nr_digits;
-
- while (nr_digits)
- *etag_p++ = digits[--nr_digits];
-
- *etag_p++ = '-';
- num = req->mtime;
- nr_digits = 0;
-
- do {
- digits[nr_digits++] = 'a' + num % 16;
- num /= 16;
- } while (num);
- req->etaglen += nr_digits+1;
- while (nr_digits)
- *etag_p++ = digits[--nr_digits];
- *etag_p = 0;
- }
-
- if ((req->if_none_match_len >= req->etaglen) && (abs(not_modified) == 1)) {
-
- char * etag_p = req->etag;
- const char * match_p = req->if_none_match_str;
- int pos = req->etaglen - 1;
- int matchpos = req->etaglen - 1;
-
- do {
- while (etag_p[matchpos--] == match_p[pos--])
- if (matchpos < 0)
- break;
- if (matchpos < 0)
- pos = req->if_none_match_len;
- else {
- if (match_p[pos+1] == ',')
- pos += req->etaglen + 2;
- else
- pos += req->etaglen-matchpos;
- matchpos = req->etaglen - 1;
- }
- } while (pos < req->if_none_match_len);
-
- if (matchpos < 0) {
- not_modified = 1;
- TDprintk("Etag matched.\n");
- } else
- not_modified = 0;
- }
-
- if ((req->if_modified_since_len >= 24) && (abs(not_modified) == 1)) {
- if (parse_time(req->if_modified_since_str, req->if_modified_since_len) >= req->mtime ) {
- not_modified = 1;
- Dprintk("Last-Modified matched.\n");
- } else
- not_modified = 0;
- }
-
- if (not_modified == 1) {
- req->status = 304;
- goto abort;
- }
-
- Dprintk("looked up cached dentry %p, (count %d.)\n", dentry, dentry ? atomic_read(&dentry->d_count) : -1 );
-
- url_hist_hit(req->total_file_len);
-out:
- install_req_dentry(req, dentry, mnt);
- req->lookup_dir = 0;
- return 0;
-
-cachemiss:
- return 1;
-
-abort:
- if (dentry) {
- if (!IS_ERR(dentry))
- dput(dentry);
- dentry = NULL;
- }
- if (mnt) {
- if (!IS_ERR(mnt))
- mntput(mnt);
- mnt = NULL;
- }
-#if CONFIG_TUX_DEBUG
- if (!not_modified) {
- TDprintk("req %p has lookup errors!\n", req);
- if (tux_TDprintk)
- print_req(req);
- }
-#endif
- req_err(req);
- goto out;
-}
-
-int handle_gzip_req (tux_req_t *req, unsigned int flags)
-{
- char *curr = req->objectname + req->objectname_len;
- struct dentry *dentry;
- struct vfsmount *mnt = NULL;
- struct inode *inode, *orig_inode;
- loff_t size, orig_size;
-
- *curr++ = '.';
- *curr++ = 'g';
- *curr++ = 'z';
- *curr++ = 0;
- req->objectname_len += 3;
-
- dentry = tux_lookup(req, req->objectname, flags, &mnt);
-
- req->objectname_len -= 3;
- req->objectname[req->objectname_len] = 0;
-
- if (!dentry)
- return 0;
- if (IS_ERR(dentry)) {
- if (PTR_ERR(dentry) == -EWOULDBLOCKIO) {
- release_req_dentry(req);
- return 1;
- }
- return 0;
- }
-
- inode = dentry->d_inode;
- size = inode->i_size;
- orig_inode = req->dentry->d_inode;
- orig_size = orig_inode->i_size;
-
- if (!tux_permission(inode)
- && (size < orig_size)
- && (inode->i_mtime.tv_sec >= orig_inode->i_mtime.tv_sec)) {
-
- release_req_dentry(req);
- install_req_dentry(req, dentry, mnt);
- req->total_file_len = req->output_len = size;
- Dprintk("content WILL be gzipped!\n");
- req->content_gzipped = 1;
- } else {
- dput(dentry);
- mntput(mnt);
- }
-
- return 0;
-}
-
-static spinlock_t mimetypes_lock = SPIN_LOCK_UNLOCKED;
-
-static LIST_HEAD(mimetypes_head);
-
-static mimetype_t default_mimetype = { type: "text/plain", type_len: 10, expire_str: "", expire_str_len: 0 };
-
-#define MAX_MIMETYPE_LEN 128
-#define MAX_CACHE_CONTROL_AGE_LEN 30
-
-void add_mimetype (char *new_ext, char *new_type, char *new_expire)
-{
- int type_len = strlen(new_type);
- int ext_len = strlen(new_ext);
- int expire_len = strlen(new_expire);
- mimetype_t *mime;
- char *ext, *type, *expire;
-
- if (type_len > MAX_MIMETYPE_LEN)
- type_len = MAX_MIMETYPE_LEN;
- if (ext_len > MAX_URI_LEN)
- ext_len = MAX_URI_LEN;
- if (expire_len > MAX_CACHE_CONTROL_AGE_LEN)
- expire_len = MAX_CACHE_CONTROL_AGE_LEN;
-
- mime = tux_kmalloc(sizeof(*mime));
- memset(mime, 0, sizeof(*mime));
- ext = tux_kmalloc(ext_len + 1);
- type = tux_kmalloc(type_len + 1);
- expire = tux_kmalloc(expire_len + 1);
-
- strncpy(ext, new_ext, ext_len);
- strncpy(type, new_type, type_len);
- strncpy(expire, new_expire, expire_len);
-
- // in case one of the above parameters was too long :
-
- ext[ext_len] = '\0';
- type[type_len] = '\0';
- expire[expire_len] = '\0';
-
- mime->ext = ext;
- mime->ext_len = ext_len;
-
- mime->type = type;
- mime->type_len = type_len;
-
- mime->expire_str = expire;
- mime->expire_str_len = expire_len;
-
- mime->special = NORMAL_MIME_TYPE;
- if (!strcmp(type, "TUX/redirect"))
- mime->special = MIME_TYPE_REDIRECT;
- if (!strcmp(type, "TUX/CGI"))
- mime->special = MIME_TYPE_CGI;
- if (!strcmp(type, "TUX/module"))
- mime->special = MIME_TYPE_MODULE;
-
- spin_lock(&mimetypes_lock);
- list_add(&mime->list, &mimetypes_head);
- spin_unlock(&mimetypes_lock);
-}
-
-static inline int ext_matches (char *file, int len, char *ext, int extlen)
-{
- int i;
- char *tmp = file + len-1;
- char *tmp2 = ext + extlen-1;
-
- if (len < extlen)
- return 0;
-
- for (i = 0; i < extlen; i++) {
- if (*tmp != *tmp2)
- return 0;
- tmp--;
- tmp2--;
- }
- return 1;
-}
-
-/*
- * Overhead is not a problem, we cache the MIME type
- * in the dentry.
- */
-static mimetype_t * lookup_mimetype (tux_req_t *req)
-{
- char *objectname = req->objectname;
- int len = req->objectname_len;
- mimetype_t *mime = NULL;
- struct list_head *head, *tmp, *tmp1, *tmp2, *tmp3;
-
- if (!memchr(objectname, '.', len))
- goto out;
-
- spin_lock(&mimetypes_lock);
- head = &mimetypes_head;
- tmp = head->next;
-
- while (tmp != head) {
- mime = list_entry(tmp, mimetype_t, list);
- if (ext_matches(objectname, len, mime->ext, mime->ext_len)) {
- /*
- * Percolate often-used mimetypes up:
- */
- if (tmp->prev != &mimetypes_head) {
- tmp1 = tmp;
- tmp2 = tmp->prev;
- tmp3 = tmp->prev->prev;
- list_del(tmp1);
- list_del(tmp2);
- list_add(tmp, tmp3);
- list_add(tmp2, tmp);
- }
- break;
- } else
- mime = NULL;
- tmp = tmp->next;
- }
- spin_unlock(&mimetypes_lock);
-
-out:
- if (!mime)
- mime = &default_mimetype;
- return mime;
-}
-
-void free_mimetypes (void)
-{
- struct list_head *head, *tmp, *next;
- mimetype_t *mime;
-
- spin_lock(&mimetypes_lock);
- head = &mimetypes_head;
- tmp = head->next;
-
- while (tmp != head) {
- next = tmp->next;
- mime = list_entry(tmp, mimetype_t, list);
- list_del(tmp);
-
- kfree(mime->ext);
- mime->ext = NULL;
- kfree(mime->type);
- mime->type = NULL;
- kfree(mime);
-
- tmp = next;
- }
- spin_unlock(&mimetypes_lock);
-}
-
-/*
- * Various constant HTTP responses:
- */
-
-static const char forbidden[] =
- "HTTP/1.1 403 Forbidden\r\n"
- "Connection: Keep-Alive\r\n" \
- "Content-Length: 24\r\n\r\n"
- "<HTML> Forbidden </HTML>";
-
-static const char not_found[] =
- "HTTP/1.1 404 Not Found\r\n"
- "Connection: Keep-Alive\r\n" \
- "Content-Length: 29\r\n\r\n"
- "<HTML> Page Not Found </HTML>";
-
-#define NOTMODIFIED_1 \
- "HTTP/1.1 304 Not Modified\r\n" \
- "Connection: Keep-Alive\r\n" \
- "Date: "
-
-#define NOTMODIFIED_1_LEN (sizeof(NOTMODIFIED_1) - 1)
-
-#define NOTMODIFIED_2 \
- "\r\nETag: \""
-
-#define NOTMODIFIED_2_LEN (sizeof(NOTMODIFIED_2) - 1)
-
-#define NOTMODIFIED_3 \
- "\"\r\n\r\n"
-
-#define NOTMODIFIED_3_LEN (sizeof(NOTMODIFIED_3) - 1)
-
-#define REDIRECT_1 \
- "HTTP/1.1 301 Moved Permanently\r\n" \
- "Location: http://"
-
-#define REDIRECT_1_LEN (sizeof(REDIRECT_1) - 1)
-
-#define REDIRECT_2 \
- "/\r\nContent-Length: 36\r\n" \
- "Connection: Keep-Alive\r\n" \
- "Content-Type: text/html\r\n\r\n" \
- "<HTML> 301 Moved Permanently </HTML>"
-
-#define REDIRECT_2_LEN (sizeof(REDIRECT_2) - 1)
-
-void send_async_err_forbidden (tux_req_t *req)
-{
- send_async_message(req, forbidden, 403, 1);
-}
-
-void send_async_err_not_found (tux_req_t *req)
-{
- send_async_message(req, not_found, 404, 1);
-}
-
-static void send_ret_notmodified (tux_req_t *req)
-{
- char *buf;
- int size;
-
- size = NOTMODIFIED_1_LEN + DATE_LEN - 1 + NOTMODIFIED_2_LEN + req->etaglen + NOTMODIFIED_3_LEN;
- buf = get_abuf(req, size);
- memcpy(buf, NOTMODIFIED_1, NOTMODIFIED_1_LEN);
- buf += NOTMODIFIED_1_LEN;
- memcpy(buf, tux_date, DATE_LEN-1);
- buf += DATE_LEN-1;
- memcpy(buf, NOTMODIFIED_2, NOTMODIFIED_2_LEN);
- buf += NOTMODIFIED_2_LEN;
- memcpy(buf, &req->etag, req->etaglen);
- buf += req->etaglen;
- memcpy(buf, NOTMODIFIED_3, NOTMODIFIED_3_LEN);
- buf += NOTMODIFIED_3_LEN;
-
- req->status = 304;
- send_abuf(req, size, MSG_DONTWAIT);
- add_req_to_workqueue(req);
-}
-
-static void send_ret_redirect (tux_req_t *req, int cachemiss)
-{
- char *buf;
- unsigned int size;
- unsigned int uts_len = 0;
-
- size = REDIRECT_1_LEN;
- if (req->host_len)
- size += req->host_len;
- else {
- down_read(&uts_sem);
- uts_len = strlen(system_utsname.nodename);
- size += uts_len;
- }
- if (req->objectname[0] != '/')
- size++;
- size += req->objectname_len;
- size += REDIRECT_2_LEN;
-
- if (size > PAGE_SIZE) {
- req->error = TUX_ERROR_CONN_CLOSE;
- zap_request(req, cachemiss);
- return;
- }
-
- buf = get_abuf(req, size);
-
- memcpy(buf, REDIRECT_1, REDIRECT_1_LEN);
- buf += REDIRECT_1_LEN;
-
- Dprintk("req %p, host: %s, host_len: %d.\n", req, req->host, req->host_len);
- if (req->host_len) {
- memcpy(buf, req->host, req->host_len);
- buf += req->host_len;
- } else {
- memcpy(buf, system_utsname.nodename, uts_len);
- up_read(&uts_sem);
- buf += uts_len;
- }
- if (req->objectname[0] != '/') {
- buf[0] = '/';
- buf++;
- }
-
- memcpy(buf, req->objectname, req->objectname_len);
- buf += req->objectname_len;
-
- memcpy(buf, REDIRECT_2, REDIRECT_2_LEN);
- buf += REDIRECT_2_LEN;
-
- req->status = 301;
- send_abuf(req, size, MSG_DONTWAIT);
- add_req_to_workqueue(req);
-}
-
-static void http_got_request (tux_req_t *req)
-{
- req->host[0] = 0;
- req->host_len = 0;
- add_tux_atom(req, parse_request);
- add_req_to_workqueue(req);
-}
-
-
-tux_attribute_t * lookup_tux_attribute (tux_req_t *req)
-{
- tux_attribute_t *attr;
- struct inode *inode;
- mimetype_t *mime;
-
- attr = tux_kmalloc(sizeof(*attr));
- memset(attr, 0, sizeof(*attr));
-
- mime = lookup_mimetype(req);
-
- inode = req->dentry->d_inode;
- if (!inode->i_uid && !inode->i_gid) {
- if (mime->special == MIME_TYPE_MODULE) {
- attr->tcapi = lookup_tuxmodule(req->objectname);
- if (!attr->tcapi) {
- req_err(req);
- mime = &default_mimetype;
- }
- }
- } else {
- if (mime->special && (mime->special != MIME_TYPE_REDIRECT))
- mime = &default_mimetype;
- }
- attr->mime = mime;
-
- return attr;
-}
-
-static void handle_range(tux_req_t *req)
-{
- if (req->if_range_len) {
- time_t range_time;
-
- range_time = parse_time(req->if_range_str, req->if_range_len);
-
- /*
- * If the file is newer then we send the whole file.
- */
- if (range_time < req->mtime )
- goto out_no_range;
- }
- /* if no offset_end was specified then default to 'end of file': */
- if (!req->offset_end)
- req->offset_end = req->total_file_len;
- /*
- * Sanity checks:
- *
- * - is the range between 0...file_len-1 ?
- * - is offset_end after offset_start?
- *
- * (note that offset_end is higher by 1)
- */
- if ((req->offset_end > req->total_file_len) ||
- (req->offset_start >= req->total_file_len) ||
- (req->offset_end <= req->offset_start))
- goto out_no_range;
- /*
- * If the range is 0...file_len-1 then send the whole file:
- */
- if (!req->offset_start && (req->offset_end == req->total_file_len))
- goto out_no_range;
-
- /* ok, the range is valid, use it: */
-
- req->output_len = req->offset_end - req->offset_start;
- req->in_file.f_pos = req->offset_start;
- return;
-
-out_no_range:
- req->offset_start = 0;
- req->offset_end = 0;
-}
-
-static void http_pre_header (tux_req_t *req, int push);
-static void http_post_header (tux_req_t *req, int cachemiss);
-static void http_send_body (tux_req_t *req, int cachemiss);
-
-#define DIRLIST_HEAD_1 "\
-<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 3.2 Final//EN\">\
-<HTML><HEAD><TITLE>Index of %s</TITLE></HEAD><BODY>\
-<H1>Index of %s </H1><PRE><HR>\n%s"
-
-#define DIRLIST_HEAD_2 "\
-<IMG SRC=\"/icons/back.gif\"ALT=\"[DIR]\"> <A HREF=\"../\">Parent Directory</A>\n"
-
-#define DIRLIST_HEAD_SIZE (sizeof(DIRLIST_HEAD_1) + sizeof(DIRLIST_HEAD_2))
-
-static void http_dirlist_head (tux_req_t *req, int cachemiss)
-{
- char *buf1, *buf2, *path;
- int len;
-
- buf1 = (char *)__get_free_page(GFP_KERNEL);
- buf2 = (char *)__get_free_page(GFP_KERNEL);
- if (!buf1 || !buf2)
- goto out;
- path = tux_print_path(req, req->dentry, req->mnt, buf1, PAGE_SIZE);
- if (path[0] == '/' && path[1] == '/' && !path[3])
- path = "/";
- if (2*strlen(path) + DIRLIST_HEAD_SIZE >= PAGE_SIZE)
- goto out;
- len = sprintf(buf2, DIRLIST_HEAD_1, path, path, req->dentry == req->docroot_dentry ? "" : DIRLIST_HEAD_2);
- __send_async_message(req, buf2, 200, len, 0);
-
-out:
- if (buf1)
- free_page((unsigned long)buf1);
- if (buf2)
- free_page((unsigned long)buf2);
-}
-
-#define DIRLIST_TAIL "\
-</PRE><HR><ADDRESS><IMG SRC=\"/icons/tuxlogo.gif\"ALIGN=\"MIDDLE\"ALT=\"[TUX]\">Powered by Linux/TUX 3.0</ADDRESS>\n</BODY></HTML>"
-
-static void http_dirlist_tail (tux_req_t *req, int cachemiss)
-{
- __send_async_message(req, DIRLIST_TAIL, 200, sizeof(DIRLIST_TAIL)-1, 1);
-}
-
-static void http_dirlist (tux_req_t *req, int cachemiss)
-{
- int head = (req->method == METHOD_HEAD);
-
- req->lookup_dir = 3;
- clear_keepalive(req);
- if (!head) {
- add_tux_atom(req, http_dirlist_tail);
- add_tux_atom(req, list_directory);
- add_tux_atom(req, http_dirlist_head);
- }
- http_pre_header(req, head);
- add_req_to_workqueue(req);
-}
-
-static char *host_path_hash(tux_req_t *req, char *tmp)
-{
- if (req->host_len < 2)
- return NULL;
-
- switch (mass_hosting_hash) {
- default:
- case 0:
- return req->host;
- case 1:
-
- // www.ABCDEFG.com => A/ABCDEFG.com
-
- tmp[0] = req->host[0];
- tmp[1] = '/';
- memcpy(tmp + 2, req->host, req->host_len);
- tmp[req->host_len + 2] = 0;
-
- return tmp;
- case 2:
- // www.ABCDEFG.com => A/AB/ABCDEFG.com
-
- tmp[0] = req->host[0];
- tmp[1] = '/';
- tmp[2] = req->host[0];
- tmp[3] = req->host[1];
- tmp[4] = '/';
- memcpy(tmp + 5, req->host, req->host_len);
- tmp[req->host_len + 5] = 0;
-
- return tmp;
- case 3:
- // www.ABCDEFG.com => A/AB/ABC/ABCDEFG.com
-
- tmp[0] = req->host[0];
- tmp[1] = '/';
- tmp[2] = req->host[0];
- tmp[3] = req->host[1];
- tmp[4] = '/';
- tmp[5] = req->host[0];
- tmp[6] = req->host[1];
- tmp[7] = req->host[2];
- tmp[8] = '/';
- memcpy(tmp + 9, req->host, req->host_len);
- tmp[req->host_len + 9] = 0;
-
- return tmp;
- }
-}
-
-static struct dentry * vhost_lookup (tux_req_t *req, struct nameidata* base, struct vfsmount **mnt)
-{
- struct dentry *dentry = NULL;
- // 255.255.255.255
- char ip [3+1+3+1+3+1+3 + 2];
-
- if (req->virtual >= TUX_VHOST_IP) {
- sprintf(ip, "%d.%d.%d.%d",
- NIPQUAD(inet_sk(req->sock->sk)->rcv_saddr));
- dentry = __tux_lookup (req, ip, base, mnt);
- if (!dentry || IS_ERR(dentry)) {
- if (PTR_ERR(dentry) == -EWOULDBLOCKIO)
- return dentry;
- base->dentry = dget(req->proto->main_docroot.dentry);
- base->mnt = mntget(req->proto->main_docroot.mnt);
- goto lookup_default;
- }
- if (req->virtual == TUX_VHOST_IP)
- goto done;
-
- // fall through in mixed mode:
- }
-
- if (!req->host_len) {
-lookup_default:
- *mnt = NULL;
- dentry = __tux_lookup (req, tux_default_vhost, base, mnt);
- } else {
- char tmp [MAX_HOST_LEN*2];
- char *host_path;
-
- host_path = host_path_hash(req, tmp);
- Dprintk("host path hash returned: {%s}\n", host_path);
-
- dentry = NULL;
- if (host_path) {
- *mnt = NULL;
- dentry = __tux_lookup (req, host_path, base, mnt);
- }
- if (!dentry || IS_ERR(dentry)) {
- if (PTR_ERR(dentry) == -EWOULDBLOCKIO)
- return dentry;
- base->dentry = dget(req->proto->main_docroot.dentry);
- base->mnt = mntget(req->proto->main_docroot.mnt);
- if (req->virtual >= TUX_VHOST_IP) {
- *mnt = NULL;
- dentry = __tux_lookup (req, ip, base, mnt);
- if (!dentry || IS_ERR(dentry)) {
- if (PTR_ERR(dentry) == -EWOULDBLOCKIO)
- return dentry;
- base->dentry = dget(req->proto->main_docroot.dentry);
- base->mnt = mntget(req->proto->main_docroot.mnt);
- }
- }
- goto lookup_default;
- }
- }
-done:
- return dentry;
-}
-
-static void http_lookup_vhost (tux_req_t *req, int cachemiss)
-{
- struct dentry *dentry;
- struct nameidata base;
- struct vfsmount *mnt = NULL;
- unsigned int flag = cachemiss ? 0 : LOOKUP_ATOMIC;
-
- Dprintk("http_lookup_vhost(%p, %d, virtual: %d, host: %s (%d).)\n", req, flag, req->virtual, req->host, req->host_len);
-
- base.flags = LOOKUP_FOLLOW|flag;
- base.last_type = LAST_ROOT;
- base.dentry = dget(req->proto->main_docroot.dentry);
- base.mnt = mntget(req->proto->main_docroot.mnt);
-
- dentry = vhost_lookup(req, &base, &mnt);
-
- Dprintk("looked up dentry %p.\n", dentry);
-
- if (dentry && !IS_ERR(dentry) && !dentry->d_inode)
- TUX_BUG();
-
- if (!dentry || IS_ERR(dentry)) {
- if (PTR_ERR(dentry) == -EWOULDBLOCKIO) {
- add_tux_atom(req, http_lookup_vhost);
- queue_cachemiss(req);
- return;
- }
- goto abort;
- }
-
- req->docroot_dentry = dentry;
- req->docroot_mnt = mnt;
-
- add_tux_atom(req, http_process_message);
- add_req_to_workqueue(req);
- return;
-abort:
- if (dentry) {
- if (!IS_ERR(dentry))
- dput(dentry);
- dentry = NULL;
- }
- if (mnt) {
- if (!IS_ERR(mnt))
- mntput(mnt);
- mnt = NULL;
- }
- req_err(req);
- add_req_to_workqueue(req);
-}
-
-static void http_process_message (tux_req_t *req, int cachemiss)
-{
- tux_attribute_t *attr;
- int missed;
- unsigned int lookup_flag = cachemiss ? 0 : LOOKUP_ATOMIC;
-
- Dprintk("handling req %p, cachemiss: %d.\n", req, cachemiss);
-
- /*
- * URL redirection support - redirect all valid requests
- * to the first userspace module.
- */
- if (tux_all_userspace) {
- tcapi_template_t *tcapi = get_first_usermodule();
- if (tcapi) {
- req->usermode = 1;
- req->usermodule_idx = tcapi->userspace_id;
- goto usermode;
- }
- }
- missed = lookup_url(req, lookup_flag);
- if (missed == 2) {
- if (req->query_str) {
- req->error = TUX_ERROR_REDIRECT;
- goto error;
- }
- send_ret_redirect(req, cachemiss);
- return;
- }
- if (req->error)
- goto error;
- if (missed) {
-cachemiss:
- if (cachemiss)
- TUX_BUG();
- Dprintk("uncached request.\n");
- INC_STAT(static_lookup_cachemisses);
- if (req->dentry)
- TUX_BUG();
- add_tux_atom(req, http_process_message);
- queue_cachemiss(req);
- return;
- }
- /*
- * HTML directory indexing.
- */
- if (S_ISDIR(req->dentry->d_inode->i_mode))
- return http_dirlist(req, cachemiss);
- if (!S_ISREG(req->dentry->d_inode->i_mode))
- TUX_BUG();
-
-
- attr = req->dentry->d_extra_attributes;
- if (!attr) {
- attr = lookup_tux_attribute(req);
- if (!attr)
- TUX_BUG();
- req->dentry->d_extra_attributes = attr;
- }
- if (attr->mime)
- Dprintk("using MIME type %s:%s, %d.\n", attr->mime->type, attr->mime->ext, attr->mime->special);
- if (attr->tcapi) {
- req->usermode = 1;
- req->usermodule_idx = attr->tcapi->userspace_id;
- if (req->module_dentry)
- TUX_BUG();
- req->module_dentry = dget(req->dentry);
- release_req_dentry(req);
- goto usermode;
- }
-
- switch (attr->mime->special) {
- case MIME_TYPE_MODULE:
- req->usermode = 1;
- goto usermode;
-
- case MIME_TYPE_REDIRECT:
- req->error = TUX_ERROR_REDIRECT;
- goto error;
-
- case MIME_TYPE_CGI:
-#if CONFIG_TUX_EXTCGI
- Dprintk("CGI request %p.\n", req);
- query_extcgi(req);
- return;
-#endif
-
- default:
- if (req->query_str) {
- req->error = TUX_ERROR_REDIRECT;
- goto error;
- }
- }
- req->attr = attr;
- switch (req->method) {
- case METHOD_GET:
- case METHOD_HEAD:
- break;
- default:
- req->error = TUX_ERROR_REDIRECT;
- goto error;
- }
- if (req->usermode)
- TUX_BUG();
-
- req->output_len = req->total_file_len;
- /*
- * Do range calculations.
- */
- if (req->offset_end || req->offset_start)
- handle_range(req);
-
- if (req->may_send_gzip && !req->offset_start && !req->offset_end) {
- if (handle_gzip_req(req, lookup_flag))
- goto cachemiss;
- if ((tux_compression >= 2) && !req->content_gzipped)
- req->content_gzipped = 2;
- }
- if (req->parsed_len)
- trunc_headers(req);
-
- if (req->error)
- goto error;
-
- add_tux_atom(req, http_send_body);
- add_tux_atom(req, http_post_header);
-
- http_pre_header(req, req->method == METHOD_HEAD);
-
- add_req_to_workqueue(req);
- return;
-
-error:
- if (req->error)
- zap_request(req, cachemiss);
- return;
-
-usermode:
- add_req_to_workqueue(req);
-}
-
-static void http_post_header (tux_req_t *req, int cachemiss)
-{
-#if CONFIG_TUX_DEBUG
- req->bytes_expected = req->output_len;
-#endif
- req->bytes_sent = 0; // data comes now.
-
- add_req_to_workqueue(req);
-}
-
-static void http_send_body (tux_req_t *req, int cachemiss)
-{
- int ret;
-
- Dprintk("SEND req %p <%p> (sock %p, sk %p) (keepalive: %d, status: %d)\n", req, __builtin_return_address(0), req->sock, req->sock->sk, req->keep_alive, req->status);
-
- SET_TIMESTAMP(req->output_timestamp);
-
- if (req->error) {
-#if CONFIG_TUX_DEBUG
- req->bytes_expected = 0;
-#endif
- req->in_file.f_pos = 0;
- /*
- * We are in the middle of a file transfer,
- * zap it immediately:
- */
- TDprintk("req->error = TUX_ERROR_CONN_CLOSE.\n");
- req->error = TUX_ERROR_CONN_CLOSE;
- zap_request(req, cachemiss);
- return;
- }
-
-repeat:
- ret = 0;
- if (!req->status)
- req->status = 200;
- if (req->method != METHOD_HEAD) {
- ret = generic_send_file(req, req->sock, cachemiss);
- Dprintk("body send-file returned: %d.\n", ret);
- } else {
-#if CONFIG_TUX_DEBUG
- req->bytes_expected = 0;
-#endif
- }
-
- switch (ret) {
- case -5:
- add_tux_atom(req, http_send_body);
- output_timeout(req);
- break;
- case -4:
- add_tux_atom(req, http_send_body);
- if (add_output_space_event(req, req->sock)) {
- del_tux_atom(req);
- goto repeat;
- }
- break;
- case -3:
- INC_STAT(static_sendfile_cachemisses);
- add_tux_atom(req, http_send_body);
- queue_cachemiss(req);
- break;
- case -1:
- break;
- default:
- req->in_file.f_pos = 0;
- add_req_to_workqueue(req);
- break;
- }
-}
-
-#define DEFAULT_DATE "Wed, 01 Jan 1970 00:00:01 GMT"
-
-char tux_date [DATE_LEN] = DEFAULT_DATE;
-
-/*
- * HTTP header
- */
-
-#define HEADER_PART1A \
- "HTTP/1.1 200 OK\r\n" \
- "Content-Type: "
-
-#define HEADER_PART1B \
- "HTTP/1.1 200 OK"
-
-#define HEADER_PART1AP \
- "HTTP/1.1 206 Partial Content\r\n" \
- "Content-Type: "
-
-#define HEADER_PART1BP \
- "HTTP/1.1 206 Partial Content"
-
-#define HEADER_PART1C \
- "HTTP/1.1 404 Page Not Found\r\n" \
- "Content-Type: "
-
-#define HEADER_PART1D \
- "HTTP/1.1 200 OK\r\n" \
- "Content-Type: text/html\r\n" \
- "Connection: close\r\n"
-
-#define HEADER_PART2_keepalive "\r\nConnection: Keep-Alive\r\nDate: "
-
-#define HEADER_PART2_close "\r\nConnection: close\r\nDate: "
-
-#define HEADER_PART2_none "\r\nDate: "
-
-// date "%s"
-
-#define HEADER_PART3A "\r\nContent-Encoding: gzip"
-#define HEADER_PART3BX "\r\nContent-Length: "
-
-/*
- * Please acknowledge our hard work by not changing this define, or
- * at least please acknowledge us by leaving "TUX/2.0 (Linux)" in
- * the ID string. Thanks! :-)
- */
-#define HEADER_PART3BY "\r\nServer: TUX/2.0 (Linux)\r\nContent-Length: "
-#define HEADER_PART3C "\r\nETag: \""
-#define HEADER_PART3ACC "\r\nAccept-Ranges: bytes"
-#define HEADER_PART3L "\r\nLast-Modified: "
-#define HEADER_PART3P "\r\nContent-Range: bytes "
-#define HEADER_PART3CA "\r\nCache-Control: max-age="
-#define HEADER_PART4 "\r\n\r\n"
-
-#define MAX_OUT_HEADER_LEN (sizeof(HEADER_PART1AP) + MAX_MIMETYPE_LEN + \
- sizeof(HEADER_PART2_keepalive) + DATE_LEN + \
- sizeof(HEADER_PART3A) + sizeof(HEADER_PART3BY) + \
- 12 + sizeof(HEADER_PART3C) + 21 + sizeof(HEADER_PART3L) + \
- sizeof(HEADER_PART3P) + 32 + \
- DATE_LEN + sizeof(HEADER_PART4) + sizeof(tux_extra_html_header) \
- + sizeof(HEADER_PART3CA) + MAX_CACHE_CONTROL_AGE_LEN)
-
-static void http_pre_header (tux_req_t *req, int head)
-{
- int partial = req->offset_start | req->offset_end;
- unsigned long flags;
- char *buf, *curr;
- mimetype_t *mime = NULL;
- int size;
-
-
- if (MAX_OUT_HEADER_LEN > PAGE_SIZE)
- TUX_BUG();
- if ((req->attr && req->attr->tcapi) || req->usermode)
- TUX_BUG();
-
-#define COPY_STATIC_PART(nr,curr) \
- do { \
- memcpy(curr, HEADER_PART##nr, sizeof(HEADER_PART##nr)-1); \
- curr += sizeof(HEADER_PART##nr)-1; \
- } while (0)
-
- buf = curr = get_abuf(req, MAX_OUT_HEADER_LEN);
-
- if (req->lookup_dir) {
- COPY_STATIC_PART(1D, curr);
- goto dir_next;
- }
- mime = req->attr->mime;
- if (!mime)
- TUX_BUG();
-
- if (req->status == 404) {
- COPY_STATIC_PART(1C, curr);
- memcpy(curr, mime->type, mime->type_len);
- curr += mime->type_len;
- } else {
- if (tux_noid && (mime == &default_mimetype)) {
- if (partial)
- COPY_STATIC_PART(1BP, curr);
- else
- COPY_STATIC_PART(1B, curr);
- } else {
- if (partial)
- COPY_STATIC_PART(1AP, curr);
- else
- COPY_STATIC_PART(1A, curr);
- memcpy(curr, mime->type, mime->type_len);
- curr += mime->type_len;
- }
- }
-
- if (tux_generate_cache_control && mime->expire_str_len) {
- COPY_STATIC_PART(3CA, curr);
- memcpy(curr, mime->expire_str, mime->expire_str_len);
- curr += mime->expire_str_len;
- }
-
- if (req->keep_alive /* && (req->version == HTTP_1_0) */)
- COPY_STATIC_PART(2_keepalive, curr);
- else if (!req->keep_alive && (req->version == HTTP_1_1))
- COPY_STATIC_PART(2_close, curr);
- else
- // HTTP/1.0 default means close
- COPY_STATIC_PART(2_none, curr);
-
-dir_next:
- memcpy(curr, tux_date, DATE_LEN-1);
- curr += DATE_LEN-1;
-
- if (req->content_gzipped)
- COPY_STATIC_PART(3A, curr);
-
- /*
- * Content-Length:
- */
- if (!req->lookup_dir) {
- if (tux_noid)
- COPY_STATIC_PART(3BX, curr);
- else
- COPY_STATIC_PART(3BY, curr);
-
- if (partial)
- curr += sprintf(curr, "%Ld", req->output_len);
- else {
- if (req->content_gzipped)
- curr += sprintf(curr, "%Ld",
- req->total_file_len);
- else {
- memcpy(curr, &req->etag, req->lendigits);
- curr += req->lendigits;
- }
- }
- if (tux_generate_etags && (req->status != 404)) {
- COPY_STATIC_PART(3C, curr);
- memcpy(curr, &req->etag, req->etaglen);
- curr += req->etaglen;
- curr[0] = '"';
- curr++;
- }
- if (tux_generate_last_mod || tux_generate_etags)
- COPY_STATIC_PART(3ACC, curr);
- }
- if (tux_generate_last_mod && (req->status != 404)) {
- COPY_STATIC_PART(3L, curr);
- last_mod_time(curr, req->mtime);
- curr += DATE_LEN-1;
- }
- if (partial) {
- COPY_STATIC_PART(3P, curr);
- curr += sprintf(curr, "%Ld-%Ld/%Ld", req->offset_start,
- req->offset_end-1, req->total_file_len);
- }
- COPY_STATIC_PART(4, curr);
- /*
- * Possibly add an extra HTML header:
- */
- if (tux_extra_html_header_size && mime && !strcmp(mime->type, "text/html")) {
- unsigned int len = tux_extra_html_header_size;
-
- memcpy(curr, tux_extra_html_header, len);
- curr += len;
- }
-
- size = curr-buf;
-
-#if CONFIG_TUX_DEBUG
- *curr = 0;
- Dprintk("{%s} [%d/%d]\n", buf, size, strlen(buf));
-#endif
-
- flags = MSG_DONTWAIT;
- if (!head)
- flags |= MSG_MORE;
- send_abuf(req, size, flags);
-}
-
-void http_illegal_request (tux_req_t *req, int cachemiss)
-{
- if (req->status == 304)
- send_ret_notmodified(req);
- else {
- if (req->status == 403)
- send_async_err_forbidden(req);
- else
- send_async_err_not_found(req);
- }
-}
-
-static int http_check_req_err (tux_req_t *req, int cachemiss)
-{
- if ((req->sock->sk->sk_state <= TCP_SYN_RECV) &&
- !tcp_sk(req->sock->sk)->urg_data)
- return 0;
- Dprintk("http_check_req_err(%p,%d): 1 (state: %d, urg: %d)\n",
- req, cachemiss, req->sock->sk->sk_state,
- tcp_sk(req->sock->sk)->urg_data);
-#if CONFIG_TUX_DEBUG
- req->bytes_expected = 0;
-#endif
- req->in_file.f_pos = 0;
- req->error = TUX_ERROR_CONN_CLOSE;
- zap_request(req, cachemiss);
-
- return 1;
-}
-
-#define COPY_STR(str) \
- do { memcpy(tmp, str, sizeof(str)-1); \
- tmp += sizeof(str)-1; } while (0)
-
-static char * http_print_dir_line (tux_req_t *req, char *tmp, char *d_name, int d_len, int d_type, struct dentry *dentry, struct inode *inode)
-{
- int len, spaces;
- loff_t size;
-
- switch (d_type) {
- case DT_DIR:
- COPY_STR("<IMG SRC=\"/icons/dir.gif\" ALT=\"[DIR]\">");
- break;
- case DT_REG:
- if ((d_len >= 3) &&
- (d_name[d_len-3] == '.') &&
- (d_name[d_len-2] == 'g') &&
- (d_name[d_len-1] == 'z'))
- COPY_STR("<IMG SRC=\"/icons/compressed.gif\" ALT=\"[ ]\">");
- else
- if ((d_len >= 4) &&
- (d_name[d_len-4] == '.') &&
- (d_name[d_len-3] == 't') &&
- (d_name[d_len-2] == 'g') &&
- (d_name[d_len-1] == 'z'))
- COPY_STR("<IMG SRC=\"/icons/compressed.gif\" ALT=\"[ ]\">");
- else
- if ((d_len >= 4) &&
- (d_name[d_len-4] == '.') &&
- (d_name[d_len-3] == 't') &&
- (d_name[d_len-2] == 'x') &&
- (d_name[d_len-1] == 't'))
- COPY_STR("<IMG SRC=\"/icons/text.gif\" ALT=\"[ ]\">");
- else
- if ((d_len >= 4) &&
- (d_name[d_len-4] == '.') &&
- (d_name[d_len-3] == 'b') &&
- (d_name[d_len-2] == 'z') &&
- (d_name[d_len-1] == '2'))
- COPY_STR("<IMG SRC=\"/icons/compressed.gif\" ALT=\"[ ]\">");
- else
- if ((d_len >= 4) &&
- (d_name[d_len-4] == '.') &&
- (d_name[d_len-3] == 'z') &&
- (d_name[d_len-2] == 'i') &&
- (d_name[d_len-1] == 'p'))
- COPY_STR("<IMG SRC=\"/icons/compressed.gif\" ALT=\"[ ]\">");
- else
- COPY_STR("<IMG SRC=\"/icons/file.gif\" ALT=\"[ ]\">");
- break;
- case DT_LNK:
- COPY_STR("<IMG SRC=\"/icons/link.gif\" ALT=\"[LNK]\">");
- break;
- default:
- if (tux_hide_unreadable)
- goto out_dput;
- COPY_STR("<IMG SRC=\"/icons/unknown.gif\" ALT=\"[ ]\">");
- break;
- }
-
-#define LIST_1 " <A HREF=\""
-#define LIST_2 "\">"
-#define LIST_2_DIR "/\">"
-#define LIST_3 "</A> "
-
- COPY_STR(LIST_1);
- memcpy(tmp, d_name, d_len);
- tmp += d_len;
- if (d_type == DT_DIR)
- COPY_STR(LIST_2_DIR);
- else
- COPY_STR(LIST_2);
- spaces = 0;
- len = d_len;
-
- if (len > 25)
- len = 25;
- memcpy(tmp, d_name, len);
- tmp += len;
- if (len != d_len) {
- *tmp++ = '.';
- *tmp++ = '.';
- } else {
- if (d_type == DT_DIR)
- *tmp++ = '/';
- else
- spaces++;
- spaces++;
- }
- COPY_STR(LIST_3);
- while (spaces) {
- *tmp++ = ' ';
- spaces--;
- }
-#define FILL 25
- if (d_len < FILL) {
- memset(tmp, ' ', FILL-d_len);
- tmp += FILL-d_len;
- }
-
- tmp += time_unix2ls(inode->i_mtime.tv_sec, tmp);
- *tmp++ = ' ';
-
- if (d_type != DT_REG) {
- COPY_STR(" - ");
- goto out_size;
- }
- size = inode->i_size >> 10;
- if (size < 1024) {
- tmp += sprintf(tmp, "%8Lik ", size);
- goto out_size;
- }
- size >>= 10;
- if (size < 1024) {
- tmp += sprintf(tmp, "%8LiM ", size);
- goto out_size;
- }
- size >>= 10;
- if (size < 1024) {
- tmp += sprintf(tmp, "%8LiG ", size);
- goto out_size;
- }
- size >>= 10;
- if (size < 1024) {
- tmp += sprintf(tmp, "%8LiT ", size);
- goto out_size;
- }
- size >>= 10;
- tmp += sprintf(tmp, "%8LiT ", size);
-
-out_size:
- *tmp++ = '\n';
- *tmp = 0;
-
- return tmp;
-out_dput:
- return NULL;
-}
-
-tux_proto_t tux_proto_http = {
- defer_accept: 1,
- can_redirect: 1,
- got_request: http_got_request,
- parse_message: parse_http_message,
- illegal_request: http_illegal_request,
- check_req_err: http_check_req_err,
- print_dir_line: http_print_dir_line,
- name: "http",
-};
-
+++ /dev/null
-/*
- * TUX - Integrated Application Protocols Layer and Object Cache
- *
- * Copyright (C) 2000, 2001, Ingo Molnar <mingo@redhat.com>
- *
- * redirect.c: redirect requests to other server sockets (such as Apache).
- */
-
-#include <net/tux.h>
-
-/****************************************************************
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- ****************************************************************/
-
-static void dummy_destructor(struct open_request *req)
-{
-}
-
-static struct or_calltable dummy =
-{
- 0,
- NULL,
- NULL,
- &dummy_destructor,
- NULL
-};
-
-static int redirect_sock (tux_req_t *req, const int port)
-{
- struct socket *sock = req->sock;
- struct open_request *tcpreq;
- struct sock *sk, *oldsk;
- int err = -1;
-
- /*
- * Look up (optional) listening user-space socket.
- */
- local_bh_disable();
- sk = tcp_v4_lookup_listener(INADDR_ANY, port, 0);
- /*
- * Look up localhost listeners as well.
- */
- if (!sk) {
- u32 daddr;
- ((unsigned char *)&daddr)[0] = 127;
- ((unsigned char *)&daddr)[1] = 0;
- ((unsigned char *)&daddr)[2] = 0;
- ((unsigned char *)&daddr)[3] = 1;
- sk = tcp_v4_lookup_listener(daddr, port, 0);
- }
- local_bh_enable();
-
- /* No secondary server found */
- if (!sk)
- goto out;
-
- /*
- * Requeue the 'old' socket as an accept-socket of
- * the listening socket. This way we can shuffle
- * a socket around. Since we've read the input data
- * via the non-destructive MSG_PEEK, the secondary
- * server can be used transparently.
- */
- oldsk = sock->sk;
- lock_sock(sk);
-
- if (sk->sk_state != TCP_LISTEN)
- goto out_unlock;
-
- tcpreq = tcp_openreq_alloc();
- if (!tcpreq)
- goto out_unlock;
-
- unlink_tux_socket(req);
-
- sock->sk = NULL;
- sock->state = SS_UNCONNECTED;
-
- tcpreq->class = &dummy;
- write_lock_irq(&oldsk->sk_callback_lock);
- oldsk->sk_socket = NULL;
- oldsk->sk_sleep = NULL;
- write_unlock_irq(&oldsk->sk_callback_lock);
-
- tcp_sk(oldsk)->nonagle = 0;
-
- tcp_acceptq_queue(sk, tcpreq, oldsk);
-
- sk->sk_data_ready(sk, 0);
-
- /*
- * It's now completely up to the secondary
- * server to handle this request.
- */
- sock_release(req->sock);
- req->sock = NULL;
- req->parsed_len = 0;
- err = 0;
- Dprintk("req %p redirected to secondary server!\n", req);
-
-out_unlock:
- release_sock(sk);
- sock_put(sk);
-out:
- if (err)
- Dprintk("NO secondary server for req %p!\n", req);
- return err;
-}
-
-void redirect_request (tux_req_t *req, int cachemiss)
-{
- if (tux_TDprintk && (req->status != 304)) {
- TDprintk("trying to redirect req %p, req->error: %d, req->status: %d.\n", req, req->error, req->status);
- print_req(req);
- }
-
- if (cachemiss)
- TUX_BUG();
- if (req->error == TUX_ERROR_CONN_CLOSE)
- goto out_flush;
- if (!req->sock)
- TUX_BUG();
-
- if (!req->status)
- req->status = -1;
- if (!req->proto->can_redirect || (req->status == 304) || redirect_sock(req, tux_clientport)) {
- if (req->parsed_len)
- trunc_headers(req);
- req->proto->illegal_request(req, cachemiss);
- return;
- } else {
- if (req->data_sock)
- BUG();
- }
-out_flush:
- clear_keepalive(req);
- if (!tux_redirect_logging)
- req->status = 0;
- flush_request(req, cachemiss);
-}
-
+++ /dev/null
-/*
- * TUX - Integrated Application Protocols Layer and Object Cache
- *
- * Copyright (C) 2000, 2001, Ingo Molnar <mingo@redhat.com>
- *
- * times.c: time conversion routines.
- *
- * Original time convserion code Copyright (C) 1999 by Arjan van de Ven
- */
-
-/****************************************************************
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- ****************************************************************/
-
-#include <linux/time.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/ctype.h>
-
-
-#include "times.h"
-
-char *dayName[7] = {
- "Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"
-};
-
-static char *monthName[12] = {
- "Jan", "Feb", "Mar", "Apr", "May", "Jun",
- "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"
-};
-
-char itoa_h[60]={'0','0','0','0','0','0','0','0','0','0',
- '1','1','1','1','1','1','1','1','1','1',
- '2','2','2','2','2','2','2','2','2','2',
- '3','3','3','3','3','3','3','3','3','3',
- '4','4','4','4','4','4','4','4','4','4',
- '5','5','5','5','5','5','5','5','5','5'};
-
-char itoa_l[60]={'0','1','2','3','4','5','6','7','8','9',
- '0','1','2','3','4','5','6','7','8','9',
- '0','1','2','3','4','5','6','7','8','9',
- '0','1','2','3','4','5','6','7','8','9',
- '0','1','2','3','4','5','6','7','8','9',
- '0','1','2','3','4','5','6','7','8','9'};
-
-int time_unix2ls(time_t zulu, char *buf)
-{
- int Y=0,M=0,D=0;
- int H=0,Min=0,S=0,WD=0;
- int I,I2;
- time_t rest, delta;
-
- if (zulu > xtime.tv_sec)
- zulu = xtime.tv_sec;
-
- I=0;
- while (I<TUX_NUMYEARS) {
- if (TimeDays[I][0]>zulu)
- break;
- I++;
- }
-
- Y=--I;
- if (I<0) {
- Y=0;
- goto BuildYear;
- }
- I2=0;
- while (I2<=12) {
- if (TimeDays[I][I2]>zulu)
- break;
- I2++;
- }
-
- M=I2-1;
-
- rest=zulu - TimeDays[Y][M];
- WD=WeekDays[Y][M];
- D=rest/86400;
- rest=rest%86400;
- WD+=D;
- WD=WD%7;
- H=rest/3600;
- rest=rest%3600;
- Min=rest/60;
- rest=rest%60;
- S=rest;
-
-BuildYear:
- Y+=TUX_YEAROFFSET;
-
-
- /* Format: Day, 01 Mon 1999 01:01:01 GMT */
-
- delta = xtime.tv_sec - zulu;
- if (delta > 6*30*24*60)
- // "May 23 2000"
- return sprintf( buf, "%s %02i %04i", monthName[M], D+1, Y);
- else
- // "May 23 10:14"
- return sprintf( buf, "%s %02i %02i:%02i",
- monthName[M], D+1, H, Min);
-}
-
-static int MonthHash[32] =
- {0,0,7,0,0,0,0,0,0,0,0,3,0,0,0,2,6,0,5,0,9,8,4,0,0,11,1,10,0,0,0,0};
-
-#define is_digit(c) ((c) >= '0' && (c) <= '9')
-
-static inline int skip_atoi(char **s)
-{
- int i=0;
-
- while (is_digit(**s))
- i = i*10 + *((*s)++) - '0';
- return i;
-}
-
-time_t mimetime_to_unixtime(char *Q)
-{
- int Y,M,D,H,Min,S;
- unsigned int Hash;
- time_t Temp;
- char *s,**s2;
-
- s=Q;
- s2=&s;
-
- if (strlen(s)<30) return 0;
- if (s[3]!=',') return 0;
- if (s[19]!=':') return 0;
-
- s+=5; /* Skip day of week */
- D = skip_atoi(s2); /* Day of month */
- s++;
- Hash = (char)s[0]+(char)s[2];
- Hash = (Hash<<1) + (char)s[1];
- Hash = (Hash&63)>>1;
- M = MonthHash[Hash];
- s+=4;
- Y = skip_atoi(s2); /* Year */
- s++;
- H = skip_atoi(s2); /* Hour */
- s++;
- Min = skip_atoi(s2); /* Minutes */
- s++;
- S = skip_atoi(s2); /* Seconds */
- s++;
- if ((s[0]!='G')||(s[1]!='M')||(s[2]!='T'))
- {
- return 0; /* No GMT */
- }
-
- if (Y<TUX_YEAROFFSET) Y = TUX_YEAROFFSET;
- if (Y>TUX_YEAROFFSET+9) Y = TUX_YEAROFFSET+9;
-
- Temp = TimeDays[Y-TUX_YEAROFFSET][M];
- Temp += D*86400+H*3600+Min*60+S;
-
- return Temp;
-}
-
-// writes the full http date, corresponding to time_t received
-
-void last_mod_time(char * curr, const time_t t)
-{
- int day, tod, year, wday, mon, hour, min, sec;
-
- tod = t % 86400;
- day = t / 86400;
- if (tod < 0) {
- tod += 86400;
- --day;
- }
-
- hour = tod / 3600;
- tod %= 3600;
- min = tod / 60;
- sec = tod % 60;
-
- wday = (day + 4) % 7;
- if (wday < 0)
- wday += 7;
-
- day -= 11017;
- /* day 0 is march 1, 2000 */
- year = 5 + day / 146097;
- day = day % 146097;
- if (day < 0) {
- day += 146097;
- --year;
- }
- /* from now on, day is nonnegative */
- year *= 4;
- if (day == 146096) {
- year += 3;
- day = 36524;
- } else {
- year += day / 36524;
- day %= 36524;
- }
- year *= 25;
- year += day / 1461;
- day %= 1461;
- year *= 4;
- if (day == 1460) {
- year += 3;
- day = 365;
- } else {
- year += day / 365;
- day %= 365;
- }
-
- day *= 10;
- mon = (day + 5) / 306;
- day = day + 5 - 306 * mon;
- day /= 10;
- if (mon >= 10) {
- ++year;
- mon -= 10;
- } else
- mon += 2;
-
- sprintf(curr, "%s, %.2d %s %d %.2d:%.2d:%.2d GMT", dayName[wday],
- day+1, monthName[mon], year, hour, min, sec);
-}
-
-// writes the full date in ISO8601 format,
-// corresponding to time_t received
-// example: 20011126224910
-
-int mdtm_time(char * curr, const time_t t)
-{
- int day, tod, year, wday, mon, hour, min, sec;
-
- tod = t % 86400;
- day = t / 86400;
- if (tod < 0) {
- tod += 86400;
- --day;
- }
-
- hour = tod / 3600;
- tod %= 3600;
- min = tod / 60;
- sec = tod % 60;
-
- wday = (day + 4) % 7;
- if (wday < 0)
- wday += 7;
-
- day -= 11017;
- /* day 0 is march 1, 2000 */
- year = 5 + day / 146097;
- day = day % 146097;
- if (day < 0) {
- day += 146097;
- --year;
- }
- /* from now on, day is nonnegative */
- year *= 4;
- if (day == 146096) {
- year += 3;
- day = 36524;
- } else {
- year += day / 36524;
- day %= 36524;
- }
- year *= 25;
- year += day / 1461;
- day %= 1461;
- year *= 4;
- if (day == 1460) {
- year += 3;
- day = 365;
- } else {
- year += day / 365;
- day %= 365;
- }
-
- day *= 10;
- mon = (day + 5) / 306;
- day = day + 5 - 306 * mon;
- day /= 10;
- if (mon >= 10) {
- ++year;
- mon -= 10;
- } else
- mon += 2;
-
- return sprintf(curr, "213 %.4d%.2d%.2d%.2d%.2d%.2d\r\n",
- year, mon+1, day+1, hour, min, sec);
-}
-
-static inline int make_num(const char *s)
-{
- if (*s >= '0' && *s <= '9')
- return 10 * (*s - '0') + *(s + 1) - '0';
- else
- return *(s + 1) - '0';
-}
-
-static inline int make_month(const char *s)
-{
- int i;
-
- for (i = 0; i < 12; i++)
- if (!strncmp(monthName[i], s, 3))
- return i+1;
- return 0;
-}
-
-time_t parse_time(const char *str, const int str_len)
-{
- int hour;
- int min;
- int sec;
- int mday;
- int mon;
- int year;
-
- if (str[3] == ',') {
- /* Thu, 09 Jan 1993 01:29:59 GMT */
-
- if (str_len < 29)
- return -1;
-
- mday = make_num(str+5);
- mon = make_month(str + 8);
- year = 100 * make_num(str + 12) + make_num(str + 14);
- hour = make_num(str + 17);
- min = make_num(str + 20);
- sec = make_num(str + 23);
- }
- else {
- const char *s;
- s = strchr(str, ',');
- if (!s || (str_len - (s - str) < 24)) {
- /* Wed Jun 9 01:29:59 1993 */
-
- if (str_len < 24)
- return -1;
-
- mon = make_month(str+4);
- mday = make_num(str+8);
- hour = make_num(str+11);
- min = make_num(str+14);
- sec = make_num(str+17);
- year = make_num(str+20)*100 + make_num(str+22);
- }
- else {
- /* Thursday, 10-Jun-93 01:29:59 GMT */
-
- mday = make_num(s + 2);
- mon = make_month(s + 5);
- year = make_num(s + 9) + 1900;
- if (year < 1970)
- year += 100;
- hour = make_num(s + 12);
- min = make_num(s + 15);
- sec = make_num(s + 18);
- }
- }
-
- if (sec < 0 || sec > 59)
- return -1;
- if (min < 0 || min > 59)
- return -1;
- if (hour < 0 || hour > 23)
- return -1;
- if (mday < 1 || mday > 31)
- return -1;
- if (mon < 1 || mon > 12)
- return -1;
- if (year < 1970 || year > 2020)
- return -1;
-
- return mktime(year, mon, mday, hour, min, sec);
-}
+++ /dev/null
-static time_t TimeDays[10][13] = {
- { 852073200, 854751600, 857170800, 859849200, 862441200, 865119600, 867711600, 870390000, 873068400, 875660400, 878338800, 880930800, 883609200 } ,
- { 883609200, 886287600, 888706800, 891385200, 893977200, 896655600, 899247600, 901926000, 904604400, 907196400, 909874800, 912466800, 915145200 } ,
- { 915145200, 917823600, 920242800, 922921200, 925513200, 928191600, 930783600, 933462000, 936140400, 938732400, 941410800, 944002800, 946681200 } ,
- { 946681200, 949359600, 951865200, 954543600, 957135600, 959814000, 962406000, 965084400, 967762800, 970354800, 973033200, 975625200, 978303600 } ,
- { 978303600, 980982000, 983401200, 986079600, 988671600, 991350000, 993942000, 996620400, 999298800, 1001890800, 1004569200, 1007161200, 1009839600 } ,
- { 1009839600, 1012518000, 1014937200, 1017615600, 1020207600, 1022886000, 1025478000, 1028156400, 1030834800, 1033426800, 1036105200, 1038697200, 1041375600 } ,
- { 1041375600, 1044054000, 1046473200, 1049151600, 1051743600, 1054422000, 1057014000, 1059692400, 1062370800, 1064962800, 1067641200, 1070233200, 1072911600 } ,
- { 1072911600, 1075590000, 1078095600, 1080774000, 1083366000, 1086044400, 1088636400, 1091314800, 1093993200, 1096585200, 1099263600, 1101855600, 1104534000 } ,
- { 1104534000, 1107212400, 1109631600, 1112310000, 1114902000, 1117580400, 1120172400, 1122850800, 1125529200, 1128121200, 1130799600, 1133391600, 1136070000 } ,
- { 1136070000, 1138748400, 1141167600, 1143846000, 1146438000, 1149116400, 1151708400, 1154386800, 1157065200, 1159657200, 1162335600, 1164927600, 1167606000 }
-};
-static int WeekDays[10][13] = {
- { 3, 6, 6, 2, 4, 0, 2, 5, 1, 3, 6, 1, 4 } ,
- { 4, 0, 0, 3, 5, 1, 3, 6, 2, 4, 0, 2, 5 } ,
- { 5, 1, 1, 4, 6, 2, 4, 0, 3, 5, 1, 3, 6 } ,
- { 6, 2, 3, 6, 1, 4, 6, 2, 5, 0, 3, 5, 1 } ,
- { 1, 4, 4, 0, 2, 5, 0, 3, 6, 1, 4, 6, 2 } ,
- { 2, 5, 5, 1, 3, 6, 1, 4, 0, 2, 5, 0, 3 } ,
- { 3, 6, 6, 2, 4, 0, 2, 5, 1, 3, 6, 1, 4 } ,
- { 4, 0, 1, 4, 6, 2, 4, 0, 3, 5, 1, 3, 6 } ,
- { 6, 2, 2, 5, 0, 3, 5, 1, 4, 6, 2, 4, 0 } ,
- { 0, 3, 3, 6, 1, 4, 6, 2, 5, 0, 3, 5, 1 }
-};
-#define TUX_YEAROFFSET 1997
-#define TUX_NUMYEARS 10
+++ /dev/null
-/*
- * TUX - Integrated Application Protocols Layer and Object Cache
- *
- * Copyright (C) 2000, 2001, Ingo Molnar <mingo@redhat.com>
- *
- * userspace.c: handle userspace-module requests
- */
-
-#include <net/tux.h>
-
-/****************************************************************
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- ****************************************************************/
-
#include <linux/mount.h>
#include <net/checksum.h>
#include <linux/security.h>
+
#include <linux/vs_context.h>
#include <linux/vs_network.h>
-#include <linux/vs_limit.h>
+
int sysctl_unix_max_dgram_qlen = 10;
mntput(mnt);
}
- vx_sock_dec(sk);
clr_vx_info(&sk->sk_vx_info);
clr_nx_info(&sk->sk_nx_info);
sock_put(sk);
sk_set_owner(sk, THIS_MODULE);
set_vx_info(&sk->sk_vx_info, current->vx_info);
- sk->sk_xid = vx_current_xid();
- vx_sock_inc(sk);
set_nx_info(&sk->sk_nx_info, current->nx_info);
+ sk->sk_xid = vx_current_xid();
sk->sk_write_space = unix_write_space;
sk->sk_max_ack_backlog = sysctl_unix_max_dgram_qlen;
EXPORT_SYMBOL(xfrm4_rcv);
EXPORT_SYMBOL(xfrm4_tunnel_register);
EXPORT_SYMBOL(xfrm4_tunnel_deregister);
+EXPORT_SYMBOL(xfrm4_tunnel_check_size);
EXPORT_SYMBOL(xfrm_register_type);
EXPORT_SYMBOL(xfrm_unregister_type);
EXPORT_SYMBOL(xfrm_get_type);
return;
expired:
- read_unlock(&xp->lock);
km_policy_expired(xp, dir, 1);
xfrm_policy_delete(xp, dir);
xfrm_pol_put(xp);
write_lock_bh(&xfrm_policy_lock);
pol = __xfrm_policy_unlink(pol, dir);
write_unlock_bh(&xfrm_policy_lock);
- if (pol) {
- if (dir < XFRM_POLICY_MAX)
- atomic_inc(&flow_cache_genid);
+ if (pol)
xfrm_policy_kill(pol);
- }
}
int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
xfrm_put_type(x->type);
}
kfree(x);
+ wake_up(&km_waitq);
}
static void xfrm_state_gc_task(void *data)
x = list_entry(entry, struct xfrm_state, bydst);
xfrm_state_gc_destroy(x);
}
- wake_up(&km_waitq);
}
static inline unsigned long make_jiffies(long secs)
spin_lock_bh(&xfrm_state_lock);
x1 = afinfo->state_lookup(&x->id.daddr, x->id.spi, x->id.proto);
- if (x1) {
+ if (!x1) {
+ x1 = afinfo->find_acq(
+ x->props.mode, x->props.reqid, x->id.proto,
+ &x->id.daddr, &x->props.saddr, 0);
+ if (x1 && x1->id.spi != x->id.spi && x1->id.spi) {
+ xfrm_state_put(x1);
+ x1 = NULL;
+ }
+ }
+
+ if (x1 && x1->id.spi) {
xfrm_state_put(x1);
x1 = NULL;
err = -EEXIST;
goto out;
}
- x1 = afinfo->find_acq(
- x->props.mode, x->props.reqid, x->id.proto,
- &x->id.daddr, &x->props.saddr, 0);
-
__xfrm_state_insert(x);
err = 0;
for (h=0; h<maxspi-minspi+1; h++) {
spi = minspi + net_random()%(maxspi-minspi+1);
x0 = xfrm_state_lookup(&x->id.daddr, htonl(spi), x->id.proto, x->props.family);
- if (x0 == NULL) {
- x->id.spi = htonl(spi);
+ if (x0 == NULL)
break;
- }
xfrm_state_put(x0);
}
+ x->id.spi = htonl(spi);
}
if (x->id.spi) {
spin_lock_bh(&xfrm_state_lock);
if (err)
return err;
- xfrm_probe_algs();
-
x = xfrm_state_construct(p, (struct rtattr **) xfrma, &err);
if (!x)
return err;
# docproc: Preprocess .tmpl file in order to generate .sgml docs
# conmakehash: Create arrays for initializing the kernel console tables
-host-progs := conmakehash kallsyms pnmtologo bin2c
-always := $(host-progs)
+host-progs := conmakehash kallsyms modpost mk_elfconfig pnmtologo bin2c
+always := $(host-progs) empty.o
+
+modpost-objs := modpost.o file2alias.o sumversion.o
subdir-$(CONFIG_MODVERSIONS) += genksyms
-subdir-y += mod
# Let clean descend into subdirs
subdir- += basic lxdialog kconfig package
+
+# dependencies on generated files need to be listed explicitly
+
+$(obj)/modpost.o $(obj)/file2alias.o $(obj)/sumversion.o: $(obj)/elfconfig.h
+
+quiet_cmd_elfconfig = MKELF $@
+ cmd_elfconfig = $(obj)/mk_elfconfig $(ARCH) < $< > $@
+
+$(obj)/elfconfig.h: $(obj)/empty.o $(obj)/mk_elfconfig FORCE
+ $(call if_changed,elfconfig)
+
+targets += elfconfig.h
# Step 2), invoke modpost
# Includes step 3,4
quiet_cmd_modpost = MODPOST
- cmd_modpost = scripts/mod/modpost \
+ cmd_modpost = scripts/modpost \
$(if $(KBUILD_EXTMOD),-i,-o) $(symverfile) \
$(filter-out FORCE,$^)
--- /dev/null
+cmd_scripts/basic/docproc := gcc -Wp,-MD,scripts/basic/.docproc.d -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer -o scripts/basic/docproc scripts/basic/docproc.c
+
+deps_scripts/basic/docproc := \
+ scripts/basic/docproc.c \
+ /usr/include/stdio.h \
+ /usr/include/features.h \
+ /usr/include/sys/cdefs.h \
+ /usr/include/gnu/stubs.h \
+ /usr/lib/gcc-lib/i386-redhat-linux/3.3.3/include/stddef.h \
+ /usr/include/bits/types.h \
+ /usr/include/bits/wordsize.h \
+ /usr/include/bits/typesizes.h \
+ /usr/include/libio.h \
+ /usr/include/_G_config.h \
+ /usr/include/wchar.h \
+ /usr/include/bits/wchar.h \
+ /usr/include/gconv.h \
+ /usr/lib/gcc-lib/i386-redhat-linux/3.3.3/include/stdarg.h \
+ /usr/include/bits/stdio_lim.h \
+ /usr/include/bits/sys_errlist.h \
+ /usr/include/bits/stdio.h \
+ /usr/include/stdlib.h \
+ /usr/include/sys/types.h \
+ /usr/include/time.h \
+ /usr/include/endian.h \
+ /usr/include/bits/endian.h \
+ /usr/include/sys/select.h \
+ /usr/include/bits/select.h \
+ /usr/include/bits/sigset.h \
+ /usr/include/bits/time.h \
+ /usr/include/sys/sysmacros.h \
+ /usr/include/bits/pthreadtypes.h \
+ /usr/include/bits/sched.h \
+ /usr/include/alloca.h \
+ /usr/include/string.h \
+ /usr/include/bits/string.h \
+ /usr/include/bits/string2.h \
+ /usr/include/ctype.h \
+ /usr/include/unistd.h \
+ /usr/include/bits/posix_opt.h \
+ /usr/include/bits/confname.h \
+ /usr/include/getopt.h \
+ /usr/lib/gcc-lib/i386-redhat-linux/3.3.3/include/limits.h \
+ /usr/lib/gcc-lib/i386-redhat-linux/3.3.3/include/syslimits.h \
+ /usr/include/limits.h \
+ /usr/include/bits/posix1_lim.h \
+ /usr/include/bits/local_lim.h \
+ /usr/include/linux/limits.h \
+ /usr/include/bits/posix2_lim.h \
+ /usr/include/sys/wait.h \
+ /usr/include/signal.h \
+ /usr/include/bits/signum.h \
+ /usr/include/bits/siginfo.h \
+ /usr/include/bits/sigaction.h \
+ /usr/include/bits/sigcontext.h \
+ /usr/include/asm/sigcontext.h \
+ /usr/include/bits/sigstack.h \
+ /usr/include/bits/sigthread.h \
+ /usr/include/sys/resource.h \
+ /usr/include/bits/resource.h \
+ /usr/include/bits/waitflags.h \
+ /usr/include/bits/waitstatus.h \
+
+scripts/basic/docproc: $(deps_scripts/basic/docproc)
+
+$(deps_scripts/basic/docproc):
--- /dev/null
+cmd_scripts/basic/fixdep := gcc -Wp,-MD,scripts/basic/.fixdep.d -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer -o scripts/basic/fixdep scripts/basic/fixdep.c
+
+deps_scripts/basic/fixdep := \
+ scripts/basic/fixdep.c \
+ $(wildcard include/config/his/driver.h) \
+ $(wildcard include/config/my/option.h) \
+ $(wildcard include/config/.h) \
+ $(wildcard include/config/foo.h) \
+ /usr/include/sys/types.h \
+ /usr/include/features.h \
+ /usr/include/sys/cdefs.h \
+ /usr/include/gnu/stubs.h \
+ /usr/include/bits/types.h \
+ /usr/include/bits/wordsize.h \
+ /usr/lib/gcc-lib/i386-redhat-linux/3.3.3/include/stddef.h \
+ /usr/include/bits/typesizes.h \
+ /usr/include/time.h \
+ /usr/include/endian.h \
+ /usr/include/bits/endian.h \
+ /usr/include/sys/select.h \
+ /usr/include/bits/select.h \
+ /usr/include/bits/sigset.h \
+ /usr/include/bits/time.h \
+ /usr/include/sys/sysmacros.h \
+ /usr/include/bits/pthreadtypes.h \
+ /usr/include/bits/sched.h \
+ /usr/include/sys/stat.h \
+ /usr/include/bits/stat.h \
+ /usr/include/sys/mman.h \
+ /usr/include/bits/mman.h \
+ /usr/include/unistd.h \
+ /usr/include/bits/posix_opt.h \
+ /usr/include/bits/confname.h \
+ /usr/include/getopt.h \
+ /usr/include/fcntl.h \
+ /usr/include/bits/fcntl.h \
+ /usr/include/string.h \
+ /usr/include/bits/string.h \
+ /usr/include/bits/string2.h \
+ /usr/include/stdlib.h \
+ /usr/include/alloca.h \
+ /usr/include/stdio.h \
+ /usr/include/libio.h \
+ /usr/include/_G_config.h \
+ /usr/include/wchar.h \
+ /usr/include/bits/wchar.h \
+ /usr/include/gconv.h \
+ /usr/lib/gcc-lib/i386-redhat-linux/3.3.3/include/stdarg.h \
+ /usr/include/bits/stdio_lim.h \
+ /usr/include/bits/sys_errlist.h \
+ /usr/include/bits/stdio.h \
+ /usr/lib/gcc-lib/i386-redhat-linux/3.3.3/include/limits.h \
+ /usr/lib/gcc-lib/i386-redhat-linux/3.3.3/include/syslimits.h \
+ /usr/include/limits.h \
+ /usr/include/bits/posix1_lim.h \
+ /usr/include/bits/local_lim.h \
+ /usr/include/linux/limits.h \
+ /usr/include/bits/posix2_lim.h \
+ /usr/include/ctype.h \
+ /usr/include/netinet/in.h \
+ /usr/include/stdint.h \
+ /usr/include/sys/socket.h \
+ /usr/include/sys/uio.h \
+ /usr/include/bits/uio.h \
+ /usr/include/bits/socket.h \
+ /usr/include/bits/sockaddr.h \
+ /usr/include/asm/socket.h \
+ /usr/include/asm/sockios.h \
+ /usr/include/bits/in.h \
+ /usr/include/bits/byteswap.h \
+
+scripts/basic/fixdep: $(deps_scripts/basic/fixdep)
+
+$(deps_scripts/basic/fixdep):
--- /dev/null
+cmd_scripts/basic/split-include := gcc -Wp,-MD,scripts/basic/.split-include.d -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer -o scripts/basic/split-include scripts/basic/split-include.c
+
+deps_scripts/basic/split-include := \
+ scripts/basic/split-include.c \
+ $(wildcard include/config/.h) \
+ /usr/include/sys/stat.h \
+ /usr/include/features.h \
+ /usr/include/sys/cdefs.h \
+ /usr/include/gnu/stubs.h \
+ /usr/include/bits/types.h \
+ /usr/include/bits/wordsize.h \
+ /usr/lib/gcc-lib/i386-redhat-linux/3.3.3/include/stddef.h \
+ /usr/include/bits/typesizes.h \
+ /usr/include/time.h \
+ /usr/include/bits/stat.h \
+ /usr/include/sys/types.h \
+ /usr/include/endian.h \
+ /usr/include/bits/endian.h \
+ /usr/include/sys/select.h \
+ /usr/include/bits/select.h \
+ /usr/include/bits/sigset.h \
+ /usr/include/bits/time.h \
+ /usr/include/sys/sysmacros.h \
+ /usr/include/bits/pthreadtypes.h \
+ /usr/include/bits/sched.h \
+ /usr/include/ctype.h \
+ /usr/include/errno.h \
+ /usr/include/bits/errno.h \
+ /usr/include/linux/errno.h \
+ /usr/include/asm/errno.h \
+ /usr/include/fcntl.h \
+ /usr/include/bits/fcntl.h \
+ /usr/include/stdio.h \
+ /usr/include/libio.h \
+ /usr/include/_G_config.h \
+ /usr/include/wchar.h \
+ /usr/include/bits/wchar.h \
+ /usr/include/gconv.h \
+ /usr/lib/gcc-lib/i386-redhat-linux/3.3.3/include/stdarg.h \
+ /usr/include/bits/stdio_lim.h \
+ /usr/include/bits/sys_errlist.h \
+ /usr/include/bits/stdio.h \
+ /usr/include/stdlib.h \
+ /usr/include/alloca.h \
+ /usr/include/string.h \
+ /usr/include/bits/string.h \
+ /usr/include/bits/string2.h \
+ /usr/include/unistd.h \
+ /usr/include/bits/posix_opt.h \
+ /usr/include/bits/confname.h \
+ /usr/include/getopt.h \
+
+scripts/basic/split-include: $(deps_scripts/basic/split-include)
+
+$(deps_scripts/basic/split-include):
/* Big exception to the "don't include kernel headers into userspace, which
* even potentially has different endianness and word sizes, since
* we handle those differences explicitly below */
-#include "../../include/linux/mod_devicetable.h"
+#include "../include/linux/mod_devicetable.h"
#define ADD(str, sep, cond, field) \
do { \
71, 94, 92, 82, 0, 0, 62, 0, 63, 0,
62, 63, 0, 64, 0, 65, 0, 5, 0, 16,
0, 20, 0, 11, 0, 13, 0, 66, 0, 70,
- 0, 27, 46, 62, 47, 0, 21, 36, 0, 23,
+ 0, 27, 46, 65, 47, 0, 21, 36, 0, 23,
36, 0, 10, 36, 0, 21, 36, 84, 0, 23,
36, 84, 0, 10, 36, 31, 0, 10, 31, 0,
21, 84, 0, 23, 84, 0, 7, 0, 18, 0,
};
static const short yypact[] = {-32768,
- 15,-32768, 197,-32768, 23,-32768,-32768,-32768,-32768,-32768,
+ 19,-32768, 175,-32768, 32,-32768,-32768,-32768,-32768,-32768,
-18,-32768,-32768,-32768,-32768,-32768,-32768,-32768,-32768,-32768,
--32768, -28,-32768, -25,-32768,-32768,-32768, -26, -22, -12,
--32768,-32768,-32768,-32768, 49, 493,-32768,-32768,-32768,-32768,
--32768,-32768,-32768,-32768,-32768,-32768,-32768, 27, -8, 101,
--32768, 493, -8,-32768, 493, 10,-32768,-32768, 11, 9,
- 18, 26,-32768, 49, -15, -13,-32768,-32768,-32768, 25,
- 24, 48, 149,-32768,-32768, 49,-32768, 414, 39, 40,
- 47,-32768, 9,-32768,-32768, 49,-32768,-32768,-32768, 66,
--32768, 241,-32768,-32768, 50,-32768, 5, 65, 42, 66,
- 17, 56, 55,-32768,-32768,-32768, 60,-32768, 75,-32768,
- 80,-32768,-32768,-32768,-32768,-32768, 81, 82, 370, 85,
- 98, 89,-32768,-32768, 88,-32768, 91,-32768,-32768,-32768,
--32768, 284,-32768, 24,-32768, 103,-32768,-32768,-32768,-32768,
--32768, 8, 43,-32768, 30,-32768,-32768, 457,-32768,-32768,
- 92, 93,-32768,-32768, 95,-32768, 96,-32768,-32768, 327,
--32768,-32768,-32768,-32768,-32768,-32768, 99, 104,-32768,-32768,
- 148,-32768
+-32768, -30,-32768, -26,-32768,-32768,-32768, -32, -10, -2,
+-32768,-32768,-32768,-32768, 2, 428,-32768,-32768,-32768,-32768,
+-32768,-32768,-32768,-32768,-32768,-32768,-32768, 34, 12, 79,
+-32768, 428, 12,-32768, 455, 33,-32768,-32768, 15, 14,
+ 35, 29,-32768, 2, -14, -21,-32768,-32768,-32768, 67,
+ 31, 37, 127,-32768,-32768, 2,-32768, 54, 60, 66,
+ 69,-32768, 14,-32768,-32768, 2,-32768,-32768,-32768, 84,
+-32768, 219,-32768,-32768, 70,-32768, 20, 91, 72, 84,
+ -20, 74, 81,-32768,-32768,-32768, 86,-32768, 102,-32768,
+ 106,-32768,-32768,-32768,-32768,-32768, 109, 108, 348, 112,
+ 126, 117,-32768,-32768, 118,-32768, 122,-32768,-32768,-32768,
+-32768, 262,-32768, 31,-32768, 131,-32768,-32768,-32768,-32768,
+-32768, 7, 120,-32768, -9,-32768,-32768, 392,-32768,-32768,
+ 125, 130,-32768,-32768, 132,-32768, 159,-32768,-32768, 305,
+-32768,-32768,-32768,-32768,-32768,-32768, 160, 161,-32768,-32768,
+ 174,-32768
};
static const short yypgoto[] = {-32768,
- 152,-32768,-32768,-32768, 119,-32768,-32768, 94, 0, -55,
- -35,-32768,-32768,-32768, -69,-32768,-32768, -56, -30,-32768,
- -76,-32768, -122,-32768,-32768, 29, -62,-32768,-32768,-32768,
--32768, -17,-32768,-32768, 105,-32768,-32768, 52, 86, 83,
+ 208,-32768,-32768,-32768, 158,-32768,-32768, 128, 0, -90,
+ -36,-32768, 157,-32768, -70,-32768,-32768, -51, -31,-32768,
+ -40,-32768, -125,-32768,-32768, 65, -97,-32768,-32768,-32768,
+-32768, -19,-32768,-32768, 143,-32768,-32768, 83, 124, 141,
-32768,-32768,-32768
};
-#define YYLAST 533
-
-
-static const short yytable[] = { 78,
- 67, 99, 35, 84, 65, 125, 54, 49, 155, 152,
- 53, 80, 47, 88, 171, 89, 9, 48, 91, 55,
- 127, 50, 129, 56, 50, 18, 114, 99, 81, 99,
- 57, 69, 92, 87, 27, 77, 119, 168, 31, -89,
- 126, 50, 67, 140, 96, 79, 58, 156, 131, 143,
- 97, 76, 60, 142, -89, 60, 59, 68, 60, 95,
- 85, 159, 132, 96, 99, 45, 46, 93, 94, 97,
- 86, 60, 143, 143, 98, 160, 119, 126, 140, 157,
- 158, 96, 156, 67, 58, 111, 112, 97, 142, 60,
- 60, 106, 119, 113, 59, 116, 60, 128, 133, 134,
- 98, 70, 93, 88, 119, 6, 7, 8, 9, 10,
- 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
- 21, 22, 135, 24, 25, 26, 27, 28, 139, 136,
- 31, 146, 147, 148, 149, 154, -19, 150, 163, 164,
- 32, 165, 166, -19, -103, 169, -19, 172, -19, 107,
- 170, -19, 4, 6, 7, 8, 9, 10, 11, 12,
+#define YYLAST 495
+
+
+static const short yytable[] = { 67,
+ 99, 119, 35, 65, 54, 49, 152, 155, 84, 53,
+ 91, 131, 47, 55, 88, 80, 89, 48, 171, 50,
+ 125, 9, 159, 50, 92, 132, 99, 81, 99, 69,
+ 18, 114, 87, 77, 168, 56, 160, 58, -89, 27,
+ 57, 119, 140, 31, 157, 158, 156, 59, 143, 60,
+ 58, 76, 142, -89, 60, 126, 127, 119, 129, 96,
+ 59, 50, 60, 99, 68, 97, 95, 60, 79, 119,
+ 96, 143, 143, 86, 45, 46, 97, 85, 60, 70,
+ 106, 98, 67, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
- 75, 24, 25, 26, 27, 28, 162, 108, 31, 115,
- 124, 0, 130, 0, -19, 153, 0, 0, 32, 0,
- 0, -19, -104, 0, -19, 0, -19, 5, 0, -19,
- 0, 6, 7, 8, 9, 10, 11, 12, 13, 14,
- 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
- 25, 26, 27, 28, 29, 30, 31, 0, 0, 0,
- 0, 0, -19, 0, 0, 0, 32, 0, 0, -19,
- 0, 118, -19, 0, -19, 6, 7, 8, 9, 10,
- 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
- 21, 22, 0, 24, 25, 26, 27, 28, 0, 0,
- 31, 0, 0, 0, 0, -82, 0, 0, 0, 0,
- 32, 0, 0, 0, 151, 0, 0, -82, 6, 7,
- 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
- 18, 19, 20, 21, 22, 0, 24, 25, 26, 27,
- 28, 0, 0, 31, 0, 0, 0, 0, -82, 0,
- 0, 0, 0, 32, 0, 0, 0, 167, 0, 0,
- -82, 6, 7, 8, 9, 10, 11, 12, 13, 14,
- 15, 16, 17, 18, 19, 20, 21, 22, 0, 24,
- 25, 26, 27, 28, 0, 0, 31, 0, 0, 0,
- 0, -82, 0, 0, 0, 0, 32, 0, 0, 0,
- 0, 0, 0, -82, 6, 7, 8, 9, 10, 11,
- 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
- 22, 0, 24, 25, 26, 27, 28, 0, 0, 31,
- 0, 0, 0, 0, 0, 140, 0, 0, 0, 141,
- 0, 0, 0, 0, 0, 142, 0, 60, 6, 7,
- 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
- 18, 19, 20, 21, 22, 0, 24, 25, 26, 27,
- 28, 0, 0, 31, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 32, 0, 0, 0, 0, 0, 0,
- 110, 6, 7, 8, 9, 10, 11, 12, 13, 14,
- 15, 16, 17, 18, 19, 20, 21, 22, 0, 24,
- 25, 26, 27, 28, 0, 0, 31, 0, 0, 0,
- 0, 161, 0, 0, 0, 0, 32, 6, 7, 8,
- 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
- 19, 20, 21, 22, 0, 24, 25, 26, 27, 28,
- 0, 0, 31, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 32
+ 110, 24, 25, 26, 27, 28, 111, 126, 31, 93,
+ 94, 96, 112, 116, -19, 113, 133, 97, 32, 60,
+ 98, -19, -103, 128, -19, 134, -19, 107, 93, -19,
+ 88, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 21, 22, 135, 24,
+ 25, 26, 27, 28, 139, 140, 31, 136, 146, 156,
+ 147, 148, -19, 154, 149, 142, 32, 60, 150, -19,
+ -104, 163, -19, 172, -19, 5, 164, -19, 165, 6,
+ 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
+ 27, 28, 29, 30, 31, 166, 169, 170, 4, 75,
+ -19, 78, 162, 115, 32, 108, 153, -19, 124, 118,
+ -19, 0, -19, 6, 7, 8, 9, 10, 11, 12,
+ 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
+ 130, 24, 25, 26, 27, 28, 0, 0, 31, 0,
+ 0, 0, 0, -82, 0, 0, 0, 0, 32, 0,
+ 0, 0, 151, 0, 0, -82, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 0, 24, 25, 26, 27, 28, 0,
+ 0, 31, 0, 0, 0, 0, -82, 0, 0, 0,
+ 0, 32, 0, 0, 0, 167, 0, 0, -82, 6,
+ 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ 17, 18, 19, 20, 21, 22, 0, 24, 25, 26,
+ 27, 28, 0, 0, 31, 0, 0, 0, 0, -82,
+ 0, 0, 0, 0, 32, 0, 0, 0, 0, 0,
+ 0, -82, 6, 7, 8, 9, 10, 11, 12, 13,
+ 14, 15, 16, 17, 18, 19, 20, 21, 22, 0,
+ 24, 25, 26, 27, 28, 0, 0, 31, 0, 0,
+ 0, 0, 0, 140, 0, 0, 0, 141, 0, 0,
+ 0, 0, 0, 142, 0, 60, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 0, 24, 25, 26, 27, 28, 0,
+ 0, 31, 0, 0, 0, 0, 161, 0, 0, 0,
+ 0, 32, 6, 7, 8, 9, 10, 11, 12, 13,
+ 14, 15, 16, 17, 18, 19, 20, 21, 22, 0,
+ 24, 25, 26, 27, 28, 0, 0, 31, 0, 0,
+ 7, 8, 9, 10, 11, 0, 13, 32, 15, 16,
+ 0, 18, 19, 20, 0, 22, 0, 24, 25, 26,
+ 27, 28, 0, 0, 31, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 32
};
-static const short yycheck[] = { 55,
- 36, 71, 3, 60, 35, 1, 24, 36, 1, 132,
- 36, 1, 31, 29, 0, 31, 8, 36, 32, 46,
- 97, 50, 99, 46, 50, 17, 83, 97, 59, 99,
- 43, 49, 46, 64, 26, 53, 92, 160, 30, 32,
- 36, 50, 78, 36, 40, 36, 36, 40, 32, 119,
- 46, 52, 48, 46, 47, 48, 46, 31, 48, 36,
- 43, 32, 46, 40, 134, 43, 44, 43, 44, 46,
- 45, 48, 142, 143, 51, 46, 132, 36, 36, 142,
- 143, 40, 40, 119, 36, 47, 47, 46, 46, 48,
- 48, 44, 148, 47, 46, 30, 48, 33, 43, 45,
- 51, 1, 43, 29, 160, 5, 6, 7, 8, 9,
- 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
- 20, 21, 43, 23, 24, 25, 26, 27, 47, 49,
- 30, 47, 35, 45, 47, 33, 36, 47, 47, 47,
- 40, 47, 47, 43, 44, 47, 46, 0, 48, 1,
- 47, 51, 1, 5, 6, 7, 8, 9, 10, 11,
+static const short yycheck[] = { 36,
+ 71, 92, 3, 35, 24, 36, 132, 1, 60, 36,
+ 32, 32, 31, 46, 29, 1, 31, 36, 0, 50,
+ 1, 8, 32, 50, 46, 46, 97, 59, 99, 49,
+ 17, 83, 64, 53, 160, 46, 46, 36, 32, 26,
+ 43, 132, 36, 30, 142, 143, 40, 46, 119, 48,
+ 36, 52, 46, 47, 48, 36, 97, 148, 99, 40,
+ 46, 50, 48, 134, 31, 46, 36, 48, 36, 160,
+ 40, 142, 143, 45, 43, 44, 46, 43, 48, 1,
+ 44, 51, 119, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
- 52, 23, 24, 25, 26, 27, 148, 73, 30, 86,
- 95, -1, 100, -1, 36, 134, -1, -1, 40, -1,
- -1, 43, 44, -1, 46, -1, 48, 1, -1, 51,
- -1, 5, 6, 7, 8, 9, 10, 11, 12, 13,
- 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
- 24, 25, 26, 27, 28, 29, 30, -1, -1, -1,
- -1, -1, 36, -1, -1, -1, 40, -1, -1, 43,
- -1, 1, 46, -1, 48, 5, 6, 7, 8, 9,
- 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
- 20, 21, -1, 23, 24, 25, 26, 27, -1, -1,
- 30, -1, -1, -1, -1, 35, -1, -1, -1, -1,
- 40, -1, -1, -1, 1, -1, -1, 47, 5, 6,
- 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
- 17, 18, 19, 20, 21, -1, 23, 24, 25, 26,
- 27, -1, -1, 30, -1, -1, -1, -1, 35, -1,
- -1, -1, -1, 40, -1, -1, -1, 1, -1, -1,
- 47, 5, 6, 7, 8, 9, 10, 11, 12, 13,
- 14, 15, 16, 17, 18, 19, 20, 21, -1, 23,
- 24, 25, 26, 27, -1, -1, 30, -1, -1, -1,
- -1, 35, -1, -1, -1, -1, 40, -1, -1, -1,
- -1, -1, -1, 47, 5, 6, 7, 8, 9, 10,
- 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
- 21, -1, 23, 24, 25, 26, 27, -1, -1, 30,
- -1, -1, -1, -1, -1, 36, -1, -1, -1, 40,
- -1, -1, -1, -1, -1, 46, -1, 48, 5, 6,
- 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
- 17, 18, 19, 20, 21, -1, 23, 24, 25, 26,
- 27, -1, -1, 30, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, 40, -1, -1, -1, -1, -1, -1,
- 47, 5, 6, 7, 8, 9, 10, 11, 12, 13,
- 14, 15, 16, 17, 18, 19, 20, 21, -1, 23,
- 24, 25, 26, 27, -1, -1, 30, -1, -1, -1,
- -1, 35, -1, -1, -1, -1, 40, 5, 6, 7,
- 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
- 18, 19, 20, 21, -1, 23, 24, 25, 26, 27,
- -1, -1, 30, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 40
+ 47, 23, 24, 25, 26, 27, 47, 36, 30, 43,
+ 44, 40, 47, 30, 36, 47, 43, 46, 40, 48,
+ 51, 43, 44, 33, 46, 45, 48, 1, 43, 51,
+ 29, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+ 14, 15, 16, 17, 18, 19, 20, 21, 43, 23,
+ 24, 25, 26, 27, 47, 36, 30, 49, 47, 40,
+ 35, 45, 36, 33, 47, 46, 40, 48, 47, 43,
+ 44, 47, 46, 0, 48, 1, 47, 51, 47, 5,
+ 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
+ 26, 27, 28, 29, 30, 47, 47, 47, 1, 52,
+ 36, 55, 148, 86, 40, 73, 134, 43, 95, 1,
+ 46, -1, 48, 5, 6, 7, 8, 9, 10, 11,
+ 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+ 100, 23, 24, 25, 26, 27, -1, -1, 30, -1,
+ -1, -1, -1, 35, -1, -1, -1, -1, 40, -1,
+ -1, -1, 1, -1, -1, 47, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+ 19, 20, 21, -1, 23, 24, 25, 26, 27, -1,
+ -1, 30, -1, -1, -1, -1, 35, -1, -1, -1,
+ -1, 40, -1, -1, -1, 1, -1, -1, 47, 5,
+ 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, -1, 23, 24, 25,
+ 26, 27, -1, -1, 30, -1, -1, -1, -1, 35,
+ -1, -1, -1, -1, 40, -1, -1, -1, -1, -1,
+ -1, 47, 5, 6, 7, 8, 9, 10, 11, 12,
+ 13, 14, 15, 16, 17, 18, 19, 20, 21, -1,
+ 23, 24, 25, 26, 27, -1, -1, 30, -1, -1,
+ -1, -1, -1, 36, -1, -1, -1, 40, -1, -1,
+ -1, -1, -1, 46, -1, 48, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+ 19, 20, 21, -1, 23, 24, 25, 26, 27, -1,
+ -1, 30, -1, -1, -1, -1, 35, -1, -1, -1,
+ -1, 40, 5, 6, 7, 8, 9, 10, 11, 12,
+ 13, 14, 15, 16, 17, 18, 19, 20, 21, -1,
+ 23, 24, 25, 26, 27, -1, -1, 30, -1, -1,
+ 6, 7, 8, 9, 10, -1, 12, 40, 14, 15,
+ -1, 17, 18, 19, -1, 21, -1, 23, 24, 25,
+ 26, 27, -1, -1, 30, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, 40
};
/* -*-C-*- Note some compilers choke on comments on `#line' lines. */
#line 3 "/usr/lib/bison.simple"
--- /dev/null
+cmd_scripts/kconfig/conf := gcc -o scripts/kconfig/conf scripts/kconfig/conf.o -Wl,-rpath,\$$ORIGIN -Lscripts/kconfig -lkconfig
--- /dev/null
+cmd_scripts/kconfig/conf.o := gcc -Wp,-MD,scripts/kconfig/.conf.o.d -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer -c -o scripts/kconfig/conf.o scripts/kconfig/conf.c
+
+deps_scripts/kconfig/conf.o := \
+ scripts/kconfig/conf.c \
+ $(wildcard include/config/.h) \
+ /usr/include/ctype.h \
+ /usr/include/features.h \
+ /usr/include/sys/cdefs.h \
+ /usr/include/gnu/stubs.h \
+ /usr/include/bits/types.h \
+ /usr/include/bits/wordsize.h \
+ /usr/lib/gcc-lib/i386-redhat-linux/3.3.3/include/stddef.h \
+ /usr/include/bits/typesizes.h \
+ /usr/include/endian.h \
+ /usr/include/bits/endian.h \
+ /usr/include/stdlib.h \
+ /usr/include/sys/types.h \
+ /usr/include/time.h \
+ /usr/include/sys/select.h \
+ /usr/include/bits/select.h \
+ /usr/include/bits/sigset.h \
+ /usr/include/bits/time.h \
+ /usr/include/sys/sysmacros.h \
+ /usr/include/bits/pthreadtypes.h \
+ /usr/include/bits/sched.h \
+ /usr/include/alloca.h \
+ /usr/include/string.h \
+ /usr/include/bits/string.h \
+ /usr/include/bits/string2.h \
+ /usr/include/unistd.h \
+ /usr/include/bits/posix_opt.h \
+ /usr/include/bits/confname.h \
+ /usr/include/getopt.h \
+ /usr/include/sys/stat.h \
+ /usr/include/bits/stat.h \
+ scripts/kconfig/lkc.h \
+ scripts/kconfig/expr.h \
+ /usr/include/stdio.h \
+ /usr/include/libio.h \
+ /usr/include/_G_config.h \
+ /usr/include/wchar.h \
+ /usr/include/bits/wchar.h \
+ /usr/include/gconv.h \
+ /usr/lib/gcc-lib/i386-redhat-linux/3.3.3/include/stdarg.h \
+ /usr/include/bits/stdio_lim.h \
+ /usr/include/bits/sys_errlist.h \
+ /usr/include/bits/stdio.h \
+ /usr/lib/gcc-lib/i386-redhat-linux/3.3.3/include/stdbool.h \
+ scripts/kconfig/lkc_proto.h \
+
+scripts/kconfig/conf.o: $(deps_scripts/kconfig/conf.o)
+
+$(deps_scripts/kconfig/conf.o):
--- /dev/null
+cmd_scripts/kconfig/libkconfig.so := gcc -shared -o scripts/kconfig/libkconfig.so scripts/kconfig/zconf.tab.o
--- /dev/null
+cmd_scripts/kconfig/mconf.o := gcc -Wp,-MD,scripts/kconfig/.mconf.o.d -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer -c -o scripts/kconfig/mconf.o scripts/kconfig/mconf.c
+
+deps_scripts/kconfig/mconf.o := \
+ scripts/kconfig/mconf.c \
+ $(wildcard include/config/.h) \
+ $(wildcard include/config/mode.h) \
+ /usr/include/sys/ioctl.h \
+ /usr/include/features.h \
+ /usr/include/sys/cdefs.h \
+ /usr/include/gnu/stubs.h \
+ /usr/include/bits/ioctls.h \
+ /usr/include/asm/ioctls.h \
+ /usr/include/asm/ioctl.h \
+ /usr/include/bits/ioctl-types.h \
+ /usr/include/sys/ttydefaults.h \
+ /usr/include/sys/wait.h \
+ /usr/include/signal.h \
+ /usr/include/bits/sigset.h \
+ /usr/include/bits/types.h \
+ /usr/include/bits/wordsize.h \
+ /usr/lib/gcc-lib/i386-redhat-linux/3.3.3/include/stddef.h \
+ /usr/include/bits/typesizes.h \
+ /usr/include/bits/signum.h \
+ /usr/include/time.h \
+ /usr/include/bits/siginfo.h \
+ /usr/include/bits/sigaction.h \
+ /usr/include/bits/sigcontext.h \
+ /usr/include/asm/sigcontext.h \
+ /usr/include/bits/sigstack.h \
+ /usr/include/bits/pthreadtypes.h \
+ /usr/include/bits/sched.h \
+ /usr/include/bits/sigthread.h \
+ /usr/include/sys/resource.h \
+ /usr/include/bits/resource.h \
+ /usr/include/bits/time.h \
+ /usr/include/bits/waitflags.h \
+ /usr/include/bits/waitstatus.h \
+ /usr/include/endian.h \
+ /usr/include/bits/endian.h \
+ /usr/include/ctype.h \
+ /usr/include/errno.h \
+ /usr/include/bits/errno.h \
+ /usr/include/linux/errno.h \
+ /usr/include/asm/errno.h \
+ /usr/include/fcntl.h \
+ /usr/include/bits/fcntl.h \
+ /usr/include/sys/types.h \
+ /usr/include/sys/select.h \
+ /usr/include/bits/select.h \
+ /usr/include/sys/sysmacros.h \
+ /usr/lib/gcc-lib/i386-redhat-linux/3.3.3/include/limits.h \
+ /usr/lib/gcc-lib/i386-redhat-linux/3.3.3/include/syslimits.h \
+ /usr/include/limits.h \
+ /usr/include/bits/posix1_lim.h \
+ /usr/include/bits/local_lim.h \
+ /usr/include/linux/limits.h \
+ /usr/include/bits/posix2_lim.h \
+ /usr/lib/gcc-lib/i386-redhat-linux/3.3.3/include/stdarg.h \
+ /usr/include/stdlib.h \
+ /usr/include/alloca.h \
+ /usr/include/string.h \
+ /usr/include/bits/string.h \
+ /usr/include/bits/string2.h \
+ /usr/include/termios.h \
+ /usr/include/bits/termios.h \
+ /usr/include/unistd.h \
+ /usr/include/bits/posix_opt.h \
+ /usr/include/bits/confname.h \
+ /usr/include/getopt.h \
+ scripts/kconfig/lkc.h \
+ scripts/kconfig/expr.h \
+ /usr/include/stdio.h \
+ /usr/include/libio.h \
+ /usr/include/_G_config.h \
+ /usr/include/wchar.h \
+ /usr/include/bits/wchar.h \
+ /usr/include/gconv.h \
+ /usr/include/bits/stdio_lim.h \
+ /usr/include/bits/sys_errlist.h \
+ /usr/include/bits/stdio.h \
+ /usr/lib/gcc-lib/i386-redhat-linux/3.3.3/include/stdbool.h \
+ scripts/kconfig/lkc_proto.h \
+
+scripts/kconfig/mconf.o: $(deps_scripts/kconfig/mconf.o)
+
+$(deps_scripts/kconfig/mconf.o):
--- /dev/null
+cmd_scripts/kconfig/zconf.tab.o := gcc -Wp,-MD,scripts/kconfig/.zconf.tab.o.d -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer -Iscripts/kconfig -fPIC -c -o scripts/kconfig/zconf.tab.o scripts/kconfig/zconf.tab.c
+
+deps_scripts/kconfig/zconf.tab.o := \
+ scripts/kconfig/zconf.tab.c \
+ /usr/include/ctype.h \
+ /usr/include/features.h \
+ /usr/include/sys/cdefs.h \
+ /usr/include/gnu/stubs.h \
+ /usr/include/bits/types.h \
+ /usr/include/bits/wordsize.h \
+ /usr/lib/gcc-lib/i386-redhat-linux/3.3.3/include/stddef.h \
+ /usr/include/bits/typesizes.h \
+ /usr/include/endian.h \
+ /usr/include/bits/endian.h \
+ /usr/lib/gcc-lib/i386-redhat-linux/3.3.3/include/stdarg.h \
+ /usr/include/stdio.h \
+ /usr/include/libio.h \
+ /usr/include/_G_config.h \
+ /usr/include/wchar.h \
+ /usr/include/bits/wchar.h \
+ /usr/include/gconv.h \
+ /usr/include/bits/stdio_lim.h \
+ /usr/include/bits/sys_errlist.h \
+ /usr/include/bits/stdio.h \
+ /usr/include/stdlib.h \
+ /usr/include/sys/types.h \
+ /usr/include/time.h \
+ /usr/include/sys/select.h \
+ /usr/include/bits/select.h \
+ /usr/include/bits/sigset.h \
+ /usr/include/bits/time.h \
+ /usr/include/sys/sysmacros.h \
+ /usr/include/bits/pthreadtypes.h \
+ /usr/include/bits/sched.h \
+ /usr/include/alloca.h \
+ /usr/include/string.h \
+ /usr/include/bits/string.h \
+ /usr/include/bits/string2.h \
+ /usr/lib/gcc-lib/i386-redhat-linux/3.3.3/include/stdbool.h \
+ scripts/kconfig/lkc.h \
+ scripts/kconfig/expr.h \
+ scripts/kconfig/lkc_proto.h \
+ scripts/kconfig/lex.zconf.c \
+ /usr/include/errno.h \
+ /usr/include/bits/errno.h \
+ /usr/include/linux/errno.h \
+ /usr/include/asm/errno.h \
+ /usr/lib/gcc-lib/i386-redhat-linux/3.3.3/include/limits.h \
+ /usr/lib/gcc-lib/i386-redhat-linux/3.3.3/include/syslimits.h \
+ /usr/include/limits.h \
+ /usr/include/bits/posix1_lim.h \
+ /usr/include/bits/local_lim.h \
+ /usr/include/linux/limits.h \
+ /usr/include/bits/posix2_lim.h \
+ /usr/include/unistd.h \
+ /usr/include/bits/posix_opt.h \
+ /usr/include/bits/confname.h \
+ /usr/include/getopt.h \
+ scripts/kconfig/confdata.c \
+ $(wildcard include/config/.h) \
+ /usr/include/sys/stat.h \
+ /usr/include/bits/stat.h \
+ scripts/kconfig/expr.c \
+ scripts/kconfig/symbol.c \
+ /usr/include/sys/utsname.h \
+ /usr/include/bits/utsname.h \
+ scripts/kconfig/menu.c \
+
+scripts/kconfig/zconf.tab.o: $(deps_scripts/kconfig/zconf.tab.o)
+
+$(deps_scripts/kconfig/zconf.tab.o):
# Kernel configuration targets
# These targets are used from top-level makefile
-export LD_LIBRARY_PATH = scripts/kconfig
-
.PHONY: oldconfig xconfig gconfig menuconfig config silentoldconfig
xconfig: $(obj)/qconf
--- /dev/null
+
+#line 3 "lex.zconf.c"
+
+#define YY_INT_ALIGNED short int
+
+/* A lexical scanner generated by flex */
+
+#define FLEX_SCANNER
+#define YY_FLEX_MAJOR_VERSION 2
+#define YY_FLEX_MINOR_VERSION 5
+#define YY_FLEX_SUBMINOR_VERSION 31
+#if YY_FLEX_SUBMINOR_VERSION > 0
+#define FLEX_BETA
+#endif
+
+/* First, we deal with platform-specific or compiler-specific issues. */
+
+/* begin standard C headers. */
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <stdlib.h>
+
+/* end standard C headers. */
+
+/* flex integer type definitions */
+
+#ifndef FLEXINT_H
+#define FLEXINT_H
+
+/* C99 systems have <inttypes.h>. Non-C99 systems may or may not. */
+
+#if defined __STDC_VERSION__ && __STDC_VERSION__ >= 199901L
+#include <inttypes.h>
+typedef int8_t flex_int8_t;
+typedef uint8_t flex_uint8_t;
+typedef int16_t flex_int16_t;
+typedef uint16_t flex_uint16_t;
+typedef int32_t flex_int32_t;
+typedef uint32_t flex_uint32_t;
+#else
+typedef signed char flex_int8_t;
+typedef short int flex_int16_t;
+typedef int flex_int32_t;
+typedef unsigned char flex_uint8_t;
+typedef unsigned short int flex_uint16_t;
+typedef unsigned int flex_uint32_t;
+#endif /* ! C99 */
+
+/* Limits of integral types. */
+#ifndef INT8_MIN
+#define INT8_MIN (-128)
+#endif
+#ifndef INT16_MIN
+#define INT16_MIN (-32767-1)
+#endif
+#ifndef INT32_MIN
+#define INT32_MIN (-2147483647-1)
+#endif
+#ifndef INT8_MAX
+#define INT8_MAX (127)
+#endif
+#ifndef INT16_MAX
+#define INT16_MAX (32767)
+#endif
+#ifndef INT32_MAX
+#define INT32_MAX (2147483647)
+#endif
+#ifndef UINT8_MAX
+#define UINT8_MAX (255U)
+#endif
+#ifndef UINT16_MAX
+#define UINT16_MAX (65535U)
+#endif
+#ifndef UINT32_MAX
+#define UINT32_MAX (4294967295U)
+#endif
+
+#endif /* ! FLEXINT_H */
+
+#ifdef __cplusplus
+
+/* The "const" storage-class-modifier is valid. */
+#define YY_USE_CONST
+
+#else /* ! __cplusplus */
+
+#if __STDC__
+
+#define YY_USE_CONST
+
+#endif /* __STDC__ */
+#endif /* ! __cplusplus */
+
+#ifdef YY_USE_CONST
+#define yyconst const
+#else
+#define yyconst
+#endif
+
+/* Returned upon end-of-file. */
+#define YY_NULL 0
+
+/* Promotes a possibly negative, possibly signed char to an unsigned
+ * integer for use as an array index. If the signed char is negative,
+ * we want to instead treat it as an 8-bit unsigned char, hence the
+ * double cast.
+ */
+#define YY_SC_TO_UI(c) ((unsigned int) (unsigned char) c)
+
+/* Enter a start condition. This macro really ought to take a parameter,
+ * but we do it the disgusting crufty way forced on us by the ()-less
+ * definition of BEGIN.
+ */
+#define BEGIN (yy_start) = 1 + 2 *
+
+/* Translate the current start state into a value that can be later handed
+ * to BEGIN to return to the state. The YYSTATE alias is for lex
+ * compatibility.
+ */
+#define YY_START (((yy_start) - 1) / 2)
+#define YYSTATE YY_START
+
+/* Action number for EOF rule of a given start state. */
+#define YY_STATE_EOF(state) (YY_END_OF_BUFFER + state + 1)
+
+/* Special action meaning "start processing a new file". */
+#define YY_NEW_FILE zconfrestart(zconfin )
+
+#define YY_END_OF_BUFFER_CHAR 0
+
+/* Size of default input buffer. */
+#ifndef YY_BUF_SIZE
+#define YY_BUF_SIZE 16384
+#endif
+
+#ifndef YY_TYPEDEF_YY_BUFFER_STATE
+#define YY_TYPEDEF_YY_BUFFER_STATE
+typedef struct yy_buffer_state *YY_BUFFER_STATE;
+#endif
+
+extern int zconfleng;
+
+extern FILE *zconfin, *zconfout;
+
+#define EOB_ACT_CONTINUE_SCAN 0
+#define EOB_ACT_END_OF_FILE 1
+#define EOB_ACT_LAST_MATCH 2
+
+ #define YY_LESS_LINENO(n)
+
+/* Return all but the first "n" matched characters back to the input stream. */
+#define yyless(n) \
+ do \
+ { \
+ /* Undo effects of setting up zconftext. */ \
+ int yyless_macro_arg = (n); \
+ YY_LESS_LINENO(yyless_macro_arg);\
+ *yy_cp = (yy_hold_char); \
+ YY_RESTORE_YY_MORE_OFFSET \
+ (yy_c_buf_p) = yy_cp = yy_bp + yyless_macro_arg - YY_MORE_ADJ; \
+ YY_DO_BEFORE_ACTION; /* set up zconftext again */ \
+ } \
+ while ( 0 )
+
+#define unput(c) yyunput( c, (yytext_ptr) )
+
+/* The following is because we cannot portably get our hands on size_t
+ * (without autoconf's help, which isn't available because we want
+ * flex-generated scanners to compile on their own).
+ */
+
+#ifndef YY_TYPEDEF_YY_SIZE_T
+#define YY_TYPEDEF_YY_SIZE_T
+typedef unsigned int yy_size_t;
+#endif
+
+#ifndef YY_STRUCT_YY_BUFFER_STATE
+#define YY_STRUCT_YY_BUFFER_STATE
+struct yy_buffer_state
+ {
+ FILE *yy_input_file;
+
+ char *yy_ch_buf; /* input buffer */
+ char *yy_buf_pos; /* current position in input buffer */
+
+ /* Size of input buffer in bytes, not including room for EOB
+ * characters.
+ */
+ yy_size_t yy_buf_size;
+
+ /* Number of characters read into yy_ch_buf, not including EOB
+ * characters.
+ */
+ int yy_n_chars;
+
+ /* Whether we "own" the buffer - i.e., we know we created it,
+ * and can realloc() it to grow it, and should free() it to
+ * delete it.
+ */
+ int yy_is_our_buffer;
+
+ /* Whether this is an "interactive" input source; if so, and
+ * if we're using stdio for input, then we want to use getc()
+ * instead of fread(), to make sure we stop fetching input after
+ * each newline.
+ */
+ int yy_is_interactive;
+
+ /* Whether we're considered to be at the beginning of a line.
+ * If so, '^' rules will be active on the next match, otherwise
+ * not.
+ */
+ int yy_at_bol;
+
+ int yy_bs_lineno; /**< The line count. */
+ int yy_bs_column; /**< The column count. */
+
+ /* Whether to try to fill the input buffer when we reach the
+ * end of it.
+ */
+ int yy_fill_buffer;
+
+ int yy_buffer_status;
+
+#define YY_BUFFER_NEW 0
+#define YY_BUFFER_NORMAL 1
+ /* When an EOF's been seen but there's still some text to process
+ * then we mark the buffer as YY_EOF_PENDING, to indicate that we
+ * shouldn't try reading from the input source any more. We might
+ * still have a bunch of tokens to match, though, because of
+ * possible backing-up.
+ *
+ * When we actually see the EOF, we change the status to "new"
+ * (via zconfrestart()), so that the user can continue scanning by
+ * just pointing zconfin at a new input file.
+ */
+#define YY_BUFFER_EOF_PENDING 2
+
+ };
+#endif /* !YY_STRUCT_YY_BUFFER_STATE */
+
+/* Stack of input buffers. */
+static size_t yy_buffer_stack_top = 0; /**< index of top of stack. */
+static size_t yy_buffer_stack_max = 0; /**< capacity of stack. */
+static YY_BUFFER_STATE * yy_buffer_stack = 0; /**< Stack as an array. */
+
+/* We provide macros for accessing buffer states in case in the
+ * future we want to put the buffer states in a more general
+ * "scanner state".
+ *
+ * Returns the top of the stack, or NULL.
+ */
+#define YY_CURRENT_BUFFER ( (yy_buffer_stack) \
+ ? (yy_buffer_stack)[(yy_buffer_stack_top)] \
+ : NULL)
+
+/* Same as previous macro, but useful when we know that the buffer stack is not
+ * NULL or when we need an lvalue. For internal use only.
+ */
+#define YY_CURRENT_BUFFER_LVALUE (yy_buffer_stack)[(yy_buffer_stack_top)]
+
+/* yy_hold_char holds the character lost when zconftext is formed. */
+static char yy_hold_char;
+static int yy_n_chars; /* number of characters read into yy_ch_buf */
+int zconfleng;
+
+/* Points to current character in buffer. */
+static char *yy_c_buf_p = (char *) 0;
+static int yy_init = 1; /* whether we need to initialize */
+static int yy_start = 0; /* start state number */
+
+/* Flag which is used to allow zconfwrap()'s to do buffer switches
+ * instead of setting up a fresh zconfin. A bit of a hack ...
+ */
+static int yy_did_buffer_switch_on_eof;
+
+void zconfrestart (FILE *input_file );
+void zconf_switch_to_buffer (YY_BUFFER_STATE new_buffer );
+YY_BUFFER_STATE zconf_create_buffer (FILE *file,int size );
+void zconf_delete_buffer (YY_BUFFER_STATE b );
+void zconf_flush_buffer (YY_BUFFER_STATE b );
+void zconfpush_buffer_state (YY_BUFFER_STATE new_buffer );
+void zconfpop_buffer_state (void );
+
+static void zconfensure_buffer_stack (void );
+static void zconf_load_buffer_state (void );
+static void zconf_init_buffer (YY_BUFFER_STATE b,FILE *file );
+
+#define YY_FLUSH_BUFFER zconf_flush_buffer(YY_CURRENT_BUFFER )
+
+YY_BUFFER_STATE zconf_scan_buffer (char *base,yy_size_t size );
+YY_BUFFER_STATE zconf_scan_string (yyconst char *yy_str );
+YY_BUFFER_STATE zconf_scan_bytes (yyconst char *bytes,int len );
+
+void *zconfalloc (yy_size_t );
+void *zconfrealloc (void *,yy_size_t );
+void zconffree (void * );
+
+#define yy_new_buffer zconf_create_buffer
+
+#define yy_set_interactive(is_interactive) \
+ { \
+ if ( ! YY_CURRENT_BUFFER ){ \
+ zconfensure_buffer_stack (); \
+ YY_CURRENT_BUFFER_LVALUE = \
+ zconf_create_buffer(zconfin,YY_BUF_SIZE ); \
+ } \
+ YY_CURRENT_BUFFER_LVALUE->yy_is_interactive = is_interactive; \
+ }
+
+#define yy_set_bol(at_bol) \
+ { \
+ if ( ! YY_CURRENT_BUFFER ){\
+ zconfensure_buffer_stack (); \
+ YY_CURRENT_BUFFER_LVALUE = \
+ zconf_create_buffer(zconfin,YY_BUF_SIZE ); \
+ } \
+ YY_CURRENT_BUFFER_LVALUE->yy_at_bol = at_bol; \
+ }
+
+#define YY_AT_BOL() (YY_CURRENT_BUFFER_LVALUE->yy_at_bol)
+
+/* Begin user sect3 */
+
+#define zconfwrap(n) 1
+#define YY_SKIP_YYWRAP
+
+typedef unsigned char YY_CHAR;
+
+FILE *zconfin = (FILE *) 0, *zconfout = (FILE *) 0;
+
+typedef int yy_state_type;
+
+extern int zconflineno;
+
+int zconflineno = 1;
+
+extern char *zconftext;
+#define yytext_ptr zconftext
+static yyconst flex_int16_t yy_nxt[][38] =
+ {
+ {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0
+ },
+
+ {
+ 11, 12, 13, 14, 12, 12, 15, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12
+ },
+
+ {
+ 11, 12, 13, 14, 12, 12, 15, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
+
+ 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12
+ },
+
+ {
+ 11, 16, 16, 17, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 18, 16, 16, 18, 18, 19, 20,
+ 21, 22, 18, 18, 23, 24, 18, 25, 18, 26,
+ 27, 18, 28, 29, 30, 18, 18, 16
+ },
+
+ {
+ 11, 16, 16, 17, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 18, 16, 16, 18, 18, 19, 20,
+ 21, 22, 18, 18, 23, 24, 18, 25, 18, 26,
+ 27, 18, 28, 29, 30, 18, 18, 16
+
+ },
+
+ {
+ 11, 31, 32, 33, 31, 31, 31, 31, 31, 31,
+ 31, 31, 31, 31, 31, 31, 31, 31, 31, 31,
+ 31, 31, 31, 31, 31, 31, 31, 31, 31, 31,
+ 31, 31, 31, 31, 31, 31, 31, 31
+ },
+
+ {
+ 11, 31, 32, 33, 31, 31, 31, 31, 31, 31,
+ 31, 31, 31, 31, 31, 31, 31, 31, 31, 31,
+ 31, 31, 31, 31, 31, 31, 31, 31, 31, 31,
+ 31, 31, 31, 31, 31, 31, 31, 31
+ },
+
+ {
+ 11, 34, 34, 35, 34, 36, 34, 34, 36, 34,
+ 34, 34, 34, 34, 34, 37, 34, 34, 34, 34,
+
+ 34, 34, 34, 34, 34, 34, 34, 34, 34, 34,
+ 34, 34, 34, 34, 34, 34, 34, 34
+ },
+
+ {
+ 11, 34, 34, 35, 34, 36, 34, 34, 36, 34,
+ 34, 34, 34, 34, 34, 37, 34, 34, 34, 34,
+ 34, 34, 34, 34, 34, 34, 34, 34, 34, 34,
+ 34, 34, 34, 34, 34, 34, 34, 34
+ },
+
+ {
+ 11, 38, 38, 39, 40, 41, 42, 43, 41, 44,
+ 45, 46, 47, 47, 48, 49, 47, 47, 47, 47,
+ 47, 47, 47, 47, 47, 50, 47, 47, 47, 51,
+ 47, 47, 47, 47, 47, 47, 47, 52
+
+ },
+
+ {
+ 11, 38, 38, 39, 40, 41, 42, 43, 41, 44,
+ 45, 46, 47, 47, 48, 49, 47, 47, 47, 47,
+ 47, 47, 47, 47, 47, 50, 47, 47, 47, 51,
+ 47, 47, 47, 47, 47, 47, 47, 52
+ },
+
+ {
+ -11, -11, -11, -11, -11, -11, -11, -11, -11, -11,
+ -11, -11, -11, -11, -11, -11, -11, -11, -11, -11,
+ -11, -11, -11, -11, -11, -11, -11, -11, -11, -11,
+ -11, -11, -11, -11, -11, -11, -11, -11
+ },
+
+ {
+ 11, -12, -12, -12, -12, -12, -12, -12, -12, -12,
+ -12, -12, -12, -12, -12, -12, -12, -12, -12, -12,
+
+ -12, -12, -12, -12, -12, -12, -12, -12, -12, -12,
+ -12, -12, -12, -12, -12, -12, -12, -12
+ },
+
+ {
+ 11, -13, 53, 54, -13, -13, 55, -13, -13, -13,
+ -13, -13, -13, -13, -13, -13, -13, -13, -13, -13,
+ -13, -13, -13, -13, -13, -13, -13, -13, -13, -13,
+ -13, -13, -13, -13, -13, -13, -13, -13
+ },
+
+ {
+ 11, -14, -14, -14, -14, -14, -14, -14, -14, -14,
+ -14, -14, -14, -14, -14, -14, -14, -14, -14, -14,
+ -14, -14, -14, -14, -14, -14, -14, -14, -14, -14,
+ -14, -14, -14, -14, -14, -14, -14, -14
+
+ },
+
+ {
+ 11, 56, 56, 57, 56, 56, 56, 56, 56, 56,
+ 56, 56, 56, 56, 56, 56, 56, 56, 56, 56,
+ 56, 56, 56, 56, 56, 56, 56, 56, 56, 56,
+ 56, 56, 56, 56, 56, 56, 56, 56
+ },
+
+ {
+ 11, -16, -16, -16, -16, -16, -16, -16, -16, -16,
+ -16, -16, -16, -16, -16, -16, -16, -16, -16, -16,
+ -16, -16, -16, -16, -16, -16, -16, -16, -16, -16,
+ -16, -16, -16, -16, -16, -16, -16, -16
+ },
+
+ {
+ 11, -17, -17, -17, -17, -17, -17, -17, -17, -17,
+ -17, -17, -17, -17, -17, -17, -17, -17, -17, -17,
+
+ -17, -17, -17, -17, -17, -17, -17, -17, -17, -17,
+ -17, -17, -17, -17, -17, -17, -17, -17
+ },
+
+ {
+ 11, -18, -18, -18, -18, -18, -18, -18, -18, -18,
+ -18, -18, -18, 58, -18, -18, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -18
+ },
+
+ {
+ 11, -19, -19, -19, -19, -19, -19, -19, -19, -19,
+ -19, -19, -19, 58, -19, -19, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 59,
+ 58, 58, 58, 58, 58, 58, 58, -19
+
+ },
+
+ {
+ 11, -20, -20, -20, -20, -20, -20, -20, -20, -20,
+ -20, -20, -20, 58, -20, -20, 58, 58, 58, 58,
+ 58, 58, 58, 58, 60, 58, 58, 58, 58, 61,
+ 58, 58, 58, 58, 58, 58, 58, -20
+ },
+
+ {
+ 11, -21, -21, -21, -21, -21, -21, -21, -21, -21,
+ -21, -21, -21, 58, -21, -21, 58, 58, 58, 58,
+ 58, 62, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -21
+ },
+
+ {
+ 11, -22, -22, -22, -22, -22, -22, -22, -22, -22,
+ -22, -22, -22, 58, -22, -22, 58, 58, 58, 58,
+
+ 58, 58, 58, 58, 58, 58, 58, 58, 63, 58,
+ 58, 58, 58, 58, 58, 58, 58, -22
+ },
+
+ {
+ 11, -23, -23, -23, -23, -23, -23, -23, -23, -23,
+ -23, -23, -23, 58, -23, -23, 58, 58, 58, 58,
+ 58, 64, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -23
+ },
+
+ {
+ 11, -24, -24, -24, -24, -24, -24, -24, -24, -24,
+ -24, -24, -24, 58, -24, -24, 58, 58, 58, 58,
+ 58, 58, 65, 58, 58, 58, 58, 58, 66, 58,
+ 58, 58, 58, 58, 58, 58, 58, -24
+
+ },
+
+ {
+ 11, -25, -25, -25, -25, -25, -25, -25, -25, -25,
+ -25, -25, -25, 58, -25, -25, 58, 67, 58, 58,
+ 58, 68, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -25
+ },
+
+ {
+ 11, -26, -26, -26, -26, -26, -26, -26, -26, -26,
+ -26, -26, -26, 58, -26, -26, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 69, 58, 58, 58, 58, 58, 58, -26
+ },
+
+ {
+ 11, -27, -27, -27, -27, -27, -27, -27, -27, -27,
+ -27, -27, -27, 58, -27, -27, 58, 58, 58, 58,
+
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 70, 58, 58, 58, 58, -27
+ },
+
+ {
+ 11, -28, -28, -28, -28, -28, -28, -28, -28, -28,
+ -28, -28, -28, 58, -28, -28, 58, 71, 58, 58,
+ 58, 72, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -28
+ },
+
+ {
+ 11, -29, -29, -29, -29, -29, -29, -29, -29, -29,
+ -29, -29, -29, 58, -29, -29, 58, 58, 58, 58,
+ 58, 73, 58, 58, 58, 58, 58, 58, 58, 74,
+ 58, 58, 58, 58, 75, 58, 58, -29
+
+ },
+
+ {
+ 11, -30, -30, -30, -30, -30, -30, -30, -30, -30,
+ -30, -30, -30, 58, -30, -30, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 76, 58, 58, 58, 58, -30
+ },
+
+ {
+ 11, 77, 77, -31, 77, 77, 77, 77, 77, 77,
+ 77, 77, 77, 77, 77, 77, 77, 77, 77, 77,
+ 77, 77, 77, 77, 77, 77, 77, 77, 77, 77,
+ 77, 77, 77, 77, 77, 77, 77, 77
+ },
+
+ {
+ 11, -32, 78, 79, -32, -32, -32, -32, -32, -32,
+ -32, -32, -32, -32, -32, -32, -32, -32, -32, -32,
+
+ -32, -32, -32, -32, -32, -32, -32, -32, -32, -32,
+ -32, -32, -32, -32, -32, -32, -32, -32
+ },
+
+ {
+ 11, 80, -33, -33, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80
+ },
+
+ {
+ 11, 81, 81, 82, 81, -34, 81, 81, -34, 81,
+ 81, 81, 81, 81, 81, -34, 81, 81, 81, 81,
+ 81, 81, 81, 81, 81, 81, 81, 81, 81, 81,
+ 81, 81, 81, 81, 81, 81, 81, 81
+
+ },
+
+ {
+ 11, -35, -35, -35, -35, -35, -35, -35, -35, -35,
+ -35, -35, -35, -35, -35, -35, -35, -35, -35, -35,
+ -35, -35, -35, -35, -35, -35, -35, -35, -35, -35,
+ -35, -35, -35, -35, -35, -35, -35, -35
+ },
+
+ {
+ 11, -36, -36, -36, -36, -36, -36, -36, -36, -36,
+ -36, -36, -36, -36, -36, -36, -36, -36, -36, -36,
+ -36, -36, -36, -36, -36, -36, -36, -36, -36, -36,
+ -36, -36, -36, -36, -36, -36, -36, -36
+ },
+
+ {
+ 11, 83, 83, 84, 83, 83, 83, 83, 83, 83,
+ 83, 83, 83, 83, 83, 83, 83, 83, 83, 83,
+
+ 83, 83, 83, 83, 83, 83, 83, 83, 83, 83,
+ 83, 83, 83, 83, 83, 83, 83, 83
+ },
+
+ {
+ 11, -38, -38, -38, -38, -38, -38, -38, -38, -38,
+ -38, -38, -38, -38, -38, -38, -38, -38, -38, -38,
+ -38, -38, -38, -38, -38, -38, -38, -38, -38, -38,
+ -38, -38, -38, -38, -38, -38, -38, -38
+ },
+
+ {
+ 11, -39, -39, -39, -39, -39, -39, -39, -39, -39,
+ -39, -39, -39, -39, -39, -39, -39, -39, -39, -39,
+ -39, -39, -39, -39, -39, -39, -39, -39, -39, -39,
+ -39, -39, -39, -39, -39, -39, -39, -39
+
+ },
+
+ {
+ 11, -40, -40, -40, -40, -40, -40, -40, -40, -40,
+ -40, -40, -40, -40, 85, -40, -40, -40, -40, -40,
+ -40, -40, -40, -40, -40, -40, -40, -40, -40, -40,
+ -40, -40, -40, -40, -40, -40, -40, -40
+ },
+
+ {
+ 11, -41, -41, -41, -41, -41, -41, -41, -41, -41,
+ -41, -41, -41, -41, -41, -41, -41, -41, -41, -41,
+ -41, -41, -41, -41, -41, -41, -41, -41, -41, -41,
+ -41, -41, -41, -41, -41, -41, -41, -41
+ },
+
+ {
+ 11, 86, 86, -42, 86, 86, 86, 86, 86, 86,
+ 86, 86, 86, 86, 86, 86, 86, 86, 86, 86,
+
+ 86, 86, 86, 86, 86, 86, 86, 86, 86, 86,
+ 86, 86, 86, 86, 86, 86, 86, 86
+ },
+
+ {
+ 11, -43, -43, -43, -43, -43, -43, 87, -43, -43,
+ -43, -43, -43, -43, -43, -43, -43, -43, -43, -43,
+ -43, -43, -43, -43, -43, -43, -43, -43, -43, -43,
+ -43, -43, -43, -43, -43, -43, -43, -43
+ },
+
+ {
+ 11, -44, -44, -44, -44, -44, -44, -44, -44, -44,
+ -44, -44, -44, -44, -44, -44, -44, -44, -44, -44,
+ -44, -44, -44, -44, -44, -44, -44, -44, -44, -44,
+ -44, -44, -44, -44, -44, -44, -44, -44
+
+ },
+
+ {
+ 11, -45, -45, -45, -45, -45, -45, -45, -45, -45,
+ -45, -45, -45, -45, -45, -45, -45, -45, -45, -45,
+ -45, -45, -45, -45, -45, -45, -45, -45, -45, -45,
+ -45, -45, -45, -45, -45, -45, -45, -45
+ },
+
+ {
+ 11, -46, -46, -46, -46, -46, -46, -46, -46, -46,
+ -46, 88, 89, 89, -46, -46, 89, 89, 89, 89,
+ 89, 89, 89, 89, 89, 89, 89, 89, 89, 89,
+ 89, 89, 89, 89, 89, 89, 89, -46
+ },
+
+ {
+ 11, -47, -47, -47, -47, -47, -47, -47, -47, -47,
+ -47, 89, 89, 89, -47, -47, 89, 89, 89, 89,
+
+ 89, 89, 89, 89, 89, 89, 89, 89, 89, 89,
+ 89, 89, 89, 89, 89, 89, 89, -47
+ },
+
+ {
+ 11, -48, -48, -48, -48, -48, -48, -48, -48, -48,
+ -48, -48, -48, -48, -48, -48, -48, -48, -48, -48,
+ -48, -48, -48, -48, -48, -48, -48, -48, -48, -48,
+ -48, -48, -48, -48, -48, -48, -48, -48
+ },
+
+ {
+ 11, -49, -49, 90, -49, -49, -49, -49, -49, -49,
+ -49, -49, -49, -49, -49, -49, -49, -49, -49, -49,
+ -49, -49, -49, -49, -49, -49, -49, -49, -49, -49,
+ -49, -49, -49, -49, -49, -49, -49, -49
+
+ },
+
+ {
+ 11, -50, -50, -50, -50, -50, -50, -50, -50, -50,
+ -50, 89, 89, 89, -50, -50, 89, 89, 89, 89,
+ 89, 89, 91, 89, 89, 89, 89, 89, 89, 89,
+ 89, 89, 89, 89, 89, 89, 89, -50
+ },
+
+ {
+ 11, -51, -51, -51, -51, -51, -51, -51, -51, -51,
+ -51, 89, 89, 89, -51, -51, 89, 89, 89, 89,
+ 89, 89, 89, 89, 89, 89, 89, 89, 92, 89,
+ 89, 89, 89, 89, 89, 89, 89, -51
+ },
+
+ {
+ 11, -52, -52, -52, -52, -52, -52, -52, -52, -52,
+ -52, -52, -52, -52, -52, -52, -52, -52, -52, -52,
+
+ -52, -52, -52, -52, -52, -52, -52, -52, -52, -52,
+ -52, -52, -52, -52, -52, -52, -52, 93
+ },
+
+ {
+ 11, -53, 53, 54, -53, -53, 55, -53, -53, -53,
+ -53, -53, -53, -53, -53, -53, -53, -53, -53, -53,
+ -53, -53, -53, -53, -53, -53, -53, -53, -53, -53,
+ -53, -53, -53, -53, -53, -53, -53, -53
+ },
+
+ {
+ 11, -54, -54, -54, -54, -54, -54, -54, -54, -54,
+ -54, -54, -54, -54, -54, -54, -54, -54, -54, -54,
+ -54, -54, -54, -54, -54, -54, -54, -54, -54, -54,
+ -54, -54, -54, -54, -54, -54, -54, -54
+
+ },
+
+ {
+ 11, 56, 56, 57, 56, 56, 56, 56, 56, 56,
+ 56, 56, 56, 56, 56, 56, 56, 56, 56, 56,
+ 56, 56, 56, 56, 56, 56, 56, 56, 56, 56,
+ 56, 56, 56, 56, 56, 56, 56, 56
+ },
+
+ {
+ 11, 56, 56, 57, 56, 56, 56, 56, 56, 56,
+ 56, 56, 56, 56, 56, 56, 56, 56, 56, 56,
+ 56, 56, 56, 56, 56, 56, 56, 56, 56, 56,
+ 56, 56, 56, 56, 56, 56, 56, 56
+ },
+
+ {
+ 11, -57, -57, -57, -57, -57, -57, -57, -57, -57,
+ -57, -57, -57, -57, -57, -57, -57, -57, -57, -57,
+
+ -57, -57, -57, -57, -57, -57, -57, -57, -57, -57,
+ -57, -57, -57, -57, -57, -57, -57, -57
+ },
+
+ {
+ 11, -58, -58, -58, -58, -58, -58, -58, -58, -58,
+ -58, -58, -58, 58, -58, -58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -58
+ },
+
+ {
+ 11, -59, -59, -59, -59, -59, -59, -59, -59, -59,
+ -59, -59, -59, 58, -59, -59, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 94,
+ 58, 58, 58, 58, 58, 58, 58, -59
+
+ },
+
+ {
+ 11, -60, -60, -60, -60, -60, -60, -60, -60, -60,
+ -60, -60, -60, 58, -60, -60, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 95,
+ 58, 58, 58, 58, 58, 58, 58, -60
+ },
+
+ {
+ 11, -61, -61, -61, -61, -61, -61, -61, -61, -61,
+ -61, -61, -61, 58, -61, -61, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 96, 97, 58,
+ 58, 58, 58, 58, 58, 58, 58, -61
+ },
+
+ {
+ 11, -62, -62, -62, -62, -62, -62, -62, -62, -62,
+ -62, -62, -62, 58, -62, -62, 58, 58, 58, 58,
+
+ 58, 58, 98, 58, 58, 58, 58, 58, 58, 58,
+ 99, 58, 58, 58, 58, 58, 58, -62
+ },
+
+ {
+ 11, -63, -63, -63, -63, -63, -63, -63, -63, -63,
+ -63, -63, -63, 58, -63, -63, 58, 100, 58, 58,
+ 101, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -63
+ },
+
+ {
+ 11, -64, -64, -64, -64, -64, -64, -64, -64, -64,
+ -64, -64, -64, 58, -64, -64, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 102, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 103, -64
+
+ },
+
+ {
+ 11, -65, -65, -65, -65, -65, -65, -65, -65, -65,
+ -65, -65, -65, 58, -65, -65, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -65
+ },
+
+ {
+ 11, -66, -66, -66, -66, -66, -66, -66, -66, -66,
+ -66, -66, -66, 58, -66, -66, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 104, 58, 58, -66
+ },
+
+ {
+ 11, -67, -67, -67, -67, -67, -67, -67, -67, -67,
+ -67, -67, -67, 58, -67, -67, 58, 58, 58, 58,
+
+ 58, 58, 58, 58, 58, 105, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -67
+ },
+
+ {
+ 11, -68, -68, -68, -68, -68, -68, -68, -68, -68,
+ -68, -68, -68, 58, -68, -68, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 106, 58,
+ 58, 58, 58, 58, 58, 58, 58, -68
+ },
+
+ {
+ 11, -69, -69, -69, -69, -69, -69, -69, -69, -69,
+ -69, -69, -69, 58, -69, -69, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 107, 58, 58, -69
+
+ },
+
+ {
+ 11, -70, -70, -70, -70, -70, -70, -70, -70, -70,
+ -70, -70, -70, 58, -70, -70, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 108,
+ 58, 58, 58, 58, 58, 58, 58, -70
+ },
+
+ {
+ 11, -71, -71, -71, -71, -71, -71, -71, -71, -71,
+ -71, -71, -71, 58, -71, -71, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 109, 58,
+ 58, 58, 58, 58, 58, 58, 58, -71
+ },
+
+ {
+ 11, -72, -72, -72, -72, -72, -72, -72, -72, -72,
+ -72, -72, -72, 58, -72, -72, 58, 58, 58, 58,
+
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 110, 58, 58, 58, 58, 58, -72
+ },
+
+ {
+ 11, -73, -73, -73, -73, -73, -73, -73, -73, -73,
+ -73, -73, -73, 58, -73, -73, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 111, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -73
+ },
+
+ {
+ 11, -74, -74, -74, -74, -74, -74, -74, -74, -74,
+ -74, -74, -74, 58, -74, -74, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 112, 58, -74
+
+ },
+
+ {
+ 11, -75, -75, -75, -75, -75, -75, -75, -75, -75,
+ -75, -75, -75, 58, -75, -75, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 113, 58, 58, 58, 58, -75
+ },
+
+ {
+ 11, -76, -76, -76, -76, -76, -76, -76, -76, -76,
+ -76, -76, -76, 58, -76, -76, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 114, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -76
+ },
+
+ {
+ 11, 77, 77, -77, 77, 77, 77, 77, 77, 77,
+ 77, 77, 77, 77, 77, 77, 77, 77, 77, 77,
+
+ 77, 77, 77, 77, 77, 77, 77, 77, 77, 77,
+ 77, 77, 77, 77, 77, 77, 77, 77
+ },
+
+ {
+ 11, -78, 78, 79, -78, -78, -78, -78, -78, -78,
+ -78, -78, -78, -78, -78, -78, -78, -78, -78, -78,
+ -78, -78, -78, -78, -78, -78, -78, -78, -78, -78,
+ -78, -78, -78, -78, -78, -78, -78, -78
+ },
+
+ {
+ 11, 80, -79, -79, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80
+
+ },
+
+ {
+ 11, -80, -80, -80, -80, -80, -80, -80, -80, -80,
+ -80, -80, -80, -80, -80, -80, -80, -80, -80, -80,
+ -80, -80, -80, -80, -80, -80, -80, -80, -80, -80,
+ -80, -80, -80, -80, -80, -80, -80, -80
+ },
+
+ {
+ 11, 81, 81, 82, 81, -81, 81, 81, -81, 81,
+ 81, 81, 81, 81, 81, -81, 81, 81, 81, 81,
+ 81, 81, 81, 81, 81, 81, 81, 81, 81, 81,
+ 81, 81, 81, 81, 81, 81, 81, 81
+ },
+
+ {
+ 11, -82, -82, -82, -82, -82, -82, -82, -82, -82,
+ -82, -82, -82, -82, -82, -82, -82, -82, -82, -82,
+
+ -82, -82, -82, -82, -82, -82, -82, -82, -82, -82,
+ -82, -82, -82, -82, -82, -82, -82, -82
+ },
+
+ {
+ 11, -83, -83, 84, -83, -83, -83, -83, -83, -83,
+ -83, -83, -83, -83, -83, -83, -83, -83, -83, -83,
+ -83, -83, -83, -83, -83, -83, -83, -83, -83, -83,
+ -83, -83, -83, -83, -83, -83, -83, -83
+ },
+
+ {
+ 11, -84, -84, -84, -84, -84, -84, -84, -84, -84,
+ -84, -84, -84, -84, -84, -84, -84, -84, -84, -84,
+ -84, -84, -84, -84, -84, -84, -84, -84, -84, -84,
+ -84, -84, -84, -84, -84, -84, -84, -84
+
+ },
+
+ {
+ 11, -85, -85, -85, -85, -85, -85, -85, -85, -85,
+ -85, -85, -85, -85, -85, -85, -85, -85, -85, -85,
+ -85, -85, -85, -85, -85, -85, -85, -85, -85, -85,
+ -85, -85, -85, -85, -85, -85, -85, -85
+ },
+
+ {
+ 11, 86, 86, -86, 86, 86, 86, 86, 86, 86,
+ 86, 86, 86, 86, 86, 86, 86, 86, 86, 86,
+ 86, 86, 86, 86, 86, 86, 86, 86, 86, 86,
+ 86, 86, 86, 86, 86, 86, 86, 86
+ },
+
+ {
+ 11, -87, -87, -87, -87, -87, -87, -87, -87, -87,
+ -87, -87, -87, -87, -87, -87, -87, -87, -87, -87,
+
+ -87, -87, -87, -87, -87, -87, -87, -87, -87, -87,
+ -87, -87, -87, -87, -87, -87, -87, -87
+ },
+
+ {
+ 11, -88, -88, -88, -88, -88, -88, -88, -88, -88,
+ -88, 115, 89, 89, -88, -88, 89, 89, 89, 89,
+ 89, 89, 89, 89, 89, 89, 89, 89, 89, 89,
+ 89, 89, 89, 89, 89, 89, 89, -88
+ },
+
+ {
+ 11, -89, -89, -89, -89, -89, -89, -89, -89, -89,
+ -89, 89, 89, 89, -89, -89, 89, 89, 89, 89,
+ 89, 89, 89, 89, 89, 89, 89, 89, 89, 89,
+ 89, 89, 89, 89, 89, 89, 89, -89
+
+ },
+
+ {
+ 11, -90, -90, -90, -90, -90, -90, -90, -90, -90,
+ -90, -90, -90, -90, -90, -90, -90, -90, -90, -90,
+ -90, -90, -90, -90, -90, -90, -90, -90, -90, -90,
+ -90, -90, -90, -90, -90, -90, -90, -90
+ },
+
+ {
+ 11, -91, -91, -91, -91, -91, -91, -91, -91, -91,
+ -91, 89, 89, 89, -91, -91, 89, 89, 89, 89,
+ 89, 89, 89, 89, 89, 89, 89, 89, 89, 89,
+ 89, 89, 89, 89, 89, 89, 89, -91
+ },
+
+ {
+ 11, -92, -92, -92, -92, -92, -92, -92, -92, -92,
+ -92, 89, 89, 89, -92, -92, 89, 89, 89, 89,
+
+ 89, 89, 89, 89, 89, 89, 89, 89, 89, 89,
+ 89, 89, 89, 89, 89, 89, 89, -92
+ },
+
+ {
+ 11, -93, -93, -93, -93, -93, -93, -93, -93, -93,
+ -93, -93, -93, -93, -93, -93, -93, -93, -93, -93,
+ -93, -93, -93, -93, -93, -93, -93, -93, -93, -93,
+ -93, -93, -93, -93, -93, -93, -93, -93
+ },
+
+ {
+ 11, -94, -94, -94, -94, -94, -94, -94, -94, -94,
+ -94, -94, -94, 58, -94, -94, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 116, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -94
+
+ },
+
+ {
+ 11, -95, -95, -95, -95, -95, -95, -95, -95, -95,
+ -95, -95, -95, 58, -95, -95, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 117, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -95
+ },
+
+ {
+ 11, -96, -96, -96, -96, -96, -96, -96, -96, -96,
+ -96, -96, -96, 58, -96, -96, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 118, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -96
+ },
+
+ {
+ 11, -97, -97, -97, -97, -97, -97, -97, -97, -97,
+ -97, -97, -97, 58, -97, -97, 58, 58, 58, 58,
+
+ 58, 58, 119, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -97
+ },
+
+ {
+ 11, -98, -98, -98, -98, -98, -98, -98, -98, -98,
+ -98, -98, -98, 58, -98, -98, 120, 121, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -98
+ },
+
+ {
+ 11, -99, -99, -99, -99, -99, -99, -99, -99, -99,
+ -99, -99, -99, 58, -99, -99, 58, 58, 58, 58,
+ 58, 122, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -99
+
+ },
+
+ {
+ 11, -100, -100, -100, -100, -100, -100, -100, -100, -100,
+ -100, -100, -100, 58, -100, -100, 58, 58, 123, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -100
+ },
+
+ {
+ 11, -101, -101, -101, -101, -101, -101, -101, -101, -101,
+ -101, -101, -101, 58, -101, -101, 58, 58, 58, 124,
+ 58, 58, 58, 58, 58, 125, 58, 126, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -101
+ },
+
+ {
+ 11, -102, -102, -102, -102, -102, -102, -102, -102, -102,
+ -102, -102, -102, 58, -102, -102, 58, 58, 58, 58,
+
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 127, 58, 58, 58, 58, 58, 58, -102
+ },
+
+ {
+ 11, -103, -103, -103, -103, -103, -103, -103, -103, -103,
+ -103, -103, -103, 58, -103, -103, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -103
+ },
+
+ {
+ 11, -104, -104, -104, -104, -104, -104, -104, -104, -104,
+ -104, -104, -104, 58, -104, -104, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -104
+
+ },
+
+ {
+ 11, -105, -105, -105, -105, -105, -105, -105, -105, -105,
+ -105, -105, -105, 58, -105, -105, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 128, 58,
+ 58, 58, 58, 58, 58, 58, 58, -105
+ },
+
+ {
+ 11, -106, -106, -106, -106, -106, -106, -106, -106, -106,
+ -106, -106, -106, 58, -106, -106, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 129, 58, -106
+ },
+
+ {
+ 11, -107, -107, -107, -107, -107, -107, -107, -107, -107,
+ -107, -107, -107, 58, -107, -107, 58, 58, 58, 58,
+
+ 58, 58, 58, 58, 58, 130, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -107
+ },
+
+ {
+ 11, -108, -108, -108, -108, -108, -108, -108, -108, -108,
+ -108, -108, -108, 58, -108, -108, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 131, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -108
+ },
+
+ {
+ 11, -109, -109, -109, -109, -109, -109, -109, -109, -109,
+ -109, -109, -109, 58, -109, -109, 58, 58, 58, 58,
+ 58, 58, 58, 132, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -109
+
+ },
+
+ {
+ 11, -110, -110, -110, -110, -110, -110, -110, -110, -110,
+ -110, -110, -110, 58, -110, -110, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 133, 58, -110
+ },
+
+ {
+ 11, -111, -111, -111, -111, -111, -111, -111, -111, -111,
+ -111, -111, -111, 58, -111, -111, 58, 58, 58, 58,
+ 58, 134, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -111
+ },
+
+ {
+ 11, -112, -112, -112, -112, -112, -112, -112, -112, -112,
+ -112, -112, -112, 58, -112, -112, 58, 58, 58, 58,
+
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 135, 58, 58, 58, 58, -112
+ },
+
+ {
+ 11, -113, -113, -113, -113, -113, -113, -113, -113, -113,
+ -113, -113, -113, 58, -113, -113, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 136, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -113
+ },
+
+ {
+ 11, -114, -114, -114, -114, -114, -114, -114, -114, -114,
+ -114, -114, -114, 58, -114, -114, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 137, 58, 58, 58, -114
+
+ },
+
+ {
+ 11, -115, -115, -115, -115, -115, -115, -115, -115, -115,
+ -115, 89, 89, 89, -115, -115, 89, 89, 89, 89,
+ 89, 89, 89, 89, 89, 89, 89, 89, 89, 89,
+ 89, 89, 89, 89, 89, 89, 89, -115
+ },
+
+ {
+ 11, -116, -116, -116, -116, -116, -116, -116, -116, -116,
+ -116, -116, -116, 58, -116, -116, 58, 58, 58, 58,
+ 58, 138, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -116
+ },
+
+ {
+ 11, -117, -117, -117, -117, -117, -117, -117, -117, -117,
+ -117, -117, -117, 58, -117, -117, 58, 58, 58, 139,
+
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -117
+ },
+
+ {
+ 11, -118, -118, -118, -118, -118, -118, -118, -118, -118,
+ -118, -118, -118, 58, -118, -118, 58, 58, 58, 58,
+ 58, 140, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -118
+ },
+
+ {
+ 11, -119, -119, -119, -119, -119, -119, -119, -119, -119,
+ -119, -119, -119, 58, -119, -119, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 141, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -119
+
+ },
+
+ {
+ 11, -120, -120, -120, -120, -120, -120, -120, -120, -120,
+ -120, -120, -120, 58, -120, -120, 58, 58, 142, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 143, 58, 58, -120
+ },
+
+ {
+ 11, -121, -121, -121, -121, -121, -121, -121, -121, -121,
+ -121, -121, -121, 58, -121, -121, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 144, 58, -121
+ },
+
+ {
+ 11, -122, -122, -122, -122, -122, -122, -122, -122, -122,
+ -122, -122, -122, 58, -122, -122, 58, 58, 58, 58,
+
+ 58, 58, 58, 58, 58, 58, 58, 58, 145, 58,
+ 58, 58, 58, 58, 58, 58, 58, -122
+ },
+
+ {
+ 11, -123, -123, -123, -123, -123, -123, -123, -123, -123,
+ -123, -123, -123, 58, -123, -123, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 146, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -123
+ },
+
+ {
+ 11, -124, -124, -124, -124, -124, -124, -124, -124, -124,
+ -124, -124, -124, 58, -124, -124, 58, 58, 58, 58,
+ 58, 58, 58, 58, 147, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -124
+
+ },
+
+ {
+ 11, -125, -125, -125, -125, -125, -125, -125, -125, -125,
+ -125, -125, -125, 58, -125, -125, 58, 58, 58, 58,
+ 58, 58, 148, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -125
+ },
+
+ {
+ 11, -126, -126, -126, -126, -126, -126, -126, -126, -126,
+ -126, -126, -126, 58, -126, -126, 58, 58, 58, 58,
+ 58, 149, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -126
+ },
+
+ {
+ 11, -127, -127, -127, -127, -127, -127, -127, -127, -127,
+ -127, -127, -127, 58, -127, -127, 58, 58, 58, 58,
+
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -127
+ },
+
+ {
+ 11, -128, -128, -128, -128, -128, -128, -128, -128, -128,
+ -128, -128, -128, 58, -128, -128, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 150, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -128
+ },
+
+ {
+ 11, -129, -129, -129, -129, -129, -129, -129, -129, -129,
+ -129, -129, -129, 58, -129, -129, 58, 58, 58, 151,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -129
+
+ },
+
+ {
+ 11, -130, -130, -130, -130, -130, -130, -130, -130, -130,
+ -130, -130, -130, 58, -130, -130, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 152,
+ 58, 58, 58, 58, 58, 58, 58, -130
+ },
+
+ {
+ 11, -131, -131, -131, -131, -131, -131, -131, -131, -131,
+ -131, -131, -131, 58, -131, -131, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 153, 58, 58, 58, 58, 58, 58, -131
+ },
+
+ {
+ 11, -132, -132, -132, -132, -132, -132, -132, -132, -132,
+ -132, -132, -132, 58, -132, -132, 58, 58, 58, 58,
+
+ 58, 154, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -132
+ },
+
+ {
+ 11, -133, -133, -133, -133, -133, -133, -133, -133, -133,
+ -133, -133, -133, 58, -133, -133, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 155, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -133
+ },
+
+ {
+ 11, -134, -134, -134, -134, -134, -134, -134, -134, -134,
+ -134, -134, -134, 58, -134, -134, 58, 58, 58, 156,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -134
+
+ },
+
+ {
+ 11, -135, -135, -135, -135, -135, -135, -135, -135, -135,
+ -135, -135, -135, 58, -135, -135, 58, 58, 58, 157,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -135
+ },
+
+ {
+ 11, -136, -136, -136, -136, -136, -136, -136, -136, -136,
+ -136, -136, -136, 58, -136, -136, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 158, 58,
+ 58, 58, 58, 58, 58, 58, 58, -136
+ },
+
+ {
+ 11, -137, -137, -137, -137, -137, -137, -137, -137, -137,
+ -137, -137, -137, 58, -137, -137, 58, 58, 58, 58,
+
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 159, 58, 58, -137
+ },
+
+ {
+ 11, -138, -138, -138, -138, -138, -138, -138, -138, -138,
+ -138, -138, -138, 58, -138, -138, 58, 160, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -138
+ },
+
+ {
+ 11, -139, -139, -139, -139, -139, -139, -139, -139, -139,
+ -139, -139, -139, 58, -139, -139, 58, 58, 58, 58,
+ 58, 161, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -139
+
+ },
+
+ {
+ 11, -140, -140, -140, -140, -140, -140, -140, -140, -140,
+ -140, -140, -140, 58, -140, -140, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 162, 58,
+ 58, 58, 58, 58, 58, 58, 58, -140
+ },
+
+ {
+ 11, -141, -141, -141, -141, -141, -141, -141, -141, -141,
+ -141, -141, -141, 58, -141, -141, 58, 58, 58, 58,
+ 58, 58, 58, 163, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -141
+ },
+
+ {
+ 11, -142, -142, -142, -142, -142, -142, -142, -142, -142,
+ -142, -142, -142, 58, -142, -142, 58, 58, 58, 58,
+
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 164,
+ 58, 58, 58, 58, 58, 58, 58, -142
+ },
+
+ {
+ 11, -143, -143, -143, -143, -143, -143, -143, -143, -143,
+ -143, -143, -143, 58, -143, -143, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 165, 58, 58, 58, 58, -143
+ },
+
+ {
+ 11, -144, -144, -144, -144, -144, -144, -144, -144, -144,
+ -144, -144, -144, 58, -144, -144, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 166, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -144
+
+ },
+
+ {
+ 11, -145, -145, -145, -145, -145, -145, -145, -145, -145,
+ -145, -145, -145, 58, -145, -145, 58, 58, 58, 58,
+ 167, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -145
+ },
+
+ {
+ 11, -146, -146, -146, -146, -146, -146, -146, -146, -146,
+ -146, -146, -146, 58, -146, -146, 58, 58, 58, 58,
+ 58, 168, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -146
+ },
+
+ {
+ 11, -147, -147, -147, -147, -147, -147, -147, -147, -147,
+ -147, -147, -147, 58, -147, -147, 58, 58, 58, 58,
+
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 169,
+ 58, 58, 58, 58, 58, 58, 58, -147
+ },
+
+ {
+ 11, -148, -148, -148, -148, -148, -148, -148, -148, -148,
+ -148, -148, -148, 58, -148, -148, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -148
+ },
+
+ {
+ 11, -149, -149, -149, -149, -149, -149, -149, -149, -149,
+ -149, -149, -149, 58, -149, -149, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 170, 58,
+ 58, 58, 58, 58, 58, 58, 58, -149
+
+ },
+
+ {
+ 11, -150, -150, -150, -150, -150, -150, -150, -150, -150,
+ -150, -150, -150, 58, -150, -150, 58, 58, 58, 58,
+ 58, 171, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -150
+ },
+
+ {
+ 11, -151, -151, -151, -151, -151, -151, -151, -151, -151,
+ -151, -151, -151, 58, -151, -151, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 172,
+ 58, 58, 58, 58, 58, 58, 58, -151
+ },
+
+ {
+ 11, -152, -152, -152, -152, -152, -152, -152, -152, -152,
+ -152, -152, -152, 58, -152, -152, 58, 58, 58, 58,
+
+ 58, 58, 58, 58, 58, 58, 58, 58, 173, 58,
+ 58, 58, 58, 58, 58, 58, 58, -152
+ },
+
+ {
+ 11, -153, -153, -153, -153, -153, -153, -153, -153, -153,
+ -153, -153, -153, 58, -153, -153, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 174, 58, 58, -153
+ },
+
+ {
+ 11, -154, -154, -154, -154, -154, -154, -154, -154, -154,
+ -154, -154, -154, 58, -154, -154, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -154
+
+ },
+
+ {
+ 11, -155, -155, -155, -155, -155, -155, -155, -155, -155,
+ -155, -155, -155, 58, -155, -155, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 175, 58, 58, 58, 58, -155
+ },
+
+ {
+ 11, -156, -156, -156, -156, -156, -156, -156, -156, -156,
+ -156, -156, -156, 58, -156, -156, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 176, 58, 58, -156
+ },
+
+ {
+ 11, -157, -157, -157, -157, -157, -157, -157, -157, -157,
+ -157, -157, -157, 58, -157, -157, 58, 58, 58, 58,
+
+ 58, 177, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -157
+ },
+
+ {
+ 11, -158, -158, -158, -158, -158, -158, -158, -158, -158,
+ -158, -158, -158, 58, -158, -158, 58, 58, 58, 58,
+ 58, 58, 58, 178, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -158
+ },
+
+ {
+ 11, -159, -159, -159, -159, -159, -159, -159, -159, -159,
+ -159, -159, -159, 58, -159, -159, 58, 179, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -159
+
+ },
+
+ {
+ 11, -160, -160, -160, -160, -160, -160, -160, -160, -160,
+ -160, -160, -160, 58, -160, -160, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 180, 58,
+ 58, 58, 58, 58, 58, 58, 58, -160
+ },
+
+ {
+ 11, -161, -161, -161, -161, -161, -161, -161, -161, -161,
+ -161, -161, -161, 58, -161, -161, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -161
+ },
+
+ {
+ 11, -162, -162, -162, -162, -162, -162, -162, -162, -162,
+ -162, -162, -162, 58, -162, -162, 58, 58, 58, 58,
+
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 181, 58, 58, -162
+ },
+
+ {
+ 11, -163, -163, -163, -163, -163, -163, -163, -163, -163,
+ -163, -163, -163, 58, -163, -163, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -163
+ },
+
+ {
+ 11, -164, -164, -164, -164, -164, -164, -164, -164, -164,
+ -164, -164, -164, 58, -164, -164, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 182,
+ 58, 58, 58, 58, 58, 58, 58, -164
+
+ },
+
+ {
+ 11, -165, -165, -165, -165, -165, -165, -165, -165, -165,
+ -165, -165, -165, 58, -165, -165, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 183, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -165
+ },
+
+ {
+ 11, -166, -166, -166, -166, -166, -166, -166, -166, -166,
+ -166, -166, -166, 58, -166, -166, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 184, 58, 58, -166
+ },
+
+ {
+ 11, -167, -167, -167, -167, -167, -167, -167, -167, -167,
+ -167, -167, -167, 58, -167, -167, 58, 58, 58, 58,
+
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 185, 58, 58, 58, -167
+ },
+
+ {
+ 11, -168, -168, -168, -168, -168, -168, -168, -168, -168,
+ -168, -168, -168, 58, -168, -168, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -168
+ },
+
+ {
+ 11, -169, -169, -169, -169, -169, -169, -169, -169, -169,
+ -169, -169, -169, 58, -169, -169, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 186, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -169
+
+ },
+
+ {
+ 11, -170, -170, -170, -170, -170, -170, -170, -170, -170,
+ -170, -170, -170, 58, -170, -170, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 187, 58, -170
+ },
+
+ {
+ 11, -171, -171, -171, -171, -171, -171, -171, -171, -171,
+ -171, -171, -171, 58, -171, -171, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 188, 58,
+ 58, 58, 58, 58, 58, 58, 58, -171
+ },
+
+ {
+ 11, -172, -172, -172, -172, -172, -172, -172, -172, -172,
+ -172, -172, -172, 58, -172, -172, 58, 58, 58, 58,
+
+ 58, 58, 58, 58, 58, 58, 58, 58, 189, 58,
+ 58, 58, 58, 58, 58, 58, 58, -172
+ },
+
+ {
+ 11, -173, -173, -173, -173, -173, -173, -173, -173, -173,
+ -173, -173, -173, 58, -173, -173, 58, 190, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -173
+ },
+
+ {
+ 11, -174, -174, -174, -174, -174, -174, -174, -174, -174,
+ -174, -174, -174, 58, -174, -174, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -174
+
+ },
+
+ {
+ 11, -175, -175, -175, -175, -175, -175, -175, -175, -175,
+ -175, -175, -175, 58, -175, -175, 58, 58, 58, 58,
+ 58, 191, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -175
+ },
+
+ {
+ 11, -176, -176, -176, -176, -176, -176, -176, -176, -176,
+ -176, -176, -176, 58, -176, -176, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -176
+ },
+
+ {
+ 11, -177, -177, -177, -177, -177, -177, -177, -177, -177,
+ -177, -177, -177, 58, -177, -177, 58, 58, 58, 58,
+
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -177
+ },
+
+ {
+ 11, -178, -178, -178, -178, -178, -178, -178, -178, -178,
+ -178, -178, -178, 58, -178, -178, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -178
+ },
+
+ {
+ 11, -179, -179, -179, -179, -179, -179, -179, -179, -179,
+ -179, -179, -179, 58, -179, -179, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 192, 58, 58, -179
+
+ },
+
+ {
+ 11, -180, -180, -180, -180, -180, -180, -180, -180, -180,
+ -180, -180, -180, 58, -180, -180, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -180
+ },
+
+ {
+ 11, -181, -181, -181, -181, -181, -181, -181, -181, -181,
+ -181, -181, -181, 58, -181, -181, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -181
+ },
+
+ {
+ 11, -182, -182, -182, -182, -182, -182, -182, -182, -182,
+ -182, -182, -182, 58, -182, -182, 58, 58, 58, 58,
+
+ 58, 58, 58, 58, 58, 58, 193, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -182
+ },
+
+ {
+ 11, -183, -183, -183, -183, -183, -183, -183, -183, -183,
+ -183, -183, -183, 58, -183, -183, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 194, 58, 58, 58, -183
+ },
+
+ {
+ 11, -184, -184, -184, -184, -184, -184, -184, -184, -184,
+ -184, -184, -184, 58, -184, -184, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -184
+
+ },
+
+ {
+ 11, -185, -185, -185, -185, -185, -185, -185, -185, -185,
+ -185, -185, -185, 58, -185, -185, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -185
+ },
+
+ {
+ 11, -186, -186, -186, -186, -186, -186, -186, -186, -186,
+ -186, -186, -186, 58, -186, -186, 58, 58, 58, 195,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -186
+ },
+
+ {
+ 11, -187, -187, -187, -187, -187, -187, -187, -187, -187,
+ -187, -187, -187, 58, -187, -187, 58, 58, 58, 58,
+
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -187
+ },
+
+ {
+ 11, -188, -188, -188, -188, -188, -188, -188, -188, -188,
+ -188, -188, -188, 58, -188, -188, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 196, 58, -188
+ },
+
+ {
+ 11, -189, -189, -189, -189, -189, -189, -189, -189, -189,
+ -189, -189, -189, 58, -189, -189, 58, 58, 58, 58,
+ 58, 58, 197, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -189
+
+ },
+
+ {
+ 11, -190, -190, -190, -190, -190, -190, -190, -190, -190,
+ -190, -190, -190, 58, -190, -190, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 198, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -190
+ },
+
+ {
+ 11, -191, -191, -191, -191, -191, -191, -191, -191, -191,
+ -191, -191, -191, 58, -191, -191, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 199, 58, 58, 58, -191
+ },
+
+ {
+ 11, -192, -192, -192, -192, -192, -192, -192, -192, -192,
+ -192, -192, -192, 58, -192, -192, 58, 58, 58, 58,
+
+ 58, 200, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -192
+ },
+
+ {
+ 11, -193, -193, -193, -193, -193, -193, -193, -193, -193,
+ -193, -193, -193, 58, -193, -193, 58, 58, 58, 58,
+ 58, 201, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -193
+ },
+
+ {
+ 11, -194, -194, -194, -194, -194, -194, -194, -194, -194,
+ -194, -194, -194, 58, -194, -194, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 202, 58, 58, -194
+
+ },
+
+ {
+ 11, -195, -195, -195, -195, -195, -195, -195, -195, -195,
+ -195, -195, -195, 58, -195, -195, 58, 58, 58, 58,
+ 58, 203, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -195
+ },
+
+ {
+ 11, -196, -196, -196, -196, -196, -196, -196, -196, -196,
+ -196, -196, -196, 58, -196, -196, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -196
+ },
+
+ {
+ 11, -197, -197, -197, -197, -197, -197, -197, -197, -197,
+ -197, -197, -197, 58, -197, -197, 58, 58, 58, 58,
+
+ 58, 58, 58, 58, 58, 204, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -197
+ },
+
+ {
+ 11, -198, -198, -198, -198, -198, -198, -198, -198, -198,
+ -198, -198, -198, 58, -198, -198, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -198
+ },
+
+ {
+ 11, -199, -199, -199, -199, -199, -199, -199, -199, -199,
+ -199, -199, -199, 58, -199, -199, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -199
+
+ },
+
+ {
+ 11, -200, -200, -200, -200, -200, -200, -200, -200, -200,
+ -200, -200, -200, 58, -200, -200, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -200
+ },
+
+ {
+ 11, -201, -201, -201, -201, -201, -201, -201, -201, -201,
+ -201, -201, -201, 58, -201, -201, 58, 205, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -201
+ },
+
+ {
+ 11, -202, -202, -202, -202, -202, -202, -202, -202, -202,
+ -202, -202, -202, 58, -202, -202, 58, 206, 58, 58,
+
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -202
+ },
+
+ {
+ 11, -203, -203, -203, -203, -203, -203, -203, -203, -203,
+ -203, -203, -203, 58, -203, -203, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -203
+ },
+
+ {
+ 11, -204, -204, -204, -204, -204, -204, -204, -204, -204,
+ -204, -204, -204, 58, -204, -204, 58, 58, 58, 58,
+ 58, 58, 58, 207, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -204
+
+ },
+
+ {
+ 11, -205, -205, -205, -205, -205, -205, -205, -205, -205,
+ -205, -205, -205, 58, -205, -205, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 208, 58,
+ 58, 58, 58, 58, 58, 58, 58, -205
+ },
+
+ {
+ 11, -206, -206, -206, -206, -206, -206, -206, -206, -206,
+ -206, -206, -206, 58, -206, -206, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 209, 58, 58, -206
+ },
+
+ {
+ 11, -207, -207, -207, -207, -207, -207, -207, -207, -207,
+ -207, -207, -207, 58, -207, -207, 58, 58, 58, 58,
+
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -207
+ },
+
+ {
+ 11, -208, -208, -208, -208, -208, -208, -208, -208, -208,
+ -208, -208, -208, 58, -208, -208, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -208
+ },
+
+ {
+ 11, -209, -209, -209, -209, -209, -209, -209, -209, -209,
+ -209, -209, -209, 58, -209, -209, 58, 58, 58, 58,
+ 58, 210, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -209
+
+ },
+
+ {
+ 11, -210, -210, -210, -210, -210, -210, -210, -210, -210,
+ -210, -210, -210, 58, -210, -210, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, -210
+ },
+
+ } ;
+
+/* Flex-generated forward declarations for the scanner's internal DFA
+ * driver helpers.  Machine-generated (flex) — do not edit by hand.
+ */
+static yy_state_type yy_get_previous_state (void );
+static yy_state_type yy_try_NUL_trans (yy_state_type current_state );
+static int yy_get_next_buffer (void );
+static void yy_fatal_error (yyconst char msg[] );
+
+/* Done after the current pattern has been matched and before the
+ * corresponding action - sets up zconftext.
+ */
+#define YY_DO_BEFORE_ACTION \
+ (yytext_ptr) = yy_bp; \
+ zconfleng = (size_t) (yy_cp - yy_bp); \
+ (yy_hold_char) = *yy_cp; \
+ *yy_cp = '\0'; \
+ (yy_c_buf_p) = yy_cp;
+
+#define YY_NUM_RULES 64
+#define YY_END_OF_BUFFER 65
+/* This struct is not used in this scanner,
+ but its presence is necessary. */
+struct yy_trans_info
+ {
+ flex_int32_t yy_verify;
+ flex_int32_t yy_nxt;
+ };
+/* Flex-generated: for each DFA state, the number of the accepting rule
+ * (0 = non-accepting state).  Machine-generated — regenerate from the
+ * .l source with flex instead of editing by hand.
+ */
+static yyconst flex_int16_t yy_accept[211] =
+ { 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 65, 5, 4, 3, 2, 36, 37, 35, 35, 35,
+ 35, 35, 35, 35, 35, 35, 35, 35, 35, 35,
+ 63, 60, 62, 55, 59, 58, 57, 53, 48, 42,
+ 47, 51, 53, 40, 41, 50, 50, 43, 53, 50,
+ 50, 53, 4, 3, 2, 2, 1, 35, 35, 35,
+ 35, 35, 35, 35, 16, 35, 35, 35, 35, 35,
+ 35, 35, 35, 35, 35, 35, 63, 60, 62, 61,
+ 55, 54, 57, 56, 44, 51, 38, 50, 50, 52,
+ 45, 46, 39, 35, 35, 35, 35, 35, 35, 35,
+
+ 35, 35, 30, 29, 35, 35, 35, 35, 35, 35,
+ 35, 35, 35, 35, 49, 25, 35, 35, 35, 35,
+ 35, 35, 35, 35, 35, 35, 15, 35, 7, 35,
+ 35, 35, 35, 35, 35, 35, 35, 35, 35, 35,
+ 35, 35, 35, 35, 35, 35, 35, 17, 35, 35,
+ 35, 35, 35, 34, 35, 35, 35, 35, 35, 35,
+ 10, 35, 13, 35, 35, 35, 35, 33, 35, 35,
+ 35, 35, 35, 22, 35, 32, 9, 31, 35, 26,
+ 12, 35, 35, 21, 18, 35, 8, 35, 35, 35,
+ 35, 35, 27, 35, 35, 6, 35, 20, 19, 23,
+
+ 35, 35, 11, 35, 35, 35, 14, 28, 35, 24
+ } ;
+
+/* Flex-generated equivalence-class table: maps each of the 256 input
+ * byte values to a character class used to index the transition table
+ * (e.g. all digits share one class).  Machine-generated — do not edit.
+ */
+static yyconst flex_int32_t yy_ec[256] =
+ { 0,
+ 1, 1, 1, 1, 1, 1, 1, 1, 2, 3,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 2, 4, 5, 6, 1, 1, 7, 8, 9,
+ 10, 1, 1, 1, 11, 12, 12, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 1, 1, 1,
+ 14, 1, 1, 1, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
+ 1, 15, 1, 1, 16, 1, 17, 18, 19, 20,
+
+ 21, 22, 23, 24, 25, 13, 13, 26, 27, 28,
+ 29, 30, 31, 32, 33, 34, 35, 13, 13, 36,
+ 13, 13, 1, 37, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1
+ } ;
+
+extern int zconf_flex_debug;
+int zconf_flex_debug = 0;
+
+/* The intent behind this definition is that it'll catch
+ * any uses of REJECT which flex missed.
+ */
+#define REJECT reject_used_but_not_detected
+#define yymore() yymore_used_but_not_detected
+#define YY_MORE_ADJ 0
+#define YY_RESTORE_YY_MORE_OFFSET
+char *zconftext;
+
+/*
+ * Copyright (C) 2002 Roman Zippel <zippel@linux-m68k.org>
+ * Released under the terms of the GNU GPL v2.0.
+ */
+
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#define LKC_DIRECT_LINK
+#include "lkc.h"
+
+#define START_STRSIZE 16
+
+/* Growable string under construction by new_string()/append_string():
+ * text is the heap buffer, text_ptr points at its terminating NUL,
+ * text_size is the current length (excluding the NUL) and text_asize
+ * the allocated capacity.  text is non-static: presumably consumed by
+ * the grammar actions — verify against the parser (zconf.y).
+ */
+char *text;
+static char *text_ptr;
+static int text_size, text_asize;
+
+/* Singly-linked stack of nested flex input buffers; each node keeps a
+ * saved YY_BUFFER_STATE and a pointer to the buffer it was pushed on
+ * top of.  NOTE(review): presumably supports nested Kconfig file
+ * inclusion, with zconf_endfile() popping back to parent — confirm
+ * against the .l rules (not visible in this chunk).
+ */
+struct buffer {
+ struct buffer *parent;
+ YY_BUFFER_STATE state;
+};
+
+/* Innermost (currently scanned) input buffer. */
+struct buffer *current_buf;
+
+static int last_ts, first_ts;
+
+static void zconf_endhelp(void);
+static struct buffer *zconf_endfile(void);
+
+/* Begin accumulating a fresh string in the module-global
+ * text/text_ptr/text_size/text_asize state.  The buffer starts at
+ * START_STRSIZE bytes, is kept NUL-terminated, and grows on demand in
+ * append_string().
+ * NOTE(review): the malloc() result is not checked — on allocation
+ * failure the *text_ptr store below dereferences NULL.
+ */
+void new_string(void)
+{
+ text = malloc(START_STRSIZE);
+ text_asize = START_STRSIZE;
+ text_ptr = text;
+ text_size = 0;
+ *text_ptr = 0;
+}
+
+/* Append exactly size bytes of str (need not be NUL-terminated) to the
+ * global text buffer, growing it as needed and keeping the result
+ * NUL-terminated.
+ * NOTE(review): classic "p = realloc(p, n)" anti-pattern — if realloc
+ * fails, the old buffer leaks and text becomes NULL, so the memcpy
+ * below dereferences NULL.  The grow sizes the buffer exactly (no
+ * geometric growth), so repeated appends realloc every call.
+ */
+void append_string(const char *str, int size)
+{
+ int new_size = text_size + size + 1;
+ if (new_size > text_asize) {
+ text = realloc(text, new_size);
+ text_asize = new_size;
+ text_ptr = text + text_size;
+ }
+ memcpy(text_ptr, str, size);
+ text_ptr += size;
+ text_size += size;
+ *text_ptr = 0;
+}
+
+/* Replace the global text pointer with a newly allocated NUL-terminated
+ * copy of the first size bytes of str.  Unlike new_string(), this does
+ * not reset text_ptr/text_size/text_asize, so it must not be mixed with
+ * append_string() without a new_string() in between.
+ * NOTE(review): malloc() result unchecked, and the previous text buffer
+ * is not freed here — presumably ownership passed to the parser earlier;
+ * verify in the grammar actions.
+ */
+void alloc_string(const char *str, int size)
+{
+ text = malloc(size + 1);
+ memcpy(text, str, size);
+ text[size] = 0;
+}
+
+#define INITIAL 0
+#define COMMAND 1
+#define HELP 2
+#define STRING 3
+#define PARAM 4
+
+/* Special case for "unistd.h", since it is non-ANSI. We include it way
+ * down here because we want the user's section 1 to have been scanned first.
+ * The user has a chance to override it with an option.
+ */
+#include <unistd.h>
+
+#ifndef YY_EXTRA_TYPE
+#define YY_EXTRA_TYPE void *
+#endif
+
+/* Macros after this point can all be overridden by user definitions in
+ * section 1.
+ */
+
+#ifndef YY_SKIP_YYWRAP
+#ifdef __cplusplus
+extern "C" int zconfwrap (void );
+#else
+extern int zconfwrap (void );
+#endif
+#endif
+
+ static void yyunput (int c,char *buf_ptr );
+
+#ifndef yytext_ptr
+static void yy_flex_strncpy (char *,yyconst char *,int );
+#endif
+
+#ifdef YY_NEED_STRLEN
+static int yy_flex_strlen (yyconst char * );
+#endif
+
+#ifndef YY_NO_INPUT
+
+#ifdef __cplusplus
+static int yyinput (void );
+#else
+static int input (void );
+#endif
+
+#endif
+
+/* Amount of stuff to slurp up with each read. */
+#ifndef YY_READ_BUF_SIZE
+#define YY_READ_BUF_SIZE 8192
+#endif
+
+/* Copy whatever the last rule matched to the standard output. */
+#ifndef ECHO
+/* This used to be an fputs(), but since the string might contain NUL's,
+ * we now use fwrite().
+ */
+#define ECHO (void) fwrite( zconftext, zconfleng, 1, zconfout )
+#endif
+
+/* Gets input and stuffs it into "buf". number of characters read, or YY_NULL,
+ * is returned in "result".
+ */
+#ifndef YY_INPUT
+#define YY_INPUT(buf,result,max_size) \
+ errno=0; \
+ while ( (result = read( fileno(zconfin), (char *) buf, max_size )) < 0 ) \
+ { \
+ if( errno != EINTR) \
+ { \
+ YY_FATAL_ERROR( "input in flex scanner failed" ); \
+ break; \
+ } \
+ errno=0; \
+ clearerr(zconfin); \
+ }\
+\
+
+#endif
+
+/* No semi-colon after return; correct usage is to write "yyterminate();" -
+ * we don't want an extra ';' after the "return" because that will cause
+ * some compilers to complain about unreachable statements.
+ */
+#ifndef yyterminate
+#define yyterminate() return YY_NULL
+#endif
+
+/* Number of entries by which start-condition stack grows. */
+#ifndef YY_START_STACK_INCR
+#define YY_START_STACK_INCR 25
+#endif
+
+/* Report a fatal error. */
+#ifndef YY_FATAL_ERROR
+#define YY_FATAL_ERROR(msg) yy_fatal_error( msg )
+#endif
+
+/* end tables serialization structures and prototypes */
+
+/* Default declaration of generated scanner - a define so the user can
+ * easily add parameters.
+ */
+#ifndef YY_DECL
+#define YY_DECL_IS_OURS 1
+
+extern int zconflex (void);
+
+#define YY_DECL int zconflex (void)
+#endif /* !YY_DECL */
+
+/* Code executed at the beginning of each rule, after zconftext and zconfleng
+ * have been set up.
+ */
+#ifndef YY_USER_ACTION
+#define YY_USER_ACTION
+#endif
+
+/* Code executed at the end of each rule. */
+#ifndef YY_BREAK
+#define YY_BREAK break;
+#endif
+
+#define YY_RULE_SETUP \
+ YY_USER_ACTION
+
+/** The main scanner function which does all the work.
+ */
+YY_DECL
+{
+ register yy_state_type yy_current_state;
+ register char *yy_cp, *yy_bp;
+ register int yy_act;
+
+ int str = 0;
+ int ts, i;
+
+ if ( (yy_init) )
+ {
+ (yy_init) = 0;
+
+#ifdef YY_USER_INIT
+ YY_USER_INIT;
+#endif
+
+ if ( ! (yy_start) )
+ (yy_start) = 1; /* first start state */
+
+ if ( ! zconfin )
+ zconfin = stdin;
+
+ if ( ! zconfout )
+ zconfout = stdout;
+
+ if ( ! YY_CURRENT_BUFFER ) {
+ zconfensure_buffer_stack ();
+ YY_CURRENT_BUFFER_LVALUE =
+ zconf_create_buffer(zconfin,YY_BUF_SIZE );
+ }
+
+ zconf_load_buffer_state( );
+ }
+
+ while ( 1 ) /* loops until end-of-file is reached */
+ {
+ yy_cp = (yy_c_buf_p);
+
+ /* Support of zconftext. */
+ *yy_cp = (yy_hold_char);
+
+ /* yy_bp points to the position in yy_ch_buf of the start of
+ * the current run.
+ */
+ yy_bp = yy_cp;
+
+ yy_current_state = (yy_start);
+yy_match:
+ while ( (yy_current_state = yy_nxt[yy_current_state][ yy_ec[YY_SC_TO_UI(*yy_cp)] ]) > 0 )
+ ++yy_cp;
+
+ yy_current_state = -yy_current_state;
+
+yy_find_action:
+ yy_act = yy_accept[yy_current_state];
+
+ YY_DO_BEFORE_ACTION;
+
+do_action: /* This label is used only to access EOF actions. */
+
+ switch ( yy_act )
+ { /* beginning of action switch */
+case 1:
+/* rule 1 can match eol */
+YY_RULE_SETUP
+current_file->lineno++;
+ YY_BREAK
+case 2:
+YY_RULE_SETUP
+
+ YY_BREAK
+case 3:
+/* rule 3 can match eol */
+YY_RULE_SETUP
+current_file->lineno++; return T_EOL;
+ YY_BREAK
+case 4:
+YY_RULE_SETUP
+{
+ BEGIN(COMMAND);
+}
+ YY_BREAK
+case 5:
+YY_RULE_SETUP
+{
+ unput(zconftext[0]);
+ BEGIN(COMMAND);
+}
+ YY_BREAK
+
+case 6:
+YY_RULE_SETUP
+BEGIN(PARAM); return T_MAINMENU;
+ YY_BREAK
+case 7:
+YY_RULE_SETUP
+BEGIN(PARAM); return T_MENU;
+ YY_BREAK
+case 8:
+YY_RULE_SETUP
+BEGIN(PARAM); return T_ENDMENU;
+ YY_BREAK
+case 9:
+YY_RULE_SETUP
+BEGIN(PARAM); return T_SOURCE;
+ YY_BREAK
+case 10:
+YY_RULE_SETUP
+BEGIN(PARAM); return T_CHOICE;
+ YY_BREAK
+case 11:
+YY_RULE_SETUP
+BEGIN(PARAM); return T_ENDCHOICE;
+ YY_BREAK
+case 12:
+YY_RULE_SETUP
+BEGIN(PARAM); return T_COMMENT;
+ YY_BREAK
+case 13:
+YY_RULE_SETUP
+BEGIN(PARAM); return T_CONFIG;
+ YY_BREAK
+case 14:
+YY_RULE_SETUP
+BEGIN(PARAM); return T_MENUCONFIG;
+ YY_BREAK
+case 15:
+YY_RULE_SETUP
+BEGIN(PARAM); return T_HELP;
+ YY_BREAK
+case 16:
+YY_RULE_SETUP
+BEGIN(PARAM); return T_IF;
+ YY_BREAK
+case 17:
+YY_RULE_SETUP
+BEGIN(PARAM); return T_ENDIF;
+ YY_BREAK
+case 18:
+YY_RULE_SETUP
+BEGIN(PARAM); return T_DEPENDS;
+ YY_BREAK
+case 19:
+YY_RULE_SETUP
+BEGIN(PARAM); return T_REQUIRES;
+ YY_BREAK
+case 20:
+YY_RULE_SETUP
+BEGIN(PARAM); return T_OPTIONAL;
+ YY_BREAK
+case 21:
+YY_RULE_SETUP
+BEGIN(PARAM); return T_DEFAULT;
+ YY_BREAK
+case 22:
+YY_RULE_SETUP
+BEGIN(PARAM); return T_PROMPT;
+ YY_BREAK
+case 23:
+YY_RULE_SETUP
+BEGIN(PARAM); return T_TRISTATE;
+ YY_BREAK
+case 24:
+YY_RULE_SETUP
+BEGIN(PARAM); return T_DEF_TRISTATE;
+ YY_BREAK
+case 25:
+YY_RULE_SETUP
+BEGIN(PARAM); return T_BOOLEAN;
+ YY_BREAK
+case 26:
+YY_RULE_SETUP
+BEGIN(PARAM); return T_BOOLEAN;
+ YY_BREAK
+case 27:
+YY_RULE_SETUP
+BEGIN(PARAM); return T_DEF_BOOLEAN;
+ YY_BREAK
+case 28:
+YY_RULE_SETUP
+BEGIN(PARAM); return T_DEF_BOOLEAN;
+ YY_BREAK
+case 29:
+YY_RULE_SETUP
+BEGIN(PARAM); return T_INT;
+ YY_BREAK
+case 30:
+YY_RULE_SETUP
+BEGIN(PARAM); return T_HEX;
+ YY_BREAK
+case 31:
+YY_RULE_SETUP
+BEGIN(PARAM); return T_STRING;
+ YY_BREAK
+case 32:
+YY_RULE_SETUP
+BEGIN(PARAM); return T_SELECT;
+ YY_BREAK
+case 33:
+YY_RULE_SETUP
+BEGIN(PARAM); return T_SELECT;
+ YY_BREAK
+case 34:
+YY_RULE_SETUP
+BEGIN(PARAM); return T_RANGE;
+ YY_BREAK
+case 35:
+YY_RULE_SETUP
+{
+ alloc_string(zconftext, zconfleng);
+ zconflval.string = text;
+ return T_WORD;
+ }
+ YY_BREAK
+case 36:
+YY_RULE_SETUP
+
+ YY_BREAK
+case 37:
+/* rule 37 can match eol */
+YY_RULE_SETUP
+current_file->lineno++; BEGIN(INITIAL);
+ YY_BREAK
+
+case 38:
+YY_RULE_SETUP
+return T_AND;
+ YY_BREAK
+case 39:
+YY_RULE_SETUP
+return T_OR;
+ YY_BREAK
+case 40:
+YY_RULE_SETUP
+return T_OPEN_PAREN;
+ YY_BREAK
+case 41:
+YY_RULE_SETUP
+return T_CLOSE_PAREN;
+ YY_BREAK
+case 42:
+YY_RULE_SETUP
+return T_NOT;
+ YY_BREAK
+case 43:
+YY_RULE_SETUP
+return T_EQUAL;
+ YY_BREAK
+case 44:
+YY_RULE_SETUP
+return T_UNEQUAL;
+ YY_BREAK
+case 45:
+YY_RULE_SETUP
+return T_IF;
+ YY_BREAK
+case 46:
+YY_RULE_SETUP
+return T_ON;
+ YY_BREAK
+case 47:
+YY_RULE_SETUP
+{
+ str = zconftext[0];
+ new_string();
+ BEGIN(STRING);
+ }
+ YY_BREAK
+case 48:
+/* rule 48 can match eol */
+YY_RULE_SETUP
+BEGIN(INITIAL); current_file->lineno++; return T_EOL;
+ YY_BREAK
+case 49:
+YY_RULE_SETUP
+/* ignore */
+ YY_BREAK
+case 50:
+YY_RULE_SETUP
+{
+ alloc_string(zconftext, zconfleng);
+ zconflval.string = text;
+ return T_WORD;
+ }
+ YY_BREAK
+case 51:
+YY_RULE_SETUP
+/* comment */
+ YY_BREAK
+case 52:
+/* rule 52 can match eol */
+YY_RULE_SETUP
+current_file->lineno++;
+ YY_BREAK
+case 53:
+YY_RULE_SETUP
+
+ YY_BREAK
+case YY_STATE_EOF(PARAM):
+{
+ BEGIN(INITIAL);
+ }
+ YY_BREAK
+
+case 54:
+/* rule 54 can match eol */
+*yy_cp = (yy_hold_char); /* undo effects of setting up zconftext */
+(yy_c_buf_p) = yy_cp -= 1;
+YY_DO_BEFORE_ACTION; /* set up zconftext again */
+YY_RULE_SETUP
+{
+ append_string(zconftext, zconfleng);
+ zconflval.string = text;
+ return T_WORD_QUOTE;
+ }
+ YY_BREAK
+case 55:
+YY_RULE_SETUP
+{
+ append_string(zconftext, zconfleng);
+ }
+ YY_BREAK
+case 56:
+/* rule 56 can match eol */
+*yy_cp = (yy_hold_char); /* undo effects of setting up zconftext */
+(yy_c_buf_p) = yy_cp -= 1;
+YY_DO_BEFORE_ACTION; /* set up zconftext again */
+YY_RULE_SETUP
+{
+ append_string(zconftext + 1, zconfleng - 1);
+ zconflval.string = text;
+ return T_WORD_QUOTE;
+ }
+ YY_BREAK
+case 57:
+YY_RULE_SETUP
+{
+ append_string(zconftext + 1, zconfleng - 1);
+ }
+ YY_BREAK
+case 58:
+YY_RULE_SETUP
+{
+ if (str == zconftext[0]) {
+ BEGIN(PARAM);
+ zconflval.string = text;
+ return T_WORD_QUOTE;
+ } else
+ append_string(zconftext, 1);
+ }
+ YY_BREAK
+case 59:
+/* rule 59 can match eol */
+YY_RULE_SETUP
+{
+ printf("%s:%d:warning: multi-line strings not supported\n", zconf_curname(), zconf_lineno());
+ current_file->lineno++;
+ BEGIN(INITIAL);
+ return T_EOL;
+ }
+ YY_BREAK
+case YY_STATE_EOF(STRING):
+{
+ BEGIN(INITIAL);
+ }
+ YY_BREAK
+
+case 60:
+YY_RULE_SETUP
+{
+ ts = 0;
+ for (i = 0; i < zconfleng; i++) {
+ if (zconftext[i] == '\t')
+ ts = (ts & ~7) + 8;
+ else
+ ts++;
+ }
+ last_ts = ts;
+ if (first_ts) {
+ if (ts < first_ts) {
+ zconf_endhelp();
+ return T_HELPTEXT;
+ }
+ ts -= first_ts;
+ while (ts > 8) {
+ append_string(" ", 8);
+ ts -= 8;
+ }
+ append_string(" ", ts);
+ }
+ }
+ YY_BREAK
+case 61:
+/* rule 61 can match eol */
+*yy_cp = (yy_hold_char); /* undo effects of setting up zconftext */
+(yy_c_buf_p) = yy_cp -= 1;
+YY_DO_BEFORE_ACTION; /* set up zconftext again */
+YY_RULE_SETUP
+{
+ current_file->lineno++;
+ zconf_endhelp();
+ return T_HELPTEXT;
+ }
+ YY_BREAK
+case 62:
+/* rule 62 can match eol */
+YY_RULE_SETUP
+{
+ current_file->lineno++;
+ append_string("\n", 1);
+ }
+ YY_BREAK
+case 63:
+YY_RULE_SETUP
+{
+ append_string(zconftext, zconfleng);
+ if (!first_ts)
+ first_ts = last_ts;
+ }
+ YY_BREAK
+case YY_STATE_EOF(HELP):
+{
+ zconf_endhelp();
+ return T_HELPTEXT;
+ }
+ YY_BREAK
+
+case YY_STATE_EOF(INITIAL):
+case YY_STATE_EOF(COMMAND):
+{
+ if (current_buf) {
+ zconf_endfile();
+ return T_EOF;
+ }
+ fclose(zconfin);
+ yyterminate();
+}
+ YY_BREAK
+case 64:
+YY_RULE_SETUP
+YY_FATAL_ERROR( "flex scanner jammed" );
+ YY_BREAK
+
+ case YY_END_OF_BUFFER:
+ {
+ /* Amount of text matched not including the EOB char. */
+ int yy_amount_of_matched_text = (int) (yy_cp - (yytext_ptr)) - 1;
+
+ /* Undo the effects of YY_DO_BEFORE_ACTION. */
+ *yy_cp = (yy_hold_char);
+ YY_RESTORE_YY_MORE_OFFSET
+
+ if ( YY_CURRENT_BUFFER_LVALUE->yy_buffer_status == YY_BUFFER_NEW )
+ {
+ /* We're scanning a new file or input source. It's
+ * possible that this happened because the user
+ * just pointed zconfin at a new source and called
+ * zconflex(). If so, then we have to assure
+ * consistency between YY_CURRENT_BUFFER and our
+ * globals. Here is the right place to do so, because
+ * this is the first action (other than possibly a
+ * back-up) that will match for the new input source.
+ */
+ (yy_n_chars) = YY_CURRENT_BUFFER_LVALUE->yy_n_chars;
+ YY_CURRENT_BUFFER_LVALUE->yy_input_file = zconfin;
+ YY_CURRENT_BUFFER_LVALUE->yy_buffer_status = YY_BUFFER_NORMAL;
+ }
+
+ /* Note that here we test for yy_c_buf_p "<=" to the position
+ * of the first EOB in the buffer, since yy_c_buf_p will
+ * already have been incremented past the NUL character
+ * (since all states make transitions on EOB to the
+ * end-of-buffer state). Contrast this with the test
+ * in input().
+ */
+ if ( (yy_c_buf_p) <= &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)] )
+ { /* This was really a NUL. */
+ yy_state_type yy_next_state;
+
+ (yy_c_buf_p) = (yytext_ptr) + yy_amount_of_matched_text;
+
+ yy_current_state = yy_get_previous_state( );
+
+ /* Okay, we're now positioned to make the NUL
+ * transition. We couldn't have
+ * yy_get_previous_state() go ahead and do it
+ * for us because it doesn't know how to deal
+ * with the possibility of jamming (and we don't
+ * want to build jamming into it because then it
+ * will run more slowly).
+ */
+
+ yy_next_state = yy_try_NUL_trans( yy_current_state );
+
+ yy_bp = (yytext_ptr) + YY_MORE_ADJ;
+
+ if ( yy_next_state )
+ {
+ /* Consume the NUL. */
+ yy_cp = ++(yy_c_buf_p);
+ yy_current_state = yy_next_state;
+ goto yy_match;
+ }
+
+ else
+ {
+ yy_cp = (yy_c_buf_p);
+ goto yy_find_action;
+ }
+ }
+
+ else switch ( yy_get_next_buffer( ) )
+ {
+ case EOB_ACT_END_OF_FILE:
+ {
+ (yy_did_buffer_switch_on_eof) = 0;
+
+ if ( zconfwrap( ) )
+ {
+ /* Note: because we've taken care in
+ * yy_get_next_buffer() to have set up
+ * zconftext, we can now set up
+ * yy_c_buf_p so that if some total
+ * hoser (like flex itself) wants to
+ * call the scanner after we return the
+ * YY_NULL, it'll still work - another
+ * YY_NULL will get returned.
+ */
+ (yy_c_buf_p) = (yytext_ptr) + YY_MORE_ADJ;
+
+ yy_act = YY_STATE_EOF(YY_START);
+ goto do_action;
+ }
+
+ else
+ {
+ if ( ! (yy_did_buffer_switch_on_eof) )
+ YY_NEW_FILE;
+ }
+ break;
+ }
+
+ case EOB_ACT_CONTINUE_SCAN:
+ (yy_c_buf_p) =
+ (yytext_ptr) + yy_amount_of_matched_text;
+
+ yy_current_state = yy_get_previous_state( );
+
+ yy_cp = (yy_c_buf_p);
+ yy_bp = (yytext_ptr) + YY_MORE_ADJ;
+ goto yy_match;
+
+ case EOB_ACT_LAST_MATCH:
+ (yy_c_buf_p) =
+ &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)];
+
+ yy_current_state = yy_get_previous_state( );
+
+ yy_cp = (yy_c_buf_p);
+ yy_bp = (yytext_ptr) + YY_MORE_ADJ;
+ goto yy_find_action;
+ }
+ break;
+ }
+
+ default:
+ YY_FATAL_ERROR(
+ "fatal flex scanner internal error--no action found" );
+ } /* end of action switch */
+ } /* end of scanning one token */
+} /* end of zconflex */
+
+/* yy_get_next_buffer - try to read in a new buffer
+ *
+ * Returns a code representing an action:
+ * EOB_ACT_LAST_MATCH -
+ * EOB_ACT_CONTINUE_SCAN - continue scanning from current position
+ * EOB_ACT_END_OF_FILE - end of file
+ */
+static int yy_get_next_buffer (void)
+{
+ register char *dest = YY_CURRENT_BUFFER_LVALUE->yy_ch_buf;
+ register char *source = (yytext_ptr);
+ register int number_to_move, i;
+ int ret_val;
+
+ if ( (yy_c_buf_p) > &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars) + 1] )
+ YY_FATAL_ERROR(
+ "fatal flex scanner internal error--end of buffer missed" );
+
+ if ( YY_CURRENT_BUFFER_LVALUE->yy_fill_buffer == 0 )
+ { /* Don't try to fill the buffer, so this is an EOF. */
+ if ( (yy_c_buf_p) - (yytext_ptr) - YY_MORE_ADJ == 1 )
+ {
+ /* We matched a single character, the EOB, so
+ * treat this as a final EOF.
+ */
+ return EOB_ACT_END_OF_FILE;
+ }
+
+ else
+ {
+ /* We matched some text prior to the EOB, first
+ * process it.
+ */
+ return EOB_ACT_LAST_MATCH;
+ }
+ }
+
+ /* Try to read more data. */
+
+ /* First move last chars to start of buffer. */
+ number_to_move = (int) ((yy_c_buf_p) - (yytext_ptr)) - 1;
+
+ for ( i = 0; i < number_to_move; ++i )
+ *(dest++) = *(source++);
+
+ if ( YY_CURRENT_BUFFER_LVALUE->yy_buffer_status == YY_BUFFER_EOF_PENDING )
+ /* don't do the read, it's not guaranteed to return an EOF,
+ * just force an EOF
+ */
+ YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars) = 0;
+
+ else
+ {
+ size_t num_to_read =
+ YY_CURRENT_BUFFER_LVALUE->yy_buf_size - number_to_move - 1;
+
+ while ( num_to_read <= 0 )
+ { /* Not enough room in the buffer - grow it. */
+
+ /* just a shorter name for the current buffer */
+ YY_BUFFER_STATE b = YY_CURRENT_BUFFER;
+
+ int yy_c_buf_p_offset =
+ (int) ((yy_c_buf_p) - b->yy_ch_buf);
+
+ if ( b->yy_is_our_buffer )
+ {
+ int new_size = b->yy_buf_size * 2;
+
+ if ( new_size <= 0 )
+ b->yy_buf_size += b->yy_buf_size / 8;
+ else
+ b->yy_buf_size *= 2;
+
+ b->yy_ch_buf = (char *)
+ /* Include room in for 2 EOB chars. */
+ zconfrealloc((void *) b->yy_ch_buf,b->yy_buf_size + 2 );
+ }
+ else
+ /* Can't grow it, we don't own it. */
+ b->yy_ch_buf = 0;
+
+ if ( ! b->yy_ch_buf )
+ YY_FATAL_ERROR(
+ "fatal error - scanner input buffer overflow" );
+
+ (yy_c_buf_p) = &b->yy_ch_buf[yy_c_buf_p_offset];
+
+ num_to_read = YY_CURRENT_BUFFER_LVALUE->yy_buf_size -
+ number_to_move - 1;
+
+ }
+
+ if ( num_to_read > YY_READ_BUF_SIZE )
+ num_to_read = YY_READ_BUF_SIZE;
+
+ /* Read in more data. */
+ YY_INPUT( (&YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[number_to_move]),
+ (yy_n_chars), num_to_read );
+
+ YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars);
+ }
+
+ if ( (yy_n_chars) == 0 )
+ {
+ if ( number_to_move == YY_MORE_ADJ )
+ {
+ ret_val = EOB_ACT_END_OF_FILE;
+ zconfrestart(zconfin );
+ }
+
+ else
+ {
+ ret_val = EOB_ACT_LAST_MATCH;
+ YY_CURRENT_BUFFER_LVALUE->yy_buffer_status =
+ YY_BUFFER_EOF_PENDING;
+ }
+ }
+
+ else
+ ret_val = EOB_ACT_CONTINUE_SCAN;
+
+ (yy_n_chars) += number_to_move;
+ YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)] = YY_END_OF_BUFFER_CHAR;
+ YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars) + 1] = YY_END_OF_BUFFER_CHAR;
+
+ (yytext_ptr) = &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[0];
+
+ return ret_val;
+}
+
+/* yy_get_previous_state - get the state just before the EOB char was reached */
+
+ static yy_state_type yy_get_previous_state (void)
+{
+ register yy_state_type yy_current_state;
+ register char *yy_cp;
+
+ yy_current_state = (yy_start);
+
+ for ( yy_cp = (yytext_ptr) + YY_MORE_ADJ; yy_cp < (yy_c_buf_p); ++yy_cp )
+ {
+ yy_current_state = yy_nxt[yy_current_state][(*yy_cp ? yy_ec[YY_SC_TO_UI(*yy_cp)] : 1)];
+ }
+
+ return yy_current_state;
+}
+
+/* yy_try_NUL_trans - try to make a transition on the NUL character
+ *
+ * synopsis
+ * next_state = yy_try_NUL_trans( current_state );
+ */
+ static yy_state_type yy_try_NUL_trans (yy_state_type yy_current_state )
+{
+ register int yy_is_jam;
+
+ yy_current_state = yy_nxt[yy_current_state][1];
+ yy_is_jam = (yy_current_state <= 0);
+
+ return yy_is_jam ? 0 : yy_current_state;
+}
+
+ static void yyunput (int c, register char * yy_bp )
+{
+ register char *yy_cp;
+
+ yy_cp = (yy_c_buf_p);
+
+ /* undo effects of setting up zconftext */
+ *yy_cp = (yy_hold_char);
+
+ if ( yy_cp < YY_CURRENT_BUFFER_LVALUE->yy_ch_buf + 2 )
+ { /* need to shift things up to make room */
+ /* +2 for EOB chars. */
+ register int number_to_move = (yy_n_chars) + 2;
+ register char *dest = &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[
+ YY_CURRENT_BUFFER_LVALUE->yy_buf_size + 2];
+ register char *source =
+ &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[number_to_move];
+
+ while ( source > YY_CURRENT_BUFFER_LVALUE->yy_ch_buf )
+ *--dest = *--source;
+
+ yy_cp += (int) (dest - source);
+ yy_bp += (int) (dest - source);
+ YY_CURRENT_BUFFER_LVALUE->yy_n_chars =
+ (yy_n_chars) = YY_CURRENT_BUFFER_LVALUE->yy_buf_size;
+
+ if ( yy_cp < YY_CURRENT_BUFFER_LVALUE->yy_ch_buf + 2 )
+ YY_FATAL_ERROR( "flex scanner push-back overflow" );
+ }
+
+ *--yy_cp = (char) c;
+
+ (yytext_ptr) = yy_bp;
+ (yy_hold_char) = *yy_cp;
+ (yy_c_buf_p) = yy_cp;
+}
+
+#ifndef YY_NO_INPUT
+#ifdef __cplusplus
+ static int yyinput (void)
+#else
+ static int input (void)
+#endif
+
+{
+ int c;
+
+ *(yy_c_buf_p) = (yy_hold_char);
+
+ if ( *(yy_c_buf_p) == YY_END_OF_BUFFER_CHAR )
+ {
+ /* yy_c_buf_p now points to the character we want to return.
+ * If this occurs *before* the EOB characters, then it's a
+ * valid NUL; if not, then we've hit the end of the buffer.
+ */
+ if ( (yy_c_buf_p) < &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)] )
+ /* This was really a NUL. */
+ *(yy_c_buf_p) = '\0';
+
+ else
+ { /* need more input */
+ int offset = (yy_c_buf_p) - (yytext_ptr);
+ ++(yy_c_buf_p);
+
+ switch ( yy_get_next_buffer( ) )
+ {
+ case EOB_ACT_LAST_MATCH:
+ /* This happens because yy_g_n_b()
+ * sees that we've accumulated a
+ * token and flags that we need to
+ * try matching the token before
+ * proceeding. But for input(),
+ * there's no matching to consider.
+ * So convert the EOB_ACT_LAST_MATCH
+ * to EOB_ACT_END_OF_FILE.
+ */
+
+ /* Reset buffer status. */
+ zconfrestart(zconfin );
+
+ /*FALLTHROUGH*/
+
+ case EOB_ACT_END_OF_FILE:
+ {
+ if ( zconfwrap( ) )
+ return EOF;
+
+ if ( ! (yy_did_buffer_switch_on_eof) )
+ YY_NEW_FILE;
+#ifdef __cplusplus
+ return yyinput();
+#else
+ return input();
+#endif
+ }
+
+ case EOB_ACT_CONTINUE_SCAN:
+ (yy_c_buf_p) = (yytext_ptr) + offset;
+ break;
+ }
+ }
+ }
+
+ c = *(unsigned char *) (yy_c_buf_p); /* cast for 8-bit char's */
+ *(yy_c_buf_p) = '\0'; /* preserve zconftext */
+ (yy_hold_char) = *++(yy_c_buf_p);
+
+ return c;
+}
+#endif /* ifndef YY_NO_INPUT */
+
+/** Immediately switch to a different input stream.
+ * @param input_file A readable stream.
+ *
+ * @note This function does not reset the start condition to @c INITIAL .
+ */
+ void zconfrestart (FILE * input_file )
+{
+
+ if ( ! YY_CURRENT_BUFFER ){
+ zconfensure_buffer_stack ();
+ YY_CURRENT_BUFFER_LVALUE =
+ zconf_create_buffer(zconfin,YY_BUF_SIZE );
+ }
+
+ zconf_init_buffer(YY_CURRENT_BUFFER,input_file );
+ zconf_load_buffer_state( );
+}
+
+/** Switch to a different input buffer.
+ * @param new_buffer The new input buffer.
+ *
+ */
+ void zconf_switch_to_buffer (YY_BUFFER_STATE new_buffer )
+{
+
+ /* TODO. We should be able to replace this entire function body
+ * with
+ * zconfpop_buffer_state();
+ * zconfpush_buffer_state(new_buffer);
+ */
+ zconfensure_buffer_stack ();
+ if ( YY_CURRENT_BUFFER == new_buffer )
+ return;
+
+ if ( YY_CURRENT_BUFFER )
+ {
+ /* Flush out information for old buffer. */
+ *(yy_c_buf_p) = (yy_hold_char);
+ YY_CURRENT_BUFFER_LVALUE->yy_buf_pos = (yy_c_buf_p);
+ YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars);
+ }
+
+ YY_CURRENT_BUFFER_LVALUE = new_buffer;
+ zconf_load_buffer_state( );
+
+ /* We don't actually know whether we did this switch during
+ * EOF (zconfwrap()) processing, but the only time this flag
+ * is looked at is after zconfwrap() is called, so it's safe
+ * to go ahead and always set it.
+ */
+ (yy_did_buffer_switch_on_eof) = 1;
+}
+
+static void zconf_load_buffer_state (void)
+{
+ (yy_n_chars) = YY_CURRENT_BUFFER_LVALUE->yy_n_chars;
+ (yytext_ptr) = (yy_c_buf_p) = YY_CURRENT_BUFFER_LVALUE->yy_buf_pos;
+ zconfin = YY_CURRENT_BUFFER_LVALUE->yy_input_file;
+ (yy_hold_char) = *(yy_c_buf_p);
+}
+
+/** Allocate and initialize an input buffer state.
+ * @param file A readable stream.
+ * @param size The character buffer size in bytes. When in doubt, use @c YY_BUF_SIZE.
+ *
+ * @return the allocated buffer state.
+ */
+ YY_BUFFER_STATE zconf_create_buffer (FILE * file, int size )
+{
+ YY_BUFFER_STATE b;
+
+ b = (YY_BUFFER_STATE) zconfalloc(sizeof( struct yy_buffer_state ) );
+ if ( ! b )
+ YY_FATAL_ERROR( "out of dynamic memory in zconf_create_buffer()" );
+
+ b->yy_buf_size = size;
+
+ /* yy_ch_buf has to be 2 characters longer than the size given because
+ * we need to put in 2 end-of-buffer characters.
+ */
+ b->yy_ch_buf = (char *) zconfalloc(b->yy_buf_size + 2 );
+ if ( ! b->yy_ch_buf )
+ YY_FATAL_ERROR( "out of dynamic memory in zconf_create_buffer()" );
+
+ b->yy_is_our_buffer = 1;
+
+ zconf_init_buffer(b,file );
+
+ return b;
+}
+
+/** Destroy the buffer.
+ * @param b a buffer created with zconf_create_buffer()
+ *
+ */
+ void zconf_delete_buffer (YY_BUFFER_STATE b )
+{
+
+ if ( ! b )
+ return;
+
+ if ( b == YY_CURRENT_BUFFER ) /* Not sure if we should pop here. */
+ YY_CURRENT_BUFFER_LVALUE = (YY_BUFFER_STATE) 0;
+
+ if ( b->yy_is_our_buffer )
+ zconffree((void *) b->yy_ch_buf );
+
+ zconffree((void *) b );
+}
+
+/* Initializes or reinitializes a buffer.
+ * This function is sometimes called more than once on the same buffer,
+ * such as during a zconfrestart() or at EOF.
+ */
+ static void zconf_init_buffer (YY_BUFFER_STATE b, FILE * file )
+
+{
+ int oerrno = errno;
+
+ zconf_flush_buffer(b );
+
+ b->yy_input_file = file;
+ b->yy_fill_buffer = 1;
+
+ /* If b is the current buffer, then zconf_init_buffer was _probably_
+ * called from zconfrestart() or through yy_get_next_buffer.
+ * In that case, we don't want to reset the lineno or column.
+ */
+ if (b != YY_CURRENT_BUFFER){
+ b->yy_bs_lineno = 1;
+ b->yy_bs_column = 0;
+ }
+
+ b->yy_is_interactive = 0;
+
+ errno = oerrno;
+}
+
+/** Discard all buffered characters. On the next scan, YY_INPUT will be called.
+ * @param b the buffer state to be flushed, usually @c YY_CURRENT_BUFFER.
+ *
+ */
+ void zconf_flush_buffer (YY_BUFFER_STATE b )
+{
+ if ( ! b )
+ return;
+
+ b->yy_n_chars = 0;
+
+ /* We always need two end-of-buffer characters. The first causes
+ * a transition to the end-of-buffer state. The second causes
+ * a jam in that state.
+ */
+ b->yy_ch_buf[0] = YY_END_OF_BUFFER_CHAR;
+ b->yy_ch_buf[1] = YY_END_OF_BUFFER_CHAR;
+
+ b->yy_buf_pos = &b->yy_ch_buf[0];
+
+ b->yy_at_bol = 1;
+ b->yy_buffer_status = YY_BUFFER_NEW;
+
+ if ( b == YY_CURRENT_BUFFER )
+ zconf_load_buffer_state( );
+}
+
+/** Pushes the new state onto the stack. The new state becomes
+ * the current state. This function will allocate the stack
+ * if necessary.
+ * @param new_buffer The new state.
+ *
+ */
+void zconfpush_buffer_state (YY_BUFFER_STATE new_buffer )
+{
+ if (new_buffer == NULL)
+ return;
+
+ zconfensure_buffer_stack();
+
+ /* This block is copied from zconf_switch_to_buffer. */
+ if ( YY_CURRENT_BUFFER )
+ {
+ /* Flush out information for old buffer. */
+ *(yy_c_buf_p) = (yy_hold_char);
+ YY_CURRENT_BUFFER_LVALUE->yy_buf_pos = (yy_c_buf_p);
+ YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars);
+ }
+
+ /* Only push if top exists. Otherwise, replace top. */
+ if (YY_CURRENT_BUFFER)
+ (yy_buffer_stack_top)++;
+ YY_CURRENT_BUFFER_LVALUE = new_buffer;
+
+ /* copied from zconf_switch_to_buffer. */
+ zconf_load_buffer_state( );
+ (yy_did_buffer_switch_on_eof) = 1;
+}
+
+/** Removes and deletes the top of the stack, if present.
+ * The next element becomes the new top.
+ *
+ */
+void zconfpop_buffer_state (void)
+{
+ if (!YY_CURRENT_BUFFER)
+ return;
+
+ zconf_delete_buffer(YY_CURRENT_BUFFER );
+ YY_CURRENT_BUFFER_LVALUE = NULL;
+ if ((yy_buffer_stack_top) > 0)
+ --(yy_buffer_stack_top);
+
+ if (YY_CURRENT_BUFFER) {
+ zconf_load_buffer_state( );
+ (yy_did_buffer_switch_on_eof) = 1;
+ }
+}
+
+/* Allocates the stack if it does not exist.
+ * Guarantees space for at least one push.
+ */
+static void zconfensure_buffer_stack (void)
+{
+ int num_to_alloc;
+
+ if (!(yy_buffer_stack)) {
+
+ /* First allocation is just for 2 elements, since we don't know if this
+ * scanner will even need a stack. We use 2 instead of 1 to avoid an
+ * immediate realloc on the next call.
+ */
+ num_to_alloc = 1;
+ (yy_buffer_stack) = (struct yy_buffer_state**)zconfalloc
+ (num_to_alloc * sizeof(struct yy_buffer_state*)
+ );
+
+ memset((yy_buffer_stack), 0, num_to_alloc * sizeof(struct yy_buffer_state*));
+
+ (yy_buffer_stack_max) = num_to_alloc;
+ (yy_buffer_stack_top) = 0;
+ return;
+ }
+
+ if ((yy_buffer_stack_top) >= ((yy_buffer_stack_max)) - 1){
+
+ /* Increase the buffer to prepare for a possible push. */
+ int grow_size = 8 /* arbitrary grow size */;
+
+ num_to_alloc = (yy_buffer_stack_max) + grow_size;
+ (yy_buffer_stack) = (struct yy_buffer_state**)zconfrealloc
+ ((yy_buffer_stack),
+ num_to_alloc * sizeof(struct yy_buffer_state*)
+ );
+
+ /* zero only the new slots.*/
+ memset((yy_buffer_stack) + (yy_buffer_stack_max), 0, grow_size * sizeof(struct yy_buffer_state*));
+ (yy_buffer_stack_max) = num_to_alloc;
+ }
+}
+
+/** Setup the input buffer state to scan directly from a user-specified character buffer.
+ * @param base the character buffer
+ * @param size the size in bytes of the character buffer
+ *
+ * @return the newly allocated buffer state object.
+ */
+YY_BUFFER_STATE zconf_scan_buffer (char * base, yy_size_t size )
+{
+ YY_BUFFER_STATE b;
+
+ if ( size < 2 ||
+ base[size-2] != YY_END_OF_BUFFER_CHAR ||
+ base[size-1] != YY_END_OF_BUFFER_CHAR )
+ /* They forgot to leave room for the EOB's. */
+ return 0;
+
+ b = (YY_BUFFER_STATE) zconfalloc(sizeof( struct yy_buffer_state ) );
+ if ( ! b )
+ YY_FATAL_ERROR( "out of dynamic memory in zconf_scan_buffer()" );
+
+ b->yy_buf_size = size - 2; /* "- 2" to take care of EOB's */
+ b->yy_buf_pos = b->yy_ch_buf = base;
+ b->yy_is_our_buffer = 0;
+ b->yy_input_file = 0;
+ b->yy_n_chars = b->yy_buf_size;
+ b->yy_is_interactive = 0;
+ b->yy_at_bol = 1;
+ b->yy_fill_buffer = 0;
+ b->yy_buffer_status = YY_BUFFER_NEW;
+
+ zconf_switch_to_buffer(b );
+
+ return b;
+}
+
+/** Setup the input buffer state to scan a string. The next call to zconflex() will
+ * scan from a @e copy of @a str.
+ * @param str a NUL-terminated string to scan
+ *
+ * @return the newly allocated buffer state object.
+ * @note If you want to scan bytes that may contain NUL values, then use
+ * zconf_scan_bytes() instead.
+ */
+YY_BUFFER_STATE zconf_scan_string (yyconst char * str )
+{
+
+ return zconf_scan_bytes(str,strlen(str) );
+}
+
+/** Setup the input buffer state to scan the given bytes. The next call to zconflex() will
+ * scan from a @e copy of @a bytes.
+ * @param bytes the byte buffer to scan
+ * @param len the number of bytes in the buffer pointed to by @a bytes.
+ *
+ * @return the newly allocated buffer state object.
+ */
+YY_BUFFER_STATE zconf_scan_bytes (yyconst char * bytes, int len )
+{
+ YY_BUFFER_STATE b;
+ char *buf;
+ yy_size_t n;
+ int i;
+
+ /* Get memory for full buffer, including space for trailing EOB's. */
+ n = len + 2;
+ buf = (char *) zconfalloc(n );
+ if ( ! buf )
+ YY_FATAL_ERROR( "out of dynamic memory in zconf_scan_bytes()" );
+
+ for ( i = 0; i < len; ++i )
+ buf[i] = bytes[i];
+
+ buf[len] = buf[len+1] = YY_END_OF_BUFFER_CHAR;
+
+ b = zconf_scan_buffer(buf,n );
+ if ( ! b )
+ YY_FATAL_ERROR( "bad buffer in zconf_scan_bytes()" );
+
+ /* It's okay to grow etc. this buffer, and we should throw it
+ * away when we're done.
+ */
+ b->yy_is_our_buffer = 1;
+
+ return b;
+}
+
+#ifndef YY_EXIT_FAILURE
+#define YY_EXIT_FAILURE 2
+#endif
+
+static void yy_fatal_error (yyconst char* msg )
+{
+ (void) fprintf( stderr, "%s\n", msg );
+ exit( YY_EXIT_FAILURE );
+}
+
+/* Redefine yyless() so it works in section 3 code. */
+
+#undef yyless
+#define yyless(n) \
+ do \
+ { \
+ /* Undo effects of setting up zconftext. */ \
+ int yyless_macro_arg = (n); \
+ YY_LESS_LINENO(yyless_macro_arg);\
+ zconftext[zconfleng] = (yy_hold_char); \
+ (yy_c_buf_p) = zconftext + yyless_macro_arg; \
+ (yy_hold_char) = *(yy_c_buf_p); \
+ *(yy_c_buf_p) = '\0'; \
+ zconfleng = yyless_macro_arg; \
+ } \
+ while ( 0 )
+
+/* Accessor methods (get/set functions) to struct members. */
+
+/** Get the current line number.
+ *
+ */
+int zconfget_lineno (void)
+{
+
+ return zconflineno;
+}
+
+/** Get the input stream.
+ *
+ */
+FILE *zconfget_in (void)
+{
+ return zconfin;
+}
+
+/** Get the output stream.
+ *
+ */
+FILE *zconfget_out (void)
+{
+ return zconfout;
+}
+
+/** Get the length of the current token.
+ *
+ */
+int zconfget_leng (void)
+{
+ return zconfleng;
+}
+
+/** Get the current token.
+ *
+ */
+
+char *zconfget_text (void)
+{
+ return zconftext;
+}
+
+/** Set the current line number.
+ * @param line_number
+ *
+ */
+void zconfset_lineno (int line_number )
+{
+
+ zconflineno = line_number;
+}
+
+/** Set the input stream. This does not discard the current
+ * input buffer.
+ * @param in_str A readable stream.
+ *
+ * @see zconf_switch_to_buffer
+ */
+void zconfset_in (FILE * in_str )
+{
+ zconfin = in_str ;
+}
+
+void zconfset_out (FILE * out_str )
+{
+ zconfout = out_str ;
+}
+
+int zconfget_debug (void)
+{
+ return zconf_flex_debug;
+}
+
+void zconfset_debug (int bdebug )
+{
+ zconf_flex_debug = bdebug ;
+}
+
+/* zconflex_destroy is for both reentrant and non-reentrant scanners. */
+int zconflex_destroy (void)
+{
+
+ /* Pop the buffer stack, destroying each element. */
+ while(YY_CURRENT_BUFFER){
+ zconf_delete_buffer(YY_CURRENT_BUFFER );
+ YY_CURRENT_BUFFER_LVALUE = NULL;
+ zconfpop_buffer_state();
+ }
+
+ /* Destroy the stack itself. */
+ zconffree((yy_buffer_stack) );
+ (yy_buffer_stack) = NULL;
+
+ return 0;
+}
+
+/*
+ * Internal utility routines.
+ */
+
+#ifndef yytext_ptr
+static void yy_flex_strncpy (char* s1, yyconst char * s2, int n )
+{
+ register int i;
+ for ( i = 0; i < n; ++i )
+ s1[i] = s2[i];
+}
+#endif
+
+#ifdef YY_NEED_STRLEN
+static int yy_flex_strlen (yyconst char * s )
+{
+ register int n;
+ for ( n = 0; s[n]; ++n )
+ ;
+
+ return n;
+}
+#endif
+
+void *zconfalloc (yy_size_t size )
+{
+ return (void *) malloc( size );
+}
+
+void *zconfrealloc (void * ptr, yy_size_t size )
+{
+ /* The cast to (char *) in the following accommodates both
+ * implementations that use char* generic pointers, and those
+ * that use void* generic pointers. It works with the latter
+ * because both ANSI C and C++ allow castless assignment from
+ * any pointer type to void*, and deal with argument conversions
+ * as though doing an assignment.
+ */
+ return (void *) realloc( (char *) ptr, size );
+}
+
+void zconffree (void * ptr )
+{
+ free( (char *) ptr ); /* see zconfrealloc() for (char *) cast */
+}
+
+#define YYTABLES_NAME "yytables"
+
+#undef YY_NEW_FILE
+#undef YY_FLUSH_BUFFER
+#undef yy_set_bol
+#undef yy_new_buffer
+#undef yy_set_interactive
+#undef yytext_ptr
+#undef YY_DO_BEFORE_ACTION
+
+#ifdef YY_DECL_IS_OURS
+#undef YY_DECL_IS_OURS
+#undef YY_DECL
+#endif
+
+void zconf_starthelp(void)
+{
+ new_string();
+ last_ts = first_ts = 0;
+ BEGIN(HELP);
+}
+
+static void zconf_endhelp(void)
+{
+ zconflval.string = text;
+ BEGIN(INITIAL);
+}
+
+/*
+ * Try to open specified file with following names:
+ * ./name
+ * $(srctree)/name
+ * The latter is used when srctree is separate from objtree
+ * when compiling the kernel.
+ * Return NULL if file is not found.
+ */
+FILE *zconf_fopen(const char *name)
+{
+ char *env, fullname[PATH_MAX+1];
+ FILE *f;
+
+ f = fopen(name, "r");
+ if (!f && name[0] != '/') {
+ /* Fall back to $(srctree)/name for separate-objdir builds. */
+ env = getenv(SRCTREE);
+ if (env) {
+ /* snprintf instead of sprintf: $SRCTREE comes from the
+ * environment and could overflow fullname[] otherwise.
+ * On truncation the path is invalid, so don't try it. */
+ int n = snprintf(fullname, sizeof(fullname), "%s/%s", env, name);
+ if (n >= 0 && n < (int)sizeof(fullname))
+ f = fopen(fullname, "r");
+ }
+ }
+ return f;
+}
+
+/* Begin scanning the top-level Kconfig file `name`.
+ * Initializes the global buffer/file state (current_buf, current_file)
+ * used by the include-stack machinery. Exits the process on a missing
+ * file or allocation failure. */
+void zconf_initscan(const char *name)
+{
+ zconfin = zconf_fopen(name);
+ if (!zconfin) {
+ printf("can't find file %s\n", name);
+ exit(1);
+ }
+
+ /* calloc: zero-fills and, unlike the previous unchecked
+ * malloc+memset pair, lets us detect allocation failure. */
+ current_buf = calloc(1, sizeof(*current_buf));
+ if (!current_buf) {
+ printf("out of memory\n");
+ exit(1);
+ }
+
+ current_file = file_lookup(name);
+ current_file->lineno = 1;
+ current_file->flags = FILE_BUSY;
+}
+
+/* Push a sourced Kconfig file onto the include stack and switch the
+ * scanner to it. Exits the process on open/allocation failure or on a
+ * recursive or repeated `source` of the same file. */
+void zconf_nextfile(const char *name)
+{
+ struct file *file = file_lookup(name);
+ /* calloc: zero-fills and, unlike the previous unchecked
+ * malloc+memset pair, lets us detect allocation failure. */
+ struct buffer *buf = calloc(1, sizeof(*buf));
+ if (!buf) {
+ printf("out of memory\n");
+ exit(1);
+ }
+
+ /* Save the current flex buffer so zconf_endfile() can resume it. */
+ current_buf->state = YY_CURRENT_BUFFER;
+ zconfin = zconf_fopen(name);
+ if (!zconfin) {
+ printf("%s:%d: can't open file \"%s\"\n", zconf_curname(), zconf_lineno(), name);
+ exit(1);
+ }
+ zconf_switch_to_buffer(zconf_create_buffer(zconfin,YY_BUF_SIZE));
+ buf->parent = current_buf;
+ current_buf = buf;
+
+ /* Sanity checks; both are fatal, so doing them after the buffer
+ * switch does not leak anything the process exit won't reclaim. */
+ if (file->flags & FILE_BUSY) {
+ printf("recursive scan (%s)?\n", name);
+ exit(1);
+ }
+ if (file->flags & FILE_SCANNED) {
+ printf("file %s already scanned?\n", name);
+ exit(1);
+ }
+ file->flags |= FILE_BUSY;
+ file->lineno = 1;
+ file->parent = current_file;
+ current_file = file;
+}
+
+/* Pop the include stack at EOF of the current file.
+ * Marks the finished file FILE_SCANNED, restores the parent file and
+ * flex buffer (if any), and frees the popped buffer record.
+ * Returns the parent buffer, or NULL when the outermost file is done
+ * (the caller uses this to decide whether scanning is finished). */
+static struct buffer *zconf_endfile(void)
+{
+ struct buffer *parent;
+
+ current_file->flags |= FILE_SCANNED;
+ current_file->flags &= ~FILE_BUSY;
+ current_file = current_file->parent;
+
+ parent = current_buf->parent;
+ if (parent) {
+ /* Close the finished file and resume the saved parent buffer. */
+ fclose(zconfin);
+ zconf_delete_buffer(YY_CURRENT_BUFFER);
+ zconf_switch_to_buffer(parent->state);
+ }
+ /* NOTE(review): the outermost zconfin (parent == NULL) is not
+ * fclose'd here -- presumably reclaimed at process exit; confirm. */
+ free(current_buf);
+ current_buf = parent;
+
+ return parent;
+}
+
+int zconf_lineno(void)
+{
+ if (current_buf)
+ return current_file->lineno - 1;
+ else
+ return 0;
+}
+
+char *zconf_curname(void)
+{
+ if (current_buf)
+ return current_file->name;
+ else
+ return "<none>";
+}
+
--- /dev/null
+/* A Bison parser, made by GNU Bison 1.875a. */
+
+/* Skeleton parser for Yacc-like parsing with Bison,
+ Copyright (C) 1984, 1989, 1990, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330,
+ Boston, MA 02111-1307, USA. */
+
+/* As a special exception, when this file is copied by Bison into a
+ Bison output file, you may use that output file without restriction.
+ This special exception was added by the Free Software Foundation
+ in version 1.24 of Bison. */
+
+/* Written by Richard Stallman by simplifying the original so called
+ ``semantic'' parser. */
+
+/* All symbols defined below should begin with yy or YY, to avoid
+ infringing on user name space. This should be done even for local
+ variables, as they might otherwise be expanded by user macros.
+ There are some unavoidable exceptions within include files to
+ define necessary library symbols; they are noted "INFRINGES ON
+ USER NAME SPACE" below. */
+
+/* Identify Bison output. */
+#define YYBISON 1
+
+/* Skeleton name. */
+#define YYSKELETON_NAME "yacc.c"
+
+/* Pure parsers. */
+#define YYPURE 0
+
+/* Using locations. */
+#define YYLSP_NEEDED 0
+
+/* If NAME_PREFIX is specified substitute the variables and functions
+ names. */
+#define yyparse zconfparse
+#define yylex zconflex
+#define yyerror zconferror
+#define yylval zconflval
+#define yychar zconfchar
+#define yydebug zconfdebug
+#define yynerrs zconfnerrs
+
+
+/* Tokens. */
+#ifndef YYTOKENTYPE
+# define YYTOKENTYPE
+ /* Put the tokens into the symbol table, so that GDB and other debuggers
+ know about them. */
+ enum yytokentype {
+ T_MAINMENU = 258,
+ T_MENU = 259,
+ T_ENDMENU = 260,
+ T_SOURCE = 261,
+ T_CHOICE = 262,
+ T_ENDCHOICE = 263,
+ T_COMMENT = 264,
+ T_CONFIG = 265,
+ T_MENUCONFIG = 266,
+ T_HELP = 267,
+ T_HELPTEXT = 268,
+ T_IF = 269,
+ T_ENDIF = 270,
+ T_DEPENDS = 271,
+ T_REQUIRES = 272,
+ T_OPTIONAL = 273,
+ T_PROMPT = 274,
+ T_DEFAULT = 275,
+ T_TRISTATE = 276,
+ T_DEF_TRISTATE = 277,
+ T_BOOLEAN = 278,
+ T_DEF_BOOLEAN = 279,
+ T_STRING = 280,
+ T_INT = 281,
+ T_HEX = 282,
+ T_WORD = 283,
+ T_WORD_QUOTE = 284,
+ T_UNEQUAL = 285,
+ T_EOF = 286,
+ T_EOL = 287,
+ T_CLOSE_PAREN = 288,
+ T_OPEN_PAREN = 289,
+ T_ON = 290,
+ T_SELECT = 291,
+ T_RANGE = 292,
+ T_OR = 293,
+ T_AND = 294,
+ T_EQUAL = 295,
+ T_NOT = 296
+ };
+#endif
+#define T_MAINMENU 258
+#define T_MENU 259
+#define T_ENDMENU 260
+#define T_SOURCE 261
+#define T_CHOICE 262
+#define T_ENDCHOICE 263
+#define T_COMMENT 264
+#define T_CONFIG 265
+#define T_MENUCONFIG 266
+#define T_HELP 267
+#define T_HELPTEXT 268
+#define T_IF 269
+#define T_ENDIF 270
+#define T_DEPENDS 271
+#define T_REQUIRES 272
+#define T_OPTIONAL 273
+#define T_PROMPT 274
+#define T_DEFAULT 275
+#define T_TRISTATE 276
+#define T_DEF_TRISTATE 277
+#define T_BOOLEAN 278
+#define T_DEF_BOOLEAN 279
+#define T_STRING 280
+#define T_INT 281
+#define T_HEX 282
+#define T_WORD 283
+#define T_WORD_QUOTE 284
+#define T_UNEQUAL 285
+#define T_EOF 286
+#define T_EOL 287
+#define T_CLOSE_PAREN 288
+#define T_OPEN_PAREN 289
+#define T_ON 290
+#define T_SELECT 291
+#define T_RANGE 292
+#define T_OR 293
+#define T_AND 294
+#define T_EQUAL 295
+#define T_NOT 296
+
+
+
+
+/* Copy the first part of user declarations. */
+
+
+/*
+ * Copyright (C) 2002 Roman Zippel <zippel@linux-m68k.org>
+ * Released under the terms of the GNU GPL v2.0.
+ */
+
+#include <ctype.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdbool.h>
+
+#define printd(mask, fmt...) if (cdebug & (mask)) printf(fmt)
+
+#define PRINTD 0x0001
+#define DEBUG_PARSE 0x0002
+
+int cdebug = PRINTD;
+
+extern int zconflex(void);
+static void zconfprint(const char *err, ...);
+static void zconferror(const char *err);
+static bool zconf_endtoken(int token, int starttoken, int endtoken);
+
+struct symbol *symbol_hash[257];
+
+#define YYERROR_VERBOSE
+
+
+/* Enabling traces. */
+#ifndef YYDEBUG
+# define YYDEBUG 0
+#endif
+
+/* Enabling verbose error messages. */
+#ifdef YYERROR_VERBOSE
+# undef YYERROR_VERBOSE
+# define YYERROR_VERBOSE 1
+#else
+# define YYERROR_VERBOSE 0
+#endif
+
+#if ! defined (YYSTYPE) && ! defined (YYSTYPE_IS_DECLARED)
+
+typedef union YYSTYPE {
+ int token;
+ char *string;
+ struct symbol *symbol;
+ struct expr *expr;
+ struct menu *menu;
+} YYSTYPE;
+/* Line 191 of yacc.c. */
+
+# define yystype YYSTYPE /* obsolescent; will be withdrawn */
+# define YYSTYPE_IS_DECLARED 1
+# define YYSTYPE_IS_TRIVIAL 1
+#endif
+
+
+
+/* Copy the second part of user declarations. */
+
+
+#define LKC_DIRECT_LINK
+#include "lkc.h"
+
+
+/* Line 214 of yacc.c. */
+
+
+#if ! defined (yyoverflow) || YYERROR_VERBOSE
+
+/* The parser invokes alloca or malloc; define the necessary symbols. */
+
+# if YYSTACK_USE_ALLOCA
+# define YYSTACK_ALLOC alloca
+# else
+# ifndef YYSTACK_USE_ALLOCA
+# if defined (alloca) || defined (_ALLOCA_H)
+# define YYSTACK_ALLOC alloca
+# else
+# ifdef __GNUC__
+# define YYSTACK_ALLOC __builtin_alloca
+# endif
+# endif
+# endif
+# endif
+
+# ifdef YYSTACK_ALLOC
+ /* Pacify GCC's `empty if-body' warning. */
+# define YYSTACK_FREE(Ptr) do { /* empty */; } while (0)
+# else
+# if defined (__STDC__) || defined (__cplusplus)
+# include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
+# define YYSIZE_T size_t
+# endif
+# define YYSTACK_ALLOC malloc
+# define YYSTACK_FREE free
+# endif
+#endif /* ! defined (yyoverflow) || YYERROR_VERBOSE */
+
+
+#if (! defined (yyoverflow) \
+ && (! defined (__cplusplus) \
+ || (YYSTYPE_IS_TRIVIAL)))
+
+/* A type that is properly aligned for any stack member. */
+union yyalloc
+{
+ short yyss;
+ YYSTYPE yyvs;
+ };
+
+/* The size of the maximum gap between one aligned stack and the next. */
+# define YYSTACK_GAP_MAXIMUM (sizeof (union yyalloc) - 1)
+
+/* The size of an array large enough to hold all stacks, each with
+ N elements. */
+# define YYSTACK_BYTES(N) \
+ ((N) * (sizeof (short) + sizeof (YYSTYPE)) \
+ + YYSTACK_GAP_MAXIMUM)
+
+/* Copy COUNT objects from FROM to TO. The source and destination do
+ not overlap. */
+# ifndef YYCOPY
+# if 1 < __GNUC__
+# define YYCOPY(To, From, Count) \
+ __builtin_memcpy (To, From, (Count) * sizeof (*(From)))
+# else
+# define YYCOPY(To, From, Count) \
+ do \
+ { \
+ register YYSIZE_T yyi; \
+ for (yyi = 0; yyi < (Count); yyi++) \
+ (To)[yyi] = (From)[yyi]; \
+ } \
+ while (0)
+# endif
+# endif
+
+/* Relocate STACK from its old location to the new one. The
+ local variables YYSIZE and YYSTACKSIZE give the old and new number of
+ elements in the stack, and YYPTR gives the new location of the
+ stack. Advance YYPTR to a properly aligned location for the next
+ stack. */
+# define YYSTACK_RELOCATE(Stack) \
+ do \
+ { \
+ YYSIZE_T yynewbytes; \
+ YYCOPY (&yyptr->Stack, Stack, yysize); \
+ Stack = &yyptr->Stack; \
+ yynewbytes = yystacksize * sizeof (*Stack) + YYSTACK_GAP_MAXIMUM; \
+ yyptr += yynewbytes / sizeof (*yyptr); \
+ } \
+ while (0)
+
+#endif
+
+#if defined (__STDC__) || defined (__cplusplus)
+ typedef signed char yysigned_char;
+#else
+ typedef short yysigned_char;
+#endif
+
+/* YYFINAL -- State number of the termination state. */
+#define YYFINAL 2
+/* YYLAST -- Last index in YYTABLE. */
+#define YYLAST 201
+
+/* YYNTOKENS -- Number of terminals. */
+#define YYNTOKENS 42
+/* YYNNTS -- Number of nonterminals. */
+#define YYNNTS 41
+/* YYNRULES -- Number of rules. */
+#define YYNRULES 104
+/* YYNRULES -- Number of states. */
+#define YYNSTATES 182
+
+/* YYTRANSLATE(YYLEX) -- Bison symbol number corresponding to YYLEX. */
+#define YYUNDEFTOK 2
+#define YYMAXUTOK 296
+
+#define YYTRANSLATE(YYX) \
+ ((unsigned int) (YYX) <= YYMAXUTOK ? yytranslate[YYX] : YYUNDEFTOK)
+
+/* YYTRANSLATE[YYLEX] -- Bison symbol number corresponding to YYLEX. */
+static const unsigned char yytranslate[] =
+{
+ 0, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 1, 2, 3, 4,
+ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
+ 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+ 35, 36, 37, 38, 39, 40, 41
+};
+
+#if YYDEBUG
+/* YYPRHS[YYN] -- Index of the first RHS symbol of rule number YYN in
+ YYRHS. */
+static const unsigned short yyprhs[] =
+{
+ 0, 0, 3, 4, 7, 9, 11, 13, 17, 19,
+ 21, 23, 26, 28, 30, 32, 34, 36, 38, 42,
+ 45, 49, 52, 53, 56, 59, 62, 65, 69, 74,
+ 78, 83, 87, 91, 95, 100, 105, 110, 116, 119,
+ 122, 124, 128, 131, 132, 135, 138, 141, 144, 149,
+ 153, 157, 160, 165, 166, 169, 173, 175, 179, 182,
+ 183, 186, 189, 192, 196, 199, 201, 205, 208, 209,
+ 212, 215, 218, 222, 226, 228, 232, 235, 238, 241,
+ 242, 245, 248, 253, 257, 261, 262, 265, 267, 269,
+ 272, 275, 278, 280, 282, 283, 286, 288, 292, 296,
+ 300, 303, 307, 311, 313
+};
+
+/* YYRHS -- A `-1'-separated list of the rules' RHS. */
+static const yysigned_char yyrhs[] =
+{
+ 43, 0, -1, -1, 43, 44, -1, 45, -1, 55,
+ -1, 66, -1, 3, 77, 79, -1, 5, -1, 15,
+ -1, 8, -1, 1, 79, -1, 61, -1, 71, -1,
+ 47, -1, 49, -1, 69, -1, 79, -1, 10, 28,
+ 32, -1, 46, 50, -1, 11, 28, 32, -1, 48,
+ 50, -1, -1, 50, 51, -1, 50, 75, -1, 50,
+ 73, -1, 50, 32, -1, 21, 76, 32, -1, 22,
+ 81, 80, 32, -1, 23, 76, 32, -1, 24, 81,
+ 80, 32, -1, 26, 76, 32, -1, 27, 76, 32,
+ -1, 25, 76, 32, -1, 19, 77, 80, 32, -1,
+ 20, 81, 80, 32, -1, 36, 28, 80, 32, -1,
+ 37, 82, 82, 80, 32, -1, 7, 32, -1, 52,
+ 56, -1, 78, -1, 53, 58, 54, -1, 53, 58,
+ -1, -1, 56, 57, -1, 56, 75, -1, 56, 73,
+ -1, 56, 32, -1, 19, 77, 80, 32, -1, 21,
+ 76, 32, -1, 23, 76, 32, -1, 18, 32, -1,
+ 20, 28, 80, 32, -1, -1, 58, 45, -1, 14,
+ 81, 32, -1, 78, -1, 59, 62, 60, -1, 59,
+ 62, -1, -1, 62, 45, -1, 62, 66, -1, 62,
+ 55, -1, 4, 77, 32, -1, 63, 74, -1, 78,
+ -1, 64, 67, 65, -1, 64, 67, -1, -1, 67,
+ 45, -1, 67, 66, -1, 67, 55, -1, 67, 1,
+ 32, -1, 6, 77, 32, -1, 68, -1, 9, 77,
+ 32, -1, 70, 74, -1, 12, 32, -1, 72, 13,
+ -1, -1, 74, 75, -1, 74, 32, -1, 16, 35,
+ 81, 32, -1, 16, 81, 32, -1, 17, 81, 32,
+ -1, -1, 77, 80, -1, 28, -1, 29, -1, 5,
+ 79, -1, 8, 79, -1, 15, 79, -1, 32, -1,
+ 31, -1, -1, 14, 81, -1, 82, -1, 82, 40,
+ 82, -1, 82, 30, 82, -1, 34, 81, 33, -1,
+ 41, 81, -1, 81, 38, 81, -1, 81, 39, 81,
+ -1, 28, -1, 29, -1
+};
+
+/* YYRLINE[YYN] -- source line where rule number YYN was defined. */
+static const unsigned short yyrline[] =
+{
+ 0, 94, 94, 95, 98, 99, 100, 101, 102, 103,
+ 104, 105, 109, 110, 111, 112, 113, 114, 120, 128,
+ 134, 142, 152, 154, 155, 156, 157, 160, 166, 173,
+ 179, 186, 192, 198, 204, 210, 216, 222, 230, 239,
+ 245, 254, 255, 261, 263, 264, 265, 266, 269, 275,
+ 281, 287, 293, 299, 301, 306, 315, 324, 325, 331,
+ 333, 334, 335, 340, 347, 353, 362, 363, 369, 371,
+ 372, 373, 374, 377, 383, 390, 397, 404, 410, 417,
+ 418, 419, 422, 427, 432, 440, 442, 447, 448, 451,
+ 452, 453, 457, 457, 459, 460, 463, 464, 465, 466,
+ 467, 468, 469, 472, 473
+};
+#endif
+
+#if YYDEBUG || YYERROR_VERBOSE
+/* YYTNME[SYMBOL-NUM] -- String name of the symbol SYMBOL-NUM.
+ First, the terminals, then, starting at YYNTOKENS, nonterminals. */
+static const char *const yytname[] =
+{
+ "$end", "error", "$undefined", "T_MAINMENU", "T_MENU", "T_ENDMENU",
+ "T_SOURCE", "T_CHOICE", "T_ENDCHOICE", "T_COMMENT", "T_CONFIG",
+ "T_MENUCONFIG", "T_HELP", "T_HELPTEXT", "T_IF", "T_ENDIF", "T_DEPENDS",
+ "T_REQUIRES", "T_OPTIONAL", "T_PROMPT", "T_DEFAULT", "T_TRISTATE",
+ "T_DEF_TRISTATE", "T_BOOLEAN", "T_DEF_BOOLEAN", "T_STRING", "T_INT",
+ "T_HEX", "T_WORD", "T_WORD_QUOTE", "T_UNEQUAL", "T_EOF", "T_EOL",
+ "T_CLOSE_PAREN", "T_OPEN_PAREN", "T_ON", "T_SELECT", "T_RANGE", "T_OR",
+ "T_AND", "T_EQUAL", "T_NOT", "$accept", "input", "block",
+ "common_block", "config_entry_start", "config_stmt",
+ "menuconfig_entry_start", "menuconfig_stmt", "config_option_list",
+ "config_option", "choice", "choice_entry", "choice_end", "choice_stmt",
+ "choice_option_list", "choice_option", "choice_block", "if", "if_end",
+ "if_stmt", "if_block", "menu", "menu_entry", "menu_end", "menu_stmt",
+ "menu_block", "source", "source_stmt", "comment", "comment_stmt",
+ "help_start", "help", "depends_list", "depends", "prompt_stmt_opt",
+ "prompt", "end", "nl_or_eof", "if_expr", "expr", "symbol", 0
+};
+#endif
+
+# ifdef YYPRINT
+/* YYTOKNUM[YYLEX-NUM] -- Internal token number corresponding to
+ token YYLEX-NUM. */
+static const unsigned short yytoknum[] =
+{
+ 0, 256, 257, 258, 259, 260, 261, 262, 263, 264,
+ 265, 266, 267, 268, 269, 270, 271, 272, 273, 274,
+ 275, 276, 277, 278, 279, 280, 281, 282, 283, 284,
+ 285, 286, 287, 288, 289, 290, 291, 292, 293, 294,
+ 295, 296
+};
+# endif
+
+/* YYR1[YYN] -- Symbol number of symbol that rule YYN derives. */
+static const unsigned char yyr1[] =
+{
+ 0, 42, 43, 43, 44, 44, 44, 44, 44, 44,
+ 44, 44, 45, 45, 45, 45, 45, 45, 46, 47,
+ 48, 49, 50, 50, 50, 50, 50, 51, 51, 51,
+ 51, 51, 51, 51, 51, 51, 51, 51, 52, 53,
+ 54, 55, 55, 56, 56, 56, 56, 56, 57, 57,
+ 57, 57, 57, 58, 58, 59, 60, 61, 61, 62,
+ 62, 62, 62, 63, 64, 65, 66, 66, 67, 67,
+ 67, 67, 67, 68, 69, 70, 71, 72, 73, 74,
+ 74, 74, 75, 75, 75, 76, 76, 77, 77, 78,
+ 78, 78, 79, 79, 80, 80, 81, 81, 81, 81,
+ 81, 81, 81, 82, 82
+};
+
+/* YYR2[YYN] -- Number of symbols composing right hand side of rule YYN. */
+static const unsigned char yyr2[] =
+{
+ 0, 2, 0, 2, 1, 1, 1, 3, 1, 1,
+ 1, 2, 1, 1, 1, 1, 1, 1, 3, 2,
+ 3, 2, 0, 2, 2, 2, 2, 3, 4, 3,
+ 4, 3, 3, 3, 4, 4, 4, 5, 2, 2,
+ 1, 3, 2, 0, 2, 2, 2, 2, 4, 3,
+ 3, 2, 4, 0, 2, 3, 1, 3, 2, 0,
+ 2, 2, 2, 3, 2, 1, 3, 2, 0, 2,
+ 2, 2, 3, 3, 1, 3, 2, 2, 2, 0,
+ 2, 2, 4, 3, 3, 0, 2, 1, 1, 2,
+ 2, 2, 1, 1, 0, 2, 1, 3, 3, 3,
+ 2, 3, 3, 1, 1
+};
+
+/* YYDEFACT[STATE-NAME] -- Default rule to reduce with in state
+ STATE-NUM when YYTABLE doesn't specify something else to do. Zero
+ means the default is an error. */
+static const unsigned char yydefact[] =
+{
+ 2, 0, 1, 0, 0, 0, 8, 0, 0, 10,
+ 0, 0, 0, 0, 9, 93, 92, 3, 4, 22,
+ 14, 22, 15, 43, 53, 5, 59, 12, 79, 68,
+ 6, 74, 16, 79, 13, 17, 11, 87, 88, 0,
+ 0, 0, 38, 0, 0, 0, 103, 104, 0, 0,
+ 0, 96, 19, 21, 39, 42, 58, 64, 0, 76,
+ 7, 63, 73, 75, 18, 20, 0, 100, 55, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 85, 0,
+ 85, 0, 85, 85, 85, 26, 0, 0, 23, 0,
+ 25, 24, 0, 0, 0, 85, 85, 47, 44, 46,
+ 45, 0, 0, 0, 54, 41, 40, 60, 62, 57,
+ 61, 56, 81, 80, 0, 69, 71, 66, 70, 65,
+ 99, 101, 102, 98, 97, 77, 0, 0, 0, 94,
+ 94, 0, 94, 94, 0, 94, 0, 0, 0, 94,
+ 0, 78, 51, 94, 94, 0, 0, 89, 90, 91,
+ 72, 0, 83, 84, 0, 0, 0, 27, 86, 0,
+ 29, 0, 33, 31, 32, 0, 94, 0, 0, 49,
+ 50, 82, 95, 34, 35, 28, 30, 36, 0, 48,
+ 52, 37
+};
+
+/* YYDEFGOTO[NTERM-NUM]. */
+static const short yydefgoto[] =
+{
+ -1, 1, 17, 18, 19, 20, 21, 22, 52, 88,
+ 23, 24, 105, 25, 54, 98, 55, 26, 109, 27,
+ 56, 28, 29, 117, 30, 58, 31, 32, 33, 34,
+ 89, 90, 57, 91, 131, 132, 106, 35, 155, 50,
+ 51
+};
+
+/* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing
+ STATE-NUM. */
+#define YYPACT_NINF -99
+static const short yypact[] =
+{
+ -99, 48, -99, 38, 46, 46, -99, 46, -29, -99,
+ 46, -17, -3, -11, -99, -99, -99, -99, -99, -99,
+ -99, -99, -99, -99, -99, -99, -99, -99, -99, -99,
+ -99, -99, -99, -99, -99, -99, -99, -99, -99, 38,
+ 12, 15, -99, 18, 51, 62, -99, -99, -11, -11,
+ 4, -24, 138, 138, 160, 121, 110, -4, 81, -4,
+ -99, -99, -99, -99, -99, -99, -19, -99, -99, -11,
+ -11, 70, 70, 73, 32, -11, 46, -11, 46, -11,
+ 46, -11, 46, 46, 46, -99, 36, 70, -99, 95,
+ -99, -99, 96, 46, 106, 46, 46, -99, -99, -99,
+ -99, 38, 38, 38, -99, -99, -99, -99, -99, -99,
+ -99, -99, -99, -99, 112, -99, -99, -99, -99, -99,
+ -99, 117, -99, -99, -99, -99, -11, 33, 65, 131,
+ 1, 119, 131, 1, 136, 1, 153, 154, 155, 131,
+ 70, -99, -99, 131, 131, 156, 157, -99, -99, -99,
+ -99, 101, -99, -99, -11, 158, 159, -99, -99, 161,
+ -99, 162, -99, -99, -99, 163, 131, 164, 165, -99,
+ -99, -99, 99, -99, -99, -99, -99, -99, 166, -99,
+ -99, -99
+};
+
+/* YYPGOTO[NTERM-NUM]. */
+static const short yypgoto[] =
+{
+ -99, -99, -99, 111, -99, -99, -99, -99, 178, -99,
+ -99, -99, -99, 91, -99, -99, -99, -99, -99, -99,
+ -99, -99, -99, -99, 115, -99, -99, -99, -99, -99,
+ -99, 146, 168, 89, 27, 0, 126, -1, -98, -48,
+ -63
+};
+
+/* YYTABLE[YYPACT[STATE-NUM]]. What to do in state STATE-NUM. If
+ positive, shift that token. If negative, reduce the rule which
+ number is the opposite. If zero, do what YYDEFACT says.
+ If YYTABLE_NINF, syntax error. */
+#define YYTABLE_NINF -68
+static const short yytable[] =
+{
+ 66, 67, 36, 42, 39, 40, 71, 41, 123, 124,
+ 43, 44, 74, 75, 120, 154, 72, 46, 47, 69,
+ 70, 121, 122, 48, 140, 45, 127, 128, 112, 130,
+ 49, 133, 156, 135, 158, 159, 68, 161, 60, 69,
+ 70, 165, 69, 70, 61, 167, 168, 62, 2, 3,
+ 63, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+ 46, 47, 13, 14, 139, 152, 48, 126, 178, 15,
+ 16, 69, 70, 49, 37, 38, 129, 166, 151, 15,
+ 16, -67, 114, 64, -67, 5, 101, 7, 8, 102,
+ 10, 11, 12, 143, 65, 13, 103, 153, 46, 47,
+ 147, 148, 149, 69, 70, 125, 172, 134, 141, 136,
+ 137, 138, 15, 16, 5, 101, 7, 8, 102, 10,
+ 11, 12, 145, 146, 13, 103, 101, 7, 142, 102,
+ 10, 11, 12, 171, 144, 13, 103, 69, 70, 69,
+ 70, 15, 16, 100, 150, 154, 113, 108, 113, 116,
+ 73, 157, 15, 16, 74, 75, 70, 76, 77, 78,
+ 79, 80, 81, 82, 83, 84, 104, 107, 160, 115,
+ 85, 110, 73, 118, 86, 87, 74, 75, 92, 93,
+ 94, 95, 111, 96, 119, 162, 163, 164, 169, 170,
+ 173, 174, 97, 175, 176, 177, 179, 180, 181, 53,
+ 99, 59
+};
+
+static const unsigned char yycheck[] =
+{
+ 48, 49, 3, 32, 4, 5, 30, 7, 71, 72,
+ 10, 28, 16, 17, 33, 14, 40, 28, 29, 38,
+ 39, 69, 70, 34, 87, 28, 74, 75, 32, 77,
+ 41, 79, 130, 81, 132, 133, 32, 135, 39, 38,
+ 39, 139, 38, 39, 32, 143, 144, 32, 0, 1,
+ 32, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+ 28, 29, 14, 15, 28, 32, 34, 35, 166, 31,
+ 32, 38, 39, 41, 28, 29, 76, 140, 126, 31,
+ 32, 0, 1, 32, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 93, 32, 14, 15, 32, 28, 29,
+ 101, 102, 103, 38, 39, 32, 154, 80, 13, 82,
+ 83, 84, 31, 32, 4, 5, 6, 7, 8, 9,
+ 10, 11, 95, 96, 14, 15, 5, 6, 32, 8,
+ 9, 10, 11, 32, 28, 14, 15, 38, 39, 38,
+ 39, 31, 32, 54, 32, 14, 57, 56, 59, 58,
+ 12, 32, 31, 32, 16, 17, 39, 19, 20, 21,
+ 22, 23, 24, 25, 26, 27, 55, 56, 32, 58,
+ 32, 56, 12, 58, 36, 37, 16, 17, 18, 19,
+ 20, 21, 56, 23, 58, 32, 32, 32, 32, 32,
+ 32, 32, 32, 32, 32, 32, 32, 32, 32, 21,
+ 54, 33
+};
+
+/* YYSTOS[STATE-NUM] -- The (internal number of the) accessing
+ symbol of state STATE-NUM. */
+static const unsigned char yystos[] =
+{
+ 0, 43, 0, 1, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 14, 15, 31, 32, 44, 45, 46,
+ 47, 48, 49, 52, 53, 55, 59, 61, 63, 64,
+ 66, 68, 69, 70, 71, 79, 79, 28, 29, 77,
+ 77, 77, 32, 77, 28, 28, 28, 29, 34, 41,
+ 81, 82, 50, 50, 56, 58, 62, 74, 67, 74,
+ 79, 32, 32, 32, 32, 32, 81, 81, 32, 38,
+ 39, 30, 40, 12, 16, 17, 19, 20, 21, 22,
+ 23, 24, 25, 26, 27, 32, 36, 37, 51, 72,
+ 73, 75, 18, 19, 20, 21, 23, 32, 57, 73,
+ 75, 5, 8, 15, 45, 54, 78, 45, 55, 60,
+ 66, 78, 32, 75, 1, 45, 55, 65, 66, 78,
+ 33, 81, 81, 82, 82, 32, 35, 81, 81, 77,
+ 81, 76, 77, 81, 76, 81, 76, 76, 76, 28,
+ 82, 13, 32, 77, 28, 76, 76, 79, 79, 79,
+ 32, 81, 32, 32, 14, 80, 80, 32, 80, 80,
+ 32, 80, 32, 32, 32, 80, 82, 80, 80, 32,
+ 32, 32, 81, 32, 32, 32, 32, 32, 80, 32,
+ 32, 32
+};
+
+#if ! defined (YYSIZE_T) && defined (__SIZE_TYPE__)
+# define YYSIZE_T __SIZE_TYPE__
+#endif
+#if ! defined (YYSIZE_T) && defined (size_t)
+# define YYSIZE_T size_t
+#endif
+#if ! defined (YYSIZE_T)
+# if defined (__STDC__) || defined (__cplusplus)
+# include <stddef.h> /* INFRINGES ON USER NAME SPACE */
+# define YYSIZE_T size_t
+# endif
+#endif
+#if ! defined (YYSIZE_T)
+# define YYSIZE_T unsigned int
+#endif
+
+#define yyerrok (yyerrstatus = 0)
+#define yyclearin (yychar = YYEMPTY)
+#define YYEMPTY (-2)
+#define YYEOF 0
+
+#define YYACCEPT goto yyacceptlab
+#define YYABORT goto yyabortlab
+#define YYERROR goto yyerrlab1
+
+
+/* Like YYERROR except do call yyerror. This remains here temporarily
+ to ease the transition to the new meaning of YYERROR, for GCC.
+ Once GCC version 2 has supplanted version 1, this can go. */
+
+#define YYFAIL goto yyerrlab
+
+#define YYRECOVERING() (!!yyerrstatus)
+
+#define YYBACKUP(Token, Value) \
+do \
+ if (yychar == YYEMPTY && yylen == 1) \
+ { \
+ yychar = (Token); \
+ yylval = (Value); \
+ yytoken = YYTRANSLATE (yychar); \
+ YYPOPSTACK; \
+ goto yybackup; \
+ } \
+ else \
+ { \
+ yyerror ("syntax error: cannot back up");\
+ YYERROR; \
+ } \
+while (0)
+
+#define YYTERROR 1
+#define YYERRCODE 256
+
+/* YYLLOC_DEFAULT -- Compute the default location (before the actions
+ are run). */
+
+#ifndef YYLLOC_DEFAULT
+# define YYLLOC_DEFAULT(Current, Rhs, N) \
+ Current.first_line = Rhs[1].first_line; \
+ Current.first_column = Rhs[1].first_column; \
+ Current.last_line = Rhs[N].last_line; \
+ Current.last_column = Rhs[N].last_column;
+#endif
+
+/* YYLEX -- calling `yylex' with the right arguments. */
+
+#ifdef YYLEX_PARAM
+# define YYLEX yylex (YYLEX_PARAM)
+#else
+# define YYLEX yylex ()
+#endif
+
+/* Enable debugging if requested. */
+#if YYDEBUG
+
+# ifndef YYFPRINTF
+# include <stdio.h> /* INFRINGES ON USER NAME SPACE */
+# define YYFPRINTF fprintf
+# endif
+
+# define YYDPRINTF(Args) \
+do { \
+ if (yydebug) \
+ YYFPRINTF Args; \
+} while (0)
+
+# define YYDSYMPRINT(Args) \
+do { \
+ if (yydebug) \
+ yysymprint Args; \
+} while (0)
+
+# define YYDSYMPRINTF(Title, Token, Value, Location) \
+do { \
+ if (yydebug) \
+ { \
+ YYFPRINTF (stderr, "%s ", Title); \
+ yysymprint (stderr, \
+ Token, Value); \
+ YYFPRINTF (stderr, "\n"); \
+ } \
+} while (0)
+
+/*------------------------------------------------------------------.
+| yy_stack_print -- Print the state stack from its BOTTOM up to its |
+| TOP (included). |
+`------------------------------------------------------------------*/
+
+/* Bison-generated debug helper: dump the parser state stack (inclusive
+ * of both ends) to stderr when yydebug is set. Regenerated output --
+ * do not hand-edit. */
+#if defined (__STDC__) || defined (__cplusplus)
+static void
+yy_stack_print (short *bottom, short *top)
+#else
+static void
+yy_stack_print (bottom, top)
+ short *bottom;
+ short *top;
+#endif
+{
+ YYFPRINTF (stderr, "Stack now");
+ for (/* Nothing. */; bottom <= top; ++bottom)
+ YYFPRINTF (stderr, " %d", *bottom);
+ YYFPRINTF (stderr, "\n");
+}
+
+# define YY_STACK_PRINT(Bottom, Top) \
+do { \
+ if (yydebug) \
+ yy_stack_print ((Bottom), (Top)); \
+} while (0)
+
+
+/*------------------------------------------------.
+| Report that the YYRULE is going to be reduced. |
+`------------------------------------------------*/
+
+/* Bison-generated debug helper: announce the reduction of rule YYRULE,
+ * printing its RHS symbols and resulting nonterminal from the yyprhs/
+ * yyrhs/yytname tables. Regenerated output -- do not hand-edit. */
+#if defined (__STDC__) || defined (__cplusplus)
+static void
+yy_reduce_print (int yyrule)
+#else
+static void
+yy_reduce_print (yyrule)
+ int yyrule;
+#endif
+{
+ int yyi;
+ unsigned int yylineno = yyrline[yyrule];
+ YYFPRINTF (stderr, "Reducing stack by rule %d (line %u), ",
+ yyrule - 1, yylineno);
+ /* Print the symbols being reduced, and their result. */
+ for (yyi = yyprhs[yyrule]; 0 <= yyrhs[yyi]; yyi++)
+ YYFPRINTF (stderr, "%s ", yytname [yyrhs[yyi]]);
+ YYFPRINTF (stderr, "-> %s\n", yytname [yyr1[yyrule]]);
+}
+
+# define YY_REDUCE_PRINT(Rule) \
+do { \
+ if (yydebug) \
+ yy_reduce_print (Rule); \
+} while (0)
+
+/* Nonzero means print parse trace. It is left uninitialized so that
+ multiple parsers can coexist. */
+int yydebug;
+#else /* !YYDEBUG */
+# define YYDPRINTF(Args)
+# define YYDSYMPRINT(Args)
+# define YYDSYMPRINTF(Title, Token, Value, Location)
+# define YY_STACK_PRINT(Bottom, Top)
+# define YY_REDUCE_PRINT(Rule)
+#endif /* !YYDEBUG */
+
+
+/* YYINITDEPTH -- initial size of the parser's stacks. */
+#ifndef YYINITDEPTH
+# define YYINITDEPTH 200
+#endif
+
+/* YYMAXDEPTH -- maximum size the stacks can grow to (effective only
+ if the built-in stack extension method is used).
+
+ Do not make this value too large; the results are undefined if
+ SIZE_MAX < YYSTACK_BYTES (YYMAXDEPTH)
+ evaluated with infinite-precision integer arithmetic. */
+
+#if YYMAXDEPTH == 0
+# undef YYMAXDEPTH
+#endif
+
+#ifndef YYMAXDEPTH
+# define YYMAXDEPTH 10000
+#endif
+
+\f
+
+#if YYERROR_VERBOSE
+
+# ifndef yystrlen
+# if defined (__GLIBC__) && defined (_STRING_H)
+# define yystrlen strlen
+# else
+/* Return the length of YYSTR (portable strlen fallback used only when
+ * glibc's strlen is unavailable). Bison-generated -- do not hand-edit. */
+static YYSIZE_T
+# if defined (__STDC__) || defined (__cplusplus)
+yystrlen (const char *yystr)
+# else
+yystrlen (yystr)
+ const char *yystr;
+# endif
+{
+ register const char *yys = yystr;
+
+ while (*yys++ != '\0')
+ continue;
+
+ return yys - yystr - 1;
+}
+# endif
+# endif
+
+# ifndef yystpcpy
+# if defined (__GLIBC__) && defined (_STRING_H) && defined (_GNU_SOURCE)
+# define yystpcpy stpcpy
+# else
+/* Copy YYSRC to YYDEST, returning the address of the terminating '\0' in
+ YYDEST (portable stpcpy fallback used only when glibc's is
+ unavailable). Bison-generated -- do not hand-edit. */
+static char *
+# if defined (__STDC__) || defined (__cplusplus)
+yystpcpy (char *yydest, const char *yysrc)
+# else
+yystpcpy (yydest, yysrc)
+ char *yydest;
+ const char *yysrc;
+# endif
+{
+ register char *yyd = yydest;
+ register const char *yys = yysrc;
+
+ while ((*yyd++ = *yys++) != '\0')
+ continue;
+
+ return yyd - 1;
+}
+# endif
+# endif
+
+#endif /* !YYERROR_VERBOSE */
+
+\f
+
+#if YYDEBUG
+/*--------------------------------.
+| Print this symbol on YYOUTPUT. |
+`--------------------------------*/
+
+/* Bison-generated debug helper: print symbol YYTYPE (token or
+ * nonterminal) and, under YYPRINT, its semantic value to YYOUTPUT.
+ * Regenerated output -- do not hand-edit. */
+#if defined (__STDC__) || defined (__cplusplus)
+static void
+yysymprint (FILE *yyoutput, int yytype, YYSTYPE *yyvaluep)
+#else
+static void
+yysymprint (yyoutput, yytype, yyvaluep)
+ FILE *yyoutput;
+ int yytype;
+ YYSTYPE *yyvaluep;
+#endif
+{
+ /* Pacify ``unused variable'' warnings. */
+ (void) yyvaluep;
+
+ if (yytype < YYNTOKENS)
+ {
+ YYFPRINTF (yyoutput, "token %s (", yytname[yytype]);
+# ifdef YYPRINT
+ YYPRINT (yyoutput, yytoknum[yytype], *yyvaluep);
+# endif
+ }
+ else
+ YYFPRINTF (yyoutput, "nterm %s (", yytname[yytype]);
+
+ switch (yytype)
+ {
+ default:
+ break;
+ }
+ YYFPRINTF (yyoutput, ")");
+}
+
+#endif /* ! YYDEBUG */
+/*-----------------------------------------------.
+| Release the memory associated to this symbol. |
+`-----------------------------------------------*/
+
+/* Bison-generated: release resources held by a discarded symbol's
+ * semantic value. This grammar declares no %destructor, so the switch
+ * is empty and the function is a no-op. Regenerated output -- do not
+ * hand-edit. */
+#if defined (__STDC__) || defined (__cplusplus)
+static void
+yydestruct (int yytype, YYSTYPE *yyvaluep)
+#else
+static void
+yydestruct (yytype, yyvaluep)
+ int yytype;
+ YYSTYPE *yyvaluep;
+#endif
+{
+ /* Pacify ``unused variable'' warnings. */
+ (void) yyvaluep;
+
+ switch (yytype)
+ {
+
+ default:
+ break;
+ }
+}
+\f
+
+/* Prevent warnings from -Wmissing-prototypes. */
+
+#ifdef YYPARSE_PARAM
+# if defined (__STDC__) || defined (__cplusplus)
+int yyparse (void *YYPARSE_PARAM);
+# else
+int yyparse ();
+# endif
+#else /* ! YYPARSE_PARAM */
+#if defined (__STDC__) || defined (__cplusplus)
+int yyparse (void);
+#else
+int yyparse ();
+#endif
+#endif /* ! YYPARSE_PARAM */
+
+
+
+/* The lookahead symbol. */
+int yychar;
+
+/* The semantic value of the lookahead symbol. */
+YYSTYPE yylval;
+
+/* Number of syntax errors so far. */
+int yynerrs;
+
+
+
+/*----------.
+| yyparse. |
+`----------*/
+
+#ifdef YYPARSE_PARAM
+# if defined (__STDC__) || defined (__cplusplus)
+int yyparse (void *YYPARSE_PARAM)
+# else
+int yyparse (YYPARSE_PARAM)
+ void *YYPARSE_PARAM;
+# endif
+#else /* ! YYPARSE_PARAM */
+#if defined (__STDC__) || defined (__cplusplus)
+int
+yyparse (void)
+#else
+int
+yyparse ()
+
+#endif
+#endif
+{
+
+ register int yystate;
+ register int yyn;
+ int yyresult;
+ /* Number of tokens to shift before error messages enabled. */
+ int yyerrstatus;
+ /* Lookahead token as an internal (translated) token number. */
+ int yytoken = 0;
+
+ /* Three stacks and their tools:
+ `yyss': related to states,
+ `yyvs': related to semantic values,
+ `yyls': related to locations.
+
+ Refer to the stacks thru separate pointers, to allow yyoverflow
+ to reallocate them elsewhere. */
+
+ /* The state stack. */
+ short yyssa[YYINITDEPTH];
+ short *yyss = yyssa;
+ register short *yyssp;
+
+ /* The semantic value stack. */
+ YYSTYPE yyvsa[YYINITDEPTH];
+ YYSTYPE *yyvs = yyvsa;
+ register YYSTYPE *yyvsp;
+
+
+
+#define YYPOPSTACK (yyvsp--, yyssp--)
+
+ YYSIZE_T yystacksize = YYINITDEPTH;
+
+ /* The variables used to return semantic value and location from the
+ action routines. */
+ YYSTYPE yyval;
+
+
+ /* When reducing, the number of symbols on the RHS of the reduced
+ rule. */
+ int yylen;
+
+ YYDPRINTF ((stderr, "Starting parse\n"));
+
+ yystate = 0;
+ yyerrstatus = 0;
+ yynerrs = 0;
+ yychar = YYEMPTY; /* Cause a token to be read. */
+
+ /* Initialize stack pointers.
+ Waste one element of value and location stack
+ so that they stay on the same level as the state stack.
+ The wasted elements are never initialized. */
+
+ yyssp = yyss;
+ yyvsp = yyvs;
+
+ goto yysetstate;
+
+/*------------------------------------------------------------.
+| yynewstate -- Push a new state, which is found in yystate. |
+`------------------------------------------------------------*/
+ yynewstate:
+ /* In all cases, when you get here, the value and location stacks
+ have just been pushed. so pushing a state here evens the stacks.
+ */
+ yyssp++;
+
+ yysetstate:
+ *yyssp = yystate;
+
+ if (yyss + yystacksize - 1 <= yyssp)
+ {
+ /* Get the current used size of the three stacks, in elements. */
+ YYSIZE_T yysize = yyssp - yyss + 1;
+
+#ifdef yyoverflow
+ {
+ /* Give user a chance to reallocate the stack. Use copies of
+ these so that the &'s don't force the real ones into
+ memory. */
+ YYSTYPE *yyvs1 = yyvs;
+ short *yyss1 = yyss;
+
+
+ /* Each stack pointer address is followed by the size of the
+ data in use in that stack, in bytes. This used to be a
+ conditional around just the two extra args, but that might
+ be undefined if yyoverflow is a macro. */
+ yyoverflow ("parser stack overflow",
+ &yyss1, yysize * sizeof (*yyssp),
+ &yyvs1, yysize * sizeof (*yyvsp),
+
+ &yystacksize);
+
+ yyss = yyss1;
+ yyvs = yyvs1;
+ }
+#else /* no yyoverflow */
+# ifndef YYSTACK_RELOCATE
+ goto yyoverflowlab;
+# else
+ /* Extend the stack our own way. */
+ if (YYMAXDEPTH <= yystacksize)
+ goto yyoverflowlab;
+ yystacksize *= 2;
+ if (YYMAXDEPTH < yystacksize)
+ yystacksize = YYMAXDEPTH;
+
+ {
+ short *yyss1 = yyss;
+ union yyalloc *yyptr =
+ (union yyalloc *) YYSTACK_ALLOC (YYSTACK_BYTES (yystacksize));
+ if (! yyptr)
+ goto yyoverflowlab;
+ YYSTACK_RELOCATE (yyss);
+ YYSTACK_RELOCATE (yyvs);
+
+# undef YYSTACK_RELOCATE
+ if (yyss1 != yyssa)
+ YYSTACK_FREE (yyss1);
+ }
+# endif
+#endif /* no yyoverflow */
+
+ yyssp = yyss + yysize - 1;
+ yyvsp = yyvs + yysize - 1;
+
+
+ YYDPRINTF ((stderr, "Stack size increased to %lu\n",
+ (unsigned long int) yystacksize));
+
+ if (yyss + yystacksize - 1 <= yyssp)
+ YYABORT;
+ }
+
+ YYDPRINTF ((stderr, "Entering state %d\n", yystate));
+
+ goto yybackup;
+
+/*-----------.
+| yybackup. |
+`-----------*/
+yybackup:
+
+/* Do appropriate processing given the current state. */
+/* Read a lookahead token if we need one and don't already have one. */
+/* yyresume: */
+
+ /* First try to decide what to do without reference to lookahead token. */
+
+ yyn = yypact[yystate];
+ if (yyn == YYPACT_NINF)
+ goto yydefault;
+
+ /* Not known => get a lookahead token if don't already have one. */
+
+ /* YYCHAR is either YYEMPTY or YYEOF or a valid lookahead symbol. */
+ if (yychar == YYEMPTY)
+ {
+ YYDPRINTF ((stderr, "Reading a token: "));
+ yychar = YYLEX;
+ }
+
+ if (yychar <= YYEOF)
+ {
+ yychar = yytoken = YYEOF;
+ YYDPRINTF ((stderr, "Now at end of input.\n"));
+ }
+ else
+ {
+ yytoken = YYTRANSLATE (yychar);
+ YYDSYMPRINTF ("Next token is", yytoken, &yylval, &yylloc);
+ }
+
+ /* If the proper action on seeing token YYTOKEN is to reduce or to
+ detect an error, take that action. */
+ yyn += yytoken;
+ if (yyn < 0 || YYLAST < yyn || yycheck[yyn] != yytoken)
+ goto yydefault;
+ yyn = yytable[yyn];
+ if (yyn <= 0)
+ {
+ if (yyn == 0 || yyn == YYTABLE_NINF)
+ goto yyerrlab;
+ yyn = -yyn;
+ goto yyreduce;
+ }
+
+ if (yyn == YYFINAL)
+ YYACCEPT;
+
+ /* Shift the lookahead token. */
+ YYDPRINTF ((stderr, "Shifting token %s, ", yytname[yytoken]));
+
+ /* Discard the token being shifted unless it is eof. */
+ if (yychar != YYEOF)
+ yychar = YYEMPTY;
+
+ *++yyvsp = yylval;
+
+
+ /* Count tokens shifted since error; after three, turn off error
+ status. */
+ if (yyerrstatus)
+ yyerrstatus--;
+
+ yystate = yyn;
+ goto yynewstate;
+
+
+/*-----------------------------------------------------------.
+| yydefault -- do the default action for the current state. |
+`-----------------------------------------------------------*/
+yydefault:
+ yyn = yydefact[yystate];
+ if (yyn == 0)
+ goto yyerrlab;
+ goto yyreduce;
+
+
+/*-----------------------------.
+| yyreduce -- Do a reduction. |
+`-----------------------------*/
+yyreduce:
+ /* yyn is the number of a rule to reduce with. */
+ yylen = yyr2[yyn];
+
+ /* If YYLEN is nonzero, implement the default value of the action:
+ `$$ = $1'.
+
+ Otherwise, the following line sets YYVAL to garbage.
+ This behavior is undocumented and Bison
+ users should not rely upon it. Assigning to YYVAL
+ unconditionally makes the parser a bit smaller, and it avoids a
+ GCC warning that YYVAL may be used uninitialized. */
+ yyval = yyvsp[1-yylen];
+
+
+ YY_REDUCE_PRINT (yyn);
+ switch (yyn)
+ {
+ case 8:
+
+ { zconfprint("unexpected 'endmenu' statement"); ;}
+ break;
+
+ case 9:
+
+ { zconfprint("unexpected 'endif' statement"); ;}
+ break;
+
+ case 10:
+
+ { zconfprint("unexpected 'endchoice' statement"); ;}
+ break;
+
+ case 11:
+
+ { zconfprint("syntax error"); yyerrok; ;}
+ break;
+
+ case 18:
+
+ {
+ struct symbol *sym = sym_lookup(yyvsp[-1].string, 0);
+ sym->flags |= SYMBOL_OPTIONAL;
+ menu_add_entry(sym);
+ printd(DEBUG_PARSE, "%s:%d:config %s\n", zconf_curname(), zconf_lineno(), yyvsp[-1].string);
+;}
+ break;
+
+ case 19:
+
+ {
+ menu_end_entry();
+ printd(DEBUG_PARSE, "%s:%d:endconfig\n", zconf_curname(), zconf_lineno());
+;}
+ break;
+
+ case 20:
+
+ {
+ struct symbol *sym = sym_lookup(yyvsp[-1].string, 0);
+ sym->flags |= SYMBOL_OPTIONAL;
+ menu_add_entry(sym);
+ printd(DEBUG_PARSE, "%s:%d:menuconfig %s\n", zconf_curname(), zconf_lineno(), yyvsp[-1].string);
+;}
+ break;
+
+ case 21:
+
+ {
+ if (current_entry->prompt)
+ current_entry->prompt->type = P_MENU;
+ else
+ zconfprint("warning: menuconfig statement without prompt");
+ menu_end_entry();
+ printd(DEBUG_PARSE, "%s:%d:endconfig\n", zconf_curname(), zconf_lineno());
+;}
+ break;
+
+ case 27:
+
+ {
+ menu_set_type(S_TRISTATE);
+ printd(DEBUG_PARSE, "%s:%d:tristate\n", zconf_curname(), zconf_lineno());
+;}
+ break;
+
+ case 28:
+
+ {
+ menu_add_expr(P_DEFAULT, yyvsp[-2].expr, yyvsp[-1].expr);
+ menu_set_type(S_TRISTATE);
+ printd(DEBUG_PARSE, "%s:%d:def_boolean\n", zconf_curname(), zconf_lineno());
+;}
+ break;
+
+ case 29:
+
+ {
+ menu_set_type(S_BOOLEAN);
+ printd(DEBUG_PARSE, "%s:%d:boolean\n", zconf_curname(), zconf_lineno());
+;}
+ break;
+
+ case 30:
+
+ {
+ menu_add_expr(P_DEFAULT, yyvsp[-2].expr, yyvsp[-1].expr);
+ menu_set_type(S_BOOLEAN);
+ printd(DEBUG_PARSE, "%s:%d:def_boolean\n", zconf_curname(), zconf_lineno());
+;}
+ break;
+
+ case 31:
+
+ {
+ menu_set_type(S_INT);
+ printd(DEBUG_PARSE, "%s:%d:int\n", zconf_curname(), zconf_lineno());
+;}
+ break;
+
+ case 32:
+
+ {
+ menu_set_type(S_HEX);
+ printd(DEBUG_PARSE, "%s:%d:hex\n", zconf_curname(), zconf_lineno());
+;}
+ break;
+
+ case 33:
+
+ {
+ menu_set_type(S_STRING);
+ printd(DEBUG_PARSE, "%s:%d:string\n", zconf_curname(), zconf_lineno());
+;}
+ break;
+
+ case 34:
+
+ {
+ menu_add_prompt(P_PROMPT, yyvsp[-2].string, yyvsp[-1].expr);
+ printd(DEBUG_PARSE, "%s:%d:prompt\n", zconf_curname(), zconf_lineno());
+;}
+ break;
+
+ case 35:
+
+ {
+ menu_add_expr(P_DEFAULT, yyvsp[-2].expr, yyvsp[-1].expr);
+ printd(DEBUG_PARSE, "%s:%d:default\n", zconf_curname(), zconf_lineno());
+;}
+ break;
+
+ case 36:
+
+ {
+ menu_add_symbol(P_SELECT, sym_lookup(yyvsp[-2].string, 0), yyvsp[-1].expr);
+ printd(DEBUG_PARSE, "%s:%d:select\n", zconf_curname(), zconf_lineno());
+;}
+ break;
+
+ case 37:
+
+ {
+ menu_add_expr(P_RANGE, expr_alloc_comp(E_RANGE,yyvsp[-3].symbol, yyvsp[-2].symbol), yyvsp[-1].expr);
+ printd(DEBUG_PARSE, "%s:%d:range\n", zconf_curname(), zconf_lineno());
+;}
+ break;
+
+ case 38:
+
+ {
+ struct symbol *sym = sym_lookup(NULL, 0);
+ sym->flags |= SYMBOL_CHOICE;
+ menu_add_entry(sym);
+ menu_add_expr(P_CHOICE, NULL, NULL);
+ printd(DEBUG_PARSE, "%s:%d:choice\n", zconf_curname(), zconf_lineno());
+;}
+ break;
+
+ case 39:
+
+ {
+ menu_end_entry();
+ menu_add_menu();
+;}
+ break;
+
+ case 40:
+
+ {
+ if (zconf_endtoken(yyvsp[0].token, T_CHOICE, T_ENDCHOICE)) {
+ menu_end_menu();
+ printd(DEBUG_PARSE, "%s:%d:endchoice\n", zconf_curname(), zconf_lineno());
+ }
+;}
+ break;
+
+ case 42:
+
+ {
+ printf("%s:%d: missing 'endchoice' for this 'choice' statement\n", current_menu->file->name, current_menu->lineno);
+ zconfnerrs++;
+;}
+ break;
+
+ case 48:
+
+ {
+ menu_add_prompt(P_PROMPT, yyvsp[-2].string, yyvsp[-1].expr);
+ printd(DEBUG_PARSE, "%s:%d:prompt\n", zconf_curname(), zconf_lineno());
+;}
+ break;
+
+ case 49:
+
+ {
+ menu_set_type(S_TRISTATE);
+ printd(DEBUG_PARSE, "%s:%d:tristate\n", zconf_curname(), zconf_lineno());
+;}
+ break;
+
+ case 50:
+
+ {
+ menu_set_type(S_BOOLEAN);
+ printd(DEBUG_PARSE, "%s:%d:boolean\n", zconf_curname(), zconf_lineno());
+;}
+ break;
+
+ case 51:
+
+ {
+ current_entry->sym->flags |= SYMBOL_OPTIONAL;
+ printd(DEBUG_PARSE, "%s:%d:optional\n", zconf_curname(), zconf_lineno());
+;}
+ break;
+
+ case 52:
+
+ {
+ menu_add_symbol(P_DEFAULT, sym_lookup(yyvsp[-2].string, 0), yyvsp[-1].expr);
+ printd(DEBUG_PARSE, "%s:%d:default\n", zconf_curname(), zconf_lineno());
+;}
+ break;
+
+ case 55:
+
+ {
+ printd(DEBUG_PARSE, "%s:%d:if\n", zconf_curname(), zconf_lineno());
+ menu_add_entry(NULL);
+ menu_add_dep(yyvsp[-1].expr);
+ menu_end_entry();
+ menu_add_menu();
+;}
+ break;
+
+ case 56:
+
+ {
+ if (zconf_endtoken(yyvsp[0].token, T_IF, T_ENDIF)) {
+ menu_end_menu();
+ printd(DEBUG_PARSE, "%s:%d:endif\n", zconf_curname(), zconf_lineno());
+ }
+;}
+ break;
+
+ case 58:
+
+ {
+ printf("%s:%d: missing 'endif' for this 'if' statement\n", current_menu->file->name, current_menu->lineno);
+ zconfnerrs++;
+;}
+ break;
+
+ case 63:
+
+ {
+ menu_add_entry(NULL);
+ menu_add_prop(P_MENU, yyvsp[-1].string, NULL, NULL);
+ printd(DEBUG_PARSE, "%s:%d:menu\n", zconf_curname(), zconf_lineno());
+;}
+ break;
+
+ case 64:
+
+ {
+ menu_end_entry();
+ menu_add_menu();
+;}
+ break;
+
+ case 65:
+
+ {
+ if (zconf_endtoken(yyvsp[0].token, T_MENU, T_ENDMENU)) {
+ menu_end_menu();
+ printd(DEBUG_PARSE, "%s:%d:endmenu\n", zconf_curname(), zconf_lineno());
+ }
+;}
+ break;
+
+ case 67:
+
+ {
+ printf("%s:%d: missing 'endmenu' for this 'menu' statement\n", current_menu->file->name, current_menu->lineno);
+ zconfnerrs++;
+;}
+ break;
+
+ case 72:
+
+ { zconfprint("invalid menu option"); yyerrok; ;}
+ break;
+
+ case 73:
+
+ {
+ yyval.string = yyvsp[-1].string;
+ printd(DEBUG_PARSE, "%s:%d:source %s\n", zconf_curname(), zconf_lineno(), yyvsp[-1].string);
+;}
+ break;
+
+ case 74:
+
+ {
+ zconf_nextfile(yyvsp[0].string);
+;}
+ break;
+
+ case 75:
+
+ {
+ menu_add_entry(NULL);
+ menu_add_prop(P_COMMENT, yyvsp[-1].string, NULL, NULL);
+ printd(DEBUG_PARSE, "%s:%d:comment\n", zconf_curname(), zconf_lineno());
+;}
+ break;
+
+ case 76:
+
+ {
+ menu_end_entry();
+;}
+ break;
+
+ case 77:
+
+ {
+ printd(DEBUG_PARSE, "%s:%d:help\n", zconf_curname(), zconf_lineno());
+ zconf_starthelp();
+;}
+ break;
+
+ case 78:
+
+ {
+ current_entry->sym->help = yyvsp[0].string;
+;}
+ break;
+
+ case 82:
+
+ {
+ menu_add_dep(yyvsp[-1].expr);
+ printd(DEBUG_PARSE, "%s:%d:depends on\n", zconf_curname(), zconf_lineno());
+;}
+ break;
+
+ case 83:
+
+ {
+ menu_add_dep(yyvsp[-1].expr);
+ printd(DEBUG_PARSE, "%s:%d:depends\n", zconf_curname(), zconf_lineno());
+;}
+ break;
+
+ case 84:
+
+ {
+ menu_add_dep(yyvsp[-1].expr);
+ printd(DEBUG_PARSE, "%s:%d:requires\n", zconf_curname(), zconf_lineno());
+;}
+ break;
+
+ case 86:
+
+ {
+ menu_add_prop(P_PROMPT, yyvsp[-1].string, NULL, yyvsp[0].expr);
+;}
+ break;
+
+ case 89:
+
+ { yyval.token = T_ENDMENU; ;}
+ break;
+
+ case 90:
+
+ { yyval.token = T_ENDCHOICE; ;}
+ break;
+
+ case 91:
+
+ { yyval.token = T_ENDIF; ;}
+ break;
+
+ case 94:
+
+ { yyval.expr = NULL; ;}
+ break;
+
+ case 95:
+
+ { yyval.expr = yyvsp[0].expr; ;}
+ break;
+
+ case 96:
+
+ { yyval.expr = expr_alloc_symbol(yyvsp[0].symbol); ;}
+ break;
+
+ case 97:
+
+ { yyval.expr = expr_alloc_comp(E_EQUAL, yyvsp[-2].symbol, yyvsp[0].symbol); ;}
+ break;
+
+ case 98:
+
+ { yyval.expr = expr_alloc_comp(E_UNEQUAL, yyvsp[-2].symbol, yyvsp[0].symbol); ;}
+ break;
+
+ case 99:
+
+ { yyval.expr = yyvsp[-1].expr; ;}
+ break;
+
+ case 100:
+
+ { yyval.expr = expr_alloc_one(E_NOT, yyvsp[0].expr); ;}
+ break;
+
+ case 101:
+
+ { yyval.expr = expr_alloc_two(E_OR, yyvsp[-2].expr, yyvsp[0].expr); ;}
+ break;
+
+ case 102:
+
+ { yyval.expr = expr_alloc_two(E_AND, yyvsp[-2].expr, yyvsp[0].expr); ;}
+ break;
+
+ case 103:
+
+ { yyval.symbol = sym_lookup(yyvsp[0].string, 0); free(yyvsp[0].string); ;}
+ break;
+
+ case 104:
+
+ { yyval.symbol = sym_lookup(yyvsp[0].string, 1); free(yyvsp[0].string); ;}
+ break;
+
+
+ }
+
+/* Line 999 of yacc.c. */
+
+\f
+ yyvsp -= yylen;
+ yyssp -= yylen;
+
+
+ YY_STACK_PRINT (yyss, yyssp);
+
+ *++yyvsp = yyval;
+
+
+ /* Now `shift' the result of the reduction. Determine what state
+ that goes to, based on the state we popped back to and the rule
+ number reduced by. */
+
+ yyn = yyr1[yyn];
+
+ yystate = yypgoto[yyn - YYNTOKENS] + *yyssp;
+ if (0 <= yystate && yystate <= YYLAST && yycheck[yystate] == *yyssp)
+ yystate = yytable[yystate];
+ else
+ yystate = yydefgoto[yyn - YYNTOKENS];
+
+ goto yynewstate;
+
+
+/*------------------------------------.
+| yyerrlab -- here on detecting error |
+`------------------------------------*/
+yyerrlab:
+ /* If not already recovering from an error, report this error. */
+ if (!yyerrstatus)
+ {
+ ++yynerrs;
+#if YYERROR_VERBOSE
+ yyn = yypact[yystate];
+
+ if (YYPACT_NINF < yyn && yyn < YYLAST)
+ {
+ YYSIZE_T yysize = 0;
+ int yytype = YYTRANSLATE (yychar);
+ char *yymsg;
+ int yyx, yycount;
+
+ yycount = 0;
+ /* Start YYX at -YYN if negative to avoid negative indexes in
+ YYCHECK. */
+ for (yyx = yyn < 0 ? -yyn : 0;
+ yyx < (int) (sizeof (yytname) / sizeof (char *)); yyx++)
+ if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR)
+ yysize += yystrlen (yytname[yyx]) + 15, yycount++;
+ yysize += yystrlen ("syntax error, unexpected ") + 1;
+ yysize += yystrlen (yytname[yytype]);
+ yymsg = (char *) YYSTACK_ALLOC (yysize);
+ if (yymsg != 0)
+ {
+ char *yyp = yystpcpy (yymsg, "syntax error, unexpected ");
+ yyp = yystpcpy (yyp, yytname[yytype]);
+
+ if (yycount < 5)
+ {
+ yycount = 0;
+ for (yyx = yyn < 0 ? -yyn : 0;
+ yyx < (int) (sizeof (yytname) / sizeof (char *));
+ yyx++)
+ if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR)
+ {
+ const char *yyq = ! yycount ? ", expecting " : " or ";
+ yyp = yystpcpy (yyp, yyq);
+ yyp = yystpcpy (yyp, yytname[yyx]);
+ yycount++;
+ }
+ }
+ yyerror (yymsg);
+ YYSTACK_FREE (yymsg);
+ }
+ else
+ yyerror ("syntax error; also virtual memory exhausted");
+ }
+ else
+#endif /* YYERROR_VERBOSE */
+ yyerror ("syntax error");
+ }
+
+
+
+ if (yyerrstatus == 3)
+ {
+ /* If just tried and failed to reuse lookahead token after an
+ error, discard it. */
+
+ /* Return failure if at end of input. */
+ if (yychar == YYEOF)
+ {
+ /* Pop the error token. */
+ YYPOPSTACK;
+ /* Pop the rest of the stack. */
+ while (yyss < yyssp)
+ {
+ YYDSYMPRINTF ("Error: popping", yystos[*yyssp], yyvsp, yylsp);
+ yydestruct (yystos[*yyssp], yyvsp);
+ YYPOPSTACK;
+ }
+ YYABORT;
+ }
+
+ YYDSYMPRINTF ("Error: discarding", yytoken, &yylval, &yylloc);
+ yydestruct (yytoken, &yylval);
+ yychar = YYEMPTY;
+
+ }
+
+ /* Else will try to reuse lookahead token after shifting the error
+ token. */
+ goto yyerrlab1;
+
+
+/*----------------------------------------------------.
+| yyerrlab1 -- error raised explicitly by an action. |
+`----------------------------------------------------*/
+yyerrlab1:
+ yyerrstatus = 3; /* Each real token shifted decrements this. */
+
+ for (;;)
+ {
+ yyn = yypact[yystate];
+ if (yyn != YYPACT_NINF)
+ {
+ yyn += YYTERROR;
+ if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR)
+ {
+ yyn = yytable[yyn];
+ if (0 < yyn)
+ break;
+ }
+ }
+
+ /* Pop the current state because it cannot handle the error token. */
+ if (yyssp == yyss)
+ YYABORT;
+
+ YYDSYMPRINTF ("Error: popping", yystos[*yyssp], yyvsp, yylsp);
+ yydestruct (yystos[yystate], yyvsp);
+ yyvsp--;
+ yystate = *--yyssp;
+
+ YY_STACK_PRINT (yyss, yyssp);
+ }
+
+ if (yyn == YYFINAL)
+ YYACCEPT;
+
+ YYDPRINTF ((stderr, "Shifting error token, "));
+
+ *++yyvsp = yylval;
+
+
+ yystate = yyn;
+ goto yynewstate;
+
+
+/*-------------------------------------.
+| yyacceptlab -- YYACCEPT comes here. |
+`-------------------------------------*/
+yyacceptlab:
+ yyresult = 0;
+ goto yyreturn;
+
+/*-----------------------------------.
+| yyabortlab -- YYABORT comes here. |
+`-----------------------------------*/
+yyabortlab:
+ yyresult = 1;
+ goto yyreturn;
+
+#ifndef yyoverflow
+/*----------------------------------------------.
+| yyoverflowlab -- parser overflow comes here. |
+`----------------------------------------------*/
+yyoverflowlab:
+ yyerror ("parser stack overflow");
+ yyresult = 2;
+ /* Fall through. */
+#endif
+
+yyreturn:
+#ifndef yyoverflow
+ if (yyss != yyssa)
+ YYSTACK_FREE (yyss);
+#endif
+ return yyresult;
+}
+
+
+
+
+
+/*
+ * conf_parse - parse the Kconfig file tree rooted at 'name'.
+ *
+ * Initializes the scanner and the symbol/menu tables, runs the
+ * generated parser, then finalizes the menu tree and checks every
+ * symbol's dependencies.  Exits the process with status 1 if any
+ * parse errors were recorded in zconfnerrs.
+ */
+void conf_parse(const char *name)
+{
+	struct symbol *sym;
+	int i;
+
+	/* point the lexer at the top-level Kconfig file */
+	zconf_initscan(name);
+
+	sym_init();
+	menu_init();
+	/* cache the symbol named "MODULES" for later use */
+	modules_sym = sym_lookup("MODULES", 0);
+	rootmenu.prompt = menu_add_prop(P_MENU, "Linux Kernel Configuration", NULL, NULL);
+
+	//zconfdebug = 1;
+	zconfparse();
+	if (yconfnerrs_placeholder_never) ;
+	if (zconfnerrs)
+		exit(1);
+	/* resolve choices, dependencies and sub-menu structure */
+	menu_finalize(&rootmenu);
+	/* run dependency checks; print a separating newline after any
+	 * reported problem, otherwise mark the symbol as done */
+	for_all_symbols(i, sym) {
+		if (!(sym->flags & SYMBOL_CHECKED) && sym_check_deps(sym))
+			printf("\n");
+		else
+			sym->flags |= SYMBOL_CHECK_DONE;
+	}
+
+	/* NOTE(review): presumably marks the configuration as modified
+	 * so it gets written out — confirm against callers */
+	sym_change_count = 1;
+}
+
+/*
+ * zconf_tokenname - map a block start/end token id to the Kconfig
+ * keyword it represents.  Returns the placeholder "<token>" for any
+ * token not in the table.
+ */
+const char *zconf_tokenname(int token)
+{
+	static const struct {
+		int tok;
+		const char *name;
+	} toknames[] = {
+		{ T_MENU,	"menu" },
+		{ T_ENDMENU,	"endmenu" },
+		{ T_CHOICE,	"choice" },
+		{ T_ENDCHOICE,	"endchoice" },
+		{ T_IF,		"if" },
+		{ T_ENDIF,	"endif" },
+	};
+	unsigned int i;
+
+	for (i = 0; i < sizeof(toknames) / sizeof(toknames[0]); i++)
+		if (toknames[i].tok == token)
+			return toknames[i].name;
+	return "<token>";
+}
+
+/*
+ * zconf_endtoken - validate the token closing a menu/choice/if block.
+ *
+ * Returns true when 'token' is the expected end token and the block
+ * is closed in the same file it was opened in; otherwise prints a
+ * diagnostic, bumps zconfnerrs, and returns false.
+ */
+static bool zconf_endtoken(int token, int starttoken, int endtoken)
+{
+	const char *startname = zconf_tokenname(starttoken);
+
+	if (token != endtoken) {
+		zconfprint("unexpected '%s' within %s block", zconf_tokenname(token), startname);
+		zconfnerrs++;
+		return false;
+	}
+	if (current_menu->file != current_file) {
+		zconfprint("'%s' in different file than '%s'", zconf_tokenname(token), startname);
+		zconfprint("location of the '%s'", startname);
+		zconfnerrs++;
+		return false;
+	}
+	return true;
+}
+
+/*
+ * zconfprint - printf-style parser diagnostic, prefixed with the
+ * current file name and line number and terminated with a newline.
+ */
+static void zconfprint(const char *err, ...)
+{
+	va_list ap;
+
+	/* +1: presumably the lexer's line count is 0-based — confirm */
+	fprintf(stderr, "%s:%d: ", zconf_curname(), zconf_lineno() + 1);
+	va_start(ap, err);
+	vfprintf(stderr, err, ap);
+	va_end(ap);
+	fprintf(stderr, "\n");
+}
+
+/*
+ * zconferror - print a parse error with file/line prefix; presumably
+ * the yyerror callback of the generated parser — confirm the
+ * yyerror/zconferror mapping in the grammar prologue.
+ */
+static void zconferror(const char *err)
+{
+	fprintf(stderr, "%s:%d: %s\n", zconf_curname(), zconf_lineno() + 1, err);
+}
+
+/*
+ * print_quoted_string - write 'str' to 'out' wrapped in double quotes,
+ * backslash-escaping any embedded double quote characters.  Produces
+ * the same output bytes as emitting the string in unescaped chunks.
+ */
+void print_quoted_string(FILE *out, const char *str)
+{
+	const char *s;
+
+	putc('"', out);
+	for (s = str; *s; s++) {
+		if (*s == '"')
+			putc('\\', out);
+		putc(*s, out);
+	}
+	putc('"', out);
+}
+
+/*
+ * print_symbol - dump one config/choice entry of 'menu' to 'out' in
+ * Kconfig-like syntax: the header line, the symbol type, the
+ * properties attached to this particular menu entry, and the help
+ * text (with trailing newlines stripped, in place).
+ */
+void print_symbol(FILE *out, struct menu *menu)
+{
+	struct symbol *sym = menu->sym;
+	struct property *prop;
+
+	if (sym_is_choice(sym))
+		fprintf(out, "choice\n");
+	else
+		fprintf(out, "config %s\n", sym->name);
+	switch (sym->type) {
+	case S_BOOLEAN:
+		fputs(" boolean\n", out);
+		break;
+	case S_TRISTATE:
+		fputs(" tristate\n", out);
+		break;
+	case S_STRING:
+		fputs(" string\n", out);
+		break;
+	case S_INT:
+		fputs(" integer\n", out);
+		break;
+	case S_HEX:
+		fputs(" hex\n", out);
+		break;
+	default:
+		fputs(" ???\n", out);
+		break;
+	}
+	/* a symbol's property list is shared across its menu entries;
+	 * print only the properties belonging to this entry */
+	for (prop = sym->prop; prop; prop = prop->next) {
+		if (prop->menu != menu)
+			continue;
+		switch (prop->type) {
+		case P_PROMPT:
+			fputs(" prompt ", out);
+			print_quoted_string(out, prop->text);
+			if (!expr_is_yes(prop->visible.expr)) {
+				fputs(" if ", out);
+				expr_fprint(prop->visible.expr, out);
+			}
+			fputc('\n', out);
+			break;
+		case P_DEFAULT:
+			fputs( " default ", out);
+			expr_fprint(prop->expr, out);
+			if (!expr_is_yes(prop->visible.expr)) {
+				fputs(" if ", out);
+				expr_fprint(prop->visible.expr, out);
+			}
+			fputc('\n', out);
+			break;
+		case P_CHOICE:
+			fputs("  #choice value\n", out);
+			break;
+		default:
+			fprintf(out, " unknown prop %d!\n", prop->type);
+			break;
+		}
+	}
+	if (sym->help) {
+		/* strip trailing newlines in place; the previous
+		 * open-coded loop (sym->help[--len]) read sym->help[-1]
+		 * when the help text was empty or all newlines */
+		int len = strlen(sym->help);
+		while (len > 0 && sym->help[len - 1] == '\n')
+			sym->help[--len] = 0;
+		fprintf(out, " help\n%s\n", sym->help);
+	}
+	fputc('\n', out);
+}
+
+/*
+ * zconfdump - dump the entire parsed menu tree to 'out'.
+ *
+ * Non-recursive depth-first walk starting at rootmenu.list: symbol
+ * entries are printed via print_symbol(), bare comment/menu prompts
+ * are printed inline.  Traversal order is child first, then sibling,
+ * then climb back up through parents, emitting "endmenu" for each
+ * menu-typed parent left behind.
+ */
+void zconfdump(FILE *out)
+{
+	struct property *prop;
+	struct symbol *sym;
+	struct menu *menu;
+
+	menu = rootmenu.list;
+	while (menu) {
+		if ((sym = menu->sym))
+			print_symbol(out, menu);
+		else if ((prop = menu->prompt)) {
+			switch (prop->type) {
+			case P_COMMENT:
+				fputs("\ncomment ", out);
+				print_quoted_string(out, prop->text);
+				fputs("\n", out);
+				break;
+			case P_MENU:
+				fputs("\nmenu ", out);
+				print_quoted_string(out, prop->text);
+				fputs("\n", out);
+				break;
+			default:
+				;
+			}
+			/* print the entry's visibility condition unless
+			 * it is trivially "y" */
+			if (!expr_is_yes(prop->visible.expr)) {
+				fputs(" depends ", out);
+				expr_fprint(prop->visible.expr, out);
+				fputc('\n', out);
+			}
+			fputs("\n", out);
+		}
+
+		/* advance: child, then sibling, then climb parents,
+		 * closing each menu block we leave */
+		if (menu->list)
+			menu = menu->list;
+		else if (menu->next)
+			menu = menu->next;
+		else while ((menu = menu->parent)) {
+			if (menu->prompt && menu->prompt->type == P_MENU)
+				fputs("\nendmenu\n", out);
+			if (menu->next) {
+				menu = menu->next;
+				break;
+			}
+		}
+	}
+}
+
+#include "lex.zconf.c"
+#include "confdata.c"
+#include "expr.c"
+#include "symbol.c"
+#include "menu.c"
+
+
--- /dev/null
+/* A Bison parser, made from zconf.y, by GNU bison 1.75. */
+
+/* Skeleton parser for Yacc-like parsing with Bison,
+ Copyright (C) 1984, 1989, 1990, 2000, 2001, 2002 Free Software Foundation, Inc.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330,
+ Boston, MA 02111-1307, USA. */
+
+/* As a special exception, when this file is copied by Bison into a
+ Bison output file, you may use that output file without restriction.
+ This special exception was added by the Free Software Foundation
+ in version 1.24 of Bison. */
+
+#ifndef BISON_ZCONF_TAB_H
+# define BISON_ZCONF_TAB_H
+
+/* Tokens. */
+#ifndef YYTOKENTYPE
+# define YYTOKENTYPE
+ /* Put the tokens into the symbol table, so that GDB and other debuggers
+ know about them. */
+ enum yytokentype {
+ T_MAINMENU = 258,
+ T_MENU = 259,
+ T_ENDMENU = 260,
+ T_SOURCE = 261,
+ T_CHOICE = 262,
+ T_ENDCHOICE = 263,
+ T_COMMENT = 264,
+ T_CONFIG = 265,
+ T_HELP = 266,
+ T_HELPTEXT = 267,
+ T_IF = 268,
+ T_ENDIF = 269,
+ T_DEPENDS = 270,
+ T_REQUIRES = 271,
+ T_OPTIONAL = 272,
+ T_PROMPT = 273,
+ T_DEFAULT = 274,
+ T_TRISTATE = 275,
+ T_BOOLEAN = 276,
+ T_INT = 277,
+ T_HEX = 278,
+ T_WORD = 279,
+ T_STRING = 280,
+ T_UNEQUAL = 281,
+ T_EOF = 282,
+ T_EOL = 283,
+ T_CLOSE_PAREN = 284,
+ T_OPEN_PAREN = 285,
+ T_ON = 286,
+ T_OR = 287,
+ T_AND = 288,
+ T_EQUAL = 289,
+ T_NOT = 290
+ };
+#endif
+#define T_MAINMENU 258
+#define T_MENU 259
+#define T_ENDMENU 260
+#define T_SOURCE 261
+#define T_CHOICE 262
+#define T_ENDCHOICE 263
+#define T_COMMENT 264
+#define T_CONFIG 265
+#define T_HELP 266
+#define T_HELPTEXT 267
+#define T_IF 268
+#define T_ENDIF 269
+#define T_DEPENDS 270
+#define T_REQUIRES 271
+#define T_OPTIONAL 272
+#define T_PROMPT 273
+#define T_DEFAULT 274
+#define T_TRISTATE 275
+#define T_BOOLEAN 276
+#define T_INT 277
+#define T_HEX 278
+#define T_WORD 279
+#define T_STRING 280
+#define T_UNEQUAL 281
+#define T_EOF 282
+#define T_EOL 283
+#define T_CLOSE_PAREN 284
+#define T_OPEN_PAREN 285
+#define T_ON 286
+#define T_OR 287
+#define T_AND 288
+#define T_EQUAL 289
+#define T_NOT 290
+
+
+
+
+#ifndef YYSTYPE
+#line 33 "zconf.y"
+typedef union {
+ int token;
+ char *string;
+ struct symbol *symbol;
+ struct expr *expr;
+ struct menu *menu;
+} yystype;
+/* Line 1281 of /usr/share/bison/yacc.c. */
+#line 118 "zconf.tab.h"
+# define YYSTYPE yystype
+#endif
+
+extern YYSTYPE zconflval;
+
+
+#endif /* not BISON_ZCONF_TAB_H */
+
+++ /dev/null
-Summary: The Linux kernel (the core of the Linux operating system)
-
-# What parts do we want to build? We must build at least one kernel.
-# These are the kernels that are built IF the architecture allows it.
-
-%define buildup 1
-%define buildsmp 0
-%define builduml 0
-%define buildsource 0
-%define builddoc 0
-
-
-# Versions of various parts
-
-#
-# Polite request for people who spin their own kernel rpms:
-# please modify the "release" field in a way that identifies
-# that the kernel isn't the stock distribution kernel, for example by
-# adding some text to the end of the version number.
-#
-%define sublevel 8
-%define kversion 2.6.%{sublevel}
-%define rpmversion 2.6.%{sublevel}
-%define rhbsys %([ -r /etc/beehive-root ] && echo || echo .`whoami`)
-%define release 1.521.2.6.planetlab%{?date:.%{date}}
-%define signmodules 0
-
-%define KVERREL %{PACKAGE_VERSION}-%{PACKAGE_RELEASE}
-
-# Override generic defaults with per-arch defaults
-
-%define image_install_path boot
-
-#
-# Three sets of minimum package version requirements in the form of Conflicts:
-# to versions below the minimum
-#
-
-#
-# First the general kernel 2.6 required versions as per
-# Documentation/Changes
-#
-%define kernel_dot_org_conflicts ppp <= 2.3.15, pcmcia-cs <= 3.1.20, isdn4k-utils <= 3.0, mount < 2.10r-5, nfs-utils < 1.0.3, e2fsprogs < 1.29, util-linux < 2.10, jfsutils < 1.0.14, reiserfsprogs < 3.6.3, xfsprogs < 2.1.0, procps < 2.0.9, oprofile < 0.5.3
-
-#
-# Then a series of requirements that are distribution specific, either
-# because we add patches for something, or the older versions have
-# problems with the newer kernel or lack certain things that make
-# integration in the distro harder than needed.
-#
-%define package_conflicts cipe < 1.4.5, tux < 2.1.0, kudzu <= 0.92, initscripts < 7.23, dev < 3.2-7, iptables < 1.2.5-3, bcm5820 < 1.81, nvidia-rh72 <= 1.0
-
-#
-# Several packages had bugs in them that became obvious when the NPTL
-# threading code got integrated.
-#
-%define nptl_conflicts SysVinit < 2.84-13, pam < 0.75-48, vixie-cron < 3.0.1-73, privoxy < 3.0.0-8, spamassassin < 2.44-4.8.x, cups < 1.1.17-13
-
-#
-# Packages that need to be installed before the kernel is, because the %post
-# scripts use them.
-#
-%define kernel_prereq fileutils, module-init-tools, initscripts >= 5.83, mkinitrd >= 3.5.5
-
-Vendor: PlanetLab
-Packager: PlanetLab Central <support@planet-lab.org>
-Distribution: PlanetLab 3.0
-URL: http://cvs.planet-lab.org/cvs/linux-2.6
-
-Name: kernel
-Group: System Environment/Kernel
-License: GPLv2
-Version: %{rpmversion}
-Release: %{release}
-ExclusiveOS: Linux
-Provides: kernel = %{version}
-Provides: kernel-drm = 4.3.0
-Prereq: %{kernel_prereq}
-Conflicts: %{kernel_dot_org_conflicts}
-Conflicts: %{package_conflicts}
-Conflicts: %{nptl_conflicts}
-# We can't let RPM do the dependencies automatic because it'll then pick up
-# a correct but undesirable perl dependency from the module headers which
-# isn't required for the kernel proper to function
-AutoReqProv: no
-
-#
-# List the packages used during the kernel build
-#
-BuildPreReq: module-init-tools, patch >= 2.5.4, bash >= 2.03, sh-utils, tar
-BuildPreReq: bzip2, findutils, gzip, m4, perl, make >= 3.78, gnupg
-#BuildPreReq: kernel-utils >= 1:2.4-12.1.142
-BuildRequires: gcc >= 2.96-98, binutils >= 2.12, redhat-rpm-config
-BuildConflicts: rhbuildsys(DiskFree) < 500Mb
-BuildArchitectures: i686
-
-
-
-Source0: ftp://ftp.kernel.org/pub/linux/kernel/v2.6/linux-%{kversion}.tar.bz2
-
-BuildRoot: %{_tmppath}/kernel-%{KVERREL}-root
-
-%description
-The kernel package contains the Linux kernel (vmlinuz), the core of any
-Linux operating system. The kernel handles the basic functions
-of the operating system: memory allocation, process allocation, device
-input and output, etc.
-
-
-%package sourcecode
-Summary: The source code for the Linux kernel.
-Group: Development/System
-Prereq: fileutils
-Requires: make >= 3.78
-Requires: gcc >= 3.2
-Requires: /usr/bin/strip
-# for xconfig and gconfig
-Requires: qt-devel, gtk2-devel readline-devel ncurses-devel
-Provides: kernel-source
-Obsoletes: kernel-source <= 2.6.6
-
-%description sourcecode
-The kernel-sourcecode package contains the source code files for the Linux
-kernel. The source files can be used to build a custom kernel that is
-smaller by virtue of only including drivers for your particular hardware, if
-you are so inclined (and you know what you're doing). The customisation
-guide in the documentation describes in detail how to do this. This package
-is neither needed nor usable for building external kernel modules for
-linking such modules into the default operating system kernels.
-
-%package doc
-Summary: Various documentation bits found in the kernel source.
-Group: Documentation
-%if !%{buildsource}
-Obsoletes: kernel-source <= 2.6.6
-Obsoletes: kernel-sourcecode <= 2.6.6
-%endif
-
-%description doc
-This package contains documentation files from the kernel
-source. Various bits of information about the Linux kernel and the
-device drivers shipped with it are documented in these files.
-
-You'll want to install this package if you need a reference to the
-options that can be passed to Linux kernel modules at load time.
-
-
-%package smp
-Summary: The Linux kernel compiled for SMP machines.
-
-Group: System Environment/Kernel
-Provides: kernel = %{version}
-Provides: kernel-drm = 4.3.0
-Prereq: %{kernel_prereq}
-Conflicts: %{kernel_dot_org_conflicts}
-Conflicts: %{package_conflicts}
-Conflicts: %{nptl_conflicts}
-# upto and including kernel 2.4.9 rpms, the 4Gb+ kernel was called kernel-enterprise
-# now that the smp kernel offers this capability, obsolete the old kernel
-Obsoletes: kernel-enterprise < 2.4.10
-# We can't let RPM do the dependencies automatic because it'll then pick up
-# a correct but undesirable perl dependency from the module headers which
-# isn't required for the kernel proper to function
-AutoReqProv: no
-
-%description smp
-This package includes a SMP version of the Linux kernel. It is
-required only on machines with two or more CPUs as well as machines with
-hyperthreading technology.
-
-Install the kernel-smp package if your machine uses two or more CPUs.
-
-%package uml
-Summary: The Linux kernel compiled for use in user mode (User Mode Linux).
-
-Group: System Environment/Kernel
-
-%description uml
-This package includes a user mode version of the Linux kernel.
-
-%package vserver
-Summary: A placeholder RPM that provides kernel and kernel-drm
-
-Group: System Environment/Kernel
-Provides: kernel = %{version}
-Provides: kernel-drm = 4.3.0
-
-%description vserver
-VServers do not require and cannot use kernels, but some RPMs have
-implicit or explicit dependencies on the "kernel" package
-(e.g. tcpdump). This package installs no files but provides the
-necessary dependencies to make rpm and yum happy.
-
-%prep
-
-%setup -n linux-%{kversion}
-
-# make sure the kernel has the sublevel we know it has. This looks weird
-# but for -pre and -rc versions we need it since we only want to use
-# the higher version when the final kernel is released.
-perl -p -i -e "s/^SUBLEVEL.*/SUBLEVEL = %{sublevel}/" Makefile
-perl -p -i -e "s/^EXTRAVERSION.*/EXTRAVERSION = -prep/" Makefile
-
-# get rid of unwanted files resulting from patch fuzz
-find . -name "*.orig" -exec rm -fv {} \;
-find . -name "*~" -exec rm -fv {} \;
-
-###
-### build
-###
-%build
-
-BuildKernel() {
-
- # Pick the right config file for the kernel we're building
- if [ -n "$1" ] ; then
- Config=kernel-%{kversion}-%{_target_cpu}-$1-planetlab.config
- else
- Config=kernel-%{kversion}-%{_target_cpu}-planetlab.config
- fi
-
- KernelVer=%{version}-%{release}$1
- echo BUILDING A KERNEL FOR $1 %{_target_cpu}...
-
- # make sure EXTRAVERSION says what we want it to say
- perl -p -i -e "s/^EXTRAVERSION.*/EXTRAVERSION = -%{release}$1/" Makefile
-
- # override ARCH in the case of UML
- if [ "$1" = "uml" ] ; then
- export ARCH=um
- fi
-
- # and now to start the build process
-
- make -s mrproper
- cp configs/$Config .config
-
- make -s nonint_oldconfig > /dev/null
- make -s include/linux/version.h
-
- make -s %{?_smp_mflags} bzImage
- make -s %{?_smp_mflags} modules || exit 1
- make buildcheck
-
- # Start installing the results
-
- mkdir -p $RPM_BUILD_ROOT/usr/lib/debug/boot
- mkdir -p $RPM_BUILD_ROOT/%{image_install_path}
- install -m 644 System.map $RPM_BUILD_ROOT/usr/lib/debug/boot/System.map-$KernelVer
- objdump -t vmlinux | grep ksymtab | cut -f2 | cut -d" " -f2 | cut -c11- | sort -u > exported
- echo "_stext" >> exported
- echo "_end" >> exported
- touch $RPM_BUILD_ROOT/boot/System.map-$KernelVer
- for i in `cat exported`
- do
- grep " $i\$" System.map >> $RPM_BUILD_ROOT/boot/System.map-$KernelVer || :
- grep "tab_$i\$" System.map >> $RPM_BUILD_ROOT/boot/System.map-$KernelVer || :
- grep "__crc_$i\$" System.map >> $RPM_BUILD_ROOT/boot/System.map-$KernelVer ||:
- done
- rm -f exported
-# install -m 644 init/kerntypes.o $RPM_BUILD_ROOT/boot/Kerntypes-$KernelVer
- install -m 644 .config $RPM_BUILD_ROOT/boot/config-$KernelVer
- rm -f System.map
- cp arch/*/boot/bzImage $RPM_BUILD_ROOT/%{image_install_path}/vmlinuz-$KernelVer
-
- mkdir -p $RPM_BUILD_ROOT/lib/modules/$KernelVer
- make -s INSTALL_MOD_PATH=$RPM_BUILD_ROOT modules_install KERNELRELEASE=$KernelVer
-
- # And save the headers/makefiles etc for building modules against
- #
- # This all looks scary, but the end result is supposed to be:
- # * all arch relevant include/ files
- # * all Makefile/Kconfig files
- # * all script/ files
-
- rm -f $RPM_BUILD_ROOT/lib/modules/$KernelVer/build
- mkdir -p $RPM_BUILD_ROOT/lib/modules/$KernelVer/build
- # first copy everything
- cp --parents `find -type f -name Makefile -o -name "Kconfig*"` $RPM_BUILD_ROOT/lib/modules/$KernelVer/build
- cp Module.symvers $RPM_BUILD_ROOT/lib/modules/$KernelVer/build
- # then drop all but the needed Makefiles/Kconfig files
- rm -rf $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/Documentation
- rm -rf $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/scripts
- rm -rf $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/include
- cp arch/%{_arch}/kernel/asm-offsets.s $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/arch/%{_arch}/kernel || :
- cp .config $RPM_BUILD_ROOT/lib/modules/$KernelVer/build
- cp -a scripts $RPM_BUILD_ROOT/lib/modules/$KernelVer/build
- cp -a arch/%{_arch}/scripts $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/arch/%{_arch} || :
- cp -a arch/%{_arch}/*lds $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/arch/%{_arch}/ || :
- rm -f $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/scripts/*.o
- rm -f $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/scripts/*/*.o
- mkdir -p $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/include
- cd include
- cp -a acpi config linux math-emu media net pcmcia rxrpc scsi sound video asm asm-generic $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/include
- cp -a `readlink asm` $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/include
- # Make sure the Makefile and version.h have a matching timestamp so that
- # external modules can be built
- touch -r $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/Makefile $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/include/linux/version.h
- touch -r $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/.config $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/include/linux/autoconf.h
- cd ..
-
- #
- # save the vmlinux file for kernel debugging into the kernel-debuginfo rpm
- #
- mkdir -p $RPM_BUILD_ROOT/usr/lib/debug/lib/modules/$KernelVer
- cp vmlinux $RPM_BUILD_ROOT/usr/lib/debug/lib/modules/$KernelVer
-
- # mark modules executable so that strip-to-file can strip them
- find $RPM_BUILD_ROOT/lib/modules/$KernelVer -name "*.ko" -type f | xargs chmod u+x
-
- # detect missing or incorrect license tags
- for i in `find $RPM_BUILD_ROOT/lib/modules/$KernelVer -name "*.ko" ` ; do echo -n "$i " ; /sbin/modinfo -l $i >> modinfo ; done
- cat modinfo | grep -v "^GPL" | grep -v "^Dual BSD/GPL" | grep -v "^Dual MPL/GPL" | grep -v "^GPL and additional rights" | grep -v "^GPL v2" && exit 1
- rm -f modinfo
- # remove files that will be auto generated by depmod at rpm -i time
- rm -f $RPM_BUILD_ROOT/lib/modules/$KernelVer/modules.*
-
-}
-
-###
-# DO it...
-###
-
-# prepare directories
-rm -rf $RPM_BUILD_ROOT
-mkdir -p $RPM_BUILD_ROOT/boot
-
-%if %{buildup}
-BuildKernel
-%endif
-
-%if %{buildsmp}
-BuildKernel smp
-%endif
-
-%if %{builduml}
-BuildKernel uml
-%endif
-
-###
-### install
-###
-
-%install
-
-# architectures that don't get kernel-source (i586/i686/athlon) dont need
-# much of an install because the build phase already copied the needed files
-
-%if %{builddoc}
-mkdir -p $RPM_BUILD_ROOT/usr/share/doc/kernel-doc-%{kversion}/Documentation
-
-# sometimes non-world-readable files sneak into the kernel source tree
-chmod -R a+r *
-# copy the source over
-tar cf - Documentation | tar xf - -C $RPM_BUILD_ROOT/usr/share/doc/kernel-doc-%{kversion}
-%endif
-
-%if %{buildsource}
-
-mkdir -p $RPM_BUILD_ROOT/usr/src/linux-%{KVERREL}
-chmod -R a+r *
-
-# clean up the source tree so that it is ready for users to build their own
-# kernel
-make -s mrproper
-# copy the source over
-tar cf - . | tar xf - -C $RPM_BUILD_ROOT/usr/src/linux-%{KVERREL}
-
-# set the EXTRAVERSION to <version>custom, so that people who follow a kernel building howto
-# don't accidentally overwrite their currently working moduleset and hose
-# their system
-perl -p -i -e "s/^EXTRAVERSION.*/EXTRAVERSION = -%{release}custom/" $RPM_BUILD_ROOT/usr/src/linux-%{KVERREL}/Makefile
-
-# some config options may be appropriate for an rpm kernel build but are less so for custom user builds,
-# change those to values that are more appropriate as defeault for people who build their own kernel.
-perl -p -i -e "s/^CONFIG_DEBUG_INFO.*/# CONFIG_DEBUG_INFO is not set/" $RPM_BUILD_ROOT/usr/src/linux-%{KVERREL}/configs/*
-perl -p -i -e "s/^.*CONFIG_DEBUG_PAGEALLOC.*/# CONFIG_DEBUG_PAGEALLOC is not set/" $RPM_BUILD_ROOT/usr/src/linux-%{KVERREL}/configs/*
-perl -p -i -e "s/^.*CONFIG_DEBUG_SLAB.*/# CONFIG_DEBUG_SLAB is not set/" $RPM_BUILD_ROOT/usr/src/linux-%{KVERREL}/configs/*
-perl -p -i -e "s/^.*CONFIG_DEBUG_SPINLOCK.*/# CONFIG_DEBUG_SPINLOCK is not set/" $RPM_BUILD_ROOT/usr/src/linux-%{KVERREL}/configs/*
-perl -p -i -e "s/^.*CONFIG_DEBUG_HIGHMEM.*/# CONFIG_DEBUG_HIGHMEM is not set/" $RPM_BUILD_ROOT/usr/src/linux-%{KVERREL}/configs/*
-perl -p -i -e "s/^.*CONFIG_MODULE_SIG.*/# CONFIG_MODULE_SIG is not set/" $RPM_BUILD_ROOT/usr/src/linux-%{KVERREL}/configs/*
-
-install -m 644 %{SOURCE10} $RPM_BUILD_ROOT/usr/src/linux-%{KVERREL}
-%endif
-
-###
-### clean
-###
-
-%clean
-rm -rf $RPM_BUILD_ROOT
-
-###
-### scripts
-###
-
-# load the loop module for upgrades...in case the old modules get removed we have
-# loopback in the kernel so that mkinitrd will work.
-%pre
-/sbin/modprobe loop 2> /dev/null > /dev/null || :
-exit 0
-
-%pre smp
-/sbin/modprobe loop 2> /dev/null > /dev/null || :
-exit 0
-
-%post
-# trick mkinitrd in case the current environment does not have device mapper
-rootdev=$(awk '/^[ \t]*[^#]/ { if ($2 == "/") { print $1; }}' /etc/fstab)
-if echo $rootdev |grep -q /dev/mapper 2>/dev/null ; then
- if [ ! -f $rootdev ]; then
- fake_root_lvm=1
- mkdir -p $(dirname $rootdev)
- touch $rootdev
- fi
-fi
-[ -x /sbin/new-kernel-pkg ] && /sbin/new-kernel-pkg --mkinitrd --depmod --install %{KVERREL}
-if [ -n "$fake_root_lvm" ]; then
- rm -f $rootdev
-fi
-if [ -x /usr/sbin/hardlink ] ; then
-pushd /lib/modules/%{KVERREL}/build > /dev/null ; {
- cd /lib/modules/%{KVERREL}/build
- find . -type f | while read f; do hardlink -c /lib/modules/*/build/$f $f ; done
-}
-popd
-fi
-
-# make some useful links
-pushd /boot > /dev/null ; {
- ln -sf System.map-%{KVERREL} System.map
-# ln -sf Kerntypes-%{KVERREL} Kerntypes
- ln -sf config-%{KVERREL} config
- ln -sf initrd-%{KVERREL}.img initrd-boot
- ln -sf vmlinuz-%{KVERREL} kernel-boot
-}
-popd
-
-# ask for a reboot
-mkdir -p /etc/planetlab
-touch /etc/planetlab/update-reboot
-
-%post smp
-[ -x /sbin/new-kernel-pkg ] && /sbin/new-kernel-pkg --mkinitrd --depmod --install %{KVERREL}smp
-if [ -x /usr/sbin/hardlink ] ; then
-pushd /lib/modules/%{KVERREL}smp/build > /dev/null ; {
- cd /lib/modules/%{KVERREL}smp/build
- find . -type f | while read f; do hardlink -c /lib/modules/*/build/$f $f ; done
-}
-popd
-fi
-
-
-%preun
-/sbin/modprobe loop 2> /dev/null > /dev/null || :
-[ -x /sbin/new-kernel-pkg ] && /sbin/new-kernel-pkg --rminitrd --rmmoddep --remove %{KVERREL}
-
-%preun smp
-/sbin/modprobe loop 2> /dev/null > /dev/null || :
-[ -x /sbin/new-kernel-pkg ] && /sbin/new-kernel-pkg --rminitrd --rmmoddep --remove %{KVERREL}smp
-
-
-###
-### file lists
-###
-
-%if %{buildup}
-%files
-%defattr(-,root,root)
-/%{image_install_path}/vmlinuz-%{KVERREL}
-#/boot/Kerntypes-%{KVERREL}
-/boot/System.map-%{KVERREL}
-/boot/config-%{KVERREL}
-%dir /lib/modules/%{KVERREL}
-/lib/modules/%{KVERREL}/kernel
-%verify(not mtime) /lib/modules/%{KVERREL}/build
-
-%endif
-
-%if %{buildsmp}
-%files smp
-%defattr(-,root,root)
-/%{image_install_path}/vmlinuz-%{KVERREL}smp
-#/boot/Kerntypes-%{KVERREL}smp
-/boot/System.map-%{KVERREL}smp
-/boot/config-%{KVERREL}smp
-%dir /lib/modules/%{KVERREL}smp
-/lib/modules/%{KVERREL}smp/kernel
-%verify(not mtime) /lib/modules/%{KVERREL}smp/build
-
-%endif
-
-%if %{builduml}
-%files uml
-%defattr(-,root,root)
-
-%endif
-
-# only some architecture builds need kernel-source and kernel-doc
-
-%if %{buildsource}
-%files sourcecode
-%defattr(-,root,root)
-/usr/src/linux-%{KVERREL}/
-%endif
-
-
-%if %{builddoc}
-%files doc
-%defattr(-,root,root)
-/usr/share/doc/kernel-doc-%{kversion}/Documentation/*
-%endif
-
-
-%files vserver
-%defattr(-,root,root)
-# no files
-
-%changelog
-* Thu Sep 16 2004 Mark Huang <mlhuang@cs.princeton.edu>
-- merge to Fedora Core 2 2.6.8-1.521
-
-* Tue Aug 31 2004 Arjan van de Ven <arjanv@redhat.com>
-- fix execshield buglet with legacy binaries
-- 2.6.9-rc1-bk7
-
-* Mon Aug 30 2004 Arjan van de Ven <arjanv@redhat.com>
-- 2.6.9-rc1-bk6
-
-* Sat Aug 28 2004 Arjan van de Ven <arjanv@redhat.com>
-- 2.6.9-rc1-bk4, now with i915 DRM driver
-
-* Fri Aug 27 2004 Arjan van de Ven <arjanv@redhat.com>
-- 2.6.9-rc1-bk2
-
-* Mon Aug 23 2004 Arjan van de Ven <arjanv@redhat.com>
-- 2.6.8.1-bk2
-
-* Sat Aug 21 2004 Arjan van de Ven <arjanv@redhat.com>
-- attempt to fix early-udev bug
-
-* Fri Aug 13 2004 Arjan van de Ven <arjanv@redhat.com>
-- 2.6.8-rc4-bk3
-- split execshield up some more
-
-* Fri Aug 13 2004 Dave Jones <davej@redhat.com>
-- Update SCSI whitelist again with some more card readers.
-
-* Mon Aug 9 2004 Arjan van de Ven <arjanv@redhat.com>
-- 2.6.8-rc3-bk3
-
-* Thu Aug 5 2004 Mark Huang <mlhuang@cs.princeton.edu>
-- adapt for Fedora Core 2 based PlanetLab 3.0 (remove Source and Patch
- sections, most non-x86 sections, and GPG sections)
-
-* Wed Aug 4 2004 Arjan van de Ven <arjanv@redhat.com>
-- Add the flex-mmap bits for s390/s390x (Pete Zaitcev)
-- Add flex-mmap for x86-64 32 bit emulation
-- 2.6.8-rc3
-
-* Mon Aug 2 2004 Arjan van de Ven <arjanv@redhat.com>
-- Add Rik's token trashing control patch
-
-* Sun Aug 1 2004 Arjan van de Ven <arjanv@redhat.com>
-- 2.6.8-rc2-bk11
-
-* Fri Jul 30 2004 Arjan van de Ven <arjanv@redhat.com>
-- 2.6.8-rc2-bk8
-
-* Wed Jul 28 2004 Arjan van de Ven <arjanv@redhat.com>
-- 2.6.8-rc2-bk6
-- make a start at splitting up the execshield patchkit
-
-* Fri Jul 16 2004 Arjan van de Ven <arjanv@redhat.com>
-- ppc32 embedded updates
-
-* Thu Jul 15 2004 Arjan van de Ven <arjanv@redhat.com>
-- make USB modules again and add Alan's real fix for the SMM-meets-USB bug
-- 2.6.8-rc1-bk4
-
-* Wed Jul 14 2004 Arjan van de Ven <arjanv@redhat.com>
-- 2.6.8-rc1-bk3
-
-* Tue Jul 13 2004 Arjan van de Ven <arjanv@redhat.com>
-- add "enforcemodulesig" boot option to make the kernel load signed modules only
-
-* Mon Jul 12 2004 Arjan van de Ven <arjanv@redhat.com>
-- updated voluntary preempt
-- 2.6.8-rc1
-
-* Wed Jul 7 2004 Arjan van de Ven <arjanv@redhat.com>
-- fix boot breakage that was hitting lots of people (Dave Jones)
-
-* Tue Jul 6 2004 Arjan van de Ven <arjanv@redhat.com>
-- add voluntary preemption patch from Ingo
-- 2.6.7-bk19
-
-* Tue Jun 29 2004 Arjan van de Ven <arjanv@redhat.com>
-- make a start at gpg signed modules support
-
-* Sat Jun 27 2004 Arjan van de Ven <arjanv@redhat.com>
-- experiment with making the hardlink call in post more efficient
-- 2.6.7-bk9
-
-* Thu Jun 24 2004 Arjan van de Ven <arjanv@redhat.com>
-- 2.6.7-bk7
-- Add wli's patch to allocate memory bottom up not top down
-- change some config options in the kernel-sourcecode package that are
- good for rpm kernel builds but not for custom user builds to more appropriate
- default values.
-- reenable kernel-sourcecode again for a few builds
-
-* Wed Jun 23 2004 Arjan van de Ven <arjanv@redhat.com>
-- 2.6.7-bk5
-- fix tux unresolved symbols (#126532)
-
-* Mon Jun 21 2004 Arjan van de Ven <arjanv@redhat.com>
-- make kernel-doc and kernel-sourcecode builds independent of eachother
-- disable kernel-sourcecode builds entirely, we'll be replacing it with documentation
- on how to use the src.rpm instead for building your own kernel.
-
-* Sat Jun 19 2004 Arjan van de Ven <arjanv@redhat.com>
-- 2.6.7-bk2
-
-* Sun Jun 13 2004 Arjan van de Ven <arjanv@redhat.com>
-- add patch from DaveM to fix the ppp-keeps-iface-busy bug
-
-* Sat Jun 12 2004 Arjan van de Ven <arjanv@redhat.com>
-- add fix from Andi Kleen/Linus for the fpu-DoS
-
-* Thu Jun 10 2004 Arjan van de Ven <arjanv@redhat.com>
-- disable mlock-uses-rlimit patch, it has a security hole and needs more thought
-- revert airo driver to the FC2 one since the new one breaks
-
-* Tue Jun 8 2004 Dave Jones <davej@redhat.com>
-- Update to 2.6.7rc3
-
-* Fri Jun 4 2004 Arjan van de Ven <arjanv@redhat.com>
-- fix the mlock-uses-rlimit patch
-
-* Wed Jun 2 2004 David Woodhouse <dwmw2@redhat.com>
-- Add ppc64 (Mac G5)
-
-* Wed Jun 2 2004 Arjan van de Ven <arjanv@redhat.com>
-- add a forward port of the mlock-uses-rlimit patch
-- add NX support for x86 (Intel, Ingo)
-
-* Tue Jun 1 2004 Arjan van de Ven <arjanv@redhat.com>
-- refresh ext3 reservation patch
-
-* Sun May 30 2004 Arjan van de Ven <arjanv@redhat.com>
-- 2.6.7-rc2
-- set the ACPI OS name to "Microsoft Windows XP" for better compatibility
-
-* Thu May 27 2004 Pete Zaitcev <zaitcev@redhat.com>
-- Fix qeth and zfcp (s390 drivers): align qib by 256, embedded into qdio_irq.
-
-* Thu May 27 2004 Dave Jones <davej@redhat.com>
-- Fix the crashes on boot on Asus P4P800 boards. (#121819)
-
-* Wed May 26 2004 Dave Jones <davej@redhat.com>
-- Lots more updates to the SCSI whitelist for various
- USB card readers. (#112778, among others..)
-
-* Wed May 26 2004 Arjan van de Ven <arjanv@redhat.com>
-- back out ehci suspend/resume patch, it breaks
-- add fix for 3c59x-meets-kudzu bug from Alan
-
-* Tue May 25 2004 Arjan van de Ven <arjanv@redhat.com>
-- try improving suspend/resume by restoring more PCI state
-- 2.6.7-rc1-bk1
-
-* Mon May 24 2004 Dave Jones <davej@redhat.com>
-- Add yet another multi-card reader to the whitelist (#85851)
-
-* Sun May 23 2004 Dave Jones <davej@redhat.com>
-- Add another multi-card reader to the whitelist (#124048)
-
-* Wed May 19 2004 Arjan van de Ven <arjanv@redhat.com>
-- put firewire race fix in (datacorruptor)
-
-* Tue May 18 2004 Dave Jones <davej@redhat.com>
-- Fix typo in ibmtr driver preventing compile (#123391)
-
-* Mon May 17 2004 Arjan van de Ven <arjanv@redhat.com>
-- update to 2.6.6-bk3
-- made kernel-source and kernel-doc noarch.rpm's since they are not
- architecture specific.
-
-* Sat May 08 2004 Arjan van de Ven <arjanv@redhat.com>
-- fix non-booting on Transmeta cpus (Peter Anvin)
-- fix count leak in message queues
-
-* Fri May 07 2004 Arjan van de Ven <arjanv@redhat.com>
-- more ide cache flush work
-- patch from scsi-bk to fix sd refcounting
-
-* Thu May 06 2004 Arjan van de Ven <arjanv@redhat.com>
-- some more ide cache flush fixes
-
-* Wed May 05 2004 Arjan van de Ven <arjanv@redhat.com>
-- fix bug 122504
-- convert b44 to ethtool ops (jgarzik)
-- make IDE do a cache-flush on shutdown (me/Alan)
-
-* Tue May 04 2004 Arjan van de Ven <arjanv@redhat.com>
-- work around i810/i830 DRM issue
-
-* Fri Apr 30 2004 Arjan van de Ven <arjanv@redhat.com>
-- 2.6.6-rc3-bk1
-- make amd64 boot again
-- fix vm86-vs-4g4g interaction (Ingo)
-
-* Thu Apr 22 2004 Arjan van de Ven <arjanv@redhat.com>
-- 2.6.6-rc2
-
-* Tue Apr 20 2004 Arjan van de Ven <arjanv@redhat.com>
-- add the ext3 online resize patch
-
-* Mon Apr 19 2004 Arjan van de Ven <arjanv@redhat.com>
-- 2.6.6-rc1-bk3
-- add the objrmap vm from the -mm tree; it needs testing
-
-* Thu Apr 15 2004 Arjan van de Ven <arjanv@redhat.com>
-- 2.6.5-bk2
-- disable DISCONTIGMEM on ia64 for performance
-- fix sleep_on use in reiserfs (Chris Mason)
-
-* Tue Apr 13 2004 Arjan van de Ven <arjanv@redhat.com>
-- 2.6.5-mc4
-- reenable sg driver for scsi tape changers and such
-- the sk98lin driver oopses on module unload, preven that
-
-* Mon Apr 12 2004 Arjan van de Ven <arjanv@redhat.com>
-- fix "bad pmd" bug with patch from Ingo
-
-* Fri Apr 09 2004 Arjan van de Ven <arjanv@redhat.com>
-- 2.6.5-mc3
-- finish up the -mc2 merge
-- latest 4g/4g patch from Ingo
-- latest execshield patch from Ingo
-- fix a few framebuffer bugs
-
-* Thu Apr 08 2004 Arjan van de Ven <arjanv@redhat.com>
-- first attempt at a 2.6.5-mc2 merge
-
-* Thu Apr 08 2004 Dave Jones <davej@redhat.com>
-- Add in missing SiS AGP fix.
-
-* Tue Apr 06 2004 Dave Jones <davej@redhat.com>
-- More agpgart fixes.
-
-* Fri Apr 02 2004 Arjan van de Ven <arjanv@redhat.com>
-- fix another 4g/4g-vs-resume bug
-
-* Tue Mar 30 2004 Arjan van de Ven <arjanv@redhat.com>
-- 2.6.5-rc3
-- fix PCI posting bug in i830 DRM
-
-* Mon Mar 29 2004 Arjan van de Ven <arjanv@redhat.com>
-- 2.6.5-rc2-bk8
-
-* Mon Mar 29 2004 Dave Jones <davej@redhat.com>
-- Include latest agpgart fixes.
-
-* Thu Mar 25 2004 Arjan van de Ven <arjanv@redhat.com>
-- more DRM fixes
-- add the fsync patches from akpm
-
-* Tue Mar 23 2004 Arjan van de Ven <arjanv@redhat.com>
-- 2.6.5-rc2-bk3
-- fix direct userspace memory access in i830 drm driver
-
-* Mon Mar 22 2004 Arjan van de Ven <arjanv@redhat.com>
-- 2.6.5-rc2-bk2
-- some stackbloat reductions from Dave and me
-
-* Sat Mar 20 2004 Arjan van de Ven <arjanv@redhat.com>
-- 2.6.5-rc2
-
-* Tue Mar 16 2004 Dave Jones <davej@redhat.com>
-- 2.6.5-rc1
-
-* Mon Mar 15 2004 Arjan van de Ven <arjanv@redhat.com>
-- 2.6.4-bk3
-- fix oops in toshiba_acpi (Barry K. Nathan)
-
-* Sat Mar 13 2004 Arjan van de Ven <arjanv@redhat.com>
-- 2.6.4-bk2 merge
-
-* Thu Mar 11 2004 Arjan van de Ven <arjanv@redhat.com>
-- renable sonypi driver that was off accidentally
-- 2.6.4-final
-- fix the oops on alsa module unloads
-
-* Wed Mar 10 2004 Arjan van de Ven <arjanv@redhat.com>
-- add ppc64/iseries, ppc32 (powermac/ibook) and ia64 architectures
-- 2.6.4-rc3
-
-* Tue Mar 09 2004 Arjan van de Ven <arjanv@redhat.com>
-- 2.6.4-rc2-bk5
-- fix nfs-vs-selinux issue
-- fix typo in URL as per #117849
-
-* Mon Mar 08 2004 Arjan van de Ven <arjanv@redhat.com>
-- fix race in lp.c (#117710)
-- 2.6.4-rc2-bk3
-- attempt to fix S3 suspend-to-ram with 4g/4g split
-
-* Sat Mar 06 2004 Arjan van de Ven <arjanv@redhat.com>
-- fix reiserfs
-- set HZ to 1000 again for some tests
-
-* Wed Feb 25 2004 Arjan van de Ven <arjanv@redhat.com>
-- merge back a bunch of fedora fixes
-- disable audit
-
-* Tue Feb 24 2004 Arjan van de Ven <arjanv@redhat.com>
-- audit bugfixes
-- update tux to a working version
-- 2.6.3-bk5 merge
-
-* Fri Feb 20 2004 Arjan van de Ven <arjanv@redhat.com>
-- re-add and enable the Auditing patch
-- switch several cpufreq modules to built in since detecting in userspace
- which to use is unpleasant
-* Thu Jul 03 2003 Arjan van de Ven <arjanv@redhat.com>
-- 2.6 start
-
strncpy(menu_item, item, menu_width);
menu_item[menu_width] = 0;
- j = first_alpha(menu_item, "YyNnMmHh");
+ j = first_alpha(menu_item, "YyNnMm");
/* Clear 'residue' of last item */
wattrset (win, menubox_attr);
if (key < 256 && isalpha(key)) key = tolower(key);
- if (strchr("ynmh", key))
+ if (strchr("ynm", key))
i = max_choice;
else {
for (i = choice+1; i < max_choice; i++) {
- j = first_alpha(items[(scroll+i)*2+1], "YyNnMmHh");
+ j = first_alpha(items[(scroll+i)*2+1], "YyNnMm");
if (key == tolower(items[(scroll+i)*2+1][j]))
break;
}
if (i == max_choice)
for (i = 0; i < max_choice; i++) {
- j = first_alpha(items[(scroll+i)*2+1], "YyNnMmHh");
+ j = first_alpha(items[(scroll+i)*2+1], "YyNnMm");
if (key == tolower(items[(scroll+i)*2+1][j]))
break;
}
UTS_LEN=64
UTS_TRUNCATE="sed -e s/\(.\{1,$UTS_LEN\}\).*/\1/"
-LINUX_COMPILE_VERSION_ID="__linux_compile_version_id__`hostname | tr -c '[0-9A-Za-z\n]' '__'`_`LANG=C date | tr -c '[0-9A-Za-z\n]' '_'`"
+
# Generate a temporary compile.h
( echo /\* This file is auto generated, version $VERSION \*/
echo \#define UTS_VERSION \"`echo $UTS_VERSION | $UTS_TRUNCATE`\"
echo \#define LINUX_COMPILE_TIME \"`LC_ALL=C LANG=C date +%T`\"
- echo \#define LINUX_COMPILE_BY \"support\"
- echo \#define LINUX_COMPILE_HOST \"planet-lab.org\"
+ echo \#define LINUX_COMPILE_BY \"`whoami`\"
+ echo \#define LINUX_COMPILE_HOST \"`hostname | $UTS_TRUNCATE`\"
if [ -x /bin/dnsdomainname ]; then
echo \#define LINUX_COMPILE_DOMAIN \"`dnsdomainname | $UTS_TRUNCATE`\"
fi
echo \#define LINUX_COMPILER \"`$CC -v 2>&1 | tail -n 1`\"
- echo \#define LINUX_COMPILE_VERSION_ID $LINUX_COMPILE_VERSION_ID
- echo \#define LINUX_COMPILE_VERSION_ID_TYPE typedef char* "$LINUX_COMPILE_VERSION_ID""_t"
) > .tmpcompile
# Only replace the real compile.h if the new one is different,
--- /dev/null
+#!/bin/sh
+#
+# Copyright (C) 2002 Khalid Aziz <khalid_aziz@hp.com>
+# Copyright (C) 2002 Randy Dunlap <rddunlap@osdl.org>
+# Copyright (C) 2002 Al Stone <ahs3@fc.hp.com>
+# Copyright (C) 2002 Hewlett-Packard Company
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+#
+#
+# Rules to generate ikconfig.h from linux/.config:
+# - Retain lines that begin with "CONFIG_"
+# - Retain lines that begin with "# CONFIG_"
+# - lines that use double-quotes must \\-escape-quote them
+
+if [ $# -lt 2 ]
+then
+ echo "Usage: `basename $0` <configuration_file> <Makefile>"
+ exit 1
+fi
+
+config=$1
+makefile=$2
+
+cat << EOF
+#ifndef _IKCONFIG_H
+#define _IKCONFIG_H
+/*
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ *
+ * This file is generated automatically by scripts/mkconfigs. Do not edit.
+ *
+ */
+static char const ikconfig_config[] __attribute__((unused)) =
+"CONFIG_BEGIN=n\\n\\
+$(sed < $config -n 's/"/\\"/g;/^#\? \?CONFIG_/s/.*/&\\n\\/p')
+CONFIG_END=n\\n";
+#endif /* _IKCONFIG_H */
+EOF
+++ /dev/null
-#!/bin/sh
-# Generates a small Makefile used in the root of the output
-# directory, to allow make to be started from there.
-# The Makefile also allow for more convinient build of external modules
-
-# Usage
-# $1 - Kernel src directory
-# $2 - Output directory
-# $3 - version
-# $4 - patchlevel
-
-
-cat << EOF
-# Automatically generated by $0: don't edit
-
-VERSION = $3
-PATCHLEVEL = $4
-
-KERNELSRC := $1
-KERNELOUTPUT := $2
-
-MAKEFLAGS += --no-print-directory
-
-all:
- \$(MAKE) -C \$(KERNELSRC) O=\$(KERNELOUTPUT)
-
-%::
- \$(MAKE) -C \$(KERNELSRC) O=\$(KERNELOUTPUT) \$@
-
-EOF
-
+++ /dev/null
-host-progs := modpost mk_elfconfig
-always := $(host-progs) empty.o
-
-modpost-objs := modpost.o file2alias.o sumversion.o
-
-# dependencies on generated files need to be listed explicitly
-
-$(obj)/modpost.o $(obj)/file2alias.o $(obj)/sumversion.o: $(obj)/elfconfig.h
-
-quiet_cmd_elfconfig = MKELF $@
- cmd_elfconfig = $(obj)/mk_elfconfig $(ARCH) < $< > $@
-
-$(obj)/elfconfig.h: $(obj)/empty.o $(obj)/mk_elfconfig FORCE
- $(call if_changed,elfconfig)
-
-targets += elfconfig.h
MKSPEC := $(srctree)/scripts/package/mkspec
PREV := set -e; cd ..;
-# rpm-pkg
.PHONY: rpm-pkg rpm
-$(objtree)/kernel.spec: $(MKSPEC) $(srctree)/Makefile
+$(objtree)/kernel.spec: $(MKSPEC)
$(CONFIG_SHELL) $(MKSPEC) > $@
rpm-pkg rpm: $(objtree)/kernel.spec
clean-rule += rm -f $(objtree)/kernel.spec
-# binrpm-pkg
-.PHONY: binrpm-pkg
-$(objtree)/binkernel.spec: $(MKSPEC) $(srctree)/Makefile
- $(CONFIG_SHELL) $(MKSPEC) prebuilt > $@
-
-binrpm-pkg: $(objtree)/binkernel.spec
- $(MAKE)
- set -e; \
- $(CONFIG_SHELL) $(srctree)/scripts/mkversion > $(objtree)/.tmp_version
- set -e; \
- mv -f $(objtree)/.tmp_version $(objtree)/.version
-
- $(RPM) --define "_builddir $(srctree)" --target $(UTS_MACHINE) -bb $<
-
-clean-rule += rm -f $(objtree)/binkernel.spec
-
# Deb target
# ---------------------------------------------------------------------------
#
# ---------------------------------------------------------------------------
help:
@echo ' rpm-pkg - Build the kernel as an RPM package'
- @echo ' binrpm-pkg - Build an rpm package containing the compiled kernel & modules'
@echo ' deb-pkg - Build the kernel as an deb package'
# Patched for non-x86 by Opencon (L) 2002 <opencon@rio.skydome.net>
#
-# how we were called determines which rpms we build and how we build them
-if [ "$1" = "prebuilt" ]; then
- PREBUILT=true
-else
- PREBUILT=false
-fi
-
# starting to output the spec
if [ "`grep CONFIG_DRM=y .config | cut -f2 -d\=`" = "y" ]; then
PROVIDES=kernel-drm
echo "Group: System Environment/Kernel"
echo "Vendor: The Linux Community"
echo "URL: http://www.kernel.org"
-
-if ! $PREBUILT; then
echo -n "Source: kernel-$VERSION.$PATCHLEVEL.$SUBLEVEL"
echo "$EXTRAVERSION.tar.gz" | sed -e "s/-//g"
-fi
-
echo "BuildRoot: /var/tmp/%{name}-%{PACKAGE_VERSION}-root"
echo "Provides: $PROVIDES"
echo "%define __spec_install_post /usr/lib/rpm/brp-compress || :"
echo "%description"
echo "The Linux Kernel, the operating system core itself"
echo ""
-
-if ! $PREBUILT; then
echo "%prep"
echo "%setup -q"
echo ""
-fi
-
echo "%build"
-
-if ! $PREBUILT; then
echo "make clean && make"
echo ""
-fi
-
echo "%install"
echo 'mkdir -p $RPM_BUILD_ROOT/boot $RPM_BUILD_ROOT/lib $RPM_BUILD_ROOT/lib/modules'
if (($line =~ /\.init$/ || $line =~ /\.init\./) &&
($from !~ /\.init$/ &&
$from !~ /\.init\./ &&
- $from !~ /\.eh_frame$/ &&
$from !~ /\.stab$/ &&
$from !~ /\.rodata$/ &&
$from !~ /\.text\.lock$/ &&
#include <net/ipv6.h>
#include <linux/hugetlb.h>
#include <linux/major.h>
-#include <linux/personality.h>
#include "avc.h"
#include "objsec.h"
if (rc)
return rc;
- /* Clear any possibly unsafe personality bits on exec: */
- current->personality &= ~PER_CLEAR_ON_SETID;
-
/* Set the security field to the new SID. */
bsec->sid = newsid;
}
endmenu
menu "Open Sound System"
- depends on SOUND!=n && (BROKEN || !SPARC64)
+ depends on SOUND!=n
config SOUND_PRIME
tristate "Open Sound System (DEPRECATED)"
struct snd_info_entry *entry;
snd_info_buffer_t *buf;
size_t size = 0;
- loff_t pos;
data = snd_magic_cast(snd_info_private_data_t, file->private_data, return -ENXIO);
snd_assert(data != NULL, return -ENXIO);
- pos = *offset;
- if (pos < 0 || (long) pos != pos || (ssize_t) count < 0)
- return -EIO;
- if ((unsigned long) pos + (unsigned long) count < (unsigned long) pos)
- return -EIO;
entry = data->entry;
switch (entry->content) {
case SNDRV_INFO_CONTENT_TEXT:
buf = data->rbuffer;
if (buf == NULL)
return -EIO;
- if (pos >= buf->size)
+ if (file->f_pos >= (long)buf->size)
return 0;
- size = buf->size - pos;
+ size = buf->size - file->f_pos;
size = min(count, size);
- if (copy_to_user(buffer, buf->buffer + pos, size))
+ if (copy_to_user(buffer, buf->buffer + file->f_pos, size))
return -EFAULT;
+ file->f_pos += size;
break;
case SNDRV_INFO_CONTENT_DATA:
if (entry->c.ops->read)
- size = entry->c.ops->read(entry,
+ return entry->c.ops->read(entry,
data->file_private_data,
- file, buffer, count, pos);
+ file, buffer, count);
break;
}
- if ((ssize_t) size > 0)
- *offset = pos + size;
return size;
}
struct snd_info_entry *entry;
snd_info_buffer_t *buf;
size_t size = 0;
- loff_t pos;
data = snd_magic_cast(snd_info_private_data_t, file->private_data, return -ENXIO);
snd_assert(data != NULL, return -ENXIO);
entry = data->entry;
- pos = *offset;
- if (pos < 0 || (long) pos != pos || (ssize_t) count < 0)
- return -EIO;
- if ((unsigned long) pos + (unsigned long) count < (unsigned long) pos)
- return -EIO;
switch (entry->content) {
case SNDRV_INFO_CONTENT_TEXT:
buf = data->wbuffer;
if (buf == NULL)
return -EIO;
- if (pos >= buf->len)
+ if (file->f_pos < 0)
+ return -EINVAL;
+ if (file->f_pos >= (long)buf->len)
return -ENOMEM;
- size = buf->len - pos;
+ size = buf->len - file->f_pos;
size = min(count, size);
- if (copy_from_user(buf->buffer + pos, buffer, size))
+ if (copy_from_user(buf->buffer + file->f_pos, buffer, size))
return -EFAULT;
- if ((long)buf->size < pos + size)
- buf->size = pos + size;
+ if ((long)buf->size < file->f_pos + size)
+ buf->size = file->f_pos + size;
+ file->f_pos += size;
break;
case SNDRV_INFO_CONTENT_DATA:
if (entry->c.ops->write)
- size = entry->c.ops->write(entry,
+ return entry->c.ops->write(entry,
data->file_private_data,
- file, buffer, count, pos);
+ file, buffer, count);
break;
}
- if ((ssize_t) size > 0)
- *offset = pos + size;
return size;
}
else
printk("pcm_oss: read: recovering from SUSPEND\n");
#endif
- ret = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DRAIN, NULL);
+ ret = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DRAIN, 0);
if (ret < 0)
break;
} else if (runtime->status->state == SNDRV_PCM_STATE_SETUP) {
}
if (ret == -EPIPE) {
if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
- ret = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
+ ret = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, 0);
if (ret < 0)
break;
}
else
printk("pcm_oss: readv: recovering from SUSPEND\n");
#endif
- ret = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DRAIN, NULL);
+ ret = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DRAIN, 0);
if (ret < 0)
break;
} else if (runtime->status->state == SNDRV_PCM_STATE_SETUP) {
snd_pcm_oss_setup_t *psetup = NULL, *csetup = NULL;
int nonblock;
wait_queue_t wait;
- static char printed_comm[16];
-
- if (strncmp(printed_comm, current->comm, 16)) {
- printk("application %s uses obsolete OSS audio interface\n",
- current->comm);
- memcpy(printed_comm, current->comm, 16);
- }
+
+ printk("application %s uses obsolete OSS audio interface\n",current->comm);
snd_assert(cardnum >= 0 && cardnum < SNDRV_CARDS, return -ENXIO);
device = SNDRV_MINOR_OSS_DEVICE(minor) == SNDRV_MINOR_OSS_PCM1 ?
const char *snd_pcm_stream_name(snd_pcm_stream_t stream)
{
- snd_assert(stream <= SNDRV_PCM_STREAM_LAST, return NULL);
+ snd_assert(stream <= SNDRV_PCM_STREAM_LAST, return 0);
return snd_pcm_stream_names[stream];
}
const char *snd_pcm_access_name(snd_pcm_access_t access)
{
- snd_assert(access <= SNDRV_PCM_ACCESS_LAST, return NULL);
+ snd_assert(access <= SNDRV_PCM_ACCESS_LAST, return 0);
return snd_pcm_access_names[access];
}
const char *snd_pcm_format_name(snd_pcm_format_t format)
{
- snd_assert(format <= SNDRV_PCM_FORMAT_LAST, return NULL);
+ snd_assert(format <= SNDRV_PCM_FORMAT_LAST, return 0);
return snd_pcm_format_names[format];
}
const char *snd_pcm_subformat_name(snd_pcm_subformat_t subformat)
{
- snd_assert(subformat <= SNDRV_PCM_SUBFORMAT_LAST, return NULL);
+ snd_assert(subformat <= SNDRV_PCM_SUBFORMAT_LAST, return 0);
return snd_pcm_subformat_names[subformat];
}
const char *snd_pcm_tstamp_mode_name(snd_pcm_tstamp_t mode)
{
- snd_assert(mode <= SNDRV_PCM_TSTAMP_LAST, return NULL);
+ snd_assert(mode <= SNDRV_PCM_TSTAMP_LAST, return 0);
return snd_pcm_tstamp_mode_names[mode];
}
const char *snd_pcm_state_name(snd_pcm_state_t state)
{
- snd_assert(state <= SNDRV_PCM_STATE_LAST, return NULL);
+ snd_assert(state <= SNDRV_PCM_STATE_LAST, return 0);
return snd_pcm_state_names[state];
}
}
static long snd_opl4_mem_proc_read(snd_info_entry_t *entry, void *file_private_data,
- struct file *file, char __user *_buf,
- unsigned long count, unsigned long pos)
+ struct file *file, char __user *_buf, long count)
{
opl4_t *opl4 = snd_magic_cast(opl4_t, entry->private_data, return -ENXIO);
long size;
char* buf;
size = count;
- if (pos + size > entry->size)
- size = entry->size - pos;
+ if (file->f_pos + size > entry->size)
+ size = entry->size - file->f_pos;
if (size > 0) {
buf = vmalloc(size);
if (!buf)
return -ENOMEM;
- snd_opl4_read_memory(opl4, buf, pos, size);
+ snd_opl4_read_memory(opl4, buf, file->f_pos, size);
if (copy_to_user(_buf, buf, size)) {
vfree(buf);
return -EFAULT;
}
vfree(buf);
+ file->f_pos += size;
return size;
}
return 0;
}
static long snd_opl4_mem_proc_write(snd_info_entry_t *entry, void *file_private_data,
- struct file *file, const char __user *_buf,
- unsigned long count, unsigned long pos)
+ struct file *file, const char __user *_buf, long count)
{
opl4_t *opl4 = snd_magic_cast(opl4_t, entry->private_data, return -ENXIO);
long size;
char *buf;
size = count;
- if (pos + size > entry->size)
- size = entry->size - pos;
+ if (file->f_pos + size > entry->size)
+ size = entry->size - file->f_pos;
if (size > 0) {
buf = vmalloc(size);
if (!buf)
vfree(buf);
return -EFAULT;
}
- snd_opl4_write_memory(opl4, buf, pos, size);
+ snd_opl4_write_memory(opl4, buf, file->f_pos, size);
vfree(buf);
+ file->f_pos += size;
return size;
}
return 0;
break;
if (snd_rawmidi_transmit(substream, &midi_byte, 1) != 1)
break;
-#ifdef SNDRV_SERIAL_MS124W_MB_NOCOMBO
+#if SNDRV_SERIAL_MS124W_MB_NOCOMBO
/* select exactly one of the four ports */
addr_byte = (1 << (substream->number + 4)) | 0x08;
#else
struct vx_rmh rmh;
int data_mode;
- *pipep = NULL;
+ *pipep = 0;
vx_init_rmh(&rmh, CMD_RES_PIPE);
vx_set_pipe_cmd_params(&rmh, capture, audioid, num_audio);
#if 0 // NYI
{
snd_pcm_runtime_t *runtime = subs->runtime;
vx_core_t *chip = snd_pcm_substream_chip(subs);
- vx_pipe_t *pipe = NULL;
+ vx_pipe_t *pipe = 0;
unsigned int audio;
int err;
pipe = snd_magic_cast(vx_pipe_t, subs->runtime->private_data, return -EINVAL);
if (--pipe->references == 0) {
- chip->playback_pipes[pipe->number] = NULL;
+ chip->playback_pipes[pipe->number] = 0;
vx_free_pipe(chip, pipe);
}
if (! subs->runtime->private_data)
return -EINVAL;
pipe = snd_magic_cast(vx_pipe_t, subs->runtime->private_data, return -EINVAL);
- chip->capture_pipes[pipe->number] = NULL;
+ chip->capture_pipes[pipe->number] = 0;
pipe_out_monitoring = pipe->monitoring_pipe;
if (pipe_out_monitoring) {
if (--pipe_out_monitoring->references == 0) {
vx_free_pipe(chip, pipe_out_monitoring);
- chip->playback_pipes[pipe->number] = NULL;
- pipe->monitoring_pipe = NULL;
+ chip->playback_pipes[pipe->number] = 0;
+ pipe->monitoring_pipe = 0;
}
}
chip->pcm[pcm->device] = NULL;
if (chip->playback_pipes) {
kfree(chip->playback_pipes);
- chip->playback_pipes = NULL;
+ chip->playback_pipes = 0;
}
if (chip->capture_pipes) {
kfree(chip->capture_pipes);
- chip->capture_pipes = NULL;
+ chip->capture_pipes = 0;
}
}
* Linux Video interface
*/
-static int snd_tea575x_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long data)
+static int snd_tea575x_do_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, void *arg)
{
struct video_device *dev = video_devdata(file);
tea575x_t *tea = video_get_drvdata(dev);
- void __user *arg = (void __user *)data;
switch(cmd) {
case VIDIOCGCAP:
}
}
+static int snd_tea575x_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ return video_usercopy(inode, file, cmd, arg, snd_tea575x_do_ioctl);
+}
+
/*
* initialize all the tea575x chips
*/
config SND_SB16_CSP
bool "Sound Blaster 16/AWE CSP support"
- depends on (SND_SB16 || SND_SBAWE) && (BROKEN || !PPC)
+ depends on SND_SB16 || SND_SBAWE
help
Say 'Y' to include support for CSP core. This special coprocessor
can do variable tasks like various compression and decompression
else
nblock->prev->next = nblock;
up(&alloc->memory_mutex);
- return NULL;
+ return 0;
}
pblock = pblock->next;
}
} gus_proc_private_t;
static long snd_gf1_mem_proc_dump(snd_info_entry_t *entry, void *file_private_data,
- struct file *file, char __user *buf,
- unsigned long count, unsigned long pos)
+ struct file *file, char __user *buf, long count)
{
long size;
gus_proc_private_t *priv = snd_magic_cast(gus_proc_private_t, entry->private_data, return -ENXIO);
int err;
size = count;
- if (pos + size > priv->size)
- size = (long)priv->size - pos;
+ if (file->f_pos + size > priv->size)
+ size = (long)priv->size - file->f_pos;
if (size > 0) {
- if ((err = snd_gus_dram_read(gus, buf, pos, size, priv->rom)) < 0)
+ if ((err = snd_gus_dram_read(gus, buf, file->f_pos, size, priv->rom)) < 0)
return err;
+ file->f_pos += size;
return size;
}
return 0;
emu8k_pcm_t *rec = subs->runtime->private_data;
if (rec)
kfree(rec);
- subs->runtime->private_data = NULL;
+ subs->runtime->private_data = 0;
return 0;
}
runtime->hw.rate_max = 44100;
runtime->hw.channels_max = 2;
snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
- snd_sb8_hw_constraint_rate_channels, NULL,
+ snd_sb8_hw_constraint_rate_channels, 0,
SNDRV_PCM_HW_PARAM_CHANNELS,
SNDRV_PCM_HW_PARAM_RATE, -1);
snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
- snd_sb8_hw_constraint_channels_rate, NULL,
+ snd_sb8_hw_constraint_channels_rate, 0,
SNDRV_PCM_HW_PARAM_RATE, -1);
break;
case SB_HW_201:
#else
#define DPRINT(cond, args...) \
if ((dev->debug & (cond)) == (cond)) { \
- snd_printk (args); \
+ snd_printk (##args); \
}
#endif
#else
{ 0x0E, "Bad MIDI channel number" },
{ 0x10, "Download Record Error" },
{ 0x80, "Success" },
- { 0x0 }
+ { 0x0, 0x0 }
};
#define NEEDS_ACK 1
if (cmd == WFC_DOWNLOAD_MULTISAMPLE) {
wfcmd->write_cnt = (unsigned long) rbuf;
- rbuf = NULL;
+ rbuf = 0;
}
DPRINT (WF_DEBUG_CMD, "0x%x [%s] (%d,%d,%d)\n",
wbuf[0] = sample_num & 0x7f;
wbuf[1] = sample_num >> 7;
- if ((x = snd_wavefront_cmd (dev, WFC_DELETE_SAMPLE, NULL, wbuf)) == 0) {
+ if ((x = snd_wavefront_cmd (dev, WFC_DELETE_SAMPLE, 0, wbuf)) == 0) {
dev->sample_status[sample_num] = WF_ST_EMPTY;
}
bptr = munge_int32 (header->number, buf, 2);
munge_buf ((unsigned char *)&header->hdr.p, bptr, WF_PATCH_BYTES);
- if (snd_wavefront_cmd (dev, WFC_DOWNLOAD_PATCH, NULL, buf)) {
+ if (snd_wavefront_cmd (dev, WFC_DOWNLOAD_PATCH, 0, buf)) {
snd_printk ("download patch failed\n");
return -(EIO);
}
buf[0] = header->number;
munge_buf ((unsigned char *)&header->hdr.pr, &buf[1], WF_PROGRAM_BYTES);
- if (snd_wavefront_cmd (dev, WFC_DOWNLOAD_PROGRAM, NULL, buf)) {
+ if (snd_wavefront_cmd (dev, WFC_DOWNLOAD_PROGRAM, 0, buf)) {
snd_printk ("download patch failed\n");
return -(EIO);
}
{
char rbuf[8];
- if (snd_wavefront_cmd (dev, WFC_REPORT_FREE_MEMORY, rbuf, NULL)) {
+ if (snd_wavefront_cmd (dev, WFC_REPORT_FREE_MEMORY, rbuf, 0)) {
snd_printk ("can't get memory stats.\n");
return -1;
} else {
u16 sample_short;
u32 length;
- u16 __user *data_end = NULL;
+ u16 __user *data_end = 0;
unsigned int i;
const unsigned int max_blksize = 4096/2;
unsigned int written;
if (snd_wavefront_cmd (dev,
header->size ?
WFC_DOWNLOAD_SAMPLE : WFC_DOWNLOAD_SAMPLE_HEADER,
- NULL, sample_hdr)) {
+ 0, sample_hdr)) {
snd_printk ("sample %sdownload refused.\n",
header->size ? "" : "header ");
return -(EIO);
blocksize = ((length-written+7)&~0x7);
}
- if (snd_wavefront_cmd (dev, WFC_DOWNLOAD_BLOCK, NULL, NULL)) {
+ if (snd_wavefront_cmd (dev, WFC_DOWNLOAD_BLOCK, 0, 0)) {
snd_printk ("download block "
"request refused.\n");
return -(EIO);
munge_int32 (header->hdr.a.FrequencyBias, &alias_hdr[20], 3);
munge_int32 (*(&header->hdr.a.FrequencyBias+1), &alias_hdr[23], 2);
- if (snd_wavefront_cmd (dev, WFC_DOWNLOAD_SAMPLE_ALIAS, NULL, alias_hdr)) {
+ if (snd_wavefront_cmd (dev, WFC_DOWNLOAD_SAMPLE_ALIAS, 0, alias_hdr)) {
snd_printk ("download alias failed.\n");
return -(EIO);
}
munge_int32 (((unsigned char *)drum)[i], &drumbuf[1+(i*2)], 2);
}
- if (snd_wavefront_cmd (dev, WFC_DOWNLOAD_EDRUM_PROGRAM, NULL, drumbuf)) {
+ if (snd_wavefront_cmd (dev, WFC_DOWNLOAD_EDRUM_PROGRAM, 0, drumbuf)) {
snd_printk ("download drum failed.\n");
return -(EIO);
}
voices[0] = 32;
- if (snd_wavefront_cmd (dev, WFC_SET_NVOICES, NULL, voices)) {
+ if (snd_wavefront_cmd (dev, WFC_SET_NVOICES, 0, voices)) {
snd_printk ("cannot set number of voices to 32.\n");
goto gone_bad;
}
MODULE_PARM_DESC(wss,"change between ACI/WSS-mixer; use 0 and 1 - untested"
" default: do nothing; for PCM1-pro only");
-#ifdef DEBUG
+#if DEBUG
static void print_bits(unsigned char c)
{
int j;
static inline int aci_rawwrite(unsigned char byte)
{
if (busy_wait() >= 0) {
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "aci_rawwrite(%d)\n", byte);
#endif
outb(byte, COMMAND_REGISTER);
if (busy_wait() >= 0) {
byte=inb(STATUS_REGISTER);
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "%d = aci_rawread()\n", byte);
#endif
return byte;
} isapnp_ad1816_list[] __initdata = {
{ ISAPNP_ANY_ID, ISAPNP_ANY_ID,
ISAPNP_VENDOR('A','D','S'), ISAPNP_FUNCTION(0x7150),
- NULL },
+ 0 },
{ ISAPNP_ANY_ID, ISAPNP_ANY_ID,
ISAPNP_VENDOR('A','D','S'), ISAPNP_FUNCTION(0x7180),
- NULL },
+ 0 },
{0}
};
ISAPNP_VENDOR('G','R','V'), ISAPNP_DEVICE(0x0001),
ISAPNP_VENDOR('G','R','V'), ISAPNP_FUNCTION(0x0000),
0, 0, 0, 1, 0},
- {NULL}
+ {0}
};
static struct isapnp_device_id id_table[] __devinitdata = {
{ "AC97_3D_CONTROL", 0x100 + AC97_3D_CONTROL, 16 },
{ "AC97_MODEM_RATE", 0x100 + AC97_MODEM_RATE, 16 },
{ "AC97_POWER_CONTROL", 0x100 + AC97_POWER_CONTROL, 16 },
- { NULL }
+ { 0 }
};
if (dev == NULL)
ssize_t ret = 0;
DECLARE_WAITQUEUE(wait, current);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
down(&state->sem);
#if 0
if (dmabuf->mapped) {
ad1889_set_wav_rate(ad1889_dev, 44100);
ad1889_set_wav_fmt(ad1889_dev, AFMT_S16_LE);
AD1889_WRITEW(ad1889_dev, AD_DSWADA, 0x0404); /* attenuation */
- return nonseekable_open(inode, file);
+ return 0;
}
static int ad1889_release(struct inode *inode, struct file *file)
if ((err = ad1889_ac97_init(dev, 0)) != 0)
goto err_free_dsp;
- if (((proc_root = proc_mkdir("driver/ad1889", NULL)) == NULL) ||
+ if (((proc_root = proc_mkdir("driver/ad1889", 0)) == NULL) ||
create_proc_read_entry("ac97", S_IFREG|S_IRUGO, proc_root, ac97_read_proc, dev->ac97_codec) == NULL ||
create_proc_read_entry("info", S_IFREG|S_IRUGO, proc_root, ad1889_read_proc, dev) == NULL)
goto err_free_dsp;
err_free_mem:
ad1889_free_dev(dev);
- pci_set_drvdata(pcidev, NULL);
+ pci_set_drvdata(pcidev, 0);
return -ENODEV;
}
size_t count, loff_t * ppos)
{
struct ali_state *state = (struct ali_state *) file->private_data;
- struct ali_card *card = state ? state->card : NULL;
+ struct ali_card *card = state ? state->card : 0;
struct dmabuf *dmabuf = &state->dmabuf;
ssize_t ret;
unsigned long flags;
#ifdef DEBUG2
printk("ali_audio: ali_read called, count = %d\n", count);
#endif
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (dmabuf->mapped)
return -ENXIO;
if (dmabuf->enable & DAC_RUNNING)
const char __user *buffer, size_t count, loff_t * ppos)
{
struct ali_state *state = (struct ali_state *) file->private_data;
- struct ali_card *card = state ? state->card : NULL;
+ struct ali_card *card = state ? state->card : 0;
struct dmabuf *dmabuf = &state->dmabuf;
ssize_t ret;
unsigned long flags;
#ifdef DEBUG2
printk("ali_audio: ali_write called, count = %d\n", count);
#endif
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (dmabuf->mapped)
return -ENXIO;
if (dmabuf->enable & ADC_RUNNING)
state->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE);
outl(0x00000000, card->iobase + ALI_INTERRUPTCR);
outl(0x00000000, card->iobase + ALI_INTERRUPTSR);
- return nonseekable_open(inode, file);
+ return 0;
}
static int ali_release(struct inode *inode, struct file *file)
if (card->ac97_codec[i] != NULL
&& card->ac97_codec[i]->dev_mixer == minor) {
file->private_data = card->ac97_codec[i];
- return nonseekable_open(inode, file);
+ return 0;
}
}
return -ENODEV;
static int au1000_open_mixdev(struct inode *inode, struct file *file)
{
file->private_data = &au1000_state;
- return nonseekable_open(inode, file);
+ return 0;
}
static int au1000_release_mixdev(struct inode *inode, struct file *file)
unsigned long flags;
int cnt, usercnt, avail;
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (db->mapped)
return -ENXIO;
if (!access_ok(VERIFY_WRITE, buffer, count))
dbg("write: count=%d", count);
#endif
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (db->mapped)
return -ENXIO;
if (!access_ok(VERIFY_READ, buffer, count))
s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE);
up(&s->open_sem);
init_MUTEX(&s->sem);
- return nonseekable_open(inode, file);
+ return 0;
}
static int au1000_release(struct inode *inode, struct file *file)
}
VALIDATE_STATE(s);
file->private_data = s;
- return nonseekable_open(inode, file);
+ return 0;
}
static int cm_release_mixdev(struct inode *inode, struct file *file)
int cnt;
VALIDATE_STATE(s);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (s->dma_adc.mapped)
return -ENXIO;
if (!s->dma_adc.ready && (ret = prog_dmabuf(s, 1)))
int cnt;
VALIDATE_STATE(s);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (s->dma_dac.mapped)
return -ENXIO;
if (!s->dma_dac.ready && (ret = prog_dmabuf(s, 0)))
set_fmt(s, fmtm, fmts);
s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE);
up(&s->open_sem);
- return nonseekable_open(inode, file);
+ return 0;
}
static int cm_release(struct inode *inode, struct file *file)
CS_DBGOUT(CS_FUNCTION | CS_OPEN, 4,
printk(KERN_INFO "cs4281: cs4281_open_mixdev()- 0\n"));
- return nonseekable_open(inode, file);
+ return 0;
}
printk(KERN_INFO "cs4281: cs4281_read()+ %Zu \n", count));
VALIDATE_STATE(s);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (s->dma_adc.mapped)
return -ENXIO;
if (!s->dma_adc.ready && (ret = prog_dmabuf_adc(s)))
count));
VALIDATE_STATE(s);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (s->dma_dac.mapped)
return -ENXIO;
if (!s->dma_dac.ready && (ret = prog_dmabuf_dac(s)))
}
CS_DBGOUT(CS_FUNCTION | CS_OPEN, 2,
printk(KERN_INFO "cs4281: cs4281_open()- 0\n"));
- return nonseekable_open(inode, file);
+ return 0;
}
int cnt;
VALIDATE_STATE(s);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (!access_ok(VERIFY_WRITE, buffer, count))
return -EFAULT;
ret = 0;
int cnt;
VALIDATE_STATE(s);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (!access_ok(VERIFY_READ, buffer, count))
return -EFAULT;
ret = 0;
f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ |
FMODE_MIDI_WRITE);
up(&s->open_sem);
- return nonseekable_open(inode, file);
+ return 0;
}
tmp &= 0xFFFF;
tmp |= card->pctl;
CS_DBGOUT(CS_PARMS, 6, printk(
- "cs46xx: start_dac() poke card=%p tmp=0x%.08x addr=%p \n",
- card, (unsigned)tmp,
- card->ba1.idx[(BA1_PCTL >> 16) & 3]+(BA1_PCTL&0xffff) ) );
+ "cs46xx: start_dac() poke card=0x%.08x tmp=0x%.08x addr=0x%.08x \n",
+ (unsigned)card, (unsigned)tmp,
+ (unsigned)card->ba1.idx[(BA1_PCTL >> 16) & 3]+(BA1_PCTL&0xffff) ) );
cs461x_poke(card, BA1_PCTL, tmp);
}
spin_unlock_irqrestore(&card->lock, flags);
memset(dmabuf->rawbuf,
(dmabuf->fmt & CS_FMT_16BIT) ? 0 : 0x80,
(unsigned)hwptr);
- memset((char *)dmabuf->rawbuf +
- dmabuf->dmasize + hwptr - diff,
+ memset((void *)((unsigned)dmabuf->rawbuf +
+ dmabuf->dmasize + hwptr - diff),
(dmabuf->fmt & CS_FMT_16BIT) ? 0 : 0x80,
diff - hwptr);
}
unsigned ptr;
int cnt;
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (!access_ok(VERIFY_WRITE, buffer, count))
return -EFAULT;
ret = 0;
unsigned ptr;
int cnt;
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (!access_ok(VERIFY_READ, buffer, count))
return -EFAULT;
ret = 0;
CS_DBGOUT(CS_FUNCTION, 2, printk(KERN_INFO "cs46xx: CopySamples()+ ") );
CS_DBGOUT(CS_WAVE_READ, 8, printk(KERN_INFO
- " dst=%p src=%p count=%d fmt=0x%x\n",
- dst,src,count,fmt) );
+ " dst=0x%x src=0x%x count=%d fmt=0x%x\n",
+ (unsigned)dst,(unsigned)src,(unsigned)count,(unsigned)fmt) );
/*
* See if the data should be output as 8-bit unsigned stereo.
return -ENODEV;
dmabuf = &state->dmabuf;
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (dmabuf->mapped)
return -ENXIO;
if (!access_ok(VERIFY_WRITE, buffer, count))
dmabuf->dmasize,dmabuf->count,buffer,ret) );
if (cs_copy_to_user(state, buffer,
- (char *)dmabuf->rawbuf + swptr, cnt, &copied))
+ (void *)((unsigned)dmabuf->rawbuf + swptr), cnt, &copied))
{
if (!ret) ret = -EFAULT;
goto out;
return -EFAULT;
dmabuf = &state->dmabuf;
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
down(&state->sem);
if (dmabuf->mapped)
{
int ret = 0;
unsigned long size;
- CS_DBGOUT(CS_FUNCTION | CS_PARMS, 2, printk("cs46xx: cs_mmap()+ file=%p %s %s\n",
- file, vma->vm_flags & VM_WRITE ? "VM_WRITE" : "",
+ CS_DBGOUT(CS_FUNCTION | CS_PARMS, 2, printk("cs46xx: cs_mmap()+ file=0x%x %s %s\n",
+ (unsigned)file, vma->vm_flags & VM_WRITE ? "VM_WRITE" : "",
vma->vm_flags & VM_READ ? "VM_READ" : "") );
if (vma->vm_flags & VM_WRITE) {
* use the DAC only.
*/
state = card->states[1];
- if (!state) {
+ if(!(unsigned)state)
+ {
ret = -EINVAL;
goto out;
}
{
struct cs_card *card = (struct cs_card *)file->private_data;
struct cs_state *state;
- struct dmabuf *dmabuf=NULL;
+ struct dmabuf *dmabuf=0;
unsigned long flags;
audio_buf_info abinfo;
count_info cinfo;
int ret=0;
unsigned int tmp;
- CS_DBGOUT(CS_OPEN | CS_FUNCTION, 2, printk("cs46xx: cs_open()+ file=%p %s %s\n",
- file, file->f_mode & FMODE_WRITE ? "FMODE_WRITE" : "",
+ CS_DBGOUT(CS_OPEN | CS_FUNCTION, 2, printk("cs46xx: cs_open()+ file=0x%x %s %s\n",
+ (unsigned)file, file->f_mode & FMODE_WRITE ? "FMODE_WRITE" : "",
file->f_mode & FMODE_READ ? "FMODE_READ" : "") );
list_for_each(entry, &cs46xx_devs)
return ret;
}
CS_DBGOUT(CS_OPEN | CS_FUNCTION, 2, printk("cs46xx: cs_open()- 0\n") );
- return nonseekable_open(inode, file);
+ return 0;
}
static int cs_release(struct inode *inode, struct file *file)
struct dmabuf *dmabuf;
struct cs_state *state;
unsigned int tmp;
- CS_DBGOUT(CS_RELEASE | CS_FUNCTION, 2, printk("cs46xx: cs_release()+ file=%p %s %s\n",
- file, file->f_mode & FMODE_WRITE ? "FMODE_WRITE" : "",
+ CS_DBGOUT(CS_RELEASE | CS_FUNCTION, 2, printk("cs46xx: cs_release()+ file=0x%x %s %s\n",
+ (unsigned)file, file->f_mode & FMODE_WRITE ? "FMODE_WRITE" : "",
file->f_mode & FMODE_READ ? "FMODE_READ" : "") );
if (!(file->f_mode & (FMODE_WRITE | FMODE_READ)))
{
unsigned int tmp;
CS_DBGOUT(CS_PM | CS_FUNCTION, 4,
- printk("cs46xx: cs46xx_suspend()+ flags=0x%x s=%p\n",
- (unsigned)card->pm.flags,card));
+ printk("cs46xx: cs46xx_suspend()+ flags=0x%x s=0x%x\n",
+ (unsigned)card->pm.flags,(unsigned)card));
/*
* check the current state, only suspend if IDLE
*/
CS_INC_USE_COUNT(&card->mixer_use_cnt);
CS_DBGOUT(CS_FUNCTION | CS_OPEN, 4,
printk(KERN_INFO "cs46xx: cs_open_mixdev()- 0\n"));
- return nonseekable_open(inode, file);
+ return 0;
}
static int cs_release_mixdev(struct inode *inode, struct file *file)
CS_DBGOUT(CS_FUNCTION | CS_INIT, 2, printk(KERN_INFO
"cs46xx: cs_ac97_init()- codec number %d not found\n",
num_ac97) );
- card->ac97_codec[num_ac97] = NULL;
+ card->ac97_codec[num_ac97] = 0;
break;
}
CS_DBGOUT(CS_FUNCTION | CS_INIT, 2, printk(KERN_INFO
card->ac97_codec[num_ac97] = codec;
CS_DBGOUT(CS_FUNCTION | CS_INIT, 2, printk(KERN_INFO
- "cs46xx: cs_ac97_init() ac97_codec[%d] set to %p\n",
+ "cs46xx: cs_ac97_init() ac97_codec[%d] set to 0x%x\n",
(unsigned int)num_ac97,
- codec));
+ (unsigned int)codec));
/* if there is no secondary codec at all, don't probe any more */
if (!ready_2nd)
{
card->ba1.name.reg = ioremap_nocache(card->ba1_addr + BA1_SP_REG, CS461X_BA1_REG_SIZE);
CS_DBGOUT(CS_INIT, 4, printk(KERN_INFO
- "cs46xx: card=%p card->ba0=%p\n",card,card->ba0) );
+ "cs46xx: card=0x%x card->ba0=0x%.08x\n",(unsigned)card,(unsigned)card->ba0) );
CS_DBGOUT(CS_INIT, 4, printk(KERN_INFO
- "cs46xx: card->ba1=%p %p %p %p\n",
- card->ba1.name.data0,
- card->ba1.name.data1,
- card->ba1.name.pmem,
- card->ba1.name.reg) );
+ "cs46xx: card->ba1=0x%.08x 0x%.08x 0x%.08x 0x%.08x\n",
+ (unsigned)card->ba1.name.data0,
+ (unsigned)card->ba1.name.data1,
+ (unsigned)card->ba1.name.pmem,
+ (unsigned)card->ba1.name.reg) );
if(card->ba0 == 0 || card->ba1.name.data0 == 0 ||
card->ba1.name.data1 == 0 || card->ba1.name.pmem == 0 ||
if (pmdev)
{
CS_DBGOUT(CS_INIT | CS_PM, 4, printk(KERN_INFO
- "cs46xx: probe() pm_register() succeeded (%p).\n",
- pmdev));
+ "cs46xx: probe() pm_register() succeeded (0x%x).\n",
+ (unsigned)pmdev));
pmdev->data = card;
}
else
{
CS_DBGOUT(CS_INIT | CS_PM | CS_ERROR, 2, printk(KERN_INFO
- "cs46xx: probe() pm_register() failed (%p).\n",
- pmdev));
+ "cs46xx: probe() pm_register() failed (0x%x).\n",
+ (unsigned)pmdev));
card->pm.flags |= CS46XX_PM_NOT_REGISTERED;
}
- CS_DBGOUT(CS_PM, 9, printk(KERN_INFO "cs46xx: pm.flags=0x%x card=%p\n",
- (unsigned)card->pm.flags,card));
+ CS_DBGOUT(CS_PM, 9, printk(KERN_INFO "cs46xx: pm.flags=0x%x card=0x%x\n",
+ (unsigned)card->pm.flags,(unsigned)card));
CS_DBGOUT(CS_INIT | CS_FUNCTION, 2, printk(KERN_INFO
"cs46xx: probe()- device allocated successfully\n"));
struct cs_card *card;
CS_DBGOUT(CS_PM, 2, printk(KERN_INFO
- "cs46xx: cs46xx_pm_callback dev=%p rqst=0x%x card=%p\n",
- dev,(unsigned)rqst,data));
+ "cs46xx: cs46xx_pm_callback dev=0x%x rqst=0x%x card=%d\n",
+ (unsigned)dev,(unsigned)rqst,(unsigned)data));
card = (struct cs_card *) dev->data;
if (card) {
switch(rqst) {
*/
static int cs46xx_suspend_tbl(struct pci_dev *pcidev, u32 state);
static int cs46xx_resume_tbl(struct pci_dev *pcidev);
-#define cs_pm_register(a, b, c) NULL
+#define cs_pm_register(a, b, c) 0
#define cs_pm_unregister_all(a)
#define CS46XX_SUSPEND_TBL cs46xx_suspend_tbl
#define CS46XX_RESUME_TBL cs46xx_resume_tbl
#ifndef _dmasound_h_
/*
- * linux/sound/oss/dmasound/dmasound.h
+ * linux/drivers/sound/dmasound/dmasound.h
*
*
* Minor numbers for the sound driver.
#define le2be16dbl(x) (((x)<<8 & 0xff00ff00) | ((x)>>8 & 0x00ff00ff))
#define IOCTL_IN(arg, ret) \
- do { int error = get_user(ret, (int __user *)(arg)); \
+ do { int error = get_user(ret, (int *)(arg)); \
if (error) return error; \
} while (0)
-#define IOCTL_OUT(arg, ret) ioctl_return((int __user *)(arg), ret)
+#define IOCTL_OUT(arg, ret) ioctl_return((int *)(arg), ret)
-static inline int ioctl_return(int __user *addr, int value)
+static inline int ioctl_return(int *addr, int value)
{
return value < 0 ? value : put_user(value, addr);
}
*/
typedef struct {
- ssize_t (*ct_ulaw)(const u_char __user *, size_t, u_char *, ssize_t *, ssize_t);
- ssize_t (*ct_alaw)(const u_char __user *, size_t, u_char *, ssize_t *, ssize_t);
- ssize_t (*ct_s8)(const u_char __user *, size_t, u_char *, ssize_t *, ssize_t);
- ssize_t (*ct_u8)(const u_char __user *, size_t, u_char *, ssize_t *, ssize_t);
- ssize_t (*ct_s16be)(const u_char __user *, size_t, u_char *, ssize_t *, ssize_t);
- ssize_t (*ct_u16be)(const u_char __user *, size_t, u_char *, ssize_t *, ssize_t);
- ssize_t (*ct_s16le)(const u_char __user *, size_t, u_char *, ssize_t *, ssize_t);
- ssize_t (*ct_u16le)(const u_char __user *, size_t, u_char *, ssize_t *, ssize_t);
+ ssize_t (*ct_ulaw)(const u_char *, size_t, u_char *, ssize_t *, ssize_t);
+ ssize_t (*ct_alaw)(const u_char *, size_t, u_char *, ssize_t *, ssize_t);
+ ssize_t (*ct_s8)(const u_char *, size_t, u_char *, ssize_t *, ssize_t);
+ ssize_t (*ct_u8)(const u_char *, size_t, u_char *, ssize_t *, ssize_t);
+ ssize_t (*ct_s16be)(const u_char *, size_t, u_char *, ssize_t *, ssize_t);
+ ssize_t (*ct_u16be)(const u_char *, size_t, u_char *, ssize_t *, ssize_t);
+ ssize_t (*ct_s16le)(const u_char *, size_t, u_char *, ssize_t *, ssize_t);
+ ssize_t (*ct_u16le)(const u_char *, size_t, u_char *, ssize_t *, ssize_t);
} TRANS;
struct sound_settings {
/*
- * linux/sound/oss/dmasound/dmasound_atari.c
+ * linux/drivers/sound/dmasound/dmasound_atari.c
*
* Atari TT and Falcon DMA Sound Driver
*
- * See linux/sound/oss/dmasound/dmasound_core.c for copyright and credits
+ * See linux/drivers/sound/dmasound/dmasound_core.c for copyright and credits
* prior to 28/01/2001
*
* 28/01/2001 [0.1] Iain Sandoe
/*
- * linux/sound/oss/dmasound/dmasound_awacs.c
+ * linux/drivers/sound/dmasound/dmasound_awacs.c
*
* PowerMac `AWACS' and `Burgundy' DMA Sound Driver
* with some limited support for DACA & Tumbler
*
- * See linux/sound/oss/dmasound/dmasound_core.c for copyright and
+ * See linux/drivers/sound/dmasound/dmasound_core.c for copyright and
* history prior to 2001/01/26.
*
* 26/01/2001 ed 0.1 Iain Sandoe
#undef IOCTL_OUT
#define IOCTL_IN(arg, ret) \
- rc = get_user(ret, (int __user *)(arg)); \
+ rc = get_user(ret, (int *)(arg)); \
if (rc) break;
#define IOCTL_OUT(arg, ret) \
- ioctl_return2((int __user *)(arg), ret)
+ ioctl_return2((int *)(arg), ret)
-static inline int ioctl_return2(int __user *addr, int value)
+static inline int ioctl_return2(int *addr, int value)
{
return value < 0 ? value : put_user(value, addr);
}
write_audio_gpio(gpio_audio_reset, !gpio_audio_reset_pol);
msleep(100);
if (gpio_headphone_irq) {
- if (request_irq(gpio_headphone_irq,headphone_intr,0,"Headphone detect",NULL) < 0) {
+ if (request_irq(gpio_headphone_irq,headphone_intr,0,"Headphone detect",0) < 0) {
printk(KERN_ERR "tumbler: Can't request headphone interrupt\n");
gpio_headphone_irq = 0;
} else {
val = pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, gpio_headphone_detect, 0);
pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, gpio_headphone_detect, val | 0x80);
/* Trigger it */
- headphone_intr(0,NULL,NULL);
+ headphone_intr(0,0,0);
}
}
if (!gpio_headphone_irq) {
tas_dmasound_cleanup(void)
{
if (gpio_headphone_irq)
- free_irq(gpio_headphone_irq, NULL);
+ free_irq(gpio_headphone_irq, 0);
return 0;
}
static int
tas_mixer_ioctl(u_int cmd, u_long arg)
{
- int __user *argp = (int __user *)arg;
int data;
int rc;
if ((cmd & ~0xff) == MIXER_WRITE(0) &&
tas_supported_mixers() & (1<<(cmd & 0xff))) {
- rc = get_user(data, argp);
+ rc = get_user(data, (int *)(arg));
if (rc<0) return rc;
tas_set_mixer_level(cmd & 0xff, data);
tas_get_mixer_level(cmd & 0xff, &data);
- return ioctl_return2(argp, data);
+ return ioctl_return2((int *)(arg), data);
}
if ((cmd & ~0xff) == MIXER_READ(0) &&
tas_supported_mixers() & (1<<(cmd & 0xff))) {
tas_get_mixer_level(cmd & 0xff, &data);
- return ioctl_return2(argp, data);
+ return ioctl_return2((int *)(arg), data);
}
switch(cmd) {
static int __init PMacIrqInit(void)
{
if (awacs)
- if (request_irq(awacs_irq, pmac_awacs_intr, 0, "Built-in Sound misc", NULL))
+ if (request_irq(awacs_irq, pmac_awacs_intr, 0, "Built-in Sound misc", 0))
return 0;
- if (request_irq(awacs_tx_irq, pmac_awacs_tx_intr, 0, "Built-in Sound out", NULL)
- || request_irq(awacs_rx_irq, pmac_awacs_rx_intr, 0, "Built-in Sound in", NULL))
+ if (request_irq(awacs_tx_irq, pmac_awacs_tx_intr, 0, "Built-in Sound out", 0)
+ || request_irq(awacs_rx_irq, pmac_awacs_rx_intr, 0, "Built-in Sound in", 0))
return 0;
return 1;
}
msleep(200);
}
if (awacs)
- free_irq(awacs_irq, NULL);
- free_irq(awacs_tx_irq, NULL);
- free_irq(awacs_rx_irq, NULL);
+ free_irq(awacs_irq, 0);
+ free_irq(awacs_tx_irq, 0);
+ free_irq(awacs_rx_irq, 0);
if (awacs)
iounmap((void *)awacs);
write_audio_gpio(gpio_audio_reset, !gpio_audio_reset_pol);
msleep(150);
tas_leave_sleep(); /* Stub for now */
- headphone_intr(0,NULL,NULL);
+ headphone_intr(0,0,0);
break;
case AWACS_DACA:
msleep(10); /* Check this !!! */
sound_device_id = 0;
/* device ID appears post g3 b&w */
- prop = (unsigned int *)get_property(info, "device-id", NULL);
+ prop = (unsigned int *)get_property(info, "device-id", 0);
if (prop != 0)
sound_device_id = *prop;
} else if (is_pbook_g3) {
struct device_node* mio;
- macio_base = NULL;
+ macio_base = 0;
for (mio = io->parent; mio; mio = mio->parent) {
if (strcmp(mio->name, "mac-io") == 0
&& mio->n_addrs > 0) {
/*
- * linux/sound/oss/dmasound/dmasound_core.c
+ * linux/drivers/sound/dmasound/dmasound_core.c
*
*
* OSS/Free compatible Atari TT/Falcon and Amiga DMA sound driver for
return stereo;
}
-static ssize_t sound_copy_translate(TRANS *trans, const u_char __user *userPtr,
+static ssize_t sound_copy_translate(TRANS *trans, const u_char *userPtr,
size_t userCount, u_char frame[],
ssize_t *frameUsed, ssize_t frameLeft)
{
- ssize_t (*ct_func)(const u_char __user *, size_t, u_char *, ssize_t *, ssize_t);
+ ssize_t (*ct_func)(const u_char *, size_t, u_char *, ssize_t *, ssize_t);
switch (dmasound.soft.format) {
case AFMT_MU_LAW:
strlcpy(info.id, dmasound.mach.name2, sizeof(info.id));
strlcpy(info.name, dmasound.mach.name2, sizeof(info.name));
info.modify_counter = mixer.modify_counter;
- if (copy_to_user((void __user *)arg, &info, sizeof(info)))
+ if (copy_to_user((int *)arg, &info, sizeof(info)))
return -EFAULT;
return 0;
}
while (i--)
dmasound.mach.dma_free(sq->buffers[i], size);
kfree(sq->buffers);
- sq->buffers = NULL;
+ sq->buffers = 0;
return -ENOMEM;
}
}
static int sq_setup(struct sound_queue *sq)
{
- int (*setup_func)(void) = NULL;
+ int (*setup_func)(void) = 0;
int hard_frame ;
if (sq->locked) { /* are we already set? - and not changeable */
dmasound.mach.play();
}
-static ssize_t sq_write(struct file *file, const char __user *src, size_t uLeft,
+static ssize_t sq_write(struct file *file, const char *src, size_t uLeft,
loff_t *ppos)
{
ssize_t uWritten = 0;
* it and restart the DMA.
*/
-static ssize_t sq_read(struct file *file, char __user *dst, size_t uLeft,
+static ssize_t sq_read(struct file *file, char *dst, size_t uLeft,
loff_t *ppos)
{
info.fragstotal = write_sq.max_active;
info.fragsize = write_sq.user_frag_size;
info.bytes = info.fragments * info.fragsize;
- if (copy_to_user((void __user *)arg, &info, sizeof(info)))
+ if (copy_to_user((void *)arg, &info, sizeof(info)))
return -EFAULT;
return 0;
} else
return 0;
}
-static ssize_t state_read(struct file *file, char __user *buf, size_t count,
+static ssize_t state_read(struct file *file, char *buf, size_t count,
loff_t *ppos)
{
int n = state.len - state.ptr;
/*
- * linux/sound/oss/dmasound/dmasound_paula.c
+ * linux/drivers/sound/dmasound/dmasound_paula.c
*
* Amiga `Paula' DMA Sound Driver
*
- * See linux/sound/oss/dmasound/dmasound_core.c for copyright and credits
+ * See linux/drivers/sound/dmasound/dmasound_core.c for copyright and credits
* prior to 28/01/2001
*
* 28/01/2001 [0.1] Iain Sandoe
/*
- * linux/sound/oss/dmasound/dmasound_q40.c
+ * linux/drivers/sound/dmasound/dmasound_q40.c
*
* Q40 DMA Sound Driver
*
- * See linux/sound/oss/dmasound/dmasound_core.c for copyright and credits
+ * See linux/drivers/sound/dmasound/dmasound_core.c for copyright and credits
* prior to 28/01/2001
*
* 28/01/2001 [0.1] Iain Sandoe
{
int rc;
struct tas_biquad_ctrl_t biquad;
- void __user *argp = (void __user *)arg;
- if (copy_from_user(&biquad, argp, sizeof(struct tas_biquad_ctrl_t))) {
+ if (copy_from_user((void *)&biquad, (const void *)arg, sizeof(struct tas_biquad_ctrl_t))) {
return -EFAULT;
}
rc=tas3001c_read_biquad(self, biquad.channel, biquad.filter, &biquad.data);
if (rc != 0) return rc;
- if (copy_to_user(argp, &biquad, sizeof(struct tas_biquad_ctrl_t))) {
+ if (copy_to_user((void *)arg, (const void *)&biquad, sizeof(struct tas_biquad_ctrl_t))) {
return -EFAULT;
}
int i,j;
char sync_required[2][6];
struct tas_biquad_ctrl_t biquad;
- struct tas_biquad_ctrl_list_t __user *argp = (void __user *)arg;
memset(sync_required,0,sizeof(sync_required));
- if (copy_from_user(&filter_count, &argp->filter_count, sizeof(int)))
+ if (copy_from_user((void *)&filter_count,
+ (const void *)arg + offsetof(struct tas_biquad_ctrl_list_t,filter_count),
+ sizeof(int))) {
return -EFAULT;
+ }
- if (copy_from_user(&flags, &argp->flags, sizeof(int)))
+ if (copy_from_user((void *)&flags,
+ (const void *)arg + offsetof(struct tas_biquad_ctrl_list_t,flags),
+ sizeof(int))) {
return -EFAULT;
+ }
if (cmd & SIOC_IN) {
}
for (i=0; i < filter_count; i++) {
- if (copy_from_user(&biquad, &argp->biquads[i],
+ if (copy_from_user((void *)&biquad,
+ (const void *)arg + offsetof(struct tas_biquad_ctrl_list_t, biquads[i]),
sizeof(struct tas_biquad_ctrl_t))) {
return -EFAULT;
}
rc=tas3001c_read_biquad(self, biquad.channel, biquad.filter, &biquad.data);
if (rc != 0) return rc;
- if (copy_to_user(&argp->biquads[i], &biquad,
+ if (copy_to_user((void *)arg + offsetof(struct tas_biquad_ctrl_list_t, biquads[i]),
+ (const void *)&biquad,
sizeof(struct tas_biquad_ctrl_t))) {
return -EFAULT;
}
{
int rc;
struct tas_drce_ctrl_t drce_ctrl;
- void __user *argp = (void __user *)arg;
- if (copy_from_user(&drce_ctrl, argp, sizeof(struct tas_drce_ctrl_t)))
+ if (copy_from_user((void *)&drce_ctrl,
+ (const void *)arg,
+ sizeof(struct tas_drce_ctrl_t))) {
return -EFAULT;
+ }
#ifdef DEBUG_DRCE
printk("DRCE IOCTL: input [ FLAGS:%x ENABLE:%x THRESH:%x\n",
if (drce_ctrl.flags & TAS_DRCE_THRESHOLD)
drce_ctrl.data.threshold = self->drce_state.threshold;
- if (copy_to_user(argp, &drce_ctrl,
+ if (copy_to_user((void *)arg,
+ (const void *)&drce_ctrl,
sizeof(struct tas_drce_ctrl_t))) {
return -EFAULT;
}
u_int cmd,
u_long arg)
{
- uint __user *argp = (void __user *)arg;
switch (cmd) {
case TAS_READ_EQ:
case TAS_WRITE_EQ:
return tas3001c_eq_list_rw(self, cmd, arg);
case TAS_READ_EQ_FILTER_COUNT:
- put_user(TAS3001C_BIQUAD_FILTER_COUNT, argp);
+ put_user(TAS3001C_BIQUAD_FILTER_COUNT, (uint *)(arg));
return 0;
case TAS_READ_EQ_CHANNEL_COUNT:
- put_user(TAS3001C_BIQUAD_CHANNEL_COUNT, argp);
+ put_user(TAS3001C_BIQUAD_CHANNEL_COUNT, (uint *)(arg));
return 0;
case TAS_READ_DRCE:
return tas3001c_drce_rw(self, cmd, arg);
case TAS_READ_DRCE_CAPS:
- put_user(TAS_DRCE_ENABLE | TAS_DRCE_THRESHOLD, argp);
+ put_user(TAS_DRCE_ENABLE | TAS_DRCE_THRESHOLD, (uint *)(arg));
return 0;
case TAS_READ_DRCE_MIN:
case TAS_READ_DRCE_MAX: {
struct tas_drce_ctrl_t drce_ctrl;
- if (copy_from_user(&drce_ctrl, argp,
+ if (copy_from_user((void *)&drce_ctrl,
+ (const void *)arg,
sizeof(struct tas_drce_ctrl_t))) {
return -EFAULT;
}
}
}
- if (copy_to_user(argp, &drce_ctrl,
+ if (copy_to_user((void *)arg,
+ (const void *)&drce_ctrl,
sizeof(struct tas_drce_ctrl_t))) {
return -EFAULT;
}
static struct tas_drce_t eqp_0e_2_1_drce = {
.enable = 1,
- .above = { .val = 3.0 * (1<<8), .expand = 0 },
- .below = { .val = 1.0 * (1<<8), .expand = 0 },
- .threshold = -15.33 * (1<<8),
- .energy = 2.4 * (1<<12),
- .attack = 0.013 * (1<<12),
- .decay = 0.212 * (1<<12),
+ .above { .val = 3.0 * (1<<8), .expand = 0 },
+ .below { .val = 1.0 * (1<<8), .expand = 0 },
+ .threshold -15.33 * (1<<8),
+ .energy 2.4 * (1<<12),
+ .attack 0.013 * (1<<12),
+ .decay 0.212 * (1<<12),
};
static struct tas_biquad_ctrl_t eqp_0e_2_1_biquads[]={
u_int cmd,
u_long arg)
{
- void __user *argp = (void __user *)arg;
int rc;
struct tas_biquad_ctrl_t biquad;
- if (copy_from_user((void *)&biquad, argp, sizeof(struct tas_biquad_ctrl_t))) {
+ if (copy_from_user((void *)&biquad, (const void *)arg, sizeof(struct tas_biquad_ctrl_t))) {
return -EFAULT;
}
rc=tas3004_read_biquad(self, biquad.channel, biquad.filter, &biquad.data);
if (rc != 0) return rc;
- if (copy_to_user(argp, &biquad, sizeof(struct tas_biquad_ctrl_t))) {
+ if (copy_to_user((void *)arg, (const void *)&biquad, sizeof(struct tas_biquad_ctrl_t))) {
return -EFAULT;
}
int i,j;
char sync_required[TAS3004_BIQUAD_CHANNEL_COUNT][TAS3004_BIQUAD_FILTER_COUNT];
struct tas_biquad_ctrl_t biquad;
- struct tas_biquad_ctrl_list_t __user *argp = (void __user *)arg;
memset(sync_required,0,sizeof(sync_required));
- if (copy_from_user(&filter_count, &argp->filter_count, sizeof(int)))
+ if (copy_from_user((void *)&filter_count,
+ (const void *)arg + offsetof(struct tas_biquad_ctrl_list_t,filter_count),
+ sizeof(int))) {
return -EFAULT;
+ }
- if (copy_from_user(&flags, &argp->flags, sizeof(int)))
+ if (copy_from_user((void *)&flags,
+ (const void *)arg + offsetof(struct tas_biquad_ctrl_list_t,flags),
+ sizeof(int))) {
return -EFAULT;
+ }
if (cmd & SIOC_IN) {
}
for (i=0; i < filter_count; i++) {
- if (copy_from_user(&biquad, &argp->biquads[i],
+ if (copy_from_user((void *)&biquad,
+ (const void *)arg + offsetof(struct tas_biquad_ctrl_list_t, biquads[i]),
sizeof(struct tas_biquad_ctrl_t))) {
return -EFAULT;
}
rc=tas3004_read_biquad(self, biquad.channel, biquad.filter, &biquad.data);
if (rc != 0) return rc;
- if (copy_to_user(&argp->biquads[i], &biquad,
+ if (copy_to_user((void *)arg + offsetof(struct tas_biquad_ctrl_list_t, biquads[i]),
+ (const void *)&biquad,
sizeof(struct tas_biquad_ctrl_t))) {
return -EFAULT;
}
{
int rc;
struct tas_drce_ctrl_t drce_ctrl;
- void __user *argp = (void __user *)arg;
- if (copy_from_user(&drce_ctrl, argp, sizeof(struct tas_drce_ctrl_t)))
+ if (copy_from_user((void *)&drce_ctrl,
+ (const void *)arg,
+ sizeof(struct tas_drce_ctrl_t))) {
return -EFAULT;
+ }
#ifdef DEBUG_DRCE
printk("DRCE: input [ FLAGS:%x ENABLE:%x ABOVE:%x/%x BELOW:%x/%x THRESH:%x ENERGY:%x ATTACK:%x DECAY:%x\n",
if (drce_ctrl.flags & TAS_DRCE_DECAY)
drce_ctrl.data.decay = self->drce_state.decay;
- if (copy_to_user(argp, &drce_ctrl,
+ if (copy_to_user((void *)arg,
+ (const void *)&drce_ctrl,
sizeof(struct tas_drce_ctrl_t))) {
return -EFAULT;
}
u_int cmd,
u_long arg)
{
- uint __user *argp = (void __user *)arg;
switch (cmd) {
case TAS_READ_EQ:
case TAS_WRITE_EQ:
return tas3004_eq_list_rw(self, cmd, arg);
case TAS_READ_EQ_FILTER_COUNT:
- put_user(TAS3004_BIQUAD_FILTER_COUNT, argp);
+ put_user(TAS3004_BIQUAD_FILTER_COUNT, (uint *)(arg));
return 0;
case TAS_READ_EQ_CHANNEL_COUNT:
- put_user(TAS3004_BIQUAD_CHANNEL_COUNT, argp);
+ put_user(TAS3004_BIQUAD_CHANNEL_COUNT, (uint *)(arg));
return 0;
case TAS_READ_DRCE:
TAS_DRCE_ENERGY |
TAS_DRCE_ATTACK |
TAS_DRCE_DECAY,
- argp);
+ (uint *)(arg));
return 0;
case TAS_READ_DRCE_MIN:
struct tas_drce_ctrl_t drce_ctrl;
const struct tas_drce_t *drce_copy;
- if (copy_from_user(&drce_ctrl, argp,
+ if (copy_from_user((void *)&drce_ctrl,
+ (const void *)arg,
sizeof(struct tas_drce_ctrl_t))) {
return -EFAULT;
}
drce_ctrl.data.decay=drce_copy->decay;
}
- if (copy_to_user(argp, &drce_ctrl,
+ if (copy_to_user((void *)arg,
+ (const void *)&drce_ctrl,
sizeof(struct tas_drce_ctrl_t))) {
return -EFAULT;
}
/*
- * linux/sound/oss/dmasound/trans_16.c
+ * linux/drivers/sound/dmasound/trans_16.c
*
* 16 bit translation routines. Only used by Power mac at present.
*
- * See linux/sound/oss/dmasound/dmasound_core.c for copyright and
+ * See linux/drivers/sound/dmasound/dmasound_core.c for copyright and
* history prior to 08/02/2001.
*
* 08/02/2001 Iain Sandoe
static short dmasound_alaw2dma16[] ;
static short dmasound_ulaw2dma16[] ;
-static ssize_t pmac_ct_law(const u_char __user *userPtr, size_t userCount,
+static ssize_t pmac_ct_law(const u_char *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
-static ssize_t pmac_ct_s8(const u_char __user *userPtr, size_t userCount,
+static ssize_t pmac_ct_s8(const u_char *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
-static ssize_t pmac_ct_u8(const u_char __user *userPtr, size_t userCount,
+static ssize_t pmac_ct_u8(const u_char *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
-static ssize_t pmac_ct_s16(const u_char __user *userPtr, size_t userCount,
+static ssize_t pmac_ct_s16(const u_char *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
-static ssize_t pmac_ct_u16(const u_char __user *userPtr, size_t userCount,
+static ssize_t pmac_ct_u16(const u_char *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
-static ssize_t pmac_ctx_law(const u_char __user *userPtr, size_t userCount,
+static ssize_t pmac_ctx_law(const u_char *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
-static ssize_t pmac_ctx_s8(const u_char __user *userPtr, size_t userCount,
+static ssize_t pmac_ctx_s8(const u_char *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
-static ssize_t pmac_ctx_u8(const u_char __user *userPtr, size_t userCount,
+static ssize_t pmac_ctx_u8(const u_char *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
-static ssize_t pmac_ctx_s16(const u_char __user *userPtr, size_t userCount,
+static ssize_t pmac_ctx_s16(const u_char *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
-static ssize_t pmac_ctx_u16(const u_char __user *userPtr, size_t userCount,
+static ssize_t pmac_ctx_u16(const u_char *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
-static ssize_t pmac_ct_s16_read(const u_char __user *userPtr, size_t userCount,
+static ssize_t pmac_ct_s16_read(const u_char *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
-static ssize_t pmac_ct_u16_read(const u_char __user *userPtr, size_t userCount,
+static ssize_t pmac_ct_u16_read(const u_char *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
static int expand_data; /* Data for expanding */
-static ssize_t pmac_ct_law(const u_char __user *userPtr, size_t userCount,
+static ssize_t pmac_ct_law(const u_char *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
}
-static ssize_t pmac_ct_s8(const u_char __user *userPtr, size_t userCount,
+static ssize_t pmac_ct_s8(const u_char *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
}
-static ssize_t pmac_ct_u8(const u_char __user *userPtr, size_t userCount,
+static ssize_t pmac_ct_u8(const u_char *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
}
-static ssize_t pmac_ct_s16(const u_char __user *userPtr, size_t userCount,
+static ssize_t pmac_ct_s16(const u_char *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
userCount >>= (stereo? 2: 1);
used = count = min_t(unsigned long, userCount, frameLeft);
if (!stereo) {
- short __user *up = (short __user *) userPtr;
+ short *up = (short *) userPtr;
while (count > 0) {
short data;
if (get_user(data, up++))
return stereo? used * 4: used * 2;
}
-static ssize_t pmac_ct_u16(const u_char __user *userPtr, size_t userCount,
+static ssize_t pmac_ct_u16(const u_char *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
int mask = (dmasound.soft.format == AFMT_U16_LE? 0x0080: 0x8000);
int stereo = dmasound.soft.stereo;
short *fp = (short *) &frame[*frameUsed];
- short __user *up = (short __user *) userPtr;
+ short *up = (short *) userPtr;
frameLeft >>= 2;
userCount >>= (stereo? 2: 1);
}
-static ssize_t pmac_ctx_law(const u_char __user *userPtr, size_t userCount,
+static ssize_t pmac_ctx_law(const u_char *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
return stereo? utotal * 2: utotal;
}
-static ssize_t pmac_ctx_s8(const u_char __user *userPtr, size_t userCount,
+static ssize_t pmac_ctx_s8(const u_char *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
}
-static ssize_t pmac_ctx_u8(const u_char __user *userPtr, size_t userCount,
+static ssize_t pmac_ctx_u8(const u_char *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
}
-static ssize_t pmac_ctx_s16(const u_char __user *userPtr, size_t userCount,
+static ssize_t pmac_ctx_s16(const u_char *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
unsigned int *p = (unsigned int *) &frame[*frameUsed];
unsigned int data = expand_data;
- unsigned short __user *up = (unsigned short __user *) userPtr;
+ unsigned short *up = (unsigned short *) userPtr;
int bal = expand_bal;
int hSpeed = dmasound.hard.speed, sSpeed = dmasound.soft.speed;
int stereo = dmasound.soft.stereo;
}
-static ssize_t pmac_ctx_u16(const u_char __user *userPtr, size_t userCount,
+static ssize_t pmac_ctx_u16(const u_char *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
int mask = (dmasound.soft.format == AFMT_U16_LE? 0x0080: 0x8000);
unsigned int *p = (unsigned int *) &frame[*frameUsed];
unsigned int data = expand_data;
- unsigned short __user *up = (unsigned short __user *) userPtr;
+ unsigned short *up = (unsigned short *) userPtr;
int bal = expand_bal;
int hSpeed = dmasound.hard.speed, sSpeed = dmasound.soft.speed;
int stereo = dmasound.soft.stereo;
/* data in routines... */
-static ssize_t pmac_ct_s8_read(const u_char __user *userPtr, size_t userCount,
+static ssize_t pmac_ct_s8_read(const u_char *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
val = *p++;
val = (val * software_input_volume) >> 7;
data = val >> 8;
- if (put_user(data, (u_char __user *)userPtr++))
+ if (put_user(data, (u_char *)userPtr++))
return -EFAULT;
if (stereo) {
val = *p;
val = (val * software_input_volume) >> 7;
data = val >> 8;
- if (put_user(data, (u_char __user *)userPtr++))
+ if (put_user(data, (u_char *)userPtr++))
return -EFAULT;
}
p++;
}
-static ssize_t pmac_ct_u8_read(const u_char __user *userPtr, size_t userCount,
+static ssize_t pmac_ct_u8_read(const u_char *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
val = *p++;
val = (val * software_input_volume) >> 7;
data = (val >> 8) ^ 0x80;
- if (put_user(data, (u_char __user *)userPtr++))
+ if (put_user(data, (u_char *)userPtr++))
return -EFAULT;
if (stereo) {
val = *p;
val = (val * software_input_volume) >> 7;
data = (val >> 8) ^ 0x80;
- if (put_user(data, (u_char __user *)userPtr++))
+ if (put_user(data, (u_char *)userPtr++))
return -EFAULT;
}
p++;
return stereo? used * 2: used;
}
-static ssize_t pmac_ct_s16_read(const u_char __user *userPtr, size_t userCount,
+static ssize_t pmac_ct_s16_read(const u_char *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
ssize_t count, used;
int stereo = dmasound.soft.stereo;
short *fp = (short *) &frame[*frameUsed];
- short __user *up = (short __user *) userPtr;
+ short *up = (short *) userPtr;
frameLeft >>= 2;
userCount >>= (stereo? 2: 1);
return stereo? used * 4: used * 2;
}
-static ssize_t pmac_ct_u16_read(const u_char __user *userPtr, size_t userCount,
+static ssize_t pmac_ct_u16_read(const u_char *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
int mask = (dmasound.soft.format == AFMT_U16_LE? 0x0080: 0x8000);
int stereo = dmasound.soft.stereo;
short *fp = (short *) &frame[*frameUsed];
- short __user *up = (short __user *) userPtr;
+ short *up = (short *) userPtr;
frameLeft >>= 2;
userCount >>= (stereo? 2: 1);
/* data in routines (reducing speed)... */
-static ssize_t pmac_ctx_s8_read(const u_char __user *userPtr, size_t userCount,
+static ssize_t pmac_ctx_s8_read(const u_char *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
p++;
if (bal < 0) {
data = vall >> 8;
- if (put_user(data, (u_char __user *)userPtr++))
+ if (put_user(data, (u_char *)userPtr++))
return -EFAULT;
if (stereo) {
data = valr >> 8;
- if (put_user(data, (u_char __user *)userPtr++))
+ if (put_user(data, (u_char *)userPtr++))
return -EFAULT;
}
userCount--;
}
-static ssize_t pmac_ctx_u8_read(const u_char __user *userPtr, size_t userCount,
+static ssize_t pmac_ctx_u8_read(const u_char *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
p++;
if (bal < 0) {
data = (vall >> 8) ^ 0x80;
- if (put_user(data, (u_char __user *)userPtr++))
+ if (put_user(data, (u_char *)userPtr++))
return -EFAULT;
if (stereo) {
data = (valr >> 8) ^ 0x80;
- if (put_user(data, (u_char __user *)userPtr++))
+ if (put_user(data, (u_char *)userPtr++))
return -EFAULT;
}
userCount--;
return stereo? utotal * 2: utotal;
}
-static ssize_t pmac_ctx_s16_read(const u_char __user *userPtr, size_t userCount,
+static ssize_t pmac_ctx_s16_read(const u_char *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
int bal = expand_read_bal;
short *fp = (short *) &frame[*frameUsed];
- short __user *up = (short __user *) userPtr;
+ short *up = (short *) userPtr;
int stereo = dmasound.soft.stereo;
int hSpeed = dmasound.hard.speed, sSpeed = dmasound.soft.speed;
int utotal, ftotal;
return stereo? utotal * 4: utotal * 2;
}
-static ssize_t pmac_ctx_u16_read(const u_char __user *userPtr, size_t userCount,
+static ssize_t pmac_ctx_u16_read(const u_char *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
int bal = expand_read_bal;
int mask = (dmasound.soft.format == AFMT_U16_LE? 0x0080: 0x8000);
short *fp = (short *) &frame[*frameUsed];
- short __user *up = (short __user *) userPtr;
+ short *up = (short *) userPtr;
int stereo = dmasound.soft.stereo;
int hSpeed = dmasound.hard.speed, sSpeed = dmasound.soft.speed;
int utotal, ftotal;
DPD(3, "emu10k1_audio_read(), buffer=%p, count=%d\n", buffer, (u32) count);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
if (!access_ok(VERIFY_WRITE, buffer, count))
return -EFAULT;
DPD(3, "emu10k1_audio_write(), buffer=%p, count=%d\n", buffer, (u32) count);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
if (!access_ok(VERIFY_READ, buffer, count))
return -EFAULT;
file->private_data = (void *) wave_dev;
- return nonseekable_open(inode, file);
+ return 0;
}
static int emu10k1_audio_release(struct inode *inode, struct file *file)
{
char s[48];
- if (!proc_mkdir ("driver/emu10k1", NULL)) {
+ if (!proc_mkdir ("driver/emu10k1", 0)) {
printk(KERN_ERR "emu10k1: unable to create proc directory driver/emu10k1\n");
goto err_out;
}
sprintf(s, "driver/emu10k1/%s", pci_name(card->pci_dev));
- if (!proc_mkdir (s, NULL)) {
+ if (!proc_mkdir (s, 0)) {
printk(KERN_ERR "emu10k1: unable to create proc directory %s\n", s);
goto err_emu10k1_proc;
}
sprintf(s, "driver/emu10k1/%s/info", pci_name(card->pci_dev));
- if (!create_proc_read_entry (s, 0, NULL, emu10k1_info_proc, card)) {
+ if (!create_proc_read_entry (s, 0, 0, emu10k1_info_proc, card)) {
printk(KERN_ERR "emu10k1: unable to create proc entry %s\n", s);
goto err_dev_proc;
}
if (!card->is_aps) {
sprintf(s, "driver/emu10k1/%s/ac97", pci_name(card->pci_dev));
- if (!create_proc_read_entry (s, 0, NULL, ac97_read_proc, card->ac97)) {
+ if (!create_proc_read_entry (s, 0, 0, ac97_read_proc, card->ac97)) {
printk(KERN_ERR "emu10k1: unable to create proc entry %s\n", s);
goto err_proc_ac97;
}
up(&card->open_sem);
- return nonseekable_open(inode, file);
+ return 0;
}
static int emu10k1_midi_release(struct inode *inode, struct file *file)
DPD(4, "emu10k1_midi_read(), count %#x\n", (u32) count);
+ if (pos != &file->f_pos)
+ return -ESPIPE;
+
if (!access_ok(VERIFY_WRITE, buffer, count))
return -EFAULT;
DPD(4, "emu10k1_midi_write(), count=%#x\n", (u32) count);
+ if (pos != &file->f_pos)
+ return -ESPIPE;
+
if (!access_ok(VERIFY_READ, buffer, count))
return -EFAULT;
if (card->seq_mididev) {
kfree(card->seq_mididev);
- card->seq_mididev = NULL;
+ card->seq_mididev = 0;
}
}
}
VALIDATE_STATE(s);
file->private_data = s;
- return nonseekable_open(inode, file);
+ return 0;
}
static int es1370_release_mixdev(struct inode *inode, struct file *file)
int cnt;
VALIDATE_STATE(s);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (s->dma_adc.mapped)
return -ENXIO;
if (!access_ok(VERIFY_WRITE, buffer, count))
int cnt;
VALIDATE_STATE(s);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (s->dma_dac2.mapped)
return -ENXIO;
if (!access_ok(VERIFY_READ, buffer, count))
s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE);
up(&s->open_sem);
init_MUTEX(&s->sem);
- return nonseekable_open(inode, file);
+ return 0;
}
static int es1370_release(struct inode *inode, struct file *file)
int cnt;
VALIDATE_STATE(s);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (s->dma_dac1.mapped)
return -ENXIO;
if (!s->dma_dac1.ready && (ret = prog_dmabuf_dac1(s)))
spin_unlock_irqrestore(&s->lock, flags);
s->open_mode |= FMODE_DAC;
up(&s->open_sem);
- return nonseekable_open(inode, file);
+ return 0;
}
static int es1370_release_dac(struct inode *inode, struct file *file)
int cnt;
VALIDATE_STATE(s);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (!access_ok(VERIFY_WRITE, buffer, count))
return -EFAULT;
if (count == 0)
int cnt;
VALIDATE_STATE(s);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (!access_ok(VERIFY_READ, buffer, count))
return -EFAULT;
if (count == 0)
spin_unlock_irqrestore(&s->lock, flags);
s->open_mode |= (file->f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ | FMODE_MIDI_WRITE);
up(&s->open_sem);
- return nonseekable_open(inode, file);
+ return 0;
}
static int es1370_midi_release(struct inode *inode, struct file *file)
}
VALIDATE_STATE(s);
file->private_data = s;
- return nonseekable_open(inode, file);
+ return 0;
}
static int es1371_release_mixdev(struct inode *inode, struct file *file)
int cnt;
VALIDATE_STATE(s);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (s->dma_adc.mapped)
return -ENXIO;
if (!access_ok(VERIFY_WRITE, buffer, count))
int cnt;
VALIDATE_STATE(s);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (s->dma_dac2.mapped)
return -ENXIO;
if (!access_ok(VERIFY_READ, buffer, count))
s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE);
up(&s->open_sem);
init_MUTEX(&s->sem);
- return nonseekable_open(inode, file);
+ return 0;
}
static int es1371_release(struct inode *inode, struct file *file)
int cnt;
VALIDATE_STATE(s);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (s->dma_dac1.mapped)
return -ENXIO;
if (!s->dma_dac1.ready && (ret = prog_dmabuf_dac1(s)))
spin_unlock_irqrestore(&s->lock, flags);
s->open_mode |= FMODE_DAC;
up(&s->open_sem);
- return nonseekable_open(inode, file);
+ return 0;
}
static int es1371_release_dac(struct inode *inode, struct file *file)
int cnt;
VALIDATE_STATE(s);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (!access_ok(VERIFY_WRITE, buffer, count))
return -EFAULT;
if (count == 0)
int cnt;
VALIDATE_STATE(s);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (!access_ok(VERIFY_READ, buffer, count))
return -EFAULT;
if (count == 0)
spin_unlock_irqrestore(&s->lock, flags);
s->open_mode |= (file->f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ | FMODE_MIDI_WRITE);
up(&s->open_sem);
- return nonseekable_open(inode, file);
+ return 0;
}
static int es1371_midi_release(struct inode *inode, struct file *file)
return -ENODEV;
VALIDATE_STATE(s);
file->private_data = s;
- return nonseekable_open(inode, file);
+ return 0;
}
static int solo1_release_mixdev(struct inode *inode, struct file *file)
int cnt;
VALIDATE_STATE(s);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (s->dma_adc.mapped)
return -ENXIO;
if (!s->dma_adc.ready && (ret = prog_dmabuf_adc(s)))
int cnt;
VALIDATE_STATE(s);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (s->dma_dac.mapped)
return -ENXIO;
if (!s->dma_dac.ready && (ret = prog_dmabuf_dac(s)))
s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE);
up(&s->open_sem);
prog_codec(s);
- return nonseekable_open(inode, file);
+ return 0;
}
static /*const*/ struct file_operations solo1_audio_fops = {
int cnt;
VALIDATE_STATE(s);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (!access_ok(VERIFY_WRITE, buffer, count))
return -EFAULT;
if (count == 0)
int cnt;
VALIDATE_STATE(s);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (!access_ok(VERIFY_READ, buffer, count))
return -EFAULT;
if (count == 0)
spin_unlock_irqrestore(&s->lock, flags);
s->open_mode |= (file->f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ | FMODE_MIDI_WRITE);
up(&s->open_sem);
- return nonseekable_open(inode, file);
+ return 0;
}
static int solo1_midi_release(struct inode *inode, struct file *file)
outb(1, s->sbbase+3); /* enable OPL3 */
s->open_mode |= FMODE_DMFM;
up(&s->open_sem);
- return nonseekable_open(inode, file);
+ return 0;
}
static int solo1_dmfm_release(struct inode *inode, struct file *file)
if (file->f_mode & FMODE_READ)
forte_channel_init (forte, &forte->rec);
- return nonseekable_open(inode, file);
+ return 0;
}
unsigned int i = bytes, sz = 0;
unsigned long flags;
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
if (!access_ok (VERIFY_READ, buffer, bytes))
return -EFAULT;
unsigned int i = bytes, sz;
unsigned long flags;
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
if (!access_ok (VERIFY_WRITE, buffer, bytes))
return -EFAULT;
static int __init
forte_proc_init (void)
{
- if (!proc_mkdir ("driver/forte", NULL))
+ if (!proc_mkdir ("driver/forte", 0))
return -EIO;
- if (!create_proc_read_entry ("driver/forte/chip", 0, NULL, forte_proc_read, forte)) {
+ if (!create_proc_read_entry ("driver/forte/chip", 0, 0, forte_proc_read, forte)) {
remove_proc_entry ("driver/forte", NULL);
return -EIO;
}
- if (!create_proc_read_entry("driver/forte/ac97", 0, NULL, ac97_read_proc, forte->ac97)) {
+ if (!create_proc_read_entry("driver/forte/ac97", 0, 0, ac97_read_proc, forte->ac97)) {
remove_proc_entry ("driver/forte/chip", NULL);
remove_proc_entry ("driver/forte", NULL);
return -EIO;
{
gus_wave_init(hw_config);
+ request_region(hw_config->io_base, 16, "GUS");
+ request_region(hw_config->io_base + 0x100, 12, "GUS"); /* 0x10c-> is MAX */
+
if (sound_alloc_dma(hw_config->dma, "GUS"))
printk(KERN_ERR "gus_card.c: Can't allocate DMA channel %d\n", hw_config->dma);
if (hw_config->dma2 != -1 && hw_config->dma2 != hw_config->dma)
printk(KERN_ERR "GUS: Unsupported IRQ %d\n", irq);
return 0;
}
- if (gus_wave_detect(hw_config->io_base))
+ if (check_region(hw_config->io_base, 16))
+ printk(KERN_ERR "GUS: I/O range conflict (1)\n");
+ else if (check_region(hw_config->io_base + 0x100, 16))
+ printk(KERN_ERR "GUS: I/O range conflict (2)\n");
+ else if (gus_wave_detect(hw_config->io_base))
return 1;
#ifndef EXCLUDE_GUS_IODETECT
* Look at the possible base addresses (0x2X0, X=1, 2, 3, 4, 5, 6)
*/
- for (io_addr = 0x210; io_addr <= 0x260; io_addr += 0x10) {
- if (io_addr == hw_config->io_base) /* Already tested */
- continue;
- if (gus_wave_detect(io_addr)) {
- hw_config->io_base = io_addr;
- return 1;
- }
- }
+ for (io_addr = 0x210; io_addr <= 0x260; io_addr += 0x10)
+ if (io_addr != hw_config->io_base) /*
+ * Already tested
+ */
+ if (!check_region(io_addr, 16))
+ if (!check_region(io_addr + 0x100, 16))
+ if (gus_wave_detect(io_addr))
+ {
+ hw_config->io_base = io_addr;
+ return 1;
+ }
#endif
printk("NO GUS card found !\n");
unsigned long loc;
unsigned char val;
- if (!request_region(baseaddr, 16, "GUS"))
- return 0;
- if (!request_region(baseaddr + 0x100, 12, "GUS")) { /* 0x10c-> is MAX */
- release_region(baseaddr, 16);
- return 0;
- }
-
gus_base = baseaddr;
gus_write8(0x4c, 0); /* Reset GF1 */
/* See if there is first block there.... */
gus_poke(0L, 0xaa);
- if (gus_peek(0L) != 0xaa) {
- release_region(baseaddr + 0x100, 12);
- release_region(baseaddr, 16);
- return 0;
- }
+ if (gus_peek(0L) != 0xaa)
+ return (0);
/* Now zero it out so that I can check for mirroring .. */
gus_poke(0L, 0x00);
if (hal2) {
file->private_data = hal2;
- return nonseekable_open(inode, file);
+ return 0;
}
return -ENODEV;
}
if (!count)
return 0;
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (down_interruptible(&adc->sem))
return -EINTR;
if (file->f_flags & O_NONBLOCK) {
if (!count)
return 0;
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (down_interruptible(&dac->sem))
return -EINTR;
if (file->f_flags & O_NONBLOCK) {
dac->usecount++;
}
- return nonseekable_open(inode, file);
+ return 0;
}
static int hal2_release(struct inode *inode, struct file *file)
/* extract register offset from codec struct */
#define IO_REG_OFF(codec) (((struct i810_card *) codec->private_data)->ac97_id_map[codec->id])
-#define I810_IOREAD(size, type, card, off) \
-({ \
- type val; \
- if (card->use_mmio) \
- val=read##size(card->iobase_mmio+off); \
- else \
- val=in##size(card->iobase+off); \
- val; \
-})
-
-#define I810_IOREADL(card, off) I810_IOREAD(l, u32, card, off)
-#define I810_IOREADW(card, off) I810_IOREAD(w, u16, card, off)
-#define I810_IOREADB(card, off) I810_IOREAD(b, u8, card, off)
-
-#define I810_IOWRITE(size, val, card, off) \
-({ \
- if (card->use_mmio) \
- write##size(val, card->iobase_mmio+off); \
- else \
- out##size(val, card->iobase+off); \
-})
-
-#define I810_IOWRITEL(val, card, off) I810_IOWRITE(l, val, card, off)
-#define I810_IOWRITEW(val, card, off) I810_IOWRITE(w, val, card, off)
-#define I810_IOWRITEB(val, card, off) I810_IOWRITE(b, val, card, off)
-
-#define GET_CIV(card, port) MODULOP2(I810_IOREADB((card), (port) + OFF_CIV), SG_LEN)
-#define GET_LVI(card, port) MODULOP2(I810_IOREADB((card), (port) + OFF_LVI), SG_LEN)
+#define GET_CIV(port) MODULOP2(inb((port) + OFF_CIV), SG_LEN)
+#define GET_LVI(port) MODULOP2(inb((port) + OFF_LVI), SG_LEN)
/* set LVI from CIV */
-#define CIV_TO_LVI(card, port, off) \
- I810_IOWRITEB(MODULOP2(GET_CIV((card), (port)) + (off), SG_LEN), (card), (port) + OFF_LVI)
+#define CIV_TO_LVI(port, off) \
+ outb(MODULOP2(GET_CIV((port)) + (off), SG_LEN), (port) + OFF_LVI)
static struct i810_card *devs = NULL;
return 0;
if (rec)
- port = dmabuf->read_channel->port;
+ port = state->card->iobase + dmabuf->read_channel->port;
else
- port = dmabuf->write_channel->port;
+ port = state->card->iobase + dmabuf->write_channel->port;
if(state->card->pci_id == PCI_DEVICE_ID_SI_7012) {
port_picb = port + OFF_SR;
port_picb = port + OFF_PICB;
do {
- civ = GET_CIV(state->card, port);
- offset = I810_IOREADW(state->card, port_picb);
+ civ = GET_CIV(port);
+ offset = inw(port_picb);
/* Must have a delay here! */
if(offset == 0)
udelay(1);
* that we won't have to worry about the chip still being
* out of sync with reality ;-)
*/
- } while (civ != GET_CIV(state->card, port) || offset != I810_IOREADW(state->card, port_picb));
+ } while (civ != GET_CIV(port) || offset != inw(port_picb));
return (((civ + 1) * dmabuf->fragsize - (bytes * offset))
% dmabuf->dmasize);
struct i810_card *card = state->card;
dmabuf->enable &= ~ADC_RUNNING;
- I810_IOWRITEB(0, card, PI_CR);
+ outb(0, card->iobase + PI_CR);
// wait for the card to acknowledge shutdown
- while( I810_IOREADB(card, PI_CR) != 0 ) ;
+ while( inb(card->iobase + PI_CR) != 0 ) ;
// now clear any latent interrupt bits (like the halt bit)
if(card->pci_id == PCI_DEVICE_ID_SI_7012)
- I810_IOWRITEB( I810_IOREADB(card, PI_PICB), card, PI_PICB );
+ outb( inb(card->iobase + PI_PICB), card->iobase + PI_PICB );
else
- I810_IOWRITEB( I810_IOREADB(card, PI_SR), card, PI_SR );
- I810_IOWRITEL( I810_IOREADL(card, GLOB_STA) & INT_PI, card, GLOB_STA);
+ outb( inb(card->iobase + PI_SR), card->iobase + PI_SR );
+ outl( inl(card->iobase + GLOB_STA) & INT_PI, card->iobase + GLOB_STA);
}
static void stop_adc(struct i810_state *state)
(dmabuf->trigger & PCM_ENABLE_INPUT)) {
dmabuf->enable |= ADC_RUNNING;
// Interrupt enable, LVI enable, DMA enable
- I810_IOWRITEB(0x10 | 0x04 | 0x01, state->card, PI_CR);
+ outb(0x10 | 0x04 | 0x01, state->card->iobase + PI_CR);
}
}
struct i810_card *card = state->card;
dmabuf->enable &= ~DAC_RUNNING;
- I810_IOWRITEB(0, card, PO_CR);
+ outb(0, card->iobase + PO_CR);
// wait for the card to acknowledge shutdown
- while( I810_IOREADB(card, PO_CR) != 0 ) ;
+ while( inb(card->iobase + PO_CR) != 0 ) ;
// now clear any latent interrupt bits (like the halt bit)
if(card->pci_id == PCI_DEVICE_ID_SI_7012)
- I810_IOWRITEB( I810_IOREADB(card, PO_PICB), card, PO_PICB );
+ outb( inb(card->iobase + PO_PICB), card->iobase + PO_PICB );
else
- I810_IOWRITEB( I810_IOREADB(card, PO_SR), card, PO_SR );
- I810_IOWRITEL( I810_IOREADL(card, GLOB_STA) & INT_PO, card, GLOB_STA);
+ outb( inb(card->iobase + PO_SR), card->iobase + PO_SR );
+ outl( inl(card->iobase + GLOB_STA) & INT_PO, card->iobase + GLOB_STA);
}
static void stop_dac(struct i810_state *state)
(dmabuf->trigger & PCM_ENABLE_OUTPUT)) {
dmabuf->enable |= DAC_RUNNING;
// Interrupt enable, LVI enable, DMA enable
- I810_IOWRITEB(0x10 | 0x04 | 0x01, state->card, PO_CR);
+ outb(0x10 | 0x04 | 0x01, state->card->iobase + PO_CR);
}
}
static void start_dac(struct i810_state *state)
sg++;
}
spin_lock_irqsave(&state->card->lock, flags);
- I810_IOWRITEB(2, state->card, c->port+OFF_CR); /* reset DMA machine */
- while( I810_IOREADB(state->card, c->port+OFF_CR) & 0x02 ) ;
- I810_IOWRITEL((u32)state->card->chandma +
+ outb(2, state->card->iobase+c->port+OFF_CR); /* reset DMA machine */
+ while( inb(state->card->iobase+c->port+OFF_CR) & 0x02 ) ;
+ outl((u32)state->card->chandma +
c->num*sizeof(struct i810_channel),
- state->card, c->port+OFF_BDBAR);
- CIV_TO_LVI(state->card, c->port, 0);
+ state->card->iobase+c->port+OFF_BDBAR);
+ CIV_TO_LVI(state->card->iobase+c->port, 0);
spin_unlock_irqrestore(&state->card->lock, flags);
void (*start)(struct i810_state *);
count = dmabuf->count;
+ port = state->card->iobase;
if (rec) {
- port = dmabuf->read_channel->port;
+ port += dmabuf->read_channel->port;
trigger = PCM_ENABLE_INPUT;
start = __start_adc;
count = dmabuf->dmasize - count;
} else {
- port = dmabuf->write_channel->port;
+ port += dmabuf->write_channel->port;
trigger = PCM_ENABLE_OUTPUT;
start = __start_dac;
}
return;
start(state);
- while (!(I810_IOREADB(state->card, port + OFF_CR) & ((1<<4) | (1<<2))))
+ while (!(inb(port + OFF_CR) & ((1<<4) | (1<<2))))
;
}
/* MASKP2(swptr, fragsize) - 1 is the tail of our transfer */
x = MODULOP2(MASKP2(dmabuf->swptr, fragsize) - 1, dmabuf->dmasize);
x >>= dmabuf->fragshift;
- I810_IOWRITEB(x, state->card, port + OFF_LVI);
+ outb(x, port + OFF_LVI);
}
static void i810_update_lvi(struct i810_state *state, int rec)
/* this is normal for the end of a read */
/* only give an error if we went past the */
/* last valid sg entry */
- if (GET_CIV(state->card, PI_BASE) !=
- GET_LVI(state->card, PI_BASE)) {
+ if (GET_CIV(state->card->iobase + PI_BASE) !=
+ GET_LVI(state->card->iobase + PI_BASE)) {
printk(KERN_WARNING "i810_audio: DMA overrun on read\n");
dmabuf->error++;
}
/* this is normal for the end of a write */
/* only give an error if we went past the */
/* last valid sg entry */
- if (GET_CIV(state->card, PO_BASE) !=
- GET_LVI(state->card, PO_BASE)) {
+ if (GET_CIV(state->card->iobase + PO_BASE) !=
+ GET_LVI(state->card->iobase + PO_BASE)) {
printk(KERN_WARNING "i810_audio: DMA overrun on write\n");
printk("i810_audio: CIV %d, LVI %d, hwptr %x, "
"count %d\n",
- GET_CIV(state->card, PO_BASE),
- GET_LVI(state->card, PO_BASE),
+ GET_CIV(state->card->iobase + PO_BASE),
+ GET_LVI(state->card->iobase + PO_BASE),
dmabuf->hwptr, dmabuf->count);
dmabuf->error++;
}
struct i810_state *state = card->states[i];
struct i810_channel *c;
struct dmabuf *dmabuf;
- unsigned long port;
+ unsigned long port = card->iobase;
u16 status;
if(!state)
} else /* This can occur going from R/W to close */
continue;
- port = c->port;
+ port+=c->port;
if(card->pci_id == PCI_DEVICE_ID_SI_7012)
- status = I810_IOREADW(card, port + OFF_PICB);
+ status = inw(port + OFF_PICB);
else
- status = I810_IOREADW(card, port + OFF_SR);
+ status = inw(port + OFF_SR);
#ifdef DEBUG_INTERRUPTS
printk("NUM %d PORT %X IRQ ( ST%d ", c->num, c->port, status);
if(dmabuf->enable & ADC_RUNNING)
count = dmabuf->dmasize - count;
if (count >= (int)dmabuf->fragsize) {
- I810_IOWRITEB(I810_IOREADB(card, port+OFF_CR) | 1, card, port+OFF_CR);
+ outb(inb(port+OFF_CR) | 1, port+OFF_CR);
#ifdef DEBUG_INTERRUPTS
printk(" CONTINUE ");
#endif
}
}
if(card->pci_id == PCI_DEVICE_ID_SI_7012)
- I810_IOWRITEW(status & DMA_INT_MASK, card, port + OFF_PICB);
+ outw(status & DMA_INT_MASK, port + OFF_PICB);
else
- I810_IOWRITEW(status & DMA_INT_MASK, card, port + OFF_SR);
+ outw(status & DMA_INT_MASK, port + OFF_SR);
}
#ifdef DEBUG_INTERRUPTS
printk(")\n");
spin_lock(&card->lock);
- status = I810_IOREADL(card, GLOB_STA);
+ status = inl(card->iobase + GLOB_STA);
if(!(status & INT_MASK))
{
i810_channel_interrupt(card);
/* clear 'em */
- I810_IOWRITEL(status & INT_MASK, card, GLOB_STA);
+ outl(status & INT_MASK, card->iobase + GLOB_STA);
spin_unlock(&card->lock);
return IRQ_HANDLED;
}
static ssize_t i810_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
{
struct i810_state *state = (struct i810_state *)file->private_data;
- struct i810_card *card=state ? state->card : NULL;
+ struct i810_card *card=state ? state->card : 0;
struct dmabuf *dmabuf = &state->dmabuf;
ssize_t ret;
unsigned long flags;
printk("i810_audio: i810_read called, count = %d\n", count);
#endif
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (dmabuf->mapped)
return -ENXIO;
if (dmabuf->enable & DAC_RUNNING)
static ssize_t i810_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
{
struct i810_state *state = (struct i810_state *)file->private_data;
- struct i810_card *card=state ? state->card : NULL;
+ struct i810_card *card=state ? state->card : 0;
struct dmabuf *dmabuf = &state->dmabuf;
ssize_t ret;
unsigned long flags;
printk("i810_audio: i810_write called, count = %d\n", count);
#endif
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (dmabuf->mapped)
return -ENXIO;
if (dmabuf->enable & ADC_RUNNING)
__stop_adc(state);
}
if (c != NULL) {
- I810_IOWRITEB(2, state->card, c->port+OFF_CR); /* reset DMA machine */
- while ( I810_IOREADB(state->card, c->port+OFF_CR) & 2 )
+ outb(2, state->card->iobase+c->port+OFF_CR); /* reset DMA machine */
+ while ( inb(state->card->iobase+c->port+OFF_CR) & 2 )
cpu_relax();
- I810_IOWRITEL((u32)state->card->chandma +
+ outl((u32)state->card->chandma +
c->num*sizeof(struct i810_channel),
- state->card, c->port+OFF_BDBAR);
- CIV_TO_LVI(state->card, c->port, 0);
+ state->card->iobase+c->port+OFF_BDBAR);
+ CIV_TO_LVI(state->card->iobase+c->port, 0);
}
spin_unlock_irqrestore(&state->card->lock, flags);
/* Global Status and Global Control register are now */
/* used to indicate this. */
- i_glob_cnt = I810_IOREADL(state->card, GLOB_CNT);
+ i_glob_cnt = inl(state->card->iobase + GLOB_CNT);
/* Current # of channels enabled */
if ( i_glob_cnt & 0x0100000 )
switch ( val ) {
case 2: /* 2 channels is always supported */
- I810_IOWRITEL(i_glob_cnt & 0xffcfffff,
- state->card, GLOB_CNT);
+ outl(i_glob_cnt & 0xffcfffff,
+ state->card->iobase + GLOB_CNT);
/* Do we need to change mixer settings???? */
break;
case 4: /* Supported on some chipsets, better check first */
if ( state->card->channels >= 4 ) {
- I810_IOWRITEL((i_glob_cnt & 0xffcfffff) | 0x100000,
- state->card, GLOB_CNT);
+ outl((i_glob_cnt & 0xffcfffff) | 0x100000,
+ state->card->iobase + GLOB_CNT);
/* Do we need to change mixer settings??? */
} else {
val = ret;
break;
case 6: /* Supported on some chipsets, better check first */
if ( state->card->channels >= 6 ) {
- I810_IOWRITEL((i_glob_cnt & 0xffcfffff) | 0x200000,
- state->card, GLOB_CNT);
+ outl((i_glob_cnt & 0xffcfffff) | 0x200000,
+ state->card->iobase + GLOB_CNT);
/* Do we need to change mixer settings??? */
} else {
val = ret;
} else {
i810_set_dac_rate(state, 8000);
/* Put the ACLink in 2 channel mode by default */
- i = I810_IOREADL(card, GLOB_CNT);
- I810_IOWRITEL(i & 0xffcfffff, card, GLOB_CNT);
+ i = inl(card->iobase + GLOB_CNT);
+ outl(i & 0xffcfffff, card->iobase + GLOB_CNT);
}
}
state->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE);
- return nonseekable_open(inode, file);
+ return 0;
}
static int i810_release(struct inode *inode, struct file *file)
int count = 100;
u16 reg_set = IO_REG_OFF(dev) | (reg&0x7f);
- while(count-- && (I810_IOREADB(card, CAS) & 1))
+ while(count-- && (inb(card->iobase + CAS) & 1))
udelay(1);
return inw(card->ac97base + reg_set);
int count = 100;
u16 reg_set = IO_REG_OFF(dev) | (reg&0x7f);
- while(count-- && (I810_IOREADB(card, CAS) & 1))
+ while(count-- && (inb(card->iobase + CAS) & 1))
udelay(1);
outw(data, card->ac97base + reg_set);
if (card->ac97_codec[i] != NULL &&
card->ac97_codec[i]->dev_mixer == minor) {
file->private_data = card->ac97_codec[i];
- return nonseekable_open(inode, file);
+ return 0;
}
}
return -ENODEV;
static inline int i810_ac97_exists(struct i810_card *card, int ac97_number)
{
- u32 reg = I810_IOREADL(card, GLOB_STA);
+ u32 reg = inl(card->iobase + GLOB_STA);
switch (ac97_number) {
case 0:
return reg & (1<<8);
static int i810_ac97_power_up_bus(struct i810_card *card)
{
- u32 reg = I810_IOREADL(card, GLOB_CNT);
+ u32 reg = inl(card->iobase + GLOB_CNT);
int i;
int primary_codec_id = 0;
reg&=~8; /* ACLink on */
/* At this point we deassert AC_RESET # */
- I810_IOWRITEL(reg , card, GLOB_CNT);
+ outl(reg , card->iobase + GLOB_CNT);
/* We must now allow time for the Codec initialisation.
600mS is the specified time */
for(i=0;i<10;i++)
{
- if((I810_IOREADL(card, GLOB_CNT)&4)==0)
+ if((inl(card->iobase+GLOB_CNT)&4)==0)
break;
set_current_state(TASK_UNINTERRUPTIBLE);
* See if the primary codec comes ready. This must happen
* before we start doing DMA stuff
*/
- /* see i810_ac97_init for the next 10 lines (jsaw) */
- if (card->use_mmio)
- readw(card->ac97base_mmio);
- else
- inw(card->ac97base);
+ /* see i810_ac97_init for the next 7 lines (jsaw) */
+ inw(card->ac97base);
if (ich_use_mmio(card)) {
primary_codec_id = (int) readl(card->iobase_mmio + SDM) & 0x3;
printk(KERN_INFO "i810_audio: Primary codec has ID %d\n",
else
printk("no response.\n");
}
- if (card->use_mmio)
- readw(card->ac97base_mmio);
- else
- inw(card->ac97base);
+ inw(card->ac97base);
return 1;
}
/* to check.... */
card->channels = 2;
- reg = I810_IOREADL(card, GLOB_STA);
+ reg = inl(card->iobase + GLOB_STA);
if ( reg & 0x0200000 )
card->channels = 6;
else if ( reg & 0x0100000 )
card->channels = 4;
printk(KERN_INFO "i810_audio: Audio Controller supports %d channels.\n", card->channels);
printk(KERN_INFO "i810_audio: Defaulting to base 2 channel mode.\n");
- reg = I810_IOREADL(card, GLOB_CNT);
- I810_IOWRITEL(reg & 0xffcfffff, card, GLOB_CNT);
+ reg = inl(card->iobase + GLOB_CNT);
+ outl(reg & 0xffcfffff, card->iobase + GLOB_CNT);
for (num_ac97 = 0; num_ac97 < NR_AC97; num_ac97++)
card->ac97_codec[num_ac97] = NULL;
for (num_ac97 = 0; num_ac97 < nr_ac97_max; num_ac97++) {
/* codec reset */
printk(KERN_INFO "i810_audio: Resetting connection %d\n", num_ac97);
- if (card->use_mmio)
- readw(card->ac97base_mmio + 0x80*num_ac97);
- else
- inw(card->ac97base + 0x80*num_ac97);
+ if (card->use_mmio) readw(card->ac97base_mmio + 0x80*num_ac97);
+ else inw(card->ac97base + 0x80*num_ac97);
/* If we have the SDATA_IN Map Register, as on ICH4, we
do not loop thru all possible codec IDs but thru all
goto config_out;
}
dmabuf->count = dmabuf->dmasize;
- CIV_TO_LVI(card, dmabuf->write_channel->port, -1);
+ CIV_TO_LVI(card->iobase+dmabuf->write_channel->port, -1);
local_irq_save(flags);
start_dac(state);
offset = i810_get_dma_addr(state, 0);
return -ENODEV;
}
+ if( pci_resource_start(pci_dev, 1) == 0)
+ {
+ /* MMIO only ICH5 .. here be dragons .. */
+ printk(KERN_ERR "i810_audio: Pure MMIO interfaces not yet supported.\n");
+ return -ENODEV;
+ }
+
if ((card = kmalloc(sizeof(struct i810_card), GFP_KERNEL)) == NULL) {
printk(KERN_ERR "i810_audio: out of memory\n");
return -ENOMEM;
card->ac97base = pci_resource_start (pci_dev, 0);
card->iobase = pci_resource_start (pci_dev, 1);
- if (!(card->ac97base) || !(card->iobase)) {
- card->ac97base = 0;
- card->iobase = 0;
- }
-
/* if chipset could have mmio capability, check it */
if (card_cap[pci_id->driver_data].flags & CAP_MMIO) {
card->ac97base_mmio_phys = pci_resource_start (pci_dev, 2);
}
}
- if (!(card->use_mmio) && (!(card->iobase) || !(card->ac97base))) {
- printk(KERN_ERR "i810_audio: No I/O resources available.\n");
- goto out_mem;
- }
-
card->irq = pci_dev->irq;
card->next = devs;
card->magic = I810_CARD_MAGIC;
break;
}
file->private_data = s;
- return nonseekable_open(inode, file);
+ return 0;
}
static int it8172_release_mixdev(struct inode *inode, struct file *file)
unsigned long flags;
int cnt, remainder, avail;
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (db->mapped)
return -ENXIO;
if (!access_ok(VERIFY_WRITE, buffer, count))
unsigned long flags;
int cnt, remainder, avail;
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (db->mapped)
return -ENXIO;
if (!access_ok(VERIFY_READ, buffer, count))
s->open_mode |= (file->f_mode & (FMODE_READ | FMODE_WRITE));
up(&s->open_sem);
- return nonseekable_open(inode, file);
+ return 0;
}
static int it8172_release(struct inode *inode, struct file *file)
if (!card)
return -ENODEV;
file->private_data = card;
- return nonseekable_open(inode, file);
+ return 0;
}
static int ess_release_mixdev(struct inode *inode, struct file *file)
unsigned char *combbuf = NULL;
VALIDATE_STATE(s);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (s->dma_adc.mapped)
return -ENXIO;
if (!s->dma_adc.ready && (ret = prog_dmabuf(s, 1)))
int cnt;
VALIDATE_STATE(s);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (s->dma_dac.mapped)
return -ENXIO;
if (!s->dma_dac.ready && (ret = prog_dmabuf(s, 0)))
s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE);
up(&s->open_sem);
- return nonseekable_open(inode, file);
+ return 0;
}
static int
int cnt;
VALIDATE_STATE(s);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (s->dma_adc.mapped)
return -ENXIO;
if (!s->dma_adc.ready && (ret = prog_dmabuf(s, 1)))
int cnt;
VALIDATE_STATE(s);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (s->dma_dac.mapped)
return -ENXIO;
if (!s->dma_dac.ready && (ret = prog_dmabuf(s, 0)))
up(&s->open_sem);
spin_unlock_irqrestore(&c->lock, flags);
- return nonseekable_open(inode, file);
+ return 0;
}
static int m3_release(struct inode *inode, struct file *file)
file->private_data = card->ac97;
- return nonseekable_open(inode, file);
+ return 0;
}
static int m3_release_mixdev(struct inode *inode, struct file *file)
char *pinfiji = "Pinnacle/Fiji";
#endif
- if (!request_region(dev.io, dev.numio, "probing")) {
+ if (check_region(dev.io, dev.numio)) {
printk(KERN_ERR LOGNAME ": I/O port conflict\n");
return -ENODEV;
}
+ request_region(dev.io, dev.numio, "probing");
if (reset_dsp() < 0) {
release_region(dev.io, dev.numio);
/* Joystick */
pinnacle_devs[3].io0 = joystick_io;
- if (!request_region(cfg, 2, "Pinnacle/Fiji Config")) {
+ if (check_region(cfg, 2)) {
printk(KERN_ERR LOGNAME ": Config port 0x%x conflict\n", cfg);
return -EIO;
}
+ request_region(cfg, 2, "Pinnacle/Fiji Config");
if (msnd_pinnacle_cfg_devices(cfg, reset, pinnacle_devs)) {
printk(KERN_ERR LOGNAME ": Device configuration error\n");
release_region(cfg, 2);
break;
}
file->private_data = s;
- return nonseekable_open(inode, file);
+ return 0;
}
static int vrc5477_ac97_release_mixdev(struct inode *inode, struct file *file)
int copyCount;
size_t avail;
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (!access_ok(VERIFY_WRITE, buffer, count))
return -EFAULT;
unsigned long flags;
int copyCount, avail;
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (!access_ok(VERIFY_READ, buffer, count))
return -EFAULT;
ret = 0;
struct list_head *list;
struct vrc5477_ac97_state *s;
int ret=0;
-
- nonseekable_open(inode, file);
+
for (list = devs.next; ; list = list->next) {
if (list == &devs)
return -ENODEV;
DBG(printk("device num %d open\n",devnum));
- nonseekable_open(in, f);
for (list = devs.next; ; list = list->next) {
if (list == &devs)
return -ENODEV;
if(dma == NULL || (dma->s) == NULL)
return -ENXIO;
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
if (dma->mmapped || !dma->opened)
return -ENXIO;
if(dma == NULL || (dma->s) == NULL)
return -ENXIO;
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
if (dma->mmapped || !dma->opened)
return -ENXIO;
COMM ("mixer open");
- nonseekable_open(inode, file);
for (list = devs.next; ; list = list->next) {
if (list == &devs)
return -ENODEV;
if (last_devc == NULL)
return 0;
- last_devc = NULL;
+ last_devc = 0;
if (hw_config->io_base <= 0)
{
}
VALIDATE_STATE(s);
file->private_data = s;
- return nonseekable_open(inode, file);
+ return 0;
}
static int sv_release_mixdev(struct inode *inode, struct file *file)
int cnt;
VALIDATE_STATE(s);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (s->dma_adc.mapped)
return -ENXIO;
if (!s->dma_adc.ready && (ret = prog_dmabuf(s, 1)))
int cnt;
VALIDATE_STATE(s);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (s->dma_dac.mapped)
return -ENXIO;
if (!s->dma_dac.ready && (ret = prog_dmabuf(s, 0)))
set_fmt(s, fmtm, fmts);
s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE);
up(&s->open_sem);
- return nonseekable_open(inode, file);
+ return 0;
}
static int sv_release(struct inode *inode, struct file *file)
int cnt;
VALIDATE_STATE(s);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (!access_ok(VERIFY_WRITE, buffer, count))
return -EFAULT;
if (count == 0)
int cnt;
VALIDATE_STATE(s);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (!access_ok(VERIFY_READ, buffer, count))
return -EFAULT;
if (count == 0)
spin_unlock_irqrestore(&s->lock, flags);
s->open_mode |= (file->f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ | FMODE_MIDI_WRITE);
up(&s->open_sem);
- return nonseekable_open(inode, file);
+ return 0;
}
static int sv_midi_release(struct inode *inode, struct file *file)
outb(1, s->iosynth+3); /* enable OPL3 */
s->open_mode |= FMODE_DMFM;
up(&s->open_sem);
- return nonseekable_open(inode, file);
+ return 0;
}
static int sv_dmfm_release(struct inode *inode, struct file *file)
}
+// ---------------------------------------------------------------------
+
+static loff_t cs4297a_llseek(struct file *file, loff_t offset, int origin)
+{
+ return -ESPIPE;
+}
+
+
// ---------------------------------------------------------------------
static int cs4297a_open_mixdev(struct inode *inode, struct file *file)
CS_DBGOUT(CS_FUNCTION | CS_OPEN, 4,
printk(KERN_INFO "cs4297a: cs4297a_open_mixdev()- 0\n"));
- return nonseekable_open(inode, file);
+ return 0;
}
// ******************************************************************************************
static /*const */ struct file_operations cs4297a_mixer_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
+ .llseek = cs4297a_llseek,
.ioctl = cs4297a_ioctl_mixdev,
.open = cs4297a_open_mixdev,
.release = cs4297a_release_mixdev,
printk(KERN_INFO "cs4297a: cs4297a_read()+ %d \n", count));
VALIDATE_STATE(s);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (s->dma_adc.mapped)
return -ENXIO;
if (!s->dma_adc.ready && (ret = prog_dmabuf_adc(s)))
count));
VALIDATE_STATE(s);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (s->dma_dac.mapped)
return -ENXIO;
if (!s->dma_dac.ready && (ret = prog_dmabuf_dac(s)))
}
CS_DBGOUT(CS_FUNCTION | CS_OPEN, 2,
printk(KERN_INFO "cs4297a: cs4297a_open()- 0\n"));
- return nonseekable_open(inode, file);
+ return 0;
}
// ******************************************************************************************
static /*const */ struct file_operations cs4297a_audio_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
+ .llseek = cs4297a_llseek,
.read = cs4297a_read,
.write = cs4297a_write,
.poll = cs4297a_poll,
pr_debug("trident: trident_read called, count = %d\n", count);
VALIDATE_STATE(state);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (dmabuf->mapped)
return -ENXIO;
pr_debug("trident: trident_write called, count = %d\n", count);
VALIDATE_STATE(state);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
/*
* Guard against an mmap or ioctl while writing
pr_debug("trident: open virtual channel %d, hard channel %d\n",
state->virt, dmabuf->channel->num);
- return nonseekable_open(inode, file);
+ return 0;
}
static int
match:
file->private_data = card->ac97_codec[i];
- return nonseekable_open(inode, file);
+ return 0;
}
static int
* NO WARRANTY
*
* For a list of known bugs (errata) and documentation,
- * see via-audio.pdf in Documentation/DocBook.
+ * see via-audio.pdf in linux/Documentation/DocBook.
* If this documentation does not exist, run "make pdfdocs".
*/
file->private_data = card->ac97;
DPRINTK ("EXIT, returning 0\n");
- return nonseekable_open(inode, file);
+ return 0;
}
static int via_mixer_ioctl (struct inode *inode, struct file *file, unsigned int cmd,
card = file->private_data;
assert (card != NULL);
+ if (ppos != &file->f_pos) {
+ DPRINTK ("EXIT, returning -ESPIPE\n");
+ return -ESPIPE;
+ }
+
rc = via_syscall_down (card, nonblock);
if (rc) goto out;
card = file->private_data;
assert (card != NULL);
+ if (ppos != &file->f_pos) {
+ DPRINTK ("EXIT, returning -ESPIPE\n");
+ return -ESPIPE;
+ }
+
rc = via_syscall_down (card, nonblock);
if (rc) goto out;
}
DPRINTK ("EXIT, returning 0\n");
- return nonseekable_open(inode, file);
+ return 0;
}
/*
* Sound driver for Silicon Graphics 320 and 540 Visual Workstations'
- * onboard audio. See notes in Documentation/sound/oss/vwsnd .
+ * onboard audio. See notes in ../../Documentation/sound/oss/vwsnd .
*
* Copyright 1999 Silicon Graphics, Inc. All rights reserved.
*
{ 0x0E, "Bad MIDI channel number" },
{ 0x10, "Download Record Error" },
{ 0x80, "Success" },
- { 0 }
+ { 0x0, 0x0 }
};
#define NEEDS_ACK 1
if (cmd == WFC_DOWNLOAD_MULTISAMPLE) {
wfcmd->write_cnt = (unsigned int) rbuf;
- rbuf = NULL;
+ rbuf = 0;
}
DPRINT (WF_DEBUG_CMD, "0x%x [%s] (%d,%d,%d)\n",
wbuf[0] = sample_num & 0x7f;
wbuf[1] = sample_num >> 7;
- if ((x = wavefront_cmd (WFC_DELETE_SAMPLE, NULL, wbuf)) == 0) {
+ if ((x = wavefront_cmd (WFC_DELETE_SAMPLE, 0, wbuf)) == 0) {
dev.sample_status[sample_num] = WF_ST_EMPTY;
}
bptr = munge_int32 (header->number, buf, 2);
munge_buf ((unsigned char *)&header->hdr.p, bptr, WF_PATCH_BYTES);
- if (wavefront_cmd (WFC_DOWNLOAD_PATCH, NULL, buf)) {
+ if (wavefront_cmd (WFC_DOWNLOAD_PATCH, 0, buf)) {
printk (KERN_ERR LOGNAME "download patch failed\n");
return -(EIO);
}
buf[0] = header->number;
munge_buf ((unsigned char *)&header->hdr.pr, &buf[1], WF_PROGRAM_BYTES);
- if (wavefront_cmd (WFC_DOWNLOAD_PROGRAM, NULL, buf)) {
+ if (wavefront_cmd (WFC_DOWNLOAD_PROGRAM, 0, buf)) {
printk (KERN_WARNING LOGNAME "download patch failed\n");
return -(EIO);
}
{
char rbuf[8];
- if (wavefront_cmd (WFC_REPORT_FREE_MEMORY, rbuf, NULL)) {
+ if (wavefront_cmd (WFC_REPORT_FREE_MEMORY, rbuf, 0)) {
printk (KERN_WARNING LOGNAME "can't get memory stats.\n");
return -1;
} else {
UINT16 sample_short;
UINT32 length;
- UINT16 __user *data_end = NULL;
+ UINT16 __user *data_end = 0;
unsigned int i;
const int max_blksize = 4096/2;
unsigned int written;
if (wavefront_cmd (header->size ?
WFC_DOWNLOAD_SAMPLE : WFC_DOWNLOAD_SAMPLE_HEADER,
- NULL, sample_hdr)) {
+ 0, sample_hdr)) {
printk (KERN_WARNING LOGNAME "sample %sdownload refused.\n",
header->size ? "" : "header ");
return -(EIO);
blocksize = ((length-written+7)&~0x7);
}
- if (wavefront_cmd (WFC_DOWNLOAD_BLOCK, NULL, NULL)) {
+ if (wavefront_cmd (WFC_DOWNLOAD_BLOCK, 0, 0)) {
printk (KERN_WARNING LOGNAME "download block "
"request refused.\n");
return -(EIO);
munge_int32 (header->hdr.a.FrequencyBias, &alias_hdr[20], 3);
munge_int32 (*(&header->hdr.a.FrequencyBias+1), &alias_hdr[23], 2);
- if (wavefront_cmd (WFC_DOWNLOAD_SAMPLE_ALIAS, NULL, alias_hdr)) {
+ if (wavefront_cmd (WFC_DOWNLOAD_SAMPLE_ALIAS, 0, alias_hdr)) {
printk (KERN_ERR LOGNAME "download alias failed.\n");
return -(EIO);
}
munge_int32 (((unsigned char *)drum)[i], &drumbuf[1+(i*2)], 2);
}
- if (wavefront_cmd (WFC_DOWNLOAD_EDRUM_PROGRAM, NULL, drumbuf)) {
+ if (wavefront_cmd (WFC_DOWNLOAD_EDRUM_PROGRAM, 0, drumbuf)) {
printk (KERN_ERR LOGNAME "download drum failed.\n");
return -(EIO);
}
voices[0] = 32;
- if (wavefront_cmd (WFC_SET_NVOICES, NULL, voices)) {
+ if (wavefront_cmd (WFC_SET_NVOICES, 0, voices)) {
printk (KERN_WARNING LOGNAME
"cannot set number of voices to 32.\n");
goto gone_bad;
unsigned int swptr;
int cnt; /* This many to go in this revolution */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (dmabuf->mapped)
return -ENXIO;
if (!dmabuf->ready && (ret = prog_dmabuf(state, 1)))
YMFDBGW("ymf_write: count %d\n", count);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (dmabuf->mapped)
return -ENXIO;
if (!dmabuf->ready && (ret = prog_dmabuf(state, 0)))
#endif
up(&unit->open_sem);
- return nonseekable_open(inode, file);
+ return 0;
out_nodma:
/*
match:
file->private_data = unit->ac97_codec[i];
- return nonseekable_open(inode, file);
+ return 0;
}
static int ymf_ioctl_mixdev(struct inode *inode, struct file *file,
codec->dma_area_ba = pba;
codec->dma_area_size = size + 0xff;
- off = (unsigned long)ptr & 0xff;
- if (off) {
+ if ((off = ((uint) ptr) & 0xff) != 0) {
ptr += 0x100 - off;
pba += 0x100 - off;
}
#endif
/* Global resources */
- s8 mixcapt[2];
- s8 mixplayb[4];
+ char mixcapt[2];
+ char mixplayb[4];
#ifndef CHIP_AU8820
- s8 mixspdif[2];
- s8 mixa3d[2]; /* mixers which collect all a3d streams. */
- s8 mixxtlk[2]; /* crosstalk canceler mixer inputs. */
+ char mixspdif[2];
+ char mixa3d[2]; /* mixers which collect all a3d streams. */
+ char mixxtlk[2]; /* crosstalk canceler mixer inputs. */
#endif
u32 fixed_res[5];
static int
snd_vortex_a3d_get(snd_kcontrol_t * kcontrol, snd_ctl_elem_value_t * ucontrol)
{
- //a3dsrc_t *a = kcontrol->private_data;
+ //a3dsrc_t *a = (a3dsrc_t*)(kcontrol->private_value);
/* No read yet. Would this be really useable/needed ? */
return 0;
snd_vortex_a3d_hrtf_put(snd_kcontrol_t *
kcontrol, snd_ctl_elem_value_t * ucontrol)
{
- a3dsrc_t *a = kcontrol->private_data;
+ a3dsrc_t *a = (a3dsrc_t *) (kcontrol->private_value);
int changed = 1, i;
int coord[6];
for (i = 0; i < 6; i++)
snd_vortex_a3d_itd_put(snd_kcontrol_t *
kcontrol, snd_ctl_elem_value_t * ucontrol)
{
- a3dsrc_t *a = kcontrol->private_data;
+ a3dsrc_t *a = (a3dsrc_t *) (kcontrol->private_value);
int coord[6];
int i, changed = 1;
for (i = 0; i < 6; i++)
snd_vortex_a3d_ild_put(snd_kcontrol_t *
kcontrol, snd_ctl_elem_value_t * ucontrol)
{
- a3dsrc_t *a = kcontrol->private_data;
+ a3dsrc_t *a = (a3dsrc_t *) (kcontrol->private_value);
int changed = 1;
int l, r;
/* There may be some scale tranlation needed here. */
snd_vortex_a3d_filter_put(snd_kcontrol_t
* kcontrol, snd_ctl_elem_value_t * ucontrol)
{
- a3dsrc_t *a = kcontrol->private_data;
+ a3dsrc_t *a = (a3dsrc_t *) (kcontrol->private_value);
int i, changed = 1;
int params[6];
for (i = 0; i < 6; i++)
}
static snd_kcontrol_new_t vortex_a3d_kcontrol __devinitdata = {
- .iface = SNDRV_CTL_ELEM_IFACE_PCM,
- .name = "Playback PCM advanced processing",
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
- .info = snd_vortex_a3d_hrtf_info,
- .get = snd_vortex_a3d_get,
- .put = snd_vortex_a3d_hrtf_put,
+ .iface = SNDRV_CTL_ELEM_IFACE_PCM,.name =
+ "Playback PCM advanced processing",.index =
+ 0,.access =
+ SNDRV_CTL_ELEM_ACCESS_READWRITE,.private_value =
+ 0,.info = snd_vortex_a3d_hrtf_info,.get =
+ snd_vortex_a3d_get,.put = snd_vortex_a3d_hrtf_put
};
/* Control (un)registration. */
if ((kcontrol =
snd_ctl_new1(&vortex_a3d_kcontrol, vortex)) == NULL)
return -ENOMEM;
- kcontrol->private_data = &vortex->a3d[i];
+ kcontrol->private_value = (int)&(vortex->a3d[i]);
kcontrol->id.numid = CTRLID_HRTF;
kcontrol->info = snd_vortex_a3d_hrtf_info;
kcontrol->put = snd_vortex_a3d_hrtf_put;
if ((kcontrol =
snd_ctl_new1(&vortex_a3d_kcontrol, vortex)) == NULL)
return -ENOMEM;
- kcontrol->private_data = &vortex->a3d[i];
+ kcontrol->private_value = (int)&(vortex->a3d[i]);
kcontrol->id.numid = CTRLID_ITD;
kcontrol->info = snd_vortex_a3d_itd_info;
kcontrol->put = snd_vortex_a3d_itd_put;
if ((kcontrol =
snd_ctl_new1(&vortex_a3d_kcontrol, vortex)) == NULL)
return -ENOMEM;
- kcontrol->private_data = &vortex->a3d[i];
+ kcontrol->private_value = (int)&(vortex->a3d[i]);
kcontrol->id.numid = CTRLID_GAINS;
kcontrol->info = snd_vortex_a3d_ild_info;
kcontrol->put = snd_vortex_a3d_ild_put;
if ((kcontrol =
snd_ctl_new1(&vortex_a3d_kcontrol, vortex)) == NULL)
return -ENOMEM;
- kcontrol->private_data = &vortex->a3d[i];
+ kcontrol->private_value = (int)&(vortex->a3d[i]);
kcontrol->id.numid = CTRLID_FILTER;
kcontrol->info = snd_vortex_a3d_filter_info;
kcontrol->put = snd_vortex_a3d_filter_put;
snd_azf3328_setdmaa(chip, runtime->dma_addr, snd_pcm_lib_period_bytes(substream), snd_pcm_lib_buffer_bytes(substream), 0);
spin_lock_irqsave(&chip->reg_lock, flags);
-#ifdef WIN9X
+#if WIN9X
/* FIXME: enable playback/recording??? */
status1 |= DMA_PLAY_SOMETHING1 | DMA_PLAY_SOMETHING2;
outw(status1, chip->codec_port+IDX_IO_PLAY_FLAGS);
snd_azf3328_setdmaa(chip, runtime->dma_addr, snd_pcm_lib_period_bytes(substream), snd_pcm_lib_buffer_bytes(substream), 1);
spin_lock_irqsave(&chip->reg_lock, flags);
-#ifdef WIN9X
+#if WIN9X
/* FIXME: enable playback/recording??? */
status1 |= DMA_PLAY_SOMETHING1 | DMA_PLAY_SOMETHING2;
outw(status1, chip->codec_port+IDX_IO_REC_FLAGS);
unsigned long flags;
spin_lock_irqsave(&chip->reg_lock, flags);
-#ifdef QUERY_HARDWARE
+#if QUERY_HARDWARE
bufptr = inl(chip->codec_port+IDX_IO_PLAY_DMA_START_1);
#else
bufptr = substream->runtime->dma_addr;
unsigned long flags;
spin_lock_irqsave(&chip->reg_lock, flags);
-#ifdef QUERY_HARDWARE
+#if QUERY_HARDWARE
bufptr = inl(chip->codec_port+IDX_IO_REC_DMA_START_1);
#else
bufptr = substream->runtime->dma_addr;
}
static long snd_cs4281_BA0_read(snd_info_entry_t *entry, void *file_private_data,
- struct file *file, char __user *buf,
- unsigned long count, unsigned long pos)
+ struct file *file, char __user *buf, long count)
{
long size;
cs4281_t *chip = snd_magic_cast(cs4281_t, entry->private_data, return -ENXIO);
size = count;
- if (pos + size > CS4281_BA0_SIZE)
- size = (long)CS4281_BA0_SIZE - pos;
+ if (file->f_pos + size > CS4281_BA0_SIZE)
+ size = (long)CS4281_BA0_SIZE - file->f_pos;
if (size > 0) {
- if (copy_to_user_fromio(buf, chip->ba0 + pos, size))
+ if (copy_to_user_fromio(buf, chip->ba0 + file->f_pos, size))
return -EFAULT;
+ file->f_pos += size;
}
return size;
}
static long snd_cs4281_BA1_read(snd_info_entry_t *entry, void *file_private_data,
- struct file *file, char __user *buf,
- unsigned long count, unsigned long pos)
+ struct file *file, char __user *buf, long count)
{
long size;
cs4281_t *chip = snd_magic_cast(cs4281_t, entry->private_data, return -ENXIO);
size = count;
- if (pos + size > CS4281_BA1_SIZE)
- size = (long)CS4281_BA1_SIZE - pos;
+ if (file->f_pos + size > CS4281_BA1_SIZE)
+ size = (long)CS4281_BA1_SIZE - file->f_pos;
if (size > 0) {
- if (copy_to_user_fromio(buf, chip->ba1 + pos, size))
+ if (copy_to_user_fromio(buf, chip->ba1 + file->f_pos, size))
return -EFAULT;
+ file->f_pos += size;
}
return size;
}
*/
static long snd_cs46xx_io_read(snd_info_entry_t *entry, void *file_private_data,
- struct file *file, char __user *buf,
- unsigned long count, unsigned long pos)
+ struct file *file, char __user *buf, long count)
{
long size;
snd_cs46xx_region_t *region = (snd_cs46xx_region_t *)entry->private_data;
size = count;
- if (pos + (size_t)size > region->size)
- size = region->size - pos;
+ if (file->f_pos + (size_t)size > region->size)
+ size = region->size - file->f_pos;
if (size > 0) {
- if (copy_to_user_fromio(buf, region->remap_addr + pos, size))
+ if (copy_to_user_fromio(buf, region->remap_addr + file->f_pos, size))
return -EFAULT;
+ file->f_pos += size;
}
return size;
}
#define TOTAL_SIZE_CODE (0x200*8)
static long snd_emu10k1_fx8010_read(snd_info_entry_t *entry, void *file_private_data,
- struct file *file, char __user *buf,
- unsigned long count, unsigned long pos)
+ struct file *file, char __user *buf, long count)
{
long size;
emu10k1_t *emu = snd_magic_cast(emu10k1_t, entry->private_data, return -ENXIO);
offset = emu->audigy ? A_FXGPREGBASE : FXGPREGBASE;
}
size = count;
- if (pos + size > entry->size)
- size = (long)entry->size - pos;
+ if (file->f_pos + size > entry->size)
+ size = (long)entry->size - file->f_pos;
if (size > 0) {
unsigned int *tmp;
long res;
unsigned int idx;
if ((tmp = kmalloc(size + 8, GFP_KERNEL)) == NULL)
return -ENOMEM;
- for (idx = 0; idx < ((pos & 3) + size + 3) >> 2; idx++)
- tmp[idx] = snd_emu10k1_ptr_read(emu, offset + idx + (pos >> 2), 0);
- if (copy_to_user(buf, ((char *)tmp) + (pos & 3), size))
+ for (idx = 0; idx < ((file->f_pos & 3) + size + 3) >> 2; idx++)
+ tmp[idx] = snd_emu10k1_ptr_read(emu, offset + idx + (file->f_pos >> 2), 0);
+ if (copy_to_user(buf, ((char *)tmp) + (file->f_pos & 3), size))
res = -EFAULT;
else {
res = size;
+ file->f_pos += size;
}
kfree(tmp);
return res;
if ((val & 0xff00) < 0x1f00)
val += 0x0100;
}
- if (val == 0x1f1f)
- val |= 0x8000;
snd_ac97_write_cache(chip->ac97, AC97_MASTER, val);
snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_VALUE,
&chip->master_volume->id);
snd_ice1712_hoontech_cards,
snd_ice1712_delta_cards,
snd_ice1712_ews_cards,
- NULL,
+ 0,
};
static unsigned char __devinit snd_ice1712_read_i2c(ice1712_t *ice,
return -EBUSY; /* FIXME: should handle blocking mode properly */
}
up(&ice->open_mutex);
- runtime->private_data = (void*)(1UL << (substream->number + 4));
+ runtime->private_data = (void*)(1 << (substream->number + 4));
ice->playback_con_substream_ds[substream->number] = substream;
runtime->hw = snd_vt1724_2ch_stereo;
snd_pcm_set_sync(substream);
snd_vt1724_revo_cards,
snd_vt1724_amp_cards,
snd_vt1724_aureon_cards,
- NULL,
+ 0,
};
{ 0x5455, "ALi M5455" },
{ 0x746d, "AMD AMD8111" },
#endif
- { 0 },
+ { 0, 0 },
};
static int __devinit snd_intel8x0m_probe(struct pci_dev *pci,
.amp_gpio = 0x03,
},
/* END */
- { NULL }
+ { 0 }
};
mixart_BA0 proc interface for BAR 0 - read callback
*/
static long snd_mixart_BA0_read(snd_info_entry_t *entry, void *file_private_data,
- struct file *file, char __user *buf,
- unsigned long count, unsigned long pos)
+ struct file *file, char __user *buf, long count)
{
mixart_mgr_t *mgr = snd_magic_cast(mixart_mgr_t, entry->private_data, return -ENXIO);
count = count & ~3; /* make sure the read size is a multiple of 4 bytes */
if(count <= 0)
return 0;
- if(pos + count > MIXART_BA0_SIZE)
- count = (long)(MIXART_BA0_SIZE - pos);
- if(copy_to_user_fromio(buf, MIXART_MEM( mgr, pos ), count))
+ if(file->f_pos + count > MIXART_BA0_SIZE)
+ count = (long)(MIXART_BA0_SIZE - file->f_pos);
+ if(copy_to_user_fromio(buf, MIXART_MEM( mgr, file->f_pos ), count))
return -EFAULT;
+ file->f_pos += count;
return count;
}
mixart_BA1 proc interface for BAR 1 - read callback
*/
static long snd_mixart_BA1_read(snd_info_entry_t *entry, void *file_private_data,
- struct file *file, char __user *buf,
- unsigned long count, unsigned long pos)
+ struct file *file, char __user *buf, long count)
{
mixart_mgr_t *mgr = snd_magic_cast(mixart_mgr_t, entry->private_data, return -ENXIO);
count = count & ~3; /* make sure the read size is a multiple of 4 bytes */
if(count <= 0)
return 0;
- if(pos + count > MIXART_BA1_SIZE)
- count = (long)(MIXART_BA1_SIZE - pos);
- if(copy_to_user_fromio(buf, MIXART_REG( mgr, pos ), count))
+ if(file->f_pos + count > MIXART_BA1_SIZE)
+ count = (long)(MIXART_BA1_SIZE - file->f_pos);
+ if(copy_to_user_fromio(buf, MIXART_REG( mgr, file->f_pos ), count))
return -EFAULT;
+ file->f_pos += count;
return count;
}
snd_nm256_capture_copy(snd_pcm_substream_t *substream,
int channel, /* not used (interleaved data) */
snd_pcm_uframes_t pos,
- void __user *dst,
+ void *dst,
snd_pcm_uframes_t count)
{
snd_pcm_runtime_t *runtime = substream->runtime;
hdsp->irq = -1;
hdsp->state = 0;
- hdsp->midi[0].rmidi = NULL;
- hdsp->midi[1].rmidi = NULL;
- hdsp->midi[0].input = NULL;
- hdsp->midi[1].input = NULL;
- hdsp->midi[0].output = NULL;
- hdsp->midi[1].output = NULL;
+ hdsp->midi[0].rmidi = 0;
+ hdsp->midi[1].rmidi = 0;
+ hdsp->midi[0].input = 0;
+ hdsp->midi[1].input = 0;
+ hdsp->midi[0].output = 0;
+ hdsp->midi[1].output = 0;
spin_lock_init(&hdsp->midi[0].lock);
spin_lock_init(&hdsp->midi[1].lock);
hdsp->iobase = 0;
- hdsp->res_port = NULL;
+ hdsp->res_port = 0;
hdsp->control_register = 0;
hdsp->control2_register = 0;
hdsp->io_type = Undefined;
sonic->mode |= SV_MODE_PLAY;
sonic->playback_substream = substream;
runtime->hw = snd_sonicvibes_playback;
- snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, snd_sonicvibes_hw_constraint_dac_rate, NULL, SNDRV_PCM_HW_PARAM_RATE, -1);
+ snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, snd_sonicvibes_hw_constraint_dac_rate, 0, SNDRV_PCM_HW_PARAM_RATE, -1);
return 0;
}
menu "ALSA PowerMac devices"
depends on SND!=n && PPC
-comment "ALSA PowerMac requires I2C"
- depends on SND && I2C=n
-
config SND_POWERMAC
tristate "PowerMac (AWACS, DACA, Burgundy, Tumbler, Keywest)"
- depends on SND && I2C
+ depends on SND
select SND_PCM
endmenu
sound = sound->next;
if (! sound)
return -ENODEV;
- prop = (unsigned int *) get_property(sound, "sub-frame", NULL);
+ prop = (unsigned int *) get_property(sound, "sub-frame", 0);
if (prop && *prop < 16)
chip->subframe = *prop;
/* This should be verified on older screamers */
// chip->can_byte_swap = 0; /* FIXME: check this */
chip->control_mask = MASK_IEPC | 0x11; /* disable IEE */
}
- prop = (unsigned int *)get_property(sound, "device-id", NULL);
+ prop = (unsigned int *)get_property(sound, "device-id", 0);
if (prop)
chip->device_id = *prop;
chip->has_iic = (find_devices("perch") != NULL);
{
if (u->urb) {
usb_free_urb(u->urb);
- u->urb = NULL;
+ u->urb = 0;
}
if (u->buf) {
kfree(u->buf);
- u->buf = NULL;
+ u->buf = 0;
}
}
release_urb_ctx(&subs->syncurb[i]);
if (subs->tmpbuf) {
kfree(subs->tmpbuf);
- subs->tmpbuf = NULL;
+ subs->tmpbuf = 0;
}
subs->nurbs = 0;
}
{
if (kctl->private_data) {
snd_magic_kfree((void *)kctl->private_data);
- kctl->private_data = NULL;
+ kctl->private_data = 0;
}
}
usb_mixer_elem_info_t *cval = snd_magic_cast(usb_mixer_elem_info_t, kctl->private_data,);
num_ins = cval->max;
snd_magic_kfree(cval);
- kctl->private_data = NULL;
+ kctl->private_data = 0;
}
if (kctl->private_value) {
char **itemlist = (char **)kctl->private_value;